From 7289ad0879627083b2b3e8096298157a403a562e Mon Sep 17 00:00:00 2001
From: SamulKyull
Date: Tue, 2 Jul 2024 18:38:31 +0800
Subject: [PATCH] pcie bump to v11

---
 bsp/drivers/pcie/Makefile          |   1 +
 bsp/drivers/pcie/pcie-sunxi-dma.c  | 184 ++++++++++++++++-------
 bsp/drivers/pcie/pcie-sunxi-dma.h  |  24 ++-
 bsp/drivers/pcie/pcie-sunxi-ep.c   |  31 ++--
 bsp/drivers/pcie/pcie-sunxi-plat.c | 234 ++++++++++++++++++-----------
 bsp/drivers/pcie/pcie-sunxi-rc.c   |  33 ++--
 bsp/drivers/pcie/pcie-sunxi.h      |  13 +-
 7 files changed, 336 insertions(+), 184 deletions(-)

diff --git a/bsp/drivers/pcie/Makefile b/bsp/drivers/pcie/Makefile
index 41732b118a..b798aec9af 100644
--- a/bsp/drivers/pcie/Makefile
+++ b/bsp/drivers/pcie/Makefile
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
+ccflags-y += -DDYNAMIC_DEBUG_MODULE
 
 ccflags-y += -I $(srctree)/drivers/pci/
 
diff --git a/bsp/drivers/pcie/pcie-sunxi-dma.c b/bsp/drivers/pcie/pcie-sunxi-dma.c
index d2a35ebd84..079c5df338 100644
--- a/bsp/drivers/pcie/pcie-sunxi-dma.c
+++ b/bsp/drivers/pcie/pcie-sunxi-dma.c
@@ -8,6 +8,8 @@
  *
  */
 
+#define SUNXI_MODNAME "pcie-edma"
+#include
 #include
 #include
 #include
@@ -31,107 +33,172 @@
 #include
 #include "pcie-sunxi-dma.h"
 
-static u32 SUNXI_DMA_DEFAULT_CNT = 0x04; /* Default value for DMA wr/rd channel */
-
-static u32 SUNXI_DMA_WR_CHN_CNT;
-static u32 SUNXI_DMA_RD_CHN_CNT;
-
-static dma_channel_t *dma_wr_chn;
-static dma_channel_t *dma_rd_chn;
 static struct dma_trx_obj *obj_global;
 
-dma_hdl_t sunxi_pcie_dma_chan_request(enum dma_dir dma_trx)
+sunxi_pci_edma_chan_t *sunxi_pcie_dma_chan_request(enum dma_dir dma_trx, void *cb, void *data)
 {
-	int i = 0;
-	dma_channel_t *pchan = NULL;
+	struct sunxi_pcie *pci = dev_get_drvdata(obj_global->dev);
+	sunxi_pci_edma_chan_t *edma_chan = NULL;
+	u32 free_chan;
 
 	if (dma_trx == PCIE_DMA_WRITE) {
-		for (i = 0; i < SUNXI_DMA_WR_CHN_CNT; i++) {
-			pchan = &dma_wr_chn[i];
-
-			if (!pchan->dma_used) {
-				pchan->dma_used = 1;
-				pchan->chnl_num = i;
-				spin_lock_init(&pchan->lock);
-				return (dma_hdl_t)pchan;
-			}
+		free_chan = find_first_zero_bit(pci->wr_edma_map, pci->num_edma);
+
+		if (free_chan >= pci->num_edma) {
+			sunxi_err(pci->dev, "No free pcie edma write channel.\n");
+			return NULL;
 		}
+
+		set_bit(free_chan, pci->wr_edma_map);
+
+		edma_chan = &pci->dma_wr_chn[free_chan];
+
+		edma_chan->dma_trx = PCIE_DMA_WRITE;
+		edma_chan->chnl_num = free_chan;
+		edma_chan->callback = cb;
+		edma_chan->callback_param = data;
+
+		return edma_chan;
 	} else if (dma_trx == PCIE_DMA_READ) {
-		for (i = 0; i < SUNXI_DMA_RD_CHN_CNT; i++) {
-			pchan = &dma_rd_chn[i];
-
-			if (pchan->dma_used == 0) {
-				pchan->dma_used = 1;
-				pchan->chnl_num = i;
-				spin_lock_init(&pchan->lock);
-				return (dma_hdl_t)pchan;
-			}
+		free_chan = find_first_zero_bit(pci->rd_edma_map, pci->num_edma);
+
+		if (free_chan >= pci->num_edma) {
+			sunxi_err(pci->dev, "No free pcie edma read channel.\n");
+			return NULL;
 		}
+
+		set_bit(free_chan, pci->rd_edma_map);
+
+		edma_chan = &pci->dma_rd_chn[free_chan];
+
+		edma_chan->dma_trx = PCIE_DMA_READ;
+		edma_chan->chnl_num = free_chan;
+		edma_chan->callback = cb;
+		edma_chan->callback_param = data;
+
+		return edma_chan;
 	} else {
-		pr_err("ERR: unsupported type:%d \n", dma_trx);
+		sunxi_err(pci->dev, "ERR: unsupported type:%d\n", dma_trx);
 	}
 
-	return (dma_hdl_t)NULL;
+	return NULL;
 }
 EXPORT_SYMBOL_GPL(sunxi_pcie_dma_chan_request);
 
-int sunxi_pcie_dma_chan_release(u32 channel, enum dma_dir dma_trx)
+int sunxi_pcie_dma_chan_release(struct sunxi_pci_edma_chan *edma_chan, enum dma_dir dma_trx)
 {
-	if ((channel > SUNXI_DMA_WR_CHN_CNT) || (channel > SUNXI_DMA_RD_CHN_CNT)) {
-		pr_err("ERR: the channel num:%d is error\n", channel);
+	struct sunxi_pcie *pci = dev_get_drvdata(obj_global->dev);
+
+	if (edma_chan->chnl_num >= pci->num_edma) {
+		sunxi_err(pci->dev, "ERR: invalid channel num:%d\n", edma_chan->chnl_num);
 		return -1;
 	}
 
 	if (PCIE_DMA_WRITE == dma_trx) {
-		dma_wr_chn[channel].dma_used = 0;
-		dma_wr_chn[channel].chnl_num = 0;
+		edma_chan->callback = NULL;
+		edma_chan->callback_param = NULL;
+		clear_bit(edma_chan->chnl_num, pci->wr_edma_map);
 	} else if (PCIE_DMA_READ == dma_trx) {
-		dma_rd_chn[channel].dma_used = 0;
-		dma_rd_chn[channel].chnl_num = 0;
+		edma_chan->callback = NULL;
+		edma_chan->callback_param = NULL;
+		clear_bit(edma_chan->chnl_num, pci->rd_edma_map);
+	} else {
+		sunxi_err(pci->dev, "ERR: unsupported type:%d\n", dma_trx);
 	}
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(sunxi_pcie_dma_chan_release);
 
+static int sunxi_pcie_init_edma_map(struct sunxi_pcie *pci)
+{
+	pci->rd_edma_map = devm_bitmap_zalloc(pci->dev, pci->num_edma, GFP_KERNEL);
+	if (!pci->rd_edma_map)
+		return -ENOMEM;
+
+	pci->wr_edma_map = devm_bitmap_zalloc(pci->dev, pci->num_edma, GFP_KERNEL);
+	if (!pci->wr_edma_map)
+		return -ENOMEM;
+
+	return 0;
+}
+
 int sunxi_pcie_dma_get_chan(struct platform_device *pdev)
 {
-	int ret, num;
+	struct sunxi_pcie *pci = platform_get_drvdata(pdev);
+	sunxi_pci_edma_chan_t *edma_chan = NULL;
+	int ret, i;
 
-	ret = of_property_read_u32(pdev->dev.of_node, "num-edma", &num);
+	ret = of_property_read_u32(pdev->dev.of_node, "num-edma", &pci->num_edma);
 	if (ret) {
-		dev_info(&pdev->dev, "PCIe get num-edma failed, use default num=%d\n",
-			 SUNXI_DMA_DEFAULT_CNT);
-		num = SUNXI_DMA_DEFAULT_CNT;
+		sunxi_err(&pdev->dev, "Failed to parse the number of edma\n");
+		return -EINVAL;
+	} else {
+		ret = sunxi_pcie_init_edma_map(pci);
+		if (ret)
+			return -EINVAL;
 	}
 
-	SUNXI_DMA_WR_CHN_CNT = SUNXI_DMA_RD_CHN_CNT = num; /* set the eDMA wr/rd channel num */
-	dma_wr_chn = devm_kcalloc(&pdev->dev, SUNXI_DMA_WR_CHN_CNT, sizeof(*dma_wr_chn), GFP_KERNEL);
-	dma_rd_chn = devm_kcalloc(&pdev->dev, SUNXI_DMA_RD_CHN_CNT, sizeof(*dma_rd_chn), GFP_KERNEL);
-	if (!dma_wr_chn || !dma_rd_chn) {
-		dev_err(&pdev->dev, "PCIe edma kzalloc failed\n");
+	pci->dma_wr_chn = devm_kcalloc(&pdev->dev, pci->num_edma, sizeof(sunxi_pci_edma_chan_t), GFP_KERNEL);
+	pci->dma_rd_chn = devm_kcalloc(&pdev->dev, pci->num_edma, sizeof(sunxi_pci_edma_chan_t), GFP_KERNEL);
+	if (!pci->dma_wr_chn || !pci->dma_rd_chn) {
+		sunxi_err(&pdev->dev, "PCIe edma kzalloc failed\n");
 		return -EINVAL;
 	}
 
+	for (i = 0; i < pci->num_edma; i++) {
+		edma_chan = &pci->dma_wr_chn[i];
+		spin_lock_init(&edma_chan->lock);
+	}
+
+	for (i = 0; i < pci->num_edma; i++) {
+		edma_chan = &pci->dma_rd_chn[i];
+		spin_lock_init(&edma_chan->lock);
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(sunxi_pcie_dma_get_chan);
 
-int sunxi_pcie_dma_mem_read(phys_addr_t sar_addr, phys_addr_t dar_addr, unsigned int size)
+int sunxi_pcie_edma_config_start(struct sunxi_pci_edma_chan *edma_chan)
+{
+	struct dma_table edma_table = {0};
+	int ret;
+
+	if (likely(obj_global->config_dma_trx_func)) {
+		ret = obj_global->config_dma_trx_func(&edma_table, edma_chan->src_addr, edma_chan->dst_addr,
+						      edma_chan->size, edma_chan->dma_trx, edma_chan);
+
+		if (ret < 0) {
+			sunxi_err(obj_global->dev, "pcie edma config error!\n");
+			return -EINVAL;
+		}
+	} else {
+		sunxi_err(obj_global->dev, "config_dma_trx_func is NULL!\n");
+		return -EINVAL;
+	}
+
+	obj_global->start_dma_trx_func(&edma_table, obj_global);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sunxi_pcie_edma_config_start);
+
+int sunxi_pcie_dma_mem_read(phys_addr_t src_addr, phys_addr_t dst_addr, unsigned int size)
 {
 	struct dma_table read_table = {0};
 	int ret;
 
 	if (likely(obj_global->config_dma_trx_func)) {
-		ret = obj_global->config_dma_trx_func(&read_table, sar_addr, dar_addr, size, PCIE_DMA_READ);
+		ret = obj_global->config_dma_trx_func(&read_table, src_addr, dst_addr, size, PCIE_DMA_READ, NULL);
 
 		if (ret < 0) {
-			pr_err("pcie dma mem read error ! \n");
+			sunxi_err(obj_global->dev, "pcie dma mem read error!\n");
 			return -EINVAL;
 		}
 	} else {
-		pr_err("config_dma_trx_func is NULL ! \n");
+		sunxi_err(obj_global->dev, "config_dma_trx_func is NULL!\n");
 		return -EINVAL;
 	}
@@ -141,20 +208,20 @@
 }
 EXPORT_SYMBOL_GPL(sunxi_pcie_dma_mem_read);
 
-int sunxi_pcie_dma_mem_write(phys_addr_t sar_addr, phys_addr_t dar_addr, unsigned int size)
+int sunxi_pcie_dma_mem_write(phys_addr_t src_addr, phys_addr_t dst_addr, unsigned int size)
 {
 	struct dma_table write_table = {0};
 	int ret;
 
 	if (likely(obj_global->config_dma_trx_func)) {
-		ret = obj_global->config_dma_trx_func(&write_table, sar_addr, dar_addr, size, PCIE_DMA_WRITE);
+		ret = obj_global->config_dma_trx_func(&write_table, src_addr, dst_addr, size, PCIE_DMA_WRITE, NULL);
 
 		if (ret < 0) {
-			pr_err("pcie dma mem write error ! \n");
+			sunxi_err(obj_global->dev, "pcie dma mem write error!\n");
 			return -EINVAL;
 		}
 	} else {
-		pr_err("config_dma_trx_func is NULL ! \n");
+		sunxi_err(obj_global->dev, "config_dma_trx_func is NULL!\n");
 		return -EINVAL;
 	}
@@ -186,8 +253,11 @@ EXPORT_SYMBOL_GPL(sunxi_pcie_dma_obj_probe);
 
 int sunxi_pcie_dma_obj_remove(struct device *dev)
 {
-	memset(dma_wr_chn, 0, sizeof(dma_channel_t) * SUNXI_DMA_WR_CHN_CNT);
-	memset(dma_rd_chn, 0, sizeof(dma_channel_t) * SUNXI_DMA_RD_CHN_CNT);
+	struct platform_device *pdev = to_platform_device(dev);
+	struct sunxi_pcie *pci = platform_get_drvdata(pdev);
+
+	memset(pci->dma_wr_chn, 0, sizeof(sunxi_pci_edma_chan_t) * pci->num_edma);
+	memset(pci->dma_rd_chn, 0, sizeof(sunxi_pci_edma_chan_t) * pci->num_edma);
 
 	obj_global->dma_list.next = NULL;
 	obj_global->dma_list.prev = NULL;
diff --git a/bsp/drivers/pcie/pcie-sunxi-dma.h b/bsp/drivers/pcie/pcie-sunxi-dma.h
index 1c86d321e9..97cc9a27c9 100644
--- a/bsp/drivers/pcie/pcie-sunxi-dma.h
+++ b/bsp/drivers/pcie/pcie-sunxi-dma.h
@@ -17,6 +17,8 @@
 #include
 #include
 
+#include "pcie-sunxi.h"
+
 #define PCIE_DMA_TABLE_NUM	8
 #define PCIE_DMA_TRX_TYPE_NUM	3
 
@@ -69,13 +71,19 @@ enum dma_dir {
 	PCIE_DMA_READ,
 };
 
-typedef int *dma_hdl_t;
+typedef void (*sunxi_pcie_edma_callback)(void *param);
 
-typedef struct {
-	u32 dma_used;
+typedef struct sunxi_pci_edma_chan {
 	u32 chnl_num;
-	spinlock_t lock; /* dma channel lock */
-} dma_channel_t;
+	spinlock_t lock;
+	bool cookie;
+	phys_addr_t src_addr;
+	phys_addr_t dst_addr;
+	u32 size;
+	enum dma_dir dma_trx;
+	void *callback_param;
+	sunxi_pcie_edma_callback callback;
+} sunxi_pci_edma_chan_t;
 
 /*
  * The Channel Control Register for read and write.
@@ -257,13 +265,13 @@ struct dma_trx_obj { struct pcie_misc_dev *pcie_dev; void (*start_dma_trx_func)(struct dma_table *table, struct dma_trx_obj *obj); int (*config_dma_trx_func)(struct dma_table *table, phys_addr_t sar_addr, phys_addr_t dar_addr, - unsigned int size, enum dma_dir dma_trx); + unsigned int size, enum dma_dir dma_trx, sunxi_pci_edma_chan_t *edma_chn); }; struct dma_trx_obj *sunxi_pcie_dma_obj_probe(struct device *dev); int sunxi_pcie_dma_obj_remove(struct device *dev); -int sunxi_pcie_dma_chan_release(u32 channel, enum dma_dir dma_trx); -dma_hdl_t sunxi_pcie_dma_chan_request(enum dma_dir dma_trx); +sunxi_pci_edma_chan_t *sunxi_pcie_dma_chan_request(enum dma_dir dma_trx, void *cb, void *data); +int sunxi_pcie_dma_chan_release(struct sunxi_pci_edma_chan *edma_chan, enum dma_dir dma_trx); int sunxi_pcie_dma_mem_read(phys_addr_t sar_addr, phys_addr_t dar_addr, unsigned int size); int sunxi_pcie_dma_mem_write(phys_addr_t sar_addr, phys_addr_t dar_addr, unsigned int size); int sunxi_pcie_dma_get_chan(struct platform_device *pdev); diff --git a/bsp/drivers/pcie/pcie-sunxi-ep.c b/bsp/drivers/pcie/pcie-sunxi-ep.c index 0fede481f8..b8fbe5995c 100644 --- a/bsp/drivers/pcie/pcie-sunxi-ep.c +++ b/bsp/drivers/pcie/pcie-sunxi-ep.c @@ -7,6 +7,8 @@ * Author: songjundong */ +#define SUNXI_MODNAME "pcie-ep" +#include #include #include #include @@ -163,7 +165,7 @@ static int sunxi_pcie_ep_inbound_atu(struct sunxi_pcie_ep *ep, u8 func_no, int t free_win = ep->bar_to_atu[bar]; if (free_win >= ep->num_ib_windows) { - dev_err(&pci->dev, "No free inbound window\n"); + sunxi_err(pci->dev, "No free inbound window\n"); return -EINVAL; } @@ -185,7 +187,7 @@ static int sunxi_pcie_ep_outbound_atu(struct sunxi_pcie_ep *ep, u8 func_no, free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows); if (free_win >= ep->num_ob_windows) { - dev_err(&pci->dev, "No free outbound window\n"); + sunxi_err(pci->dev, "No free outbound window\n"); return -EINVAL; } @@ -355,7 +357,7 @@ static int sunxi_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, ret = sunxi_pcie_ep_outbound_atu(ep, func_no, cpu_addr, pci_addr, size); if (ret) { - dev_err(&pci->dev, "Failed to enable address\n"); + sunxi_err(pci->dev, "Failed to enable address\n"); return ret; } @@ -529,13 +531,13 @@ static int sunxi_pcie_parse_ep_dts(struct sunxi_pcie_ep *ep) void *addr; struct resource *res; struct sunxi_pcie *pci = to_sunxi_pcie_from_ep(ep); - struct device *dev = &pci->dev; + struct device *dev = pci->dev; struct platform_device *pdev = to_platform_device(dev); struct device_node *np = dev->of_node; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); if (!res) { - dev_err(dev, "unable to read *addr_space* property\n"); + sunxi_err(dev, "unable to read *addr_space* property\n"); return -EINVAL; } @@ -546,14 +548,14 @@ static int sunxi_pcie_parse_ep_dts(struct sunxi_pcie_ep *ep) ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows); if (ret < 0) { - dev_err(dev, "unable to read *num-ib-windows* property\n"); + sunxi_err(dev, "unable to read *num-ib-windows* property\n"); return ret; } ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows); if (ret < 0) { - dev_err(dev, "unable to read *num-ob-windows* property\n"); + sunxi_err(dev, "unable to read *num-ob-windows* property\n"); return ret; } @@ -606,7 +608,7 @@ int sunxi_plat_ep_init_end(struct sunxi_pcie_ep *ep) hdr_type = sunxi_pcie_readb_dbi(pci, PCI_HEADER_TYPE) & PCI_HEADER_TYPE_MASK; if (hdr_type != 
PCI_HEADER_TYPE_NORMAL) { - dev_err(&pci->dev, + sunxi_err(pci->dev, "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n", hdr_type); return -EIO; @@ -637,7 +639,7 @@ int sunxi_pcie_ep_init(struct sunxi_pcie *pci) u8 func_no; struct pci_epc *epc; struct sunxi_pcie_ep *ep = &pci->ep; - struct device *dev = &pci->dev; + struct device *dev = pci->dev; struct device_node *np = dev->of_node; struct sunxi_pcie_ep_func *ep_func; @@ -645,13 +647,13 @@ int sunxi_pcie_ep_init(struct sunxi_pcie *pci) ret = sunxi_pcie_parse_ep_dts(ep); if (ret) { - dev_err(dev, "failed to parse ep dts\n"); + sunxi_err(dev, "failed to parse ep dts\n"); return ret; } epc = devm_pci_epc_create(dev, &sunxi_pcie_epc_ops); if (IS_ERR(epc)) { - dev_err(dev, "failed to create epc device\n"); + sunxi_err(dev, "failed to create epc device\n"); return PTR_ERR(epc); } @@ -681,7 +683,7 @@ int sunxi_pcie_ep_init(struct sunxi_pcie *pci) ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size, ep->page_size); if (ret < 0) { - dev_err(dev, "Failed to initialize address space\n"); + sunxi_err(dev, "Failed to initialize address space\n"); return ret; } @@ -689,7 +691,7 @@ int sunxi_pcie_ep_init(struct sunxi_pcie *pci) epc->mem->window.page_size); if (!ep->msi_mem) { ret = -ENOMEM; - dev_err(dev, "Failed to reserve memory for MSI\n"); + sunxi_err(dev, "Failed to reserve memory for MSI\n"); goto err_exit_epc_mem; } @@ -711,7 +713,8 @@ EXPORT_SYMBOL_GPL(sunxi_pcie_ep_init); void sunxi_pcie_ep_deinit(struct sunxi_pcie *pci) { - struct pci_epc *epc = &pci->ep->epc; + struct pci_epc *epc = pci->ep.epc; + struct sunxi_pcie_ep *ep = &pci->ep; pci_epc_mem_exit(epc); pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem, epc->mem->window.page_size); diff --git a/bsp/drivers/pcie/pcie-sunxi-plat.c b/bsp/drivers/pcie/pcie-sunxi-plat.c index eea451b88c..50f6ab4533 100644 --- a/bsp/drivers/pcie/pcie-sunxi-plat.c +++ b/bsp/drivers/pcie/pcie-sunxi-plat.c @@ -12,6 +12,8 @@ * published by the Free Software Foundation. 
*/ +#define SUNXI_MODNAME "pcie" +#include #include #include #include @@ -36,7 +38,7 @@ #include "pcie-sunxi-dma.h" #include "pcie-sunxi.h" -#define SUNXI_PCIE_MODULE_VERSION "1.0.8" +#define SUNXI_PCIE_MODULE_VERSION "1.0.11" void sunxi_pcie_writel(u32 val, struct sunxi_pcie *pcie, u32 offset) { @@ -112,7 +114,7 @@ static void sunxi_pcie_plat_set_mode(struct sunxi_pcie *pci) sunxi_pcie_writel(val, pci, PCIE_LTSSM_CTRL); break; default: - dev_err(pci->dev, "unsupported device type:%d\n", pci->drvdata->mode); + sunxi_err(pci->dev, "unsupported device type:%d\n", pci->drvdata->mode); break; } } @@ -196,7 +198,7 @@ void sunxi_pcie_write_dbi(struct sunxi_pcie *pci, u32 reg, size_t size, u32 val) ret = sunxi_pcie_cfg_write(pci->dbi_base + reg, size, val); if (ret) - dev_err(pci->dev, "Write DBI address failed\n"); + sunxi_err(pci->dev, "Write DBI address failed\n"); } EXPORT_SYMBOL_GPL(sunxi_pcie_write_dbi); @@ -207,7 +209,7 @@ u32 sunxi_pcie_read_dbi(struct sunxi_pcie *pci, u32 reg, size_t size) ret = sunxi_pcie_cfg_read(pci->dbi_base + reg, size, &val); if (ret) - dev_err(pci->dev, "Read DBI address failed\n"); + sunxi_err(pci->dev, "Read DBI address failed\n"); return val; } @@ -268,7 +270,7 @@ void sunxi_pcie_plat_set_rate(struct sunxi_pcie *pci) val |= PORT_LINK_MODE_4_LANES; break; default: - dev_err(pci->dev, "num-lanes %u: invalid value\n", pci->lanes); + sunxi_err(pci->dev, "num-lanes %u: invalid value\n", pci->lanes); return; } sunxi_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val); @@ -309,6 +311,12 @@ static const struct sunxi_pcie_of_data sunxi_pcie_rc_v210_of_data = { .cpu_pcie_addr_quirk = true, }; +static const struct sunxi_pcie_of_data sunxi_pcie_rc_v210_v2_of_data = { + .mode = SUNXI_PCIE_RC_TYPE, + .has_pcie_slv_clk = true, + .need_pcie_rst = true, +}; + static const struct sunxi_pcie_of_data sunxi_pcie_rc_v300_of_data = { .mode = SUNXI_PCIE_RC_TYPE, }; @@ -324,6 +332,10 @@ static const struct of_device_id sunxi_pcie_plat_of_match[] = { .compatible = "allwinner,sunxi-pcie-v210-rc", .data = &sunxi_pcie_rc_v210_of_data, }, + { + .compatible = "allwinner,sunxi-pcie-v210-v2-rc", + .data = &sunxi_pcie_rc_v210_v2_of_data, + }, { .compatible = "allwinner,sunxi-pcie-v300-rc", .data = &sunxi_pcie_rc_v300_of_data, @@ -397,22 +409,22 @@ static int sunxi_pcie_plat_enable_power(struct sunxi_pcie *pci) ret = regulator_set_voltage(pci->pcie3v3, 3300000, 3300000); if (ret) - dev_warn(dev, "failed to set regulator voltage\n"); + sunxi_warn(dev, "failed to set regulator voltage\n"); ret = regulator_enable(pci->pcie3v3); if (ret) - dev_err(dev, "failed to enable pcie3v3 regulator\n"); + sunxi_err(dev, "failed to enable pcie3v3 regulator\n"); if (IS_ERR_OR_NULL(pci->pcie1v8)) return 1; ret = regulator_set_voltage(pci->pcie1v8, 1800000, 1800000); if (ret) - dev_warn(dev, "failed to set regulator voltage\n"); + sunxi_warn(dev, "failed to set regulator voltage\n"); ret = regulator_enable(pci->pcie1v8); if (ret) - dev_err(dev, "failed to enable pcie1v8 regulator\n"); + sunxi_err(dev, "failed to enable pcie1v8 regulator\n"); return ret; } @@ -426,14 +438,14 @@ static int sunxi_pcie_plat_disable_power(struct sunxi_pcie *pci) ret = regulator_disable(pci->pcie3v3); if (ret) - dev_err(pci->dev, "fail to disable pcie3v3 regulator\n"); + sunxi_err(pci->dev, "fail to disable pcie3v3 regulator\n"); if (IS_ERR_OR_NULL(pci->pcie1v8)) return ret; ret = regulator_disable(pci->pcie1v8); if (ret) - dev_err(pci->dev, "fail to disable pcie1v8 regulator\n"); + sunxi_err(pci->dev, "fail to disable pcie1v8 
regulator\n"); return ret; } @@ -442,64 +454,88 @@ static int sunxi_pcie_plat_clk_setup(struct sunxi_pcie *pci) { int ret; - ret = clk_prepare_enable(pci->pcie_ref); + + + ret = clk_prepare_enable(pci->pcie_aux); if (ret) { - dev_err(pci->dev, "cannot prepare/enable ref clock\n"); + sunxi_err(pci->dev, "cannot prepare/enable aux clock\n"); return ret; } - ret = clk_set_rate(pci->pcie_ref, 100000000); - if (ret) { - dev_err(pci->dev, "failed to set clock freq 100M!\n"); - goto err0; + + if (pci->drvdata->has_pcie_slv_clk) { + ret = clk_prepare_enable(pci->pcie_slv); + if (ret) { + sunxi_err(pci->dev, "cannot prepare/enable slv clock\n"); + goto err0; + } } - ret = clk_prepare_enable(pci->pcie_aux); - if (ret) { - dev_err(pci->dev, "cannot prepare/enable aux clock\n"); - goto err0; + if (pci->drvdata->need_pcie_rst) { + ret = reset_control_deassert(pci->pcie_rst); + if (ret) { + sunxi_err(pci->dev, "cannot reset pcie\n"); + goto err1; + } + + ret = reset_control_deassert(pci->pwrup_rst); + if (ret) { + sunxi_err(pci->dev, "cannot pwrup_reset pcie\n"); + goto err1; + } } + return 0; +err1: + if (pci->drvdata->has_pcie_slv_clk) + clk_disable_unprepare(pci->pcie_slv); err0: - clk_disable_unprepare(pci->pcie_ref); + clk_disable_unprepare(pci->pcie_aux); return ret; } static void sunxi_pcie_plat_clk_exit(struct sunxi_pcie *pci) { + if (pci->drvdata->need_pcie_rst) { + reset_control_assert(pci->pcie_rst); + reset_control_assert(pci->pwrup_rst); + } + if (pci->drvdata->has_pcie_slv_clk) + clk_disable_unprepare(pci->pcie_slv); clk_disable_unprepare(pci->pcie_aux); - clk_disable_unprepare(pci->pcie_ref); } static int sunxi_pcie_plat_clk_get(struct platform_device *pdev, struct sunxi_pcie *pci) { - int ret; - - pci->pcie_ref = devm_clk_get(&pdev->dev, "pclk_ref"); - if (IS_ERR(pci->pcie_ref)) { - dev_err(&pdev->dev, "failed to clk pclk_ref\n"); - return PTR_ERR(pci->pcie_ref); + pci->pcie_aux = devm_clk_get(&pdev->dev, "pclk_aux"); + if (IS_ERR(pci->pcie_aux)) { + sunxi_err(&pdev->dev, "fail to get pclk_aux\n"); + return PTR_ERR(pci->pcie_aux); } - pci->pcie_per = devm_clk_get(&pdev->dev, "pclk_per"); - if (IS_ERR(pci->pcie_per)) { - dev_err(&pdev->dev, "failed to get pclk_per\n"); - return PTR_ERR(pci->pcie_per); + if (pci->drvdata->has_pcie_slv_clk) { + pci->pcie_slv = devm_clk_get(&pdev->dev, "pclk_slv"); + if (IS_ERR(pci->pcie_slv)) { + sunxi_err(&pdev->dev, "fail to get pclk_slv\n"); + return PTR_ERR(pci->pcie_slv); + } } - ret = clk_set_parent(pci->pcie_ref, pci->pcie_per); - if (ret) { - dev_err(&pdev->dev, "failed to set parent\n"); - return -EINVAL; - } + if (pci->drvdata->need_pcie_rst) { + pci->pcie_rst = devm_reset_control_get(&pdev->dev, "pclk_rst"); + if (IS_ERR(pci->pcie_rst)) { + sunxi_err(&pdev->dev, "fail to get pclk_rst\n"); + return PTR_ERR(pci->pcie_rst); + } - pci->pcie_aux = devm_clk_get(&pdev->dev, "pclk_aux"); - if (IS_ERR(pci->pcie_aux)) { - dev_err(&pdev->dev, "fail to get pclk_aux\n"); - return PTR_ERR(pci->pcie_aux); + pci->pwrup_rst = devm_reset_control_get(&pdev->dev, "pwrup_rst"); + if (IS_ERR(pci->pwrup_rst)) { + sunxi_err(&pdev->dev, "fail to get pwrup_rst\n"); + return PTR_ERR(pci->pwrup_rst); + } } return 0; @@ -511,7 +547,7 @@ static int sunxi_pcie_plat_combo_phy_init(struct sunxi_pcie *pci) ret = phy_init(pci->phy); if (ret) { - dev_err(pci->dev, "fail to init phy, err %d\n", ret); + sunxi_err(pci->dev, "fail to init phy, err %d\n", ret); return ret; } @@ -532,14 +568,30 @@ static irqreturn_t sunxi_pcie_plat_sii_handler(int irq, void *arg) return IRQ_HANDLED; } 
-static void sunxi_pcie_plat_dma_handle_interrupt(u32 ch, enum dma_dir dma_trx) +static void sunxi_pcie_plat_dma_handle_interrupt(struct sunxi_pcie *pci, u32 ch, enum dma_dir dma_trx) { - int ret; - - ret = sunxi_pcie_dma_chan_release(ch, dma_trx); - if (unlikely(ret < 0)) { - pr_err("%s is error release chnl%d !\n", __func__, ch); + sunxi_pci_edma_chan_t *edma_chan = NULL; + sunxi_pcie_edma_callback cb = NULL; + void *cb_data = NULL; + + if (dma_trx == PCIE_DMA_WRITE) { + edma_chan = &pci->dma_wr_chn[ch]; + cb = edma_chan->callback; + cb_data = edma_chan->callback_param; + if (cb) + cb(cb_data); + } else if (dma_trx == PCIE_DMA_READ) { + edma_chan = &pci->dma_rd_chn[ch]; + cb = edma_chan->callback; + cb_data = edma_chan->callback_param; + if (cb) + cb(cb_data); + } else { + sunxi_err(pci->dev, "ERR: unsupported type:%d \n", dma_trx); } + + if (edma_chan->cookie) + sunxi_pcie_dma_chan_release(edma_chan, dma_trx); } #define SUNXI_PCIE_DMA_IRQ_HANDLER(name, chn, dir) \ @@ -557,14 +609,14 @@ static irqreturn_t sunxi_pcie_##name##_irq_handler \ clr.doneclr = BIT(chn); \ sunxi_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + \ (dir ? PCIE_DMA_RD_INT_CLEAR : PCIE_DMA_WR_INT_CLEAR), clr.dword);\ - sunxi_pcie_plat_dma_handle_interrupt(chn, dir); \ + sunxi_pcie_plat_dma_handle_interrupt(pci, chn, dir); \ } \ \ if (sta.abort & BIT(chn)) { \ clr.abortclr = BIT(chn); \ sunxi_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + \ (dir ? PCIE_DMA_RD_INT_CLEAR : PCIE_DMA_WR_INT_CLEAR), clr.dword);\ - dev_err(pci->dev, "DMA %s channel %d is abort\n", \ + sunxi_err(pci->dev, "DMA %s channel %d is abort\n", \ dir ? "read":"write", chn); \ } \ \ @@ -649,32 +701,38 @@ static void sunxi_pcie_plat_dma_start(struct dma_table *table, struct dma_trx_ob } } -static int sunxi_pcie_plat_dma_config(struct dma_table *table, phys_addr_t sar_addr, phys_addr_t dar_addr, - unsigned int size, enum dma_dir dma_trx) +static int sunxi_pcie_plat_dma_config(struct dma_table *table, phys_addr_t src_addr, phys_addr_t dst_addr, + unsigned int size, enum dma_dir dma_trx, sunxi_pci_edma_chan_t *edma_chn) { - dma_channel_t *chn = NULL; + sunxi_pci_edma_chan_t *chn = NULL; table->ctx_reg.ctrllo.lie = 0x1; table->ctx_reg.ctrllo.rie = 0x0; table->ctx_reg.ctrllo.td = 0x1; table->ctx_reg.ctrlhi.dword = 0x0; table->ctx_reg.xfersize = size; - table->ctx_reg.sarptrlo = (u32)(sar_addr & 0xffffffff); - table->ctx_reg.sarptrhi = (u32)(sar_addr >> 32); - table->ctx_reg.darptrlo = (u32)(dar_addr & 0xffffffff); - table->ctx_reg.darptrhi = (u32)(dar_addr >> 32); + table->ctx_reg.sarptrlo = (u32)(src_addr & 0xffffffff); + table->ctx_reg.sarptrhi = (u32)(src_addr >> 32); + table->ctx_reg.darptrlo = (u32)(dst_addr & 0xffffffff); + table->ctx_reg.darptrhi = (u32)(dst_addr >> 32); table->start.stop = 0x0; table->dir = dma_trx; - chn = (dma_channel_t *)sunxi_pcie_dma_chan_request(dma_trx); - if (!chn) { - pr_err("pcie request %s channel error! \n", (dma_trx ? "DMA_READ" : "DMA_WRITE")); - return -ENOMEM; - } + if (!edma_chn) { + chn = (sunxi_pci_edma_chan_t *)sunxi_pcie_dma_chan_request(dma_trx, NULL, NULL); + if (!chn) { + sunxi_err(NULL, "pcie request %s channel error! \n", (dma_trx ? 
"DMA_READ" : "DMA_WRITE")); + return -ENOMEM; + } - table->start.chnl = chn->chnl_num; + chn->cookie = true; + table->start.chnl = chn->chnl_num; + table->weilo.dword = (PCIE_WEIGHT << (5 * chn->chnl_num)); + } else { + table->start.chnl = edma_chn->chnl_num; + table->weilo.dword = (PCIE_WEIGHT << (5 * edma_chn->chnl_num)); + } - table->weilo.dword = (PCIE_WEIGHT << (5 * chn->chnl_num)); table->enb.enb = 0x1; return 0; } @@ -690,7 +748,7 @@ static int sunxi_pcie_plat_request_irq(struct sunxi_pcie *sunxi_pcie, struct pla ret = devm_request_irq(&pdev->dev, irq, sunxi_pcie_plat_sii_handler, IRQF_SHARED, "pcie-sii", &sunxi_pcie->pp); if (ret) { - dev_err(&pdev->dev, "PCIe failed to request linkup IRQ\n"); + sunxi_err(&pdev->dev, "PCIe failed to request linkup IRQ\n"); return ret; } @@ -705,7 +763,7 @@ static int sunxi_pcie_plat_request_irq(struct sunxi_pcie *sunxi_pcie, struct pla ret = devm_request_irq(&pdev->dev, irq, sunxi_pcie_dma_w0_irq_handler, IRQF_SHARED, "pcie-dma-w0", sunxi_pcie); if (ret) { - dev_err(&pdev->dev, "failed to request PCIe DMA IRQ\n"); + sunxi_err(&pdev->dev, "failed to request PCIe DMA IRQ\n"); return ret; } @@ -716,7 +774,7 @@ static int sunxi_pcie_plat_request_irq(struct sunxi_pcie *sunxi_pcie, struct pla ret = devm_request_irq(&pdev->dev, irq, sunxi_pcie_dma_w1_irq_handler, IRQF_SHARED, "pcie-dma-w1", sunxi_pcie); if (ret) { - dev_err(&pdev->dev, "failed to request PCIe DMA IRQ\n"); + sunxi_err(&pdev->dev, "failed to request PCIe DMA IRQ\n"); return ret; } @@ -727,7 +785,7 @@ static int sunxi_pcie_plat_request_irq(struct sunxi_pcie *sunxi_pcie, struct pla ret = devm_request_irq(&pdev->dev, irq, sunxi_pcie_dma_w2_irq_handler, IRQF_SHARED, "pcie-dma-w2", sunxi_pcie); if (ret) { - dev_err(&pdev->dev, "failed to request PCIe DMA IRQ\n"); + sunxi_err(&pdev->dev, "failed to request PCIe DMA IRQ\n"); return ret; } @@ -738,7 +796,7 @@ static int sunxi_pcie_plat_request_irq(struct sunxi_pcie *sunxi_pcie, struct pla ret = devm_request_irq(&pdev->dev, irq, sunxi_pcie_dma_w3_irq_handler, IRQF_SHARED, "pcie-dma-w3", sunxi_pcie); if (ret) { - dev_err(&pdev->dev, "failed to request PCIe DMA IRQ\n"); + sunxi_err(&pdev->dev, "failed to request PCIe DMA IRQ\n"); return ret; } @@ -749,7 +807,7 @@ static int sunxi_pcie_plat_request_irq(struct sunxi_pcie *sunxi_pcie, struct pla ret = devm_request_irq(&pdev->dev, irq, sunxi_pcie_dma_r0_irq_handler, IRQF_SHARED, "pcie-dma-r0", sunxi_pcie); if (ret) { - dev_err(&pdev->dev, "failed to request PCIe DMA IRQ\n"); + sunxi_err(&pdev->dev, "failed to request PCIe DMA IRQ\n"); return ret; } @@ -760,7 +818,7 @@ static int sunxi_pcie_plat_request_irq(struct sunxi_pcie *sunxi_pcie, struct pla ret = devm_request_irq(&pdev->dev, irq, sunxi_pcie_dma_r1_irq_handler, IRQF_SHARED, "pcie-dma-r1", sunxi_pcie); if (ret) { - dev_err(&pdev->dev, "failed to request PCIe DMA IRQ\n"); + sunxi_err(&pdev->dev, "failed to request PCIe DMA IRQ\n"); return ret; } @@ -771,7 +829,7 @@ static int sunxi_pcie_plat_request_irq(struct sunxi_pcie *sunxi_pcie, struct pla ret = devm_request_irq(&pdev->dev, irq, sunxi_pcie_dma_r2_irq_handler, IRQF_SHARED, "pcie-dma-r2", sunxi_pcie); if (ret) { - dev_err(&pdev->dev, "failed to request PCIe DMA IRQ\n"); + sunxi_err(&pdev->dev, "failed to request PCIe DMA IRQ\n"); return ret; } @@ -782,7 +840,7 @@ static int sunxi_pcie_plat_request_irq(struct sunxi_pcie *sunxi_pcie, struct pla ret = devm_request_irq(&pdev->dev, irq, sunxi_pcie_dma_r3_irq_handler, IRQF_SHARED, "pcie-dma-r3", sunxi_pcie); if (ret) { - dev_err(&pdev->dev, "failed to 
request PCIe DMA IRQ\n"); + sunxi_err(&pdev->dev, "failed to request PCIe DMA IRQ\n"); return ret; } @@ -794,7 +852,7 @@ static int sunxi_pcie_plat_dma_init(struct sunxi_pcie *pci) pci->dma_obj = sunxi_pcie_dma_obj_probe(pci->dev); if (IS_ERR(pci->dma_obj)) { - dev_err(pci->dev, "failed to prepare dma obj probe\n"); + sunxi_err(pci->dev, "failed to prepare dma obj probe\n"); return -EINVAL; } @@ -820,13 +878,13 @@ static int sunxi_pcie_plat_parse_dts_res(struct platform_device *pdev, struct su dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); if (!dbi_res) { - dev_err(&pdev->dev, "get pcie dbi failed\n"); + sunxi_err(&pdev->dev, "get pcie dbi failed\n"); return -ENODEV; } pci->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_res); if (IS_ERR(pci->dbi_base)) { - dev_err(&pdev->dev, "ioremap pcie dbi failed\n"); + sunxi_err(&pdev->dev, "ioremap pcie dbi failed\n"); return PTR_ERR(pci->dbi_base); } @@ -835,27 +893,27 @@ static int sunxi_pcie_plat_parse_dts_res(struct platform_device *pdev, struct su pci->link_gen = of_pci_get_max_link_speed(pdev->dev.of_node); if (pci->link_gen < 0) { - dev_warn(&pdev->dev, "get pcie speed Gen failed\n"); + sunxi_warn(&pdev->dev, "get pcie speed Gen failed\n"); pci->link_gen = 0x1; } pci->rst_gpio = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(pci->rst_gpio)) - dev_warn(&pdev->dev, "Failed to get \"reset-gpios\"\n"); + sunxi_warn(&pdev->dev, "Failed to get \"reset-gpios\"\n"); else gpiod_direction_output(pci->rst_gpio, 1); pci->pcie3v3 = devm_regulator_get_optional(&pdev->dev, "pcie3v3"); if (IS_ERR(pci->pcie3v3)) - dev_warn(&pdev->dev, "no pcie3v3 regulator found\n"); + sunxi_warn(&pdev->dev, "no pcie3v3 regulator found\n"); pci->pcie1v8 = devm_regulator_get_optional(&pdev->dev, "pcie1v8"); if (IS_ERR(pci->pcie1v8)) - dev_warn(&pdev->dev, "no pcie1v8 regulator found\n"); + sunxi_warn(&pdev->dev, "no pcie1v8 regulator found\n"); ret = of_property_read_u32(np, "num-lanes", &pci->lanes); if (ret) { - dev_err(&pdev->dev, "Failed to parse the number of lanes\n"); + sunxi_err(&pdev->dev, "Failed to parse the number of lanes\n"); return -EINVAL; } @@ -863,7 +921,7 @@ static int sunxi_pcie_plat_parse_dts_res(struct platform_device *pdev, struct su ret = sunxi_pcie_plat_clk_get(pdev, pci); if (ret) { - dev_err(&pdev->dev, "pcie get clk init failed\n"); + sunxi_err(&pdev->dev, "pcie get clk init failed\n"); return -ENODEV; } @@ -945,7 +1003,7 @@ static int sunxi_pcie_plat_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) { - dev_err(&pdev->dev, "pm_runtime_get_sync failed\n"); + sunxi_err(&pdev->dev, "pm_runtime_get_sync failed\n"); goto err1; } @@ -968,7 +1026,7 @@ static int sunxi_pcie_plat_probe(struct platform_device *pdev) ret = sunxi_pcie_ep_init(pci); break; default: - dev_err(&pdev->dev, "INVALID device type %d\n", pci->drvdata->mode); + sunxi_err(&pdev->dev, "INVALID device type %d\n", pci->drvdata->mode); ret = -EINVAL; break; } @@ -976,7 +1034,7 @@ static int sunxi_pcie_plat_probe(struct platform_device *pdev) if (ret) goto err3; - dev_info(&pdev->dev, "driver version: %s\n", SUNXI_PCIE_MODULE_VERSION); + sunxi_info(&pdev->dev, "driver version: %s\n", SUNXI_PCIE_MODULE_VERSION); return 0; @@ -1012,7 +1070,7 @@ static int sunxi_pcie_plat_remove(struct platform_device *pdev) sunxi_pcie_ep_deinit(pci); break; default: - dev_err(&pdev->dev, "unspport device type %d\n", pci->drvdata->mode); + sunxi_err(&pdev->dev, "unspport device type %d\n", 
pci->drvdata->mode); break; } @@ -1068,7 +1126,7 @@ static int sunxi_pcie_plat_resume(struct device *dev) /* TODO */ break; default: - dev_err(pci->dev, "unsupport device type %d\n", pci->drvdata->mode); + sunxi_err(pci->dev, "unsupport device type %d\n", pci->drvdata->mode); break; } diff --git a/bsp/drivers/pcie/pcie-sunxi-rc.c b/bsp/drivers/pcie/pcie-sunxi-rc.c index 5fe485638a..7ad0a8fa54 100644 --- a/bsp/drivers/pcie/pcie-sunxi-rc.c +++ b/bsp/drivers/pcie/pcie-sunxi-rc.c @@ -18,6 +18,9 @@ * GNU General Public License for more details. * */ + +#define SUNXI_MODNAME "pcie-rc" +#include #include #include #include @@ -119,7 +122,7 @@ static int sunxi_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, raw_spin_unlock_irqrestore(&pp->lock, flags); if (unlikely(hwirq < 0)) { - dev_err(pp->dev, "failed to alloc hwirq\n"); + sunxi_err(pp->dev, "failed to alloc hwirq\n"); return -ENOSPC; } @@ -151,7 +154,7 @@ static const struct irq_domain_ops sunxi_msi_domain_ops = { }; static struct msi_domain_info sunxi_msi_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS), + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI), .chip = &sunxi_msi_top_chip, }; @@ -162,14 +165,14 @@ static int sunxi_allocate_msi_domains(struct sunxi_pcie_port *pp) pp->irq_domain = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR, &sunxi_msi_domain_ops, pp); if (!pp->irq_domain) { - dev_err(pp->dev, "failed to create IRQ domain\n"); + sunxi_err(pp->dev, "failed to create IRQ domain\n"); return -ENOMEM; } irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS); pp->msi_domain = pci_msi_create_irq_domain(fwnode, &sunxi_msi_info, pp->irq_domain); if (!pp->msi_domain) { - dev_err(pp->dev, "failed to create MSI domain\n"); + sunxi_err(pp->dev, "failed to create MSI domain\n"); irq_domain_remove(pp->irq_domain); return -ENOMEM; } @@ -334,7 +337,7 @@ int sunxi_pcie_host_init(struct sunxi_pcie_port *pp) pp->va_cfg0_base = devm_pci_remap_cfgspace(dev, pp->cfg0_base, pp->cfg0_size); if (!pp->va_cfg0_base) { - dev_err(dev, "Error with ioremap in function\n"); + sunxi_err(dev, "Error with ioremap in function\n"); return -ENOMEM; } } @@ -368,7 +371,7 @@ int sunxi_pcie_host_init(struct sunxi_pcie_port *pp) if (IS_ENABLED(CONFIG_PCI_MSI) && !pp->has_its) sunxi_free_msi_domains(pp); - dev_err(pp->dev, "Failed to probe host bridge\n"); + sunxi_err(pp->dev, "Failed to probe host bridge\n"); return ret; } @@ -439,7 +442,7 @@ void sunxi_pcie_host_setup_rc(struct sunxi_pcie_port *pp) sunxi_pcie_prog_outbound_atu(pp, atu_idx, PCIE_ATU_TYPE_IO, pp->io_base, pp->io_bus_addr, pp->io_size); else - dev_err(pp->dev, "Resources exceed number of ATU entries (%d)", + sunxi_err(pp->dev, "Resources exceed number of ATU entries (%d)", pp->num_ob_windows); } @@ -469,7 +472,7 @@ static int sunxi_pcie_host_wait_for_speed_change(struct sunxi_pcie *pci) usleep_range(SPEED_CHANGE_USLEEP_MIN, SPEED_CHANGE_USLEEP_MAX); } - dev_err(pci->dev, "Speed change timeout\n"); + sunxi_err(pci->dev, "Speed change timeout\n"); return -ETIMEDOUT; } @@ -494,9 +497,9 @@ int sunxi_pcie_host_speed_change(struct sunxi_pcie *pci, int gen) ret = sunxi_pcie_host_wait_for_speed_change(pci); if (!ret) - dev_info(pci->dev, "PCIe speed of Gen%d\n", gen); + sunxi_info(pci->dev, "PCIe speed of Gen%d\n", gen); else - dev_info(pci->dev, "PCIe speed of Gen1\n"); + sunxi_info(pci->dev, "PCIe speed of Gen1\n"); sunxi_pcie_dbi_ro_wr_dis(pci); return 0; @@ -548,7 +551,7 @@ static int sunxi_pcie_host_wait_for_link(struct 
sunxi_pcie_port *pp) for (retries = 0; retries < LINK_WAIT_MAX_RETRIE; retries++) { if (sunxi_pcie_host_link_up(pp)) { - dev_info(pp->dev, "pcie link up success\n"); + sunxi_info(pp->dev, "pcie link up success\n"); return 0; } usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); @@ -562,7 +565,7 @@ int sunxi_pcie_host_establish_link(struct sunxi_pcie *pci) struct sunxi_pcie_port *pp = &pci->pp; if (sunxi_pcie_host_link_up(pp)) { - dev_info(pci->dev, "pcie is already link up\n"); + sunxi_info(pci->dev, "pcie is already link up\n"); return 0; } @@ -610,7 +613,7 @@ int sunxi_pcie_host_add_port(struct sunxi_pcie *pci, struct platform_device *pde ret = of_property_read_u32(pp->dev->of_node, "num-ob-windows", &pp->num_ob_windows); if (ret) { - dev_err(&pdev->dev, "failed to parse num-ob-windows\n"); + sunxi_err(&pdev->dev, "failed to parse num-ob-windows\n"); return -EINVAL; } @@ -624,7 +627,7 @@ int sunxi_pcie_host_add_port(struct sunxi_pcie *pci, struct platform_device *pde ret = devm_request_irq(&pdev->dev, pp->msi_irq, sunxi_pcie_host_msi_irq_handler, IRQF_SHARED, "pcie-msi", pp); if (ret) { - dev_err(&pdev->dev, "failed to request MSI IRQ\n"); + sunxi_err(&pdev->dev, "failed to request MSI IRQ\n"); return ret; } } @@ -634,7 +637,7 @@ int sunxi_pcie_host_add_port(struct sunxi_pcie *pci, struct platform_device *pde ret = sunxi_pcie_host_init(pp); if (ret) { - dev_err(&pdev->dev, "failed to initialize host\n"); + sunxi_err(&pdev->dev, "failed to initialize host\n"); return ret; } diff --git a/bsp/drivers/pcie/pcie-sunxi.h b/bsp/drivers/pcie/pcie-sunxi.h index 00b995e4cb..e245bb0a50 100644 --- a/bsp/drivers/pcie/pcie-sunxi.h +++ b/bsp/drivers/pcie/pcie-sunxi.h @@ -219,6 +219,8 @@ struct sunxi_pcie_of_data { enum sunxi_pcie_device_mode mode; u32 func_offset; bool cpu_pcie_addr_quirk; + bool has_pcie_slv_clk; + bool need_pcie_rst; }; struct sunxi_pcie_ep_func { @@ -275,6 +277,8 @@ struct sunxi_pcie_port { bool cpu_pcie_addr_quirk; }; +struct sunxi_pci_edma_chan; + struct sunxi_pcie { struct device *dev; void __iomem *dbi_base; @@ -287,15 +291,20 @@ struct sunxi_pcie { int link_gen; struct sunxi_pcie_port pp; struct sunxi_pcie_ep ep; - struct clk *pcie_ref; - struct clk *pcie_per; struct clk *pcie_aux; + struct clk *pcie_slv; struct reset_control *pcie_rst; + struct reset_control *pwrup_rst; struct phy *phy; struct dma_trx_obj *dma_obj; const struct sunxi_pcie_of_data *drvdata; struct gpio_desc *rst_gpio; u32 lanes; + u32 num_edma; + unsigned long *rd_edma_map; + unsigned long *wr_edma_map; + struct sunxi_pci_edma_chan *dma_wr_chn; + struct sunxi_pci_edma_chan *dma_rd_chn; struct regulator *pcie1v8; struct regulator *pcie3v3; };
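
For reference, a minimal usage sketch of the one-shot helpers whose parameters this patch renames from sar_addr/dar_addr to src_addr/dst_addr. The wrapper function and both addresses below are hypothetical placeholders; only sunxi_pcie_dma_mem_write() and sunxi_pcie_dma_mem_read() come from the driver. Both helpers grab a free channel internally (the cookie path in sunxi_pcie_plat_dma_config(), released again from the interrupt handler) and return as soon as the transfer has been started, so a caller that needs to know when the data has actually landed should use sunxi_pcie_dma_chan_request() with a callback instead.

#include <linux/sizes.h>
#include "pcie-sunxi-dma.h"

/* Illustration only: both addresses are hypothetical placeholders. */
static int example_oneshot_copy(phys_addr_t local_buf, phys_addr_t remote_buf)
{
	int ret;

	/* push 4 KiB of local memory to the remote side over eDMA */
	ret = sunxi_pcie_dma_mem_write(local_buf, remote_buf, SZ_4K);
	if (ret)
		return ret;

	/* pull it back; completion is only signalled by the eDMA interrupt */
	return sunxi_pcie_dma_mem_read(remote_buf, local_buf, SZ_4K);
}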