drivers: net: ipq9574: Update EDMA Driver

This patch optimizes and updates the EDMA driver as
required for the ipq9574 platform, in line with the kernel
host EDMA driver.

-> Config TX_MOD and RX_MOD timers
-> Config DMAR_CTRL
-> Config Service code bypass for TX_DESC
-> Update the SRCINFO and DST_PORT configs
-> Make changes to include secondary and primary DESC's
into the same corresponding ring structure
-> Increase the TX_BUFF Size to 2048
-> Update to use RX_DESC RING 23
-> Remove unused fields from the EDMA structures
-> Update the description for the EDMA structure fields

Change-Id: I3f7f1c11cdd87687c38774a4930c9bee90857203
Signed-off-by: Selvam Sathappan Periakaruppan <quic_speriaka@quicinc.com>
This commit is contained in:
Selvam Sathappan Periakaruppan 2021-12-08 23:45:59 +05:30
parent 8bf3539016
commit 1d2a2dc57a
3 changed files with 327 additions and 357 deletions

View file

@ -29,6 +29,7 @@
#define IPQ9574_EDMA_REG_RXDESC2FILL_MAP_0 0x14
#define IPQ9574_EDMA_REG_RXDESC2FILL_MAP_1 0x18
#define IPQ9574_EDMA_REG_RXDESC2FILL_MAP_2 0x1c
#define IPQ9574_EDMA_REG_DMAR_CTRL 0x48
#define IPQ9574_EDMA_REG_MISC_INT_STAT 0x5c
#define IPQ9574_EDMA_REG_MISC_INT_MASK 0x60
#define IPQ9574_EDMA_REG_TXDESC2CMPL_MAP_0 0x8c
@ -50,6 +51,7 @@
#define IPQ9574_EDMA_REG_TXCMPL_CTRL(n) (0x79014 + (0x1000 * n))
#define IPQ9574_EDMA_REG_TX_INT_STAT(n) (0x99000 + (0x1000 * n))
#define IPQ9574_EDMA_REG_TX_INT_MASK(n) (0x99004 + (0x1000 * n))
#define IPQ9574_EDMA_REG_TX_MOD_TIMER(n) (0x9900c + (0x1000 * n))
#define IPQ9574_EDMA_REG_TX_INT_CTRL(n) (0x9900c + (0x1000 * n))
#define IPQ9574_EDMA_REG_RXFILL_BA(n) (0x29000 + (0x1000 * n))
#define IPQ9574_EDMA_REG_RXFILL_PROD_IDX(n) (0x29004 + (0x1000 * n))
@ -66,13 +68,41 @@
#define IPQ9574_EDMA_REG_RXDESC_CTRL(n) (0x39018 + (0x1000 * n))
#define IPQ9574_EDMA_REG_RXDESC_INT_STAT(n) (0x59000 + (0x1000 * n))
#define IPQ9574_EDMA_REG_RXDESC_INT_MASK(n) (0x59004 + (0x1000 * n))
#define IPQ9574_EDMA_REG_RX_MOD_TIMER(n) (0x59008 + (0x1000 * n))
#define IPQ9574_EDMA_REG_RX_INT_CTRL(n) (0x5900c + (0x1000 * n))
#define IPQ9574_EDMA_QID2RID_TABLE_MEM(q) (0xb9000 + (0x4 * q))
/*
* EDMA_REG_DMAR_CTRL register
*/
#define IPQ9574_EDMA_DMAR_REQ_PRI_MASK 0x7
#define IPQ9574_EDMA_DMAR_REQ_PRI_SHIFT 0x0
#define IPQ9574_EDMA_DMAR_BURST_LEN_MASK 0x1
#define IPQ9574_EDMA_DMAR_BURST_LEN_SHIFT 3
#define IPQ9574_EDMA_DMAR_TXDATA_OUTSTANDING_NUM_MASK 0x1f
#define IPQ9574_EDMA_DMAR_TXDATA_OUTSTANDING_NUM_SHIFT 4
#define IPQ9574_EDMA_DMAR_TXDESC_OUTSTANDING_NUM_MASK 0x7
#define IPQ9574_EDMA_DMAR_TXDESC_OUTSTANDING_NUM_SHIFT 9
#define IPQ9574_EDMA_DMAR_RXFILL_OUTSTANDING_NUM_MASK 0x7
#define IPQ9574_EDMA_DMAR_RXFILL_OUTSTANDING_NUM_SHIFT 12
#define IPQ9574_EDMA_DMAR_REQ_PRI_SET(x) (((x) & IPQ9574_EDMA_DMAR_REQ_PRI_MASK) \
<< IPQ9574_EDMA_DMAR_REQ_PRI_SHIFT)
#define IPQ9574_EDMA_DMAR_TXDATA_OUTSTANDING_NUM_SET(x) (((x) & IPQ9574_EDMA_DMAR_TXDATA_OUTSTANDING_NUM_MASK) \
<< IPQ9574_EDMA_DMAR_TXDATA_OUTSTANDING_NUM_SHIFT)
#define IPQ9574_EDMA_DMAR_TXDESC_OUTSTANDING_NUM_SET(x) (((x) & IPQ9574_EDMA_DMAR_TXDESC_OUTSTANDING_NUM_MASK) \
<< IPQ9574_EDMA_DMAR_TXDESC_OUTSTANDING_NUM_SHIFT)
#define IPQ9574_EDMA_DMAR_RXFILL_OUTSTANDING_NUM_SET(x) (((x) & IPQ9574_EDMA_DMAR_RXFILL_OUTSTANDING_NUM_MASK) \
<< IPQ9574_EDMA_DMAR_RXFILL_OUTSTANDING_NUM_SHIFT)
#define IPQ9574_EDMA_DMAR_BURST_LEN_SET(x) (((x) & IPQ9574_EDMA_DMAR_BURST_LEN_MASK) \
<< IPQ9574_EDMA_DMAR_BURST_LEN_SHIFT)
#define IPQ9574_EDMA_BURST_LEN_ENABLE 0x0
/*
* EDMA_REG_PORT_CTRL register
*/
#define IPQ9574_EDMA_PORT_CTRL_EN 0x3
#define IPQ9574_EDMA_PORT_CTRL_EN 0x3
/*
* EDMA_REG_TXDESC_PROD_IDX register
@ -170,6 +200,18 @@
#define IPQ9574_EDMA_RXDESC_INT_MASK_PKT_INT 0x1
#define IPQ9574_EDMA_MASK_INT_DISABLE 0x0
/*
* EDMA_REG_RX_MOD_TIMER register
*/
#define IPQ9574_EDMA_RX_MOD_TIMER_INIT_MASK 0xffff
#define IPQ9574_EDMA_RX_MOD_TIMER_INIT_SHIFT 0
/*
* EDMA_REG_TX_MOD_TIMER register
*/
#define IPQ9574_EDMA_TX_MOD_TIMER_INIT_MASK 0xffff
#define IPQ9574_EDMA_TX_MOD_TIMER_INIT_SHIFT 0
/*
* TXDESC shift values
*/
@ -179,10 +221,27 @@
#define IPQ9574_EDMA_TXDESC_DATA_OFFSET_MASK 0xfff
#define IPQ9574_EDMA_TXDESC_DATA_LENGTH_SHIFT 0
#define IPQ9574_EDMA_TXDESC_DATA_LENGTH_MASK 0x3ffff
#define IPQ9574_EDMA_TXDESC_DATA_LENGTH_MASK 0x1ffff
#define IPQ9574_EDMA_TXDESC_SERVICE_CODE_SHIFT 16
#define IPQ9574_EDMA_TXDESC_SERVICE_CODE_MASK (0x1FF << IPQ9574_EDMA_TXDESC_SERVICE_CODE_SHIFT)
#define IPQ9574_EDMA_TXDESC_SERVICE_CODE_SET(x) (((x) << IPQ9574_EDMA_TXDESC_SERVICE_CODE_SHIFT) & IPQ9574_EDMA_TXDESC_SERVICE_CODE_MASK)
#define IPQ9574_EDMA_SC_BYPASS 1
#define IPQ9574_EDMA_DST_PORT_TYPE 2
#define IPQ9574_EDMA_DST_PORT_TYPE_SHIFT 28
#define IPQ9574_EDMA_DST_PORT_TYPE_MASK (0xf << IPQ9574_EDMA_DST_PORT_TYPE_SHIFT)
#define IPQ9574_EDMA_DST_PORT_ID_SHIFT 16
#define IPQ9574_EDMA_DST_PORT_ID_MASK (0xfff << IPQ9574_EDMA_DST_PORT_ID_SHIFT)
#define IPQ9574_EDMA_DST_PORT_TYPE_SET(x) (((x) << IPQ9574_EDMA_DST_PORT_TYPE_SHIFT) & IPQ9574_EDMA_DST_PORT_TYPE_MASK)
#define IPQ9574_EDMA_DST_PORT_ID_SET(x) (((x) << IPQ9574_EDMA_DST_PORT_ID_SHIFT) & IPQ9574_EDMA_DST_PORT_ID_MASK)
#define IPQ9574_EDMA_RXDESC_SRCINFO_TYPE_PORTID 0x2000
#define IPQ9574_EDMA_RXDESC_SRCINFO_TYPE_SHIFT 8
#define IPQ9574_EDMA_RXDESC_SRCINFO_TYPE_MASK 0xf000
#define IPQ9574_EDMA_RXDESC_PORTNUM_BITS 0x0FFF
#define IPQ9574_EDMA_PREHDR_DSTINFO_PORTID_IND 0x20
#define IPQ9574_EDMA_PREHDR_PORTNUM_BITS 0x0fff
#define IPQ9574_EDMA_RING_DMA_MASK 0xffffffff
/*
@ -190,9 +249,14 @@
*/
#define IPQ9574_EDMA_RXDESC_PKT_SIZE_MASK 0x3ffff
#define IPQ9574_EDMA_RXDESC_PKT_SIZE_SHIFT 0
#define IPQ9574_EDMA_RXDESC_SRC_INFO_GET(x) ((x) & 0xFFFF)
#define IPQ9574_EDMA_RXDESC_RING_INT_STATUS_MASK 0x3
#define IPQ9574_EDMA_TXCMPL_RING_INT_STATUS_MASK 0x3
#define IPQ9574_EDMA_TXCMPL_RETMODE_OPAQUE 0x0
#define IPQ9574_EDMA_RXFILL_RING_INT_STATUS_MASK 0x1
#define IPQ9574_EDMA_TXCMPL_RING_INT_STATUS_MASK 0x3
#define IPQ9574_EDMA_TXCMPL_RETMODE_OPAQUE 0x0
#define IPQ9574_EDMA_TX_NE_INT_EN 0x2
#define IPQ9574_EDMA_RX_NE_INT_EN 0x2
#define IPQ9574_EDMA_TX_INITIAL_PROD_IDX 0x0
#endif /* __EDMA_REGS__ */

View file

@ -113,7 +113,6 @@ int ipq9574_edma_alloc_rx_buffer(struct ipq9574_edma_hw *ehw,
cons = reg_data & IPQ9574_EDMA_RXFILL_CONS_IDX_MASK;
while (1) {
counter = next;
if (++counter == rxfill_ring->count)
@ -141,8 +140,8 @@ int ipq9574_edma_alloc_rx_buffer(struct ipq9574_edma_hw *ehw,
* Save buffer size in RXFILL descriptor
*/
rxfill_desc->rdes1 |= (IPQ9574_EDMA_RX_BUFF_SIZE <<
IPQ9574_EDMA_RXFILL_BUF_SIZE_SHIFT) &
IPQ9574_EDMA_RXFILL_BUF_SIZE_MASK;
IPQ9574_EDMA_RXFILL_BUF_SIZE_SHIFT) &
IPQ9574_EDMA_RXFILL_BUF_SIZE_MASK;
num_alloc++;
next = counter;
}
@ -160,6 +159,8 @@ int ipq9574_edma_alloc_rx_buffer(struct ipq9574_edma_hw *ehw,
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RXFILL_PROD_IDX(
rxfill_ring->id), reg_data);
rxfill_ring->prod_idx = reg_data;
pr_debug("%s: num_alloc = %d\n", __func__, num_alloc);
}
@ -272,13 +273,13 @@ uint32_t ipq9574_edma_clean_rx(struct ipq9574_edma_common_info *c_info,
/*
* Check src_info from Rx Descriptor
*/
if (IPQ9574_EDMA_RXPH_SRC_INFO_TYPE_GET(rxdesc_desc->rdes4) ==
IPQ9574_EDMA_PREHDR_DSTINFO_PORTID_IND) {
src_port_num = rxdesc_desc->rdes4 &
IPQ9574_EDMA_PREHDR_PORTNUM_BITS;
src_port_num = IPQ9574_EDMA_RXDESC_SRC_INFO_GET(rxdesc_desc->rdes4);
if ((src_port_num & IPQ9574_EDMA_RXDESC_SRCINFO_TYPE_MASK) ==
IPQ9574_EDMA_RXDESC_SRCINFO_TYPE_PORTID) {
src_port_num &= IPQ9574_EDMA_RXDESC_PORTNUM_BITS;
} else {
pr_warn("WARN: src_info_type:0x%x. Drop skb:%p\n",
IPQ9574_EDMA_RXPH_SRC_INFO_TYPE_GET(rxdesc_desc->rdes4),
(src_port_num & IPQ9574_EDMA_RXDESC_SRCINFO_TYPE_MASK),
skb);
goto next_rx_desc;
}
@ -348,7 +349,7 @@ static int ipq9574_edma_rx_complete(struct ipq9574_edma_common_info *c_info)
}
/*
* Set RXDESC ring interrupt mask
* Enable RX EDMA interrupt masks
*/
for (i = 0; i < ehw->rxdesc_rings; i++) {
rxdesc_ring = &ehw->rxdesc_ring[i];
@ -358,7 +359,7 @@ static int ipq9574_edma_rx_complete(struct ipq9574_edma_common_info *c_info)
}
/*
* Set TXCMPL ring interrupt mask
* Enable TX EDMA interrupt masks
*/
for (i = 0; i < ehw->txcmpl_rings; i++) {
txcmpl_ring = &ehw->txcmpl_ring[i];
@ -367,16 +368,6 @@ static int ipq9574_edma_rx_complete(struct ipq9574_edma_common_info *c_info)
ehw->txcmpl_intr_mask);
}
/*
* Set RXFILL ring interrupt mask
*/
for (i = 0; i < ehw->rxfill_rings; i++) {
rxfill_ring = &ehw->rxfill_ring[i];
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RXFILL_INT_MASK(
rxfill_ring->id),
ehw->rxfill_intr_mask);
}
/*
* Read Misc intr status
*/
@ -450,8 +441,20 @@ static int ipq9574_eth_snd(struct eth_device *dev, void *packet, int length)
*/
txdesc = IPQ9574_EDMA_TXDESC_DESC(txdesc_ring, hw_next_to_use);
txdesc->tdes1 = 0;
txdesc->tdes2 = 0;
txdesc->tdes3 = 0;
txdesc->tdes4 = 0;
txdesc->tdes5 = 0;
txdesc->tdes6 = 0;
txdesc->tdes7 = 0;
skb = (uchar *)txdesc->tdes0;
/*
* Set SC BYPASS
*/
txdesc->tdes1 |= IPQ9574_EDMA_TXDESC_SERVICE_CODE_SET(IPQ9574_EDMA_SC_BYPASS);
pr_debug("%s: txdesc->tdes0 (buffer addr) = 0x%x length = %d \
prod_idx = %d cons_idx = %d\n",
__func__, txdesc->tdes0, length,
@ -464,9 +467,15 @@ static int ipq9574_eth_snd(struct eth_device *dev, void *packet, int length)
#else
/*
* Populate Tx dst info, port id is macid in dp_dev
* We have separate netdev for each port in Kernel but that is not the
* case in U-Boot.
* This part needs to be fixed to support multiple ports in non bridged
* mode during when all the ports are currently under same netdev.
*
* Currently mac port no. is fixed as 3 for the purpose of testing
*/
txdesc->tdes4 |= (((IPQ9574_EDMA_PREHDR_DSTINFO_PORTID_IND << 8) |
(IPQ9574_EDMA_MAC_PORT_NO & 0x0fff)) << 16);
txdesc->tdes4 |= (IPQ9574_EDMA_DST_PORT_TYPE_SET(IPQ9574_EDMA_DST_PORT_TYPE) |
IPQ9574_EDMA_DST_PORT_ID_SET(IPQ9574_EDMA_MAC_PORT_NO));
#endif
/*
@ -600,11 +609,9 @@ static int ipq9574_edma_setup_ring_resources(struct ipq9574_edma_hw *ehw)
{
struct ipq9574_edma_txcmpl_ring *txcmpl_ring;
struct ipq9574_edma_txdesc_ring *txdesc_ring;
struct ipq9574_edma_sec_txdesc_ring *sec_txdesc_ring;
struct ipq9574_edma_rxfill_ring *rxfill_ring;
struct ipq9574_edma_rxdesc_ring *rxdesc_ring;
struct ipq9574_edma_sec_rxdesc_ring *sec_rxdesc_ring;
struct ipq9574_edma_txdesc_desc *tx_desc;
struct ipq9574_edma_txdesc_desc *txdesc_desc;
struct ipq9574_edma_rxfill_desc *rxfill_desc;
int i, j, index;
void *tx_buf;
@ -640,6 +647,9 @@ static int ipq9574_edma_setup_ring_resources(struct ipq9574_edma_hw *ehw)
return -ENOMEM;
}
/*
* Allocate buffers for each of the desc
*/
for (j = 0; j < rxfill_ring->count; j++) {
rxfill_desc = IPQ9574_EDMA_RXFILL_DESC(rxfill_ring, j);
rxfill_desc->rdes0 = virt_to_phys(rx_buf);
@ -652,26 +662,6 @@ static int ipq9574_edma_setup_ring_resources(struct ipq9574_edma_hw *ehw)
}
}
/*
* Allocate secondary RxDesc ring descriptors
*/
for (i = 0; i < ehw->sec_rxdesc_rings; i++) {
sec_rxdesc_ring = &ehw->sec_rxdesc_ring[i];
sec_rxdesc_ring->count = EDMA_RING_SIZE;
sec_rxdesc_ring->id = ehw->sec_rxdesc_ring_start + i;
sec_rxdesc_ring->desc = (void *)noncached_alloc(
IPQ9574_EDMA_RX_SEC_DESC_SIZE * sec_rxdesc_ring->count,
CONFIG_SYS_CACHELINE_SIZE);
if (sec_rxdesc_ring->desc == NULL) {
pr_info("%s: sec_rxdesc_ring->desc alloc error\n", __func__);
return -ENOMEM;
}
sec_rxdesc_ring->dma = virt_to_phys(sec_rxdesc_ring->desc);
pr_debug("sec rxdesc ring id = %d, sec rxdesc ring ptr = %p, sec rxdesc ring dma = %u\n",
sec_rxdesc_ring->id, sec_rxdesc_ring->desc, (unsigned int)
sec_rxdesc_ring->dma);
}
/*
* Allocate RxDesc ring descriptors
*/
@ -685,7 +675,6 @@ static int ipq9574_edma_setup_ring_resources(struct ipq9574_edma_hw *ehw)
* Number of fill rings are lesser than the descriptor rings
* Share the fill rings across descriptor rings.
*/
index = ehw->rxfill_ring_start + (i % ehw->rxfill_rings);
rxdesc_ring->rxfill =
&ehw->rxfill_ring[index - ehw->rxfill_ring_start];
@ -694,7 +683,6 @@ static int ipq9574_edma_setup_ring_resources(struct ipq9574_edma_hw *ehw)
rxdesc_ring->desc = (void *)noncached_alloc(
IPQ9574_EDMA_RXDESC_DESC_SIZE * rxdesc_ring->count,
CONFIG_SYS_CACHELINE_SIZE);
if (rxdesc_ring->desc == NULL) {
pr_info("%s: rxdesc_ring->desc alloc error\n", __func__);
return -ENOMEM;
@ -703,47 +691,21 @@ static int ipq9574_edma_setup_ring_resources(struct ipq9574_edma_hw *ehw)
pr_debug("rxdesc ring id = %d, rxdesc ring ptr = %p, rxdesc ring dma = %u\n",
rxdesc_ring->id, rxdesc_ring->desc, (unsigned int)
rxdesc_ring->dma);
}
/*
* Allocate TxCmpl ring descriptors
*/
for (i = 0; i < ehw->txcmpl_rings; i++) {
txcmpl_ring = &ehw->txcmpl_ring[i];
txcmpl_ring->count = EDMA_RING_SIZE;
txcmpl_ring->id = ehw->txcmpl_ring_start + i;
txcmpl_ring->desc = (void *)noncached_alloc(
IPQ9574_EDMA_TXCMPL_DESC_SIZE * txcmpl_ring->count,
/*
* Allocate secondary Rx ring descriptors
*/
rxdesc_ring->sdesc = (void *)noncached_alloc(
IPQ9574_EDMA_RX_SEC_DESC_SIZE * rxdesc_ring->count,
CONFIG_SYS_CACHELINE_SIZE);
if (txcmpl_ring->desc == NULL) {
pr_info("%s: txcmpl_ring->desc alloc error\n", __func__);
if (rxdesc_ring->sdesc == NULL) {
pr_info("%s: rxdesc_ring->sdesc alloc error\n", __func__);
return -ENOMEM;
}
txcmpl_ring->dma = virt_to_phys(txcmpl_ring->desc);
pr_debug("txcmpl ring id = %d, txcmpl ring ptr = %p, txcmpl ring dma = %u\n",
txcmpl_ring->id, txcmpl_ring->desc, (unsigned int)
txcmpl_ring->dma);
}
/*
* Allocate secondary TxDesc ring descriptors
*/
for (i = 0; i < ehw->sec_txdesc_rings; i++) {
sec_txdesc_ring = &ehw->sec_txdesc_ring[i];
sec_txdesc_ring->count = EDMA_RING_SIZE;
sec_txdesc_ring->id = ehw->sec_txdesc_ring_start + i;
sec_txdesc_ring->desc = (void *)noncached_alloc(
IPQ9574_EDMA_TX_SEC_DESC_SIZE * sec_txdesc_ring->count,
CONFIG_SYS_CACHELINE_SIZE);
if (sec_txdesc_ring->desc == NULL) {
pr_info("%s: sec_txdesc_ring->desc alloc error\n", __func__);
return -ENOMEM;
}
sec_txdesc_ring->dma = virt_to_phys(sec_txdesc_ring->desc);
pr_debug("sec txdesc ring id = %d, sec txdesc ring ptr = %p, sec txdesc ring dma = %u\n",
sec_txdesc_ring->id, sec_txdesc_ring->desc, (unsigned int)
sec_txdesc_ring->dma);
rxdesc_ring->sdma = virt_to_phys(rxdesc_ring->sdesc);
pr_debug("sec rxdesc ring id = %d, sec rxdesc ring ptr = %p, sec rxdesc ring dma = %u\n",
rxdesc_ring->id, rxdesc_ring->sdesc, (unsigned int)
rxdesc_ring->sdma);
}
/*
@ -774,21 +736,60 @@ static int ipq9574_edma_setup_ring_resources(struct ipq9574_edma_hw *ehw)
return -ENOMEM;
}
/*
* Allocate buffers for each of the desc
*/
for (j = 0; j < txdesc_ring->count; j++) {
tx_desc = IPQ9574_EDMA_TXDESC_DESC(txdesc_ring, j);
tx_desc->tdes0 = virt_to_phys(tx_buf);
tx_desc->tdes1 = 0;
tx_desc->tdes2 = 0;
tx_desc->tdes3 = 0;
tx_desc->tdes4 = 0;
tx_desc->tdes5 = 0;
tx_desc->tdes6 = 0;
tx_desc->tdes7 = 0;
txdesc_desc = IPQ9574_EDMA_TXDESC_DESC(txdesc_ring, j);
txdesc_desc->tdes0 = virt_to_phys(tx_buf);
txdesc_desc->tdes1 = 0;
txdesc_desc->tdes2 = 0;
txdesc_desc->tdes3 = 0;
txdesc_desc->tdes4 = 0;
txdesc_desc->tdes5 = 0;
txdesc_desc->tdes6 = 0;
txdesc_desc->tdes7 = 0;
tx_buf += IPQ9574_EDMA_TX_BUFF_SIZE;
pr_debug("Ring %d: txdesc ring dis0 ptr = %p, txdesc ring dis0 dma = %u\n",
j, tx_desc, (unsigned int)tx_desc->tdes0);
j, txdesc_desc, (unsigned int)txdesc_desc->tdes0);
}
/*
* Allocate secondary Tx ring descriptors
*/
txdesc_ring->sdesc = (void *)noncached_alloc(
IPQ9574_EDMA_TX_SEC_DESC_SIZE * txdesc_ring->count,
CONFIG_SYS_CACHELINE_SIZE);
if (txdesc_ring->sdesc == NULL) {
pr_info("%s: txdesc_ring->sdesc alloc error\n", __func__);
return -ENOMEM;
}
txdesc_ring->sdma = virt_to_phys(txdesc_ring->sdesc);
pr_debug("txdesc sec desc ring id = %d, txdesc ring ptr = %p, txdesc ring dma = %u\n",
txdesc_ring->id, txdesc_ring->sdesc, (unsigned int)
txdesc_ring->sdma);
}
/*
* Allocate TxCmpl ring descriptors
*/
for (i = 0; i < ehw->txcmpl_rings; i++) {
txcmpl_ring = &ehw->txcmpl_ring[i];
txcmpl_ring->count = EDMA_RING_SIZE;
txcmpl_ring->id = ehw->txcmpl_ring_start + i;
txcmpl_ring->desc = (void *)noncached_alloc(
IPQ9574_EDMA_TXCMPL_DESC_SIZE * txcmpl_ring->count,
CONFIG_SYS_CACHELINE_SIZE);
if (txcmpl_ring->desc == NULL) {
pr_info("%s: txcmpl_ring->desc alloc error\n", __func__);
return -ENOMEM;
}
txcmpl_ring->dma = virt_to_phys(txcmpl_ring->desc);
pr_debug("txcmpl ring id = %d, txcmpl ring ptr = %p, txcmpl ring dma = %u\n",
txcmpl_ring->id, txcmpl_ring->desc, (unsigned int)
txcmpl_ring->dma);
}
pr_info("%s: successfull\n", __func__);
@ -807,13 +808,18 @@ static void ipq9574_edma_free_desc(struct ipq9574_edma_common_info *c_info)
struct ipq9574_edma_txdesc_ring *txdesc_ring;
struct ipq9574_edma_rxfill_ring *rxfill_ring;
struct ipq9574_edma_rxdesc_ring *rxdesc_ring;
struct ipq9574_edma_txdesc_desc *tx_desc;
struct ipq9574_edma_txdesc_desc *txdesc_desc;
struct ipq9574_edma_rxfill_desc *rxfill_desc;
int i;
for (i = 0; i < ehw->rxfill_rings; i++) {
rxfill_ring = &ehw->rxfill_ring[i];
if (rxfill_ring->desc)
if (rxfill_ring->desc) {
rxfill_desc = IPQ9574_EDMA_RXFILL_DESC(rxfill_ring, 0);
if (rxfill_desc->rdes0)
ipq9574_free_mem((void *)rxfill_desc->rdes0);
ipq9574_free_mem(rxfill_ring->desc);
}
}
for (i = 0; i < ehw->rxdesc_rings; i++) {
@ -832,9 +838,9 @@ static void ipq9574_edma_free_desc(struct ipq9574_edma_common_info *c_info)
for (i = 0; i < ehw->txdesc_rings; i++) {
txdesc_ring = &ehw->txdesc_ring[i];
if (txdesc_ring->desc) {
tx_desc = IPQ9574_EDMA_TXDESC_DESC(txdesc_ring, 0);
if (tx_desc->tdes0)
ipq9574_free_mem((void *)tx_desc->tdes0);
txdesc_desc = IPQ9574_EDMA_TXDESC_DESC(txdesc_ring, 0);
if (txdesc_desc->tdes0)
ipq9574_free_mem((void *)txdesc_desc->tdes0);
ipq9574_free_mem(txdesc_ring->desc);
}
}
@ -898,15 +904,15 @@ static void ipq9574_edma_disable_intr(struct ipq9574_edma_hw *ehw)
/*
* Disable interrupts
*/
for (i = 0; i < IPQ9574_EDMA_MAX_TXCMPL_RINGS; i++)
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_TX_INT_MASK(i), 0);
for (i = 0; i < IPQ9574_EDMA_MAX_RXFILL_RINGS; i++)
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RXFILL_INT_MASK(i), 0);
for (i = 0; i < IPQ9574_EDMA_MAX_RXDESC_RINGS; i++)
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RX_INT_CTRL(i), 0);
for (i = 0; i < IPQ9574_EDMA_MAX_TXCMPL_RINGS; i++)
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_TX_INT_MASK(i), 0);
/*
* Clear MISC interrupt mask
*/
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_MISC_INT_MASK,
IPQ9574_EDMA_MASK_INT_DISABLE);
}
@ -1345,10 +1351,6 @@ static void ipq9574_edma_set_ring_values(struct ipq9574_edma_hw *edma_hw)
edma_hw->txdesc_rings = IPQ9574_EDMA_TX_DESC_RING_NOS;
edma_hw->txdesc_ring_end = IPQ9574_EDMA_TX_DESC_RING_SIZE;
edma_hw->sec_txdesc_ring_start = IPQ9574_EDMA_SEC_TX_DESC_RING_START;
edma_hw->sec_txdesc_rings = IPQ9574_EDMA_SEC_TX_DESC_RING_NOS;
edma_hw->sec_txdesc_ring_end = IPQ9574_EDMA_SEC_TX_DESC_RING_SIZE;
edma_hw->txcmpl_ring_start = IPQ9574_EDMA_TX_CMPL_RING_START;
edma_hw->txcmpl_rings = IPQ9574_EDMA_RX_FILL_RING_NOS;
edma_hw->txcmpl_ring_end = IPQ9574_EDMA_TX_CMPL_RING_SIZE;
@ -1361,10 +1363,6 @@ static void ipq9574_edma_set_ring_values(struct ipq9574_edma_hw *edma_hw)
edma_hw->rxdesc_rings = IPQ9574_EDMA_RX_DESC_RING_NOS;
edma_hw->rxdesc_ring_end = IPQ9574_EDMA_RX_DESC_RING_SIZE;
edma_hw->sec_rxdesc_ring_start = IPQ9574_EDMA_SEC_RX_DESC_RING_START;
edma_hw->sec_rxdesc_rings = IPQ9574_EDMA_SEC_RX_DESC_RING_NOS;
edma_hw->sec_rxdesc_ring_end = IPQ9574_EDMA_SEC_RX_DESC_RING_SIZE;
pr_info("Num rings - TxDesc:%u (%u-%u) TxCmpl:%u (%u-%u)\n",
edma_hw->txdesc_rings, edma_hw->txdesc_ring_start,
(edma_hw->txdesc_ring_start + edma_hw->txdesc_rings - 1),
@ -1402,15 +1400,6 @@ static int ipq9574_edma_alloc_rings(struct ipq9574_edma_hw *ehw)
return -ENOMEM;
}
ehw->sec_rxdesc_ring = (void *)noncached_alloc((sizeof(
struct ipq9574_edma_sec_rxdesc_ring) *
ehw->sec_rxdesc_rings),
CONFIG_SYS_CACHELINE_SIZE);
if (!ehw->sec_rxdesc_ring) {
pr_info("%s: sec_rxdesc_ring alloc error\n", __func__);
return -ENOMEM;
}
ehw->txdesc_ring = (void *)noncached_alloc((sizeof(
struct ipq9574_edma_txdesc_ring) *
ehw->txdesc_rings),
@ -1420,15 +1409,6 @@ static int ipq9574_edma_alloc_rings(struct ipq9574_edma_hw *ehw)
return -ENOMEM;
}
ehw->sec_txdesc_ring = (void *)noncached_alloc((sizeof(
struct ipq9574_edma_sec_txdesc_ring) *
ehw->sec_txdesc_rings),
CONFIG_SYS_CACHELINE_SIZE);
if (!ehw->sec_txdesc_ring) {
pr_info("%s: txdesc_ring alloc error\n", __func__);
return -ENOMEM;
}
ehw->txcmpl_ring = (void *)noncached_alloc((sizeof(
struct ipq9574_edma_txcmpl_ring) *
ehw->txcmpl_rings),
@ -1475,17 +1455,6 @@ static int ipq9574_edma_init_rings(struct ipq9574_edma_hw *ehw)
return 0;
}
/*
* ipq9574_edma_configure_sec_txdesc_ring()
* Configure one secondary TxDesc ring
*/
static void ipq9574_edma_configure_sec_txdesc_ring(struct ipq9574_edma_hw *ehw,
struct ipq9574_edma_sec_txdesc_ring *sec_txdesc_ring)
{
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_TXDESC_BA2(sec_txdesc_ring->id),
(uint32_t)(sec_txdesc_ring->dma & 0xffffffff));
}
/*
* ipq9574_edma_configure_txdesc_ring()
* Configure one TxDesc ring
@ -1493,9 +1462,6 @@ static void ipq9574_edma_configure_sec_txdesc_ring(struct ipq9574_edma_hw *ehw,
static void ipq9574_edma_configure_txdesc_ring(struct ipq9574_edma_hw *ehw,
struct ipq9574_edma_txdesc_ring *txdesc_ring)
{
uint32_t data;
uint16_t hw_cons_idx;
/*
* Configure TXDESC ring
*/
@ -1503,24 +1469,17 @@ static void ipq9574_edma_configure_txdesc_ring(struct ipq9574_edma_hw *ehw,
(uint32_t)(txdesc_ring->dma &
IPQ9574_EDMA_RING_DMA_MASK));
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_TXDESC_BA2(txdesc_ring->id),
(uint32_t)(txdesc_ring->sdma &
IPQ9574_EDMA_RING_DMA_MASK));
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_TXDESC_RING_SIZE(
txdesc_ring->id), (uint32_t)(txdesc_ring->count &
IPQ9574_EDMA_TXDESC_RING_SIZE_MASK));
data = ipq9574_edma_reg_read(IPQ9574_EDMA_REG_TXDESC_CONS_IDX(
txdesc_ring->id));
data &= ~(IPQ9574_EDMA_TXDESC_CONS_IDX_MASK);
hw_cons_idx = data;
data = ipq9574_edma_reg_read(IPQ9574_EDMA_REG_TXDESC_PROD_IDX(
txdesc_ring->id));
data &= ~(IPQ9574_EDMA_TXDESC_PROD_IDX_MASK);
data |= hw_cons_idx & IPQ9574_EDMA_TXDESC_PROD_IDX_MASK;
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_TXDESC_PROD_IDX(
txdesc_ring->id), data);
txdesc_ring->id),
IPQ9574_EDMA_TX_INITIAL_PROD_IDX);
}
/*
@ -1530,6 +1489,8 @@ static void ipq9574_edma_configure_txdesc_ring(struct ipq9574_edma_hw *ehw,
static void ipq9574_edma_configure_txcmpl_ring(struct ipq9574_edma_hw *ehw,
struct ipq9574_edma_txcmpl_ring *txcmpl_ring)
{
uint32_t tx_mod_timer;
/*
* Configure TxCmpl ring base address
*/
@ -1547,20 +1508,20 @@ static void ipq9574_edma_configure_txcmpl_ring(struct ipq9574_edma_hw *ehw,
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_TXCMPL_CTRL(txcmpl_ring->id),
IPQ9574_EDMA_TXCMPL_RETMODE_OPAQUE);
/*
* Configure the default timer mitigation value
*/
tx_mod_timer = (IPQ9574_EDMA_REG_TX_MOD_TIMER(txcmpl_ring->id) &
IPQ9574_EDMA_TX_MOD_TIMER_INIT_MASK)
<< IPQ9574_EDMA_TX_MOD_TIMER_INIT_SHIFT;
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_TX_MOD_TIMER(txcmpl_ring->id),
tx_mod_timer);
/*
* Enable ring. Set ret mode to 'opaque'.
*/
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_TX_INT_CTRL(txcmpl_ring->id),
0x2);
}
/*
* ipq9574_edma_configure_sec_rxdesc_ring()
* Configure one secondary RxDesc ring
*/
static void ipq9574_edma_configure_sec_rxdesc_ring(struct ipq9574_edma_hw *ehw,
struct ipq9574_edma_sec_rxdesc_ring *sec_rxdesc_ring)
{
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RXDESC_BA2(sec_rxdesc_ring->id),
(uint32_t)(sec_rxdesc_ring->dma & 0xffffffff));
IPQ9574_EDMA_TX_NE_INT_EN);
}
/*
@ -1573,7 +1534,10 @@ static void ipq9574_edma_configure_rxdesc_ring(struct ipq9574_edma_hw *ehw,
uint32_t data;
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RXDESC_BA(rxdesc_ring->id),
(uint32_t)(rxdesc_ring->dma & 0xffffffff));
(uint32_t)(rxdesc_ring->dma & IPQ9574_EDMA_RING_DMA_MASK));
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RXDESC_BA2(rxdesc_ring->id),
(uint32_t)(rxdesc_ring->sdma & IPQ9574_EDMA_RING_DMA_MASK));
data = rxdesc_ring->count & IPQ9574_EDMA_RXDESC_RING_SIZE_MASK;
data |= (ehw->rx_payload_offset & IPQ9574_EDMA_RXDESC_PL_OFFSET_MASK) <<
@ -1582,11 +1546,20 @@ static void ipq9574_edma_configure_rxdesc_ring(struct ipq9574_edma_hw *ehw,
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RXDESC_RING_SIZE(
rxdesc_ring->id), data);
/*
* Configure the default timer mitigation value
*/
data = (IPQ9574_EDMA_REG_RX_MOD_TIMER(rxdesc_ring->id) &
IPQ9574_EDMA_RX_MOD_TIMER_INIT_MASK)
<< IPQ9574_EDMA_RX_MOD_TIMER_INIT_SHIFT;
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RX_MOD_TIMER(rxdesc_ring->id),
data);
/*
* Enable ring. Set ret mode to 'opaque'.
*/
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RX_INT_CTRL(rxdesc_ring->id),
0x2);
IPQ9574_EDMA_RX_NE_INT_EN);
}
/*
@ -1621,12 +1594,6 @@ static void ipq9574_edma_configure_rings(struct ipq9574_edma_hw *ehw)
for (i = 0; i < ehw->txdesc_rings; i++)
ipq9574_edma_configure_txdesc_ring(ehw, &ehw->txdesc_ring[i]);
/*
* Configure secondary TXDESC ring
*/
for (i = 0; i < ehw->sec_txdesc_rings; i++)
ipq9574_edma_configure_sec_txdesc_ring(ehw, &ehw->sec_txdesc_ring[i]);
/*
* Configure TXCMPL ring
*/
@ -1645,12 +1612,6 @@ static void ipq9574_edma_configure_rings(struct ipq9574_edma_hw *ehw)
for (i = 0; i < ehw->rxdesc_rings; i++)
ipq9574_edma_configure_rxdesc_ring(ehw, &ehw->rxdesc_ring[i]);
/*
* Configure secondary RXDESC ring
*/
for (i = 0; i < ehw->rxdesc_rings; i++)
ipq9574_edma_configure_sec_rxdesc_ring(ehw, &ehw->sec_rxdesc_ring[i]);
pr_info("%s: successfull\n", __func__);
}
@ -1673,7 +1634,7 @@ void ipq9574_edma_hw_reset(void)
int ipq9574_edma_hw_init(struct ipq9574_edma_hw *ehw)
{
int ret, desc_index;
uint32_t i, reg;
uint32_t i, reg, ring_id;
volatile uint32_t data;
struct ipq9574_edma_rxdesc_ring *rxdesc_ring = NULL;
@ -1688,8 +1649,7 @@ int ipq9574_edma_hw_init(struct ipq9574_edma_hw *ehw)
*/
ehw->rxfill_intr_mask = IPQ9574_EDMA_RXFILL_INT_MASK;
ehw->rxdesc_intr_mask = IPQ9574_EDMA_RXDESC_INT_MASK_PKT_INT;
ehw->txcmpl_intr_mask = IPQ9574_EDMA_TX_INT_MASK_PKT_INT |
IPQ9574_EDMA_TX_INT_MASK_UGT_INT;
ehw->txcmpl_intr_mask = IPQ9574_EDMA_TX_INT_MASK_PKT_INT;
ehw->misc_intr_mask = 0xff;
ehw->rx_payload_offset = 0x0;
@ -1784,17 +1744,19 @@ int ipq9574_edma_hw_init(struct ipq9574_edma_hw *ehw)
/*
* Set PPE QID to EDMA Rx ring mapping.
* When coming up use only queue 0.
* HOST EDMA rings.
* Each entry can hold mapping for 8 PPE queues and entry size is
* Each entry can hold mapping for 4 PPE queues and entry size is
* 4 bytes
*/
desc_index = ehw->rxdesc_ring_start;
desc_index = (ehw->rxdesc_ring_start & 0x1F);
reg = IPQ9574_EDMA_QID2RID_TABLE_MEM(0);
data = 0;
data |= (desc_index & 0xF);
data = ((desc_index << 0) & 0xFF) |
(((desc_index + 1) << 8) & 0xff00) |
(((desc_index + 2) << 16) & 0xff0000) |
(((desc_index + 3) << 24) & 0xff000000);
ipq9574_edma_reg_write(reg, data);
pr_debug("Configure QID2RID reg:0x%x to 0x%x\n", reg, data);
pr_debug("Configure QID2RID(0) reg:0x%x to 0x%x\n", reg, data);
/*
* Set RXDESC2FILL_MAP_xx reg.
@ -1806,31 +1768,27 @@ int ipq9574_edma_hw_init(struct ipq9574_edma_hw *ehw)
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RXDESC2FILL_MAP_1, 0);
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_RXDESC2FILL_MAP_2, 0);
for (i = ehw->rxdesc_ring_start;
i < ehw->rxdesc_ring_end; i++) {
if ((i >= 0) && (i <= 9))
for (i = 0; i < ehw->rxdesc_rings; i++) {
rxdesc_ring = &ehw->rxdesc_ring[i];
ring_id = rxdesc_ring->id;
if ((ring_id >= 0) && (ring_id <= 9))
reg = IPQ9574_EDMA_REG_RXDESC2FILL_MAP_0;
else if ((i >= 10) && (i <= 19))
else if ((ring_id >= 10) && (ring_id <= 19))
reg = IPQ9574_EDMA_REG_RXDESC2FILL_MAP_1;
else
reg = IPQ9574_EDMA_REG_RXDESC2FILL_MAP_2;
rxdesc_ring = &ehw->rxdesc_ring[i - ehw->rxdesc_ring_start];
pr_debug("Configure RXDESC:%u to use RXFILL:%u\n",
rxdesc_ring->id, rxdesc_ring->rxfill->id);
ring_id, rxdesc_ring->rxfill->id);
/*
* Set the Rx fill descriptor ring number in the mapping
* register.
* E.g. If (rxfill ring)rxdesc_ring->rxfill->id = 7, (rxdesc ring)i = 13.
* reg = IPQ9574_EDMA_REG_RXDESC2FILL_MAP_1
* data |= (rxdesc_ring->rxfill->id & 0x7) << ((i % 10) * 3);
* data |= (0x7 << 9); - This sets 111 at 9th bit of
* register IPQ9574_EDMA_REG_RXDESC2FILL_MAP_1
*/
data = ipq9574_edma_reg_read(reg);
data |= (rxdesc_ring->rxfill->id & 0x7) << ((i % 10) * 3);
data |= (rxdesc_ring->rxfill->id & 0x7) << ((ring_id % 10) * 3);
ipq9574_edma_reg_write(reg, data);
}
@ -1841,6 +1799,23 @@ int ipq9574_edma_hw_init(struct ipq9574_edma_hw *ehw)
pr_debug("EDMA_REG_RXDESC2FILL_MAP_2: 0x%x\n",
ipq9574_edma_reg_read(IPQ9574_EDMA_REG_RXDESC2FILL_MAP_2));
/*
* Configure DMA request priority, DMA read burst length,
* and AXI write size.
*/
data = IPQ9574_EDMA_DMAR_BURST_LEN_SET(IPQ9574_EDMA_BURST_LEN_ENABLE)
| IPQ9574_EDMA_DMAR_REQ_PRI_SET(0)
| IPQ9574_EDMA_DMAR_TXDATA_OUTSTANDING_NUM_SET(31)
| IPQ9574_EDMA_DMAR_TXDESC_OUTSTANDING_NUM_SET(7)
| IPQ9574_EDMA_DMAR_RXFILL_OUTSTANDING_NUM_SET(7);
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_DMAR_CTRL, data);
/*
* Enable MISC interrupt mask
*/
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_MISC_INT_MASK,
ehw->misc_intr_mask);
/*
* Enable EDMA
*/
@ -1871,12 +1846,6 @@ int ipq9574_edma_hw_init(struct ipq9574_edma_hw *ehw)
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_TXDESC_CTRL(i), data);
}
/*
* Enable MISC interrupt
*/
ipq9574_edma_reg_write(IPQ9574_EDMA_REG_MISC_INT_MASK,
ehw->misc_intr_mask);
pr_info("%s: successfull\n", __func__);
return 0;
}

View file

@ -38,21 +38,20 @@
#define IPQ9574_EDMA_START_GMACS IPQ9574_NSS_DP_START_PHY_PORT
#define IPQ9574_EDMA_MAX_GMACS IPQ9574_NSS_DP_MAX_PHY_PORTS
#define IPQ9574_EDMA_TX_BUFF_SIZE 1572
#define IPQ9574_EDMA_TX_BUFF_SIZE 2048
#define IPQ9574_EDMA_RX_BUFF_SIZE 2048
/* Max number of rings of each type is defined with below macro */
#define IPQ9574_EDMA_MAX_TXCMPL_RINGS 32 /* Max TxCmpl rings */
#define IPQ9574_EDMA_MAX_TXDESC_RINGS 32 /* Max TxDesc rings */
#define IPQ9574_EDMA_MAX_RXDESC_RINGS 24 /* Max RxDesc rings */
#define IPQ9574_EDMA_MAX_RXFILL_RINGS 8 /* Max RxFill rings */
#define IPQ9574_EDMA_MAX_TXDESC_RINGS 32 /* Max TxDesc rings */
#define IPQ9574_EDMA_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i]))
#define IPQ9574_EDMA_RXFILL_DESC(R, i) IPQ9574_EDMA_GET_DESC(R, i, struct ipq9574_edma_rxfill_desc)
#define IPQ9574_EDMA_RXDESC_DESC(R, i) IPQ9574_EDMA_GET_DESC(R, i, struct ipq9574_edma_rxdesc_desc)
#define IPQ9574_EDMA_TXDESC_DESC(R, i) IPQ9574_EDMA_GET_DESC(R, i, struct ipq9574_edma_txdesc_desc)
#define IPQ9574_EDMA_TXCMPL_DESC(R, i) IPQ9574_EDMA_GET_DESC(R, i, struct ipq9574_edma_txcmpl_desc)
#define IPQ9574_EDMA_RXPH_SRC_INFO_TYPE_GET(rxph) ((((rxph) & 0xffff) >> 8) & 0xf0)
#define IPQ9574_EDMA_DEV 1
#define IPQ9574_EDMA_TX_QUEUE 1
@ -65,26 +64,16 @@
#define IPQ9574_EDMA_TX_DESC_RING_SIZE \
(IPQ9574_EDMA_TX_DESC_RING_START + IPQ9574_EDMA_TX_DESC_RING_NOS)
#define IPQ9574_EDMA_SEC_TX_DESC_RING_START 31
#define IPQ9574_EDMA_SEC_TX_DESC_RING_NOS 1
#define IPQ9574_EDMA_SEC_TX_DESC_RING_SIZE \
(IPQ9574_EDMA_SEC_TX_DESC_RING_START + IPQ9574_EDMA_SEC_TX_DESC_RING_NOS)
#define IPQ9574_EDMA_TX_CMPL_RING_START 31
#define IPQ9574_EDMA_TX_CMPL_RING_NOS 1
#define IPQ9574_EDMA_TX_CMPL_RING_SIZE \
(IPQ9574_EDMA_TX_CMPL_RING_START + IPQ9574_EDMA_TX_CMPL_RING_NOS)
#define IPQ9574_EDMA_RX_DESC_RING_START 15
#define IPQ9574_EDMA_RX_DESC_RING_START 23
#define IPQ9574_EDMA_RX_DESC_RING_NOS 1
#define IPQ9574_EDMA_RX_DESC_RING_SIZE \
(IPQ9574_EDMA_RX_DESC_RING_START + IPQ9574_EDMA_RX_DESC_RING_NOS)
#define IPQ9574_EDMA_SEC_RX_DESC_RING_START 15
#define IPQ9574_EDMA_SEC_RX_DESC_RING_NOS 1
#define IPQ9574_EDMA_SEC_RX_DESC_RING_SIZE \
(IPQ9574_EDMA_SEC_RX_DESC_RING_START + IPQ9574_EDMA_SEC_RX_DESC_RING_NOS)
#define IPQ9574_EDMA_RX_FILL_RING_START 7
#define IPQ9574_EDMA_RX_FILL_RING_NOS 1
#define IPQ9574_EDMA_RX_FILL_RING_SIZE \
@ -96,129 +85,89 @@
* RxDesc descriptor
*/
struct ipq9574_edma_rxdesc_desc {
	uint32_t rdes0; /* Contains buffer address */
	uint32_t rdes1; /* Contains more bit, priority bit, service code */
	uint32_t rdes2; /* Contains opaque (low 32 bits) */
	uint32_t rdes3; /* Contains opaque high bits */
	uint32_t rdes4; /* Contains destination and source information */
	uint32_t rdes5; /* Contains WiFi QoS, data length */
	uint32_t rdes6; /* Contains hash value, check sum status */
	uint32_t rdes7; /* Contains DSCP, packet offsets */
};
/*
* EDMA Rx Secondary Descriptor
*/
struct ipq9574_edma_rx_sec_desc {
	uint32_t rx_sec0; /* Contains timestamp */
	uint32_t rx_sec1; /* Contains secondary checksum status */
	uint32_t rx_sec2; /* Contains QoS tag */
	uint32_t rx_sec3; /* Contains flow index details */
	uint32_t rx_sec4; /* Contains secondary packet offsets */
	uint32_t rx_sec5; /* Contains multicast bit, checksum */
	uint32_t rx_sec6; /* Contains SVLAN, CVLAN */
	uint32_t rx_sec7; /* Contains secondary SVLAN, CVLAN */
};
/*
* RxFill descriptor
*/
struct ipq9574_edma_rxfill_desc {
	uint32_t rdes0; /* Contains buffer address */
	uint32_t rdes1; /* Contains buffer size */
	/* rdes2/rdes3 together carry a 64-bit opaque value (low/high words) */
	uint32_t rdes2; /* Contains opaque */
	uint32_t rdes3; /* Contains opaque high bits */
};
/*
* TxDesc descriptor
*/
struct ipq9574_edma_txdesc_desc {
	uint32_t tdes0; /* Low 32-bit of buffer address */
	/* NOTE(review): the legacy layout also carried the high buffer-address
	 * bits in word 1 — confirm against the ipq9574 EDMA descriptor spec. */
	uint32_t tdes1; /* Buffer recycling, PTP tag flag, PRI valid flag */
	uint32_t tdes2; /* Low 32-bit of opaque value */
	uint32_t tdes3; /* High 32-bit of opaque value */
	uint32_t tdes4; /* Source/Destination port info */
	uint32_t tdes5; /* VLAN offload, csum_mode, ip_csum_en, tso_en, data length */
	uint32_t tdes6; /* MSS/hash_value/PTP tag, data offset */
	uint32_t tdes7; /* L4/L3 offset, PROT type, L2 type, CVLAN/SVLAN tag, service code */
};
/*
* EDMA Tx Secondary Descriptor
*/
struct ipq9574_edma_tx_sec_desc {
	uint32_t tx_sec0; /* Reserved */
	uint32_t tx_sec1; /* Custom csum offset, payload offset, TTL/NAT action */
	/* NOTE(review): the remaining members carry an rx_ prefix inside a Tx
	 * descriptor; names kept as-is since callers may reference them. */
	uint32_t rx_sec2; /* NAPT translated port, DSCP value, TTL value */
	uint32_t rx_sec3; /* Flow index value and valid flag */
	uint32_t rx_sec4; /* Reserved */
	uint32_t rx_sec5; /* Reserved */
	uint32_t rx_sec6; /* CVLAN/SVLAN command */
	uint32_t rx_sec7; /* CVLAN/SVLAN tag value */
};
/*
* secondary Tx descriptor ring
* TxCmpl descriptor
*/
struct ipq9574_edma_txcmpl_desc {
	uint32_t tdes0; /* Low 32-bit opaque value */
	uint32_t tdes1; /* High 32-bit opaque value */
	uint32_t tdes2; /* More fragment, transmit ring id, pool id */
	uint32_t tdes3; /* Error indications */
};
/*
* Tx descriptor ring
*/
struct ipq9574_edma_txdesc_ring {
	uint32_t prod_idx;		/* Producer index */
	uint32_t avail_desc;		/* Number of available descriptors to process */
	uint32_t id;			/* TXDESC ring number */
	struct ipq9574_edma_txdesc_desc *desc;	/* Primary descriptor ring virtual address */
	dma_addr_t dma;			/* Primary descriptor ring physical address */
	struct ipq9574_edma_tx_sec_desc *sdesc;	/* Secondary descriptor ring virtual address */
	dma_addr_t sdma;		/* Secondary descriptor ring physical address */
	uint16_t count;			/* Number of descriptors in the ring */
};
@@ -226,10 +175,12 @@ struct ipq9574_edma_txdesc_ring {
* TxCmpl ring
*/
struct ipq9574_edma_txcmpl_ring {
	uint32_t cons_idx;		/* Consumer index */
	uint32_t avail_pkt;		/* Number of available packets to process */
	struct ipq9574_edma_txcmpl_desc *desc;	/* Descriptor ring virtual address */
	uint32_t id;			/* TXCMPL ring number */
	dma_addr_t dma;			/* Descriptor ring physical address */
	uint32_t count;			/* Number of descriptors in the ring */
};
/*
@@ -237,19 +188,10 @@ struct ipq9574_edma_txcmpl_ring {
*/
struct ipq9574_edma_rxfill_ring {
	uint32_t id;			/* RXFILL ring number */
	uint32_t count;			/* Number of descriptors in the ring */
	uint32_t prod_idx;		/* Ring producer index */
	struct ipq9574_edma_rxfill_desc *desc;	/* Descriptor ring virtual address */
	dma_addr_t dma;			/* Descriptor ring physical address */
};
/*
* secondary RxDesc ring
*/
struct ipq9574_edma_sec_rxdesc_ring {
	uint32_t id; /* RXDESC ring number */
	void *desc; /* Descriptor ring virtual address */
	dma_addr_t dma; /* Descriptor ring physical address */
	uint16_t count; /* Number of descriptors in the ring */
};
/*
@@ -257,10 +199,13 @@ struct ipq9574_edma_sec_rxdesc_ring {
*/
struct ipq9574_edma_rxdesc_ring {
	uint32_t id;			/* RXDESC ring number */
	uint32_t count;			/* Number of descriptors in the ring */
	uint32_t cons_idx;		/* Ring consumer index */
	struct ipq9574_edma_rxdesc_desc *desc;	/* Primary descriptor ring virtual address */
	struct ipq9574_edma_sec_rxdesc_ring *sdesc;	/* Secondary descriptor ring virtual address */
	struct ipq9574_edma_rxfill_ring *rxfill;	/* RXFILL ring used */
	dma_addr_t dma;			/* Primary descriptor ring physical address */
	dma_addr_t sdma;		/* Secondary descriptor ring physical address */
};
enum ipq9574_edma_tx {
@@ -292,17 +237,12 @@ struct ipq9574_edma_hw {
uint32_t flags; /* internal flags */
int active; /* status */
struct ipq9574_edma_txdesc_ring *txdesc_ring; /* Tx Descriptor Ring, SW is producer */
struct ipq9574_edma_sec_txdesc_ring *sec_txdesc_ring; /* secondary Tx Descriptor Ring, SW is producer */
struct ipq9574_edma_txcmpl_ring *txcmpl_ring; /* Tx Completion Ring, SW is consumer */
struct ipq9574_edma_rxdesc_ring *rxdesc_ring; /* Rx Descriptor Ring, SW is consumer */
struct ipq9574_edma_sec_rxdesc_ring *sec_rxdesc_ring; /* secondary Rx Descriptor Ring, SW is consumer */
struct ipq9574_edma_rxfill_ring *rxfill_ring; /* Rx Fill Ring, SW is producer */
uint32_t txdesc_rings; /* Number of TxDesc rings */
uint32_t txdesc_ring_start; /* Id of first TXDESC ring */
uint32_t txdesc_ring_end; /* Id of the last TXDESC ring */
uint32_t sec_txdesc_rings; /* Number of secondary TxDesc rings */
uint32_t sec_txdesc_ring_start; /* Id of first secondary TxDesc ring */
uint32_t sec_txdesc_ring_end; /* Id of last secondary TxDesc ring */
uint32_t txcmpl_rings; /* Number of TxCmpl rings */
uint32_t txcmpl_ring_start; /* Id of first TXCMPL ring */
uint32_t txcmpl_ring_end; /* Id of last TXCMPL ring */
@@ -312,9 +252,6 @@ struct ipq9574_edma_hw {
uint32_t rxdesc_rings; /* Number of RxDesc rings */
uint32_t rxdesc_ring_start; /* Id of first RxDesc ring */
uint32_t rxdesc_ring_end; /* Id of last RxDesc ring */
uint32_t sec_rxdesc_rings; /* Number of secondary RxDesc rings */
uint32_t sec_rxdesc_ring_start; /* Id of first secondary RxDesc ring */
uint32_t sec_rxdesc_ring_end; /* Id of last secondary RxDesc ring */
uint32_t tx_intr_mask; /* tx interrupt mask */
uint32_t rx_intr_mask; /* rx interrupt mask */
uint32_t rxfill_intr_mask; /* Rx fill ring interrupt mask */