boot: airoha: apply network driver fixes

This series improves network reliability.

Signed-off-by: Mikhail Kshevetskiy <mikhail.kshevetskiy@iopsys.eu>
Link: https://github.com/openwrt/openwrt/pull/20295
Signed-off-by: Robert Marko <robimarko@gmail.com>
Mikhail Kshevetskiy 2025-10-04 03:02:28 +03:00 committed by Robert Marko
parent e06725222e
commit 1949fb996a
6 changed files with 380 additions and 133 deletions

@@ -0,0 +1,32 @@
From a11420dac873fbd5b8a81192571d914f01bee26d Mon Sep 17 00:00:00 2001
From: Mikhail Kshevetskiy <mikhail.kshevetskiy@iopsys.eu>
Date: Wed, 9 Jul 2025 12:28:07 +0300
Subject: [PATCH 1/5] drivers/net/airoha_eth: add missing terminator for
 compatible devices list

The compatible device list must have a terminator. If the terminator is
missing, the U-Boot driver subsystem will access random data placed
after the list in memory.

The issue can be observed with the "dm compat" command.

Signed-off-by: Mikhail Kshevetskiy <mikhail.kshevetskiy@iopsys.eu>
---
drivers/net/airoha_eth.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/net/airoha_eth.c b/drivers/net/airoha_eth.c
index b3fd1ab9064..db34ec48c81 100644
--- a/drivers/net/airoha_eth.c
+++ b/drivers/net/airoha_eth.c
@@ -1052,6 +1052,7 @@ static const struct udevice_id airoha_eth_ids[] = {
{ .compatible = "airoha,an7583-eth",
.data = (ulong)&an7583_data,
},
+ { }
};
static const struct eth_ops airoha_eth_ops = {
--
2.51.0
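
As a review aside, here is a minimal standalone sketch of why the sentinel
entry matters; the match loop below is a simplified assumption of what
U-Boot's driver model does when matching compatibles, not the actual
uclass code:

    #include <stdio.h>
    #include <string.h>

    struct udevice_id {
        const char *compatible;
        unsigned long data;
    };

    static const struct udevice_id ids[] = {
        { .compatible = "airoha,an7581-eth" },
        { .compatible = "airoha,an7583-eth" },
        { }  /* terminator: .compatible == NULL stops the scan */
    };

    /* Without the empty { } entry, this loop would walk past the array
     * into whatever happens to follow it in memory. */
    static int match(const char *compat)
    {
        const struct udevice_id *id;

        for (id = ids; id->compatible; id++)
            if (!strcmp(id->compatible, compat))
                return 0;
        return -1;
    }

    int main(void)
    {
        printf("an7583 matched: %s\n",
               match("airoha,an7583-eth") == 0 ? "yes" : "no");
        return 0;
    }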

@@ -1,133 +0,0 @@
From 0343b5c2a754ca20f5155a8f3c6d58e887b9dd4f Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Tue, 20 May 2025 16:32:31 +0200
Subject: [PATCH] net: airoha: Fix spurious Airoha Ethernet stall

It was reported that the Airoha Ethernet driver sometimes stalls and a
reset is needed to actually receive packets.

This seems to be related to the logic with which the CPU and DMA
counters are handled for the RX path. The problem seems to be more
evident when multiple devices are connected to the Ethernet port.

To handle this, drop local tracking of the current CPU/DMA counter and
base everything on the current registers by reading them and using the
descriptor directly.

Fixes: ee0f4afa982e ("net: airoha: Add Airoha Ethernet driver")
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
---
drivers/net/airoha_eth.c | 32 +++++++++++++++++---------------
1 file changed, 17 insertions(+), 15 deletions(-)
--- a/drivers/net/airoha_eth.c
+++ b/drivers/net/airoha_eth.c
@@ -286,7 +286,6 @@ struct airoha_qdma_fwd_desc {
struct airoha_queue {
struct airoha_qdma_desc *desc;
- u16 head;
int ndesc;
};
@@ -452,7 +451,6 @@ static int airoha_qdma_init_rx_queue(str
unsigned long dma_addr;
q->ndesc = ndesc;
- q->head = 0;
q->desc = dma_alloc_coherent(q->ndesc * sizeof(*q->desc), &dma_addr);
if (!q->desc)
@@ -471,7 +469,7 @@ static int airoha_qdma_init_rx_queue(str
airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
FIELD_PREP(RX_RING_CPU_IDX_MASK, q->ndesc - 1));
airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
- FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
+ FIELD_PREP(RX_RING_DMA_IDX_MASK, 0));
return 0;
}
@@ -499,7 +497,6 @@ static int airoha_qdma_init_tx_queue(str
unsigned long dma_addr;
q->ndesc = size;
- q->head = 0;
q->desc = dma_alloc_coherent(q->ndesc * sizeof(*q->desc), &dma_addr);
if (!q->desc)
@@ -510,9 +507,9 @@ static int airoha_qdma_init_tx_queue(str
airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
- FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
+ FIELD_PREP(TX_RING_CPU_IDX_MASK, 0));
airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
- FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));
+ FIELD_PREP(TX_RING_DMA_IDX_MASK, 0));
return 0;
}
@@ -898,8 +895,10 @@ static int airoha_eth_send(struct udevic
qid = 0;
q = &qdma->q_tx[qid];
- desc = &q->desc[q->head];
- index = (q->head + 1) % q->ndesc;
+
+ index = airoha_qdma_rr(qdma, REG_TX_CPU_IDX(qid));
+ desc = &q->desc[index];
+ index = (index + 1) % q->ndesc;
fport = 1;
@@ -934,7 +933,6 @@ static int airoha_eth_send(struct udevic
if (!(desc->ctrl & QDMA_DESC_DONE_MASK))
return -EAGAIN;
- q->head = index;
airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(0),
IRQ_CLEAR_LEN_MASK, 1);
@@ -947,12 +945,15 @@ static int airoha_eth_recv(struct udevic
struct airoha_qdma *qdma = &eth->qdma[0];
struct airoha_qdma_desc *desc;
struct airoha_queue *q;
+ int qid, index;
u16 length;
- int qid;
qid = 0;
q = &qdma->q_rx[qid];
- desc = &q->desc[q->head];
+
+ index = airoha_qdma_rr(qdma, REG_RX_CPU_IDX(qid));
+ index = (index + 1) % q->ndesc;
+ desc = &q->desc[index];
dma_unmap_single(virt_to_phys(desc), sizeof(*desc),
DMA_FROM_DEVICE);
@@ -974,7 +975,7 @@ static int arht_eth_free_pkt(struct udev
struct airoha_eth *eth = dev_get_priv(dev);
struct airoha_qdma *qdma = &eth->qdma[0];
struct airoha_queue *q;
- int qid;
+ int qid, index;
if (!packet)
return 0;
@@ -984,11 +985,12 @@ static int arht_eth_free_pkt(struct udev
dma_map_single(packet, length, DMA_TO_DEVICE);
- airoha_qdma_reset_rx_desc(q, q->head, packet);
+ index = airoha_qdma_rr(qdma, REG_RX_DMA_IDX(qid));
+ airoha_qdma_reset_rx_desc(q, index, packet);
+ index = (index + 1) % q->ndesc;
airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
- FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
- q->head = (q->head + 1) % q->ndesc;
+ FIELD_PREP(RX_RING_CPU_IDX_MASK, index));
return 0;
}
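
For context on the file removed above, a minimal sketch of the two indexing
schemes that patch switched between; the register variable is a simplified
stand-in for the driver's airoha_qdma_rr()/REG_TX_CPU_IDX pair, not the
real MMIO access:

    #include <stdint.h>

    #define NDESC 8

    static uint32_t tx_cpu_idx_reg;  /* stand-in for REG_TX_CPU_IDX(qid) */

    /* Scheme A (before that patch): software mirrors the ring position
     * in q->head and must never drift from the hardware. */
    struct queue { int head, ndesc; };

    static int next_slot_cached(struct queue *q)
    {
        int slot = q->head;

        q->head = (q->head + 1) % q->ndesc;
        return slot;
    }

    /* Scheme B (that patch): always read the hardware's own counter,
     * so software and DMA cannot disagree after a stall. */
    static int next_slot_from_hw(void)
    {
        return tx_cpu_idx_reg % NDESC;  /* real code reads an MMIO register */
    }

    int main(void)
    {
        struct queue q = { .head = 0, .ndesc = NDESC };

        tx_cpu_idx_reg = 5;
        return next_slot_cached(&q) + next_slot_from_hw();  /* 0 + 5 */
    }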

@@ -0,0 +1,107 @@
From 8fce1cfe775e1f3b5d7cecb8bdcc8271bf9f799c Mon Sep 17 00:00:00 2001
From: Mikhail Kshevetskiy <mikhail.kshevetskiy@iopsys.eu>
Date: Wed, 9 Jul 2025 12:28:08 +0300
Subject: [PATCH 2/5] drivers/net/airoha_eth: fix packet transmission errors

The dma_map_single() function calls one of the functions
 * invalidate_dcache_range(),
 * flush_dcache_range().

Both of them expect 'vaddr' to be aligned to the ARCH_DMA_MINALIGN
boundary. Unfortunately, RX/TX descriptors are 32 bytes long, so they
might not be aligned to the ARCH_DMA_MINALIGN boundary. Data flushing
(or invalidating) might do nothing in this case.

The same applies to the dma_unmap_single() function.

In the TX path this issue might prevent packet transmission (the filled
TX descriptor was not flushed).

To fix the issue, special wrappers for
 * dma_map_single(),
 * dma_unmap_single()
were created. The patch fixes flushing/invalidating for the RX path as
well.

The bug appears on the 32-bit Airoha platform, but should be present on
64-bit as well. The code was tested on both 32-bit and 64-bit Airoha
boards.

Signed-off-by: Mikhail Kshevetskiy <mikhail.kshevetskiy@iopsys.eu>
---
drivers/net/airoha_eth.c | 33 +++++++++++++++++++++++++++------
1 file changed, 27 insertions(+), 6 deletions(-)
diff --git a/drivers/net/airoha_eth.c b/drivers/net/airoha_eth.c
index db34ec48c81..aae6922f3c7 100644
--- a/drivers/net/airoha_eth.c
+++ b/drivers/net/airoha_eth.c
@@ -397,6 +397,27 @@ static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
#define airoha_switch_rmw(eth, offset, mask, val) \
airoha_rmw((eth)->switch_regs, (offset), (mask), (val))
+static inline dma_addr_t dma_map_unaligned(void *vaddr, size_t len,
+ enum dma_data_direction dir)
+{
+ uintptr_t start, end;
+
+ start = ALIGN_DOWN((uintptr_t)vaddr, ARCH_DMA_MINALIGN);
+ end = ALIGN((uintptr_t)(vaddr + len), ARCH_DMA_MINALIGN);
+
+ return dma_map_single((void *)start, end - start, dir);
+}
+
+static inline void dma_unmap_unaligned(dma_addr_t addr, size_t len,
+ enum dma_data_direction dir)
+{
+ uintptr_t start, end;
+
+ start = ALIGN_DOWN((uintptr_t)addr, ARCH_DMA_MINALIGN);
+ end = ALIGN((uintptr_t)(addr + len), ARCH_DMA_MINALIGN);
+ dma_unmap_single(start, end - start, dir);
+}
+
static void airoha_fe_maccr_init(struct airoha_eth *eth)
{
int p;
@@ -434,7 +455,7 @@ static void airoha_qdma_reset_rx_desc(struct airoha_queue *q, int index,
val = FIELD_PREP(QDMA_DESC_LEN_MASK, PKTSIZE_ALIGN);
WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
- dma_map_single(desc, sizeof(*desc), DMA_TO_DEVICE);
+ dma_map_unaligned(desc, sizeof(*desc), DMA_TO_DEVICE);
}
static void airoha_qdma_init_rx_desc(struct airoha_queue *q)
@@ -916,14 +937,14 @@ static int airoha_eth_send(struct udevice *dev, void *packet, int length)
WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
- dma_map_single(desc, sizeof(*desc), DMA_TO_DEVICE);
+ dma_map_unaligned(desc, sizeof(*desc), DMA_TO_DEVICE);
airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
for (i = 0; i < 100; i++) {
- dma_unmap_single(virt_to_phys(desc), sizeof(*desc),
- DMA_FROM_DEVICE);
+ dma_unmap_unaligned(virt_to_phys(desc), sizeof(*desc),
+ DMA_FROM_DEVICE);
if (desc->ctrl & QDMA_DESC_DONE_MASK)
break;
@@ -954,8 +975,8 @@ static int airoha_eth_recv(struct udevice *dev, int flags, uchar **packetp)
q = &qdma->q_rx[qid];
desc = &q->desc[q->head];
- dma_unmap_single(virt_to_phys(desc), sizeof(*desc),
- DMA_FROM_DEVICE);
+ dma_unmap_unaligned(virt_to_phys(desc), sizeof(*desc),
+ DMA_FROM_DEVICE);
if (!(desc->ctrl & QDMA_DESC_DONE_MASK))
return -EAGAIN;
--
2.51.0
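
To make the wrapper's rounding concrete, a worked example assuming a
64-byte ARCH_DMA_MINALIGN and the 32-byte descriptors described above;
the ALIGN/ALIGN_DOWN macros follow the usual U-Boot definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define ARCH_DMA_MINALIGN 64
    #define ALIGN(x, a)      (((x) + ((uintptr_t)(a) - 1)) & ~((uintptr_t)(a) - 1))
    #define ALIGN_DOWN(x, a) ((x) & ~((uintptr_t)(a) - 1))

    int main(void)
    {
        uintptr_t desc = 0x80001020;  /* odd descriptor: not 64-byte aligned */
        size_t len = 32;              /* sizeof(struct airoha_qdma_desc) */
        uintptr_t start = ALIGN_DOWN(desc, ARCH_DMA_MINALIGN);
        uintptr_t end = ALIGN(desc + len, ARCH_DMA_MINALIGN);

        /* prints start=0x80001000 end=0x80001040 len=64: the whole cache
         * line is flushed/invalidated, which is what the hardware needs */
        printf("start=%#lx end=%#lx len=%lu\n",
               (unsigned long)start, (unsigned long)end,
               (unsigned long)(end - start));
        return 0;
    }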

@@ -0,0 +1,135 @@
From 352c071bc18855238565cc6417a4c15a4e24bad8 Mon Sep 17 00:00:00 2001
From: Mikhail Kshevetskiy <mikhail.kshevetskiy@iopsys.eu>
Date: Wed, 9 Jul 2025 12:28:09 +0300
Subject: [PATCH 3/5] drivers/net/airoha_eth: fix stalling in package
 receiving

ARCH_DMA_MINALIGN is 64 for the ARMv7-A/ARMv8-A architectures, but
RX/TX descriptors are 32 bytes long, so they may not be aligned on an
ARCH_DMA_MINALIGN boundary. In the RX path this may cause the following
problem:

1) Assume that a packet has arrived and the EVEN rx descriptor has been
   updated with the incoming data. The driver will invalidate and check
   the corresponding rx descriptor.

2) Now suppose the next descriptor (ODD) has not yet completed.
   Note that all even descriptors start on a 64-byte boundary, while
   the odd ones are NOT aligned on a 64-byte boundary. Inspecting the
   even descriptor, we read the entire CPU cache line (64 bytes), so we
   also read, and store in the CPU cache, the next (odd) descriptor.

3) Now suppose the next packet (for the odd rx descriptor) arrives
   while the first packet is being processed. We now have new data in
   memory but old data in the cache.

4) After packet processing (in the arht_eth_free_pkt() function) we
   clean up the descriptor and put it back into the rx queue. This
   calls flush_dcache_range() for the even descriptor, so the odd one
   is flushed as well (it is in the same cache line), and the old data
   is written over the next rx descriptor.

5) We get a freeze. The next descriptor is empty (so the driver is
   waiting for packets), but the hardware continues to receive packets
   on the other available descriptors. This goes on until the last
   available rx descriptor is filled; then the hardware freezes too.

The problem is solved as follows:
 * do nothing in the even descriptor case,
 * return 2 descriptors (the current and the previous one) to the queue
   in the odd descriptor case.

If the current descriptor is even, nothing is done, so no issue can
arise. If the current descriptor is odd, the previous descriptor is on
the same cache line; both (current and previous) descriptors are not
currently in use, so no issue can arise either.

WARNING: The following restrictions on PKTBUFSRX must hold:
 * PKTBUFSRX is even,
 * PKTBUFSRX >= 4. Observations show that PKTBUFSRX must be at least 8.

The bug appears on the 32-bit Airoha platform, but should be present on
64-bit as well. The code was tested on both 32-bit and 64-bit Airoha
boards.

Signed-off-by: Mikhail Kshevetskiy <mikhail.kshevetskiy@iopsys.eu>
---
drivers/net/airoha_eth.c | 33 ++++++++++++++++++++++++++-------
1 file changed, 26 insertions(+), 7 deletions(-)
diff --git a/drivers/net/airoha_eth.c b/drivers/net/airoha_eth.c
index aae6922f3c7..44d4773bc5d 100644
--- a/drivers/net/airoha_eth.c
+++ b/drivers/net/airoha_eth.c
@@ -435,13 +435,14 @@ static int airoha_fe_init(struct airoha_eth *eth)
return 0;
}
-static void airoha_qdma_reset_rx_desc(struct airoha_queue *q, int index,
- uchar *rx_packet)
+static void airoha_qdma_reset_rx_desc(struct airoha_queue *q, int index)
{
struct airoha_qdma_desc *desc;
+ uchar *rx_packet;
u32 val;
desc = &q->desc[index];
+ rx_packet = net_rx_packets[index];
index = (index + 1) % q->ndesc;
dma_map_single(rx_packet, PKTSIZE_ALIGN, DMA_TO_DEVICE);
@@ -463,7 +464,7 @@ static void airoha_qdma_init_rx_desc(struct airoha_queue *q)
int i;
for (i = 0; i < q->ndesc; i++)
- airoha_qdma_reset_rx_desc(q, i, net_rx_packets[i]);
+ airoha_qdma_reset_rx_desc(q, i);
}
static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
@@ -1003,12 +1004,30 @@ static int arht_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
qid = 0;
q = &qdma->q_rx[qid];
- dma_map_single(packet, length, DMA_TO_DEVICE);
+ /*
+ * Due to a CPU cache issue the airoha_qdma_reset_rx_desc() function
+ * will always touch 2 descriptors placed on the same cacheline:
+ * - if the current descriptor is even, the current and next
+ *   descriptors will be touched
+ * - if the current descriptor is odd, the current and previous
+ *   descriptors will be touched
+ *
+ * Thus, to prevent possibly corrupting the rx queue, we should:
+ * - do nothing in the even descriptor case,
+ * - utilize 2 descriptors (the current and the previous one) in
+ *   the odd descriptor case.
+ *
+ * WARNING: Observations show that PKTBUFSRX must be even and
+ * larger than 7 for reliable driver operation.
+ */
+ if (q->head & 0x01) {
+ airoha_qdma_reset_rx_desc(q, q->head - 1);
+ airoha_qdma_reset_rx_desc(q, q->head);
- airoha_qdma_reset_rx_desc(q, q->head, packet);
+ airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
+ FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
+ }
- airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
- FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
q->head = (q->head + 1) % q->ndesc;
return 0;
--
2.51.0
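
A quick standalone check of the even/odd pairing the commit message
relies on: with the 32-byte descriptors and 64-byte cache lines given
above, descriptor i and descriptor (i ^ 1) always share a cache line:

    #include <stdio.h>

    int main(void)
    {
        int ndesc = 8;  /* PKTBUFSRX: even and >= 8 per the warning above */

        for (int i = 0; i < ndesc; i++)
            printf("desc %d -> cache line %d (paired with desc %d)\n",
                   i, (i * 32) / 64, i ^ 1);
        return 0;
    }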

@@ -0,0 +1,64 @@
From dc0ae3455f4344403e293c9b385653ad3fddb0b1 Mon Sep 17 00:00:00 2001
From: Mikhail Kshevetskiy <mikhail.kshevetskiy@iopsys.eu>
Date: Wed, 9 Jul 2025 12:28:10 +0300
Subject: [PATCH 4/5] drivers/net/airoha_eth: enable hw padding of short tx
 packets

Transmission of short packets does not work well for the XFI (GDM2) and
HSGMII (GDM3) interfaces. The issue can be solved by:
 - padding short packets to 60 bytes,
 - setting the PAD_EN bit in the corresponding REG_GDM_FWD_CFG(n)
   register.

The issue should be present for the lan switch (GDM1) as well, but for
an unknown reason it does not appear there.

This patch sets the PAD_EN bit for the GDMs in use.

Signed-off-by: Mikhail Kshevetskiy <mikhail.kshevetskiy@iopsys.eu>
---
drivers/net/airoha_eth.c | 13 +++++++++++--
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/drivers/net/airoha_eth.c b/drivers/net/airoha_eth.c
index 53c722379c9..b2f73c7dbb7 100644
--- a/drivers/net/airoha_eth.c
+++ b/drivers/net/airoha_eth.c
@@ -116,6 +116,7 @@
(_n) == 2 ? GDM2_BASE : GDM1_BASE)
#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
+#define GDM_PAD_EN BIT(28)
#define GDM_DROP_CRC_ERR BIT(23)
#define GDM_IP4_CKSUM BIT(22)
#define GDM_TCP_CKSUM BIT(21)
@@ -423,8 +424,11 @@ static void airoha_fe_maccr_init(struct airoha_eth *eth)
int p;
for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
- /* Disable any kind of CRC drop or offload */
- airoha_fe_wr(eth, REG_GDM_FWD_CFG(p), 0);
+ /*
+ * Disable any kind of CRC drop or offload.
+ * Enable padding of short TX packets to 60 bytes.
+ */
+ airoha_fe_wr(eth, REG_GDM_FWD_CFG(p), GDM_PAD_EN);
}
}
@@ -920,6 +924,11 @@ static int airoha_eth_send(struct udevice *dev, void *packet, int length)
u32 val;
int i;
+ /*
+ * There is no need to pad short TX packets to 60 bytes since the
+ * GDM_PAD_EN bit is set in the corresponding REG_GDM_FWD_CFG(n) register.
+ */
+
dma_addr = dma_map_single(packet, length, DMA_TO_DEVICE);
qid = 0;
--
2.51.0
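
For contrast, a software fallback would look roughly like the hypothetical
helper below; the patch avoids this entirely by letting GDM_PAD_EN pad in
hardware:

    #include <string.h>

    #define ETH_ZLEN 60  /* minimum Ethernet frame length without FCS */

    /* Zero-pad a short frame in place; returns the new length, or -1 if
     * the buffer cannot hold the padded frame. */
    static int pad_short_packet(void *packet, int length, int bufsize)
    {
        if (length >= ETH_ZLEN)
            return length;
        if (bufsize < ETH_ZLEN)
            return -1;
        memset((char *)packet + length, 0, ETH_ZLEN - length);
        return ETH_ZLEN;
    }

    int main(void)
    {
        char buf[64] = "short";

        return pad_short_packet(buf, 5, sizeof(buf)) == ETH_ZLEN ? 0 : 1;
    }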

@@ -0,0 +1,42 @@
From 75d82c8878b2ffff489fbc7a5c0381f8f6484ec2 Mon Sep 17 00:00:00 2001
From: Mikhail Kshevetskiy <mikhail.kshevetskiy@iopsys.eu>
Date: Fri, 3 Oct 2025 05:28:41 +0300
Subject: [PATCH 5/5] net: airoha: increase the number of rx network buffers

According to commit 997786bbf473 ("drivers/net/airoha_eth: fix stalling
in package receiving"), the minimum possible value of SYS_RX_ETH_BUFFER
is 4. Unfortunately, that is too small for reliable ping.

Signed-off-by: Mikhail Kshevetskiy <mikhail.kshevetskiy@iopsys.eu>
---
configs/an7581_evb_defconfig | 1 +
configs/an7583_evb_defconfig | 1 +
2 files changed, 2 insertions(+)
diff --git a/configs/an7581_evb_defconfig b/configs/an7581_evb_defconfig
index c74247e13db..aa1a30aad6a 100644
--- a/configs/an7581_evb_defconfig
+++ b/configs/an7581_evb_defconfig
@@ -44,6 +44,7 @@ CONFIG_ENV_IS_IN_MMC=y
CONFIG_SYS_RELOC_GD_ENV_ADDR=y
CONFIG_ENV_VARS_UBOOT_RUNTIME_CONFIG=y
CONFIG_NET_RANDOM_ETHADDR=y
+CONFIG_SYS_RX_ETH_BUFFER=8
CONFIG_REGMAP=y
CONFIG_SYSCON=y
CONFIG_CLK=y
diff --git a/configs/an7583_evb_defconfig b/configs/an7583_evb_defconfig
index 057104b93af..c67444ae8bf 100644
--- a/configs/an7583_evb_defconfig
+++ b/configs/an7583_evb_defconfig
@@ -44,6 +44,7 @@ CONFIG_ENV_IS_IN_MMC=y
CONFIG_SYS_RELOC_GD_ENV_ADDR=y
CONFIG_ENV_VARS_UBOOT_RUNTIME_CONFIG=y
CONFIG_NET_RANDOM_ETHADDR=y
+CONFIG_SYS_RX_ETH_BUFFER=8
CONFIG_REGMAP=y
CONFIG_SYSCON=y
CONFIG_CLK=y
--
2.51.0
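
The two defconfig changes above encode the PKTBUFSRX constraints from
patch 3 by configuration only. A compile-time guard like the following
(hypothetical, not part of the series; PKTBUFSRX stands in for the value
derived from CONFIG_SYS_RX_ETH_BUFFER) would catch a bad value at build
time instead:

    #include <assert.h>

    #define PKTBUFSRX 8  /* stand-in for CONFIG_SYS_RX_ETH_BUFFER */

    static_assert((PKTBUFSRX % 2) == 0, "PKTBUFSRX must be even");
    static_assert(PKTBUFSRX >= 8, "PKTBUFSRX must be at least 8");

    int main(void) { return 0; }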