From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Fri, 20 May 2022 20:11:30 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on txd_size in
 mtk_desc_to_tx_buf

This is a preliminary patch to add mt7986 ethernet support. Compute the tx
buffer index in mtk_desc_to_tx_buf() from the descriptor byte offset and the
SoC-specific txd_size instead of relying on mtk_tx_dma pointer arithmetic, so
the helper keeps working when the descriptor size differs between SoCs.

Tested-by: Sam Shih <sam.shih@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
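
A minimal stand-alone sketch of the indexing scheme the patch switches to
(not part of the patch; the struct and function names below are illustrative
only, assuming a ring whose descriptor size is only known at run time):

#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

struct demo_ring {
	void *dma;		/* base address of the descriptor array */
	size_t txd_size;	/* per-SoC descriptor size in bytes */
};

/* Typed pointer arithmetic (txd - ring->dma) only works while the
 * compile-time descriptor type matches the real descriptor size;
 * dividing the byte offset by the run-time txd_size works for any
 * descriptor layout.
 */
static int demo_desc_to_idx(struct demo_ring *ring, void *txd)
{
	return ((char *)txd - (char *)ring->dma) / ring->txd_size;
}

int main(void)
{
	struct demo_ring ring = { .txd_size = 32 };

	ring.dma = calloc(16, ring.txd_size);
	assert(ring.dma);

	/* descriptor #5 starts 5 * txd_size bytes into the array */
	void *txd = (char *)ring.dma + 5 * ring.txd_size;
	assert(demo_desc_to_idx(&ring, txd) == 5);

	free(ring.dma);
	return 0;
}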

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -901,10 +901,11 @@ static inline void *mtk_qdma_phys_to_vir
 	return ret + (desc - ring->phys);
 }
 
-static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
-						    struct mtk_tx_dma *txd)
+static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
+					     struct mtk_tx_dma *txd,
+					     u32 txd_size)
 {
-	int idx = txd - ring->dma;
+	int idx = ((void *)txd - (void *)ring->dma) / txd_size;
 
 	return &ring->buf[idx];
 }
@@ -1026,6 +1027,7 @@ static int mtk_tx_map(struct sk_buff *sk
 	};
 	struct mtk_mac *mac = netdev_priv(dev);
 	struct mtk_eth *eth = mac->hw;
+	const struct mtk_soc_data *soc = eth->soc;
 	struct mtk_tx_dma *itxd, *txd;
 	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
 	struct mtk_tx_buf *itx_buf, *tx_buf;
@@ -1037,7 +1039,7 @@ static int mtk_tx_map(struct sk_buff *sk
 	if (itxd == ring->last_free)
 		return -ENOMEM;
 
-	itx_buf = mtk_desc_to_tx_buf(ring, itxd);
+	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
 	memset(itx_buf, 0, sizeof(*itx_buf));
 
 	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
@@ -1065,7 +1067,7 @@ static int mtk_tx_map(struct sk_buff *sk
 		while (frag_size) {
 			bool new_desc = true;
 
-			if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
+			if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
 			    (i & 0x1)) {
 				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
 				txd_pdma = qdma_to_pdma(ring, txd);
@@ -1089,7 +1091,8 @@ static int mtk_tx_map(struct sk_buff *sk
 
 			mtk_tx_set_dma_desc(dev, txd, &txd_info);
 
-			tx_buf = mtk_desc_to_tx_buf(ring, txd);
+			tx_buf = mtk_desc_to_tx_buf(ring, txd,
+						    soc->txrx.txd_size);
 			if (new_desc)
 				memset(tx_buf, 0, sizeof(*tx_buf));
 			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
@@ -1108,7 +1111,7 @@ static int mtk_tx_map(struct sk_buff *sk
 	/* store skb to cleanup */
 	itx_buf->skb = skb;
 
-	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
 		if (k & 0x1)
 			txd_pdma->txd2 |= TX_DMA_LS0;
 		else
@@ -1126,7 +1129,7 @@ static int mtk_tx_map(struct sk_buff *sk
 	 */
 	wmb();
 
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
 		if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
 		    !netdev_xmit_more())
 			mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
@@ -1140,13 +1143,13 @@ static int mtk_tx_map(struct sk_buff *sk
 
 err_dma:
 	do {
-		tx_buf = mtk_desc_to_tx_buf(ring, itxd);
+		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
 
 		/* unmap dma */
 		mtk_tx_unmap(eth, tx_buf, false);
 
 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
-		if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
 			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
 
 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
@@ -1460,7 +1463,8 @@ static int mtk_poll_tx_qdma(struct mtk_e
 		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
 			break;
 
-		tx_buf = mtk_desc_to_tx_buf(ring, desc);
+		tx_buf = mtk_desc_to_tx_buf(ring, desc,
+					    eth->soc->txrx.txd_size);
 		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
 			mac = 1;
 