From b0a43db9087a21d96e1a0b716b8d9963064b2d58 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Sat, 19 Oct 2019 10:13:27 +0200
Subject: [PATCH 7/7] net: mvneta: add XDP_TX support

Implement the XDP_TX verdict, which bounces a received frame back out
of the port it arrived on, and the ndo_xdp_xmit net_device_ops
callback, which transmits frames that XDP programs running on other
devices have redirected to this port.
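
For context, a minimal XDP program that exercises the new XDP_TX path
could look like the sketch below. It is illustrative only and not part
of this patch; the program, object-file and device names are
hypothetical.

/* Illustrative sketch: bounce every received frame back out of the
 * port it arrived on, exercising the XDP_TX verdict added here.
 */
#include <linux/bpf.h>

#ifndef SEC
#define SEC(name) __attribute__((section(name), used))
#endif

SEC("xdp")
int xdp_tx_all(struct xdp_md *ctx)
{
	return XDP_TX;
}

char _license[] SEC("license") = "GPL";

Attached with iproute2 (device name hypothetical):

  ip link set dev eth0 xdp obj xdp_tx.o sec xdp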

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/ethernet/marvell/mvneta.c | 128 ++++++++++++++++++++++++--
 1 file changed, 121 insertions(+), 7 deletions(-)
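
The ndo_xdp_xmit side is exercised from other devices: when an XDP
program elsewhere returns XDP_REDIRECT targeting this port, the core
hands the resulting frames to mvneta_xdp_xmit(). A sketch of such a
program follows; it is illustrative only and not part of this patch,
and the ifindex is a placeholder.

/* Illustrative sketch: redirect every frame received on another NIC
 * to the mvneta port, which then transmits it via the new
 * ndo_xdp_xmit callback.
 */
#include <linux/bpf.h>

#ifndef SEC
#define SEC(name) __attribute__((section(name), used))
#endif

static int (*bpf_redirect)(int ifindex, int flags) =
	(void *)BPF_FUNC_redirect;

SEC("xdp")
int xdp_redirect_to_mvneta(struct xdp_md *ctx)
{
	return bpf_redirect(3, 0);	/* 3: placeholder ifindex */
}

char _license[] SEC("license") = "GPL";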

--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1813,16 +1813,19 @@ static void mvneta_txq_bufs_free(struct
 
 		mvneta_txq_inc_get(txq);
 
-		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr) &&
+		    buf->type != MVNETA_TYPE_XDP_TX)
 			dma_unmap_single(pp->dev->dev.parent,
 					 tx_desc->buf_phys_addr,
 					 tx_desc->data_size, DMA_TO_DEVICE);
-		if (!buf->skb)
-			continue;
-
-		bytes_compl += buf->skb->len;
-		pkts_compl++;
-		dev_kfree_skb_any(buf->skb);
+		if (buf->type == MVNETA_TYPE_SKB && buf->skb) {
+			bytes_compl += buf->skb->len;
+			pkts_compl++;
+			dev_kfree_skb_any(buf->skb);
+		} else if (buf->type == MVNETA_TYPE_XDP_TX ||
+			   buf->type == MVNETA_TYPE_XDP_NDO) {
+			xdp_return_frame(buf->xdpf);
+		}
 	}
 
 	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
@@ -1987,6 +1990,111 @@ int mvneta_rx_refill_queue(struct mvneta
 }
 
 static int
+mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
+			struct xdp_frame *xdpf, bool dma_map)
+{
+	struct mvneta_tx_desc *tx_desc;
+	struct mvneta_tx_buf *buf;
+	dma_addr_t dma_addr;
+
+	if (txq->count >= txq->tx_stop_threshold)
+		return MVNETA_XDP_DROPPED;
+
+	tx_desc = mvneta_txq_next_desc_get(txq);
+
+	buf = &txq->buf[txq->txq_put_index];
+	if (dma_map) {
+		/* ndo_xdp_xmit */
+		dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data,
+					  xdpf->len, DMA_TO_DEVICE);
+		if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) {
+			mvneta_txq_desc_put(txq);
+			return MVNETA_XDP_DROPPED;
+		}
+		buf->type = MVNETA_TYPE_XDP_NDO;
+	} else {
+		struct page *page = virt_to_page(xdpf->data);
+
+		dma_addr = page_pool_get_dma_addr(page) +
+			   sizeof(*xdpf) + xdpf->headroom;
+		dma_sync_single_for_device(pp->dev->dev.parent, dma_addr,
+					   xdpf->len, DMA_BIDIRECTIONAL);
+		buf->type = MVNETA_TYPE_XDP_TX;
+	}
+	buf->xdpf = xdpf;
+
+	tx_desc->command = MVNETA_TXD_FLZ_DESC;
+	tx_desc->buf_phys_addr = dma_addr;
+	tx_desc->data_size = xdpf->len;
+
+	mvneta_update_stats(pp, 1, xdpf->len, true);
+	mvneta_txq_inc_put(txq);
+	txq->pending++;
+	txq->count++;
+
+	return MVNETA_XDP_TX;
+}
+
+static int
+mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
+{
+	struct mvneta_tx_queue *txq;
+	struct netdev_queue *nq;
+	struct xdp_frame *xdpf;
+	int cpu;
+	u32 ret;
+
+	xdpf = convert_to_xdp_frame(xdp);
+	if (unlikely(!xdpf))
+		return MVNETA_XDP_DROPPED;
+
+	cpu = smp_processor_id();
+	txq = &pp->txqs[cpu % txq_number];
+	nq = netdev_get_tx_queue(pp->dev, txq->id);
+
+	__netif_tx_lock(nq, cpu);
+	ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
+	if (ret == MVNETA_XDP_TX)
+		mvneta_txq_pend_desc_add(pp, txq, 0);
+	__netif_tx_unlock(nq);
+
+	return ret;
+}
+
+static int
+mvneta_xdp_xmit(struct net_device *dev, int num_frame,
+		struct xdp_frame **frames, u32 flags)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+	int cpu = smp_processor_id();
+	struct mvneta_tx_queue *txq;
+	struct netdev_queue *nq;
+	int i, drops = 0;
+	u32 ret;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	txq = &pp->txqs[cpu % txq_number];
+	nq = netdev_get_tx_queue(pp->dev, txq->id);
+
+	__netif_tx_lock(nq, cpu);
+	for (i = 0; i < num_frame; i++) {
+		ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
+		if (ret != MVNETA_XDP_TX) {
+			xdp_return_frame_rx_napi(frames[i]);
+			drops++;
+		}
+	}
+
+	if (unlikely(flags & XDP_XMIT_FLUSH))
+		mvneta_txq_pend_desc_add(pp, txq, 0);
+	__netif_tx_unlock(nq);
+
+	return num_frame - drops;
+}
+
+static int
 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 	       struct bpf_prog *prog, struct xdp_buff *xdp)
 {
@@ -2008,6 +2116,11 @@ mvneta_run_xdp(struct mvneta_port *pp, s
 		}
 		break;
 	}
+	case XDP_TX:
+		ret = mvneta_xdp_xmit_back(pp, xdp);
+		if (ret != MVNETA_XDP_TX)
+			xdp_return_buff(xdp);
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 		/* fall through */
@@ -4581,6 +4694,7 @@ static const struct net_device_ops mvnet
 	.ndo_get_stats64     = mvneta_get_stats64,
 	.ndo_do_ioctl        = mvneta_ioctl,
 	.ndo_bpf             = mvneta_xdp,
+	.ndo_xdp_xmit        = mvneta_xdp_xmit,
 };
 
 static const struct ethtool_ops mvneta_eth_tool_ops = {