
Commit 06059a1

Geetha sowjanya authored and davem330 committed
octeontx2-pf: Add XDP support to netdev PF
Adds XDP_PASS, XDP_TX, XDP_DROP and XDP_REDIRECT support for netdev PF.

Signed-off-by: Geetha sowjanya <[email protected]>
Signed-off-by: Sunil Goutham <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 85212a1 commit 06059a1
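For context, a minimal XDP program of the kind this PF driver can now run natively might look like the sketch below. It is illustrative only and not part of this commit; the program name, section name and the "drop UDP port 9" filter are arbitrary choices for the example.

/* Illustrative sketch, not part of this commit: a trivial XDP program that
 * drops IPv4 UDP packets destined to port 9 and passes everything else.
 * Build with clang -O2 -target bpf and attach to the PF netdev.
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int xdp_drop_udp9(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	struct ethhdr *eth = data;
	struct iphdr *iph;
	struct udphdr *udph;

	if ((void *)(eth + 1) > data_end)
		return XDP_PASS;
	if (eth->h_proto != bpf_htons(ETH_P_IP))
		return XDP_PASS;

	iph = (void *)(eth + 1);
	if ((void *)(iph + 1) > data_end || iph->protocol != IPPROTO_UDP)
		return XDP_PASS;

	/* Assumes no IP options, for brevity. */
	udph = (void *)(iph + 1);
	if ((void *)(udph + 1) > data_end)
		return XDP_PASS;

	return udph->dest == bpf_htons(9) ? XDP_DROP : XDP_PASS;
}

char _license[] SEC("license") = "GPL";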

6 files changed (+322, -34 lines changed)


drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c (+22, -13)

@@ -718,7 +718,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
 	int timeout = 1000;
 
 	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
-	for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+	for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
 		incr = (u64)qidx << 32;
 		while (timeout) {
 			val = otx2_atomic64_add(incr, ptr);
@@ -835,17 +835,19 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
 	if (err)
 		return err;
 
-	err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
-			 TSO_HEADER_SIZE);
-	if (err)
-		return err;
+	if (qidx < pfvf->hw.tx_queues) {
+		err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
+				 TSO_HEADER_SIZE);
+		if (err)
+			return err;
+	}
 
 	sq->sqe_base = sq->sqe->base;
 	sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL);
 	if (!sq->sg)
 		return -ENOMEM;
 
-	if (pfvf->ptp) {
+	if (pfvf->ptp && qidx < pfvf->hw.tx_queues) {
 		err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
 				 sizeof(*sq->timestamps));
 		if (err)
@@ -871,20 +873,27 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
 static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
 {
 	struct otx2_qset *qset = &pfvf->qset;
+	int err, pool_id, non_xdp_queues;
 	struct nix_aq_enq_req *aq;
 	struct otx2_cq_queue *cq;
-	int err, pool_id;
 
 	cq = &qset->cq[qidx];
 	cq->cq_idx = qidx;
+	non_xdp_queues = pfvf->hw.rx_queues + pfvf->hw.tx_queues;
 	if (qidx < pfvf->hw.rx_queues) {
 		cq->cq_type = CQ_RX;
 		cq->cint_idx = qidx;
 		cq->cqe_cnt = qset->rqe_cnt;
-	} else {
+		if (pfvf->xdp_prog)
+			xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0);
+	} else if (qidx < non_xdp_queues) {
 		cq->cq_type = CQ_TX;
 		cq->cint_idx = qidx - pfvf->hw.rx_queues;
 		cq->cqe_cnt = qset->sqe_cnt;
+	} else {
+		cq->cq_type = CQ_XDP;
+		cq->cint_idx = qidx - non_xdp_queues;
+		cq->cqe_cnt = qset->sqe_cnt;
 	}
 	cq->cqe_size = pfvf->qset.xqe_size;
 
@@ -991,7 +1000,7 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf)
 	}
 
 	/* Initialize TX queues */
-	for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+	for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
 		u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
 
 		err = otx2_sq_init(pfvf, qidx, sqb_aura);
@@ -1038,7 +1047,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
 
 	/* Set RQ/SQ/CQ counts */
 	nixlf->rq_cnt = pfvf->hw.rx_queues;
-	nixlf->sq_cnt = pfvf->hw.tx_queues;
+	nixlf->sq_cnt = pfvf->hw.tot_tx_queues;
 	nixlf->cq_cnt = pfvf->qset.cq_cnt;
 	nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
 	nixlf->rss_grps = MAX_RSS_GROUPS;
@@ -1076,7 +1085,7 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
 	int sqb, qidx;
 	u64 iova, pa;
 
-	for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+	for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
 		sq = &qset->sq[qidx];
 		if (!sq->sqb_ptrs)
 			continue;
@@ -1288,7 +1297,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
 	stack_pages =
 		(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
 
-	for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+	for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
 		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
 		/* Initialize aura context */
 		err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
@@ -1308,7 +1317,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
 		goto fail;
 
 	/* Allocate pointers and free them to aura/pool */
-	for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+	for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
 		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
 		pool = &pfvf->qset.pool[pool_id];
 
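The net effect of the otx2_cq_init() change above is a three-way split of the completion-queue index space: RX CQs first, then the regular TX CQs, then the new XDP TX CQs. The helper below is an illustrative sketch only (it does not exist in the driver) that restates that mapping; tot_tx_queues is tx_queues plus xdp_queues, as set up in otx2_xdp_setup() further down.

/* Illustrative sketch, not driver code. Restates the CQ index layout
 * implied by otx2_cq_init() after this patch:
 *   [0, rx_queues)                              -> CQ_RX
 *   [rx_queues, rx_queues + tx_queues)          -> CQ_TX
 *   [rx_queues + tx_queues,
 *    rx_queues + tx_queues + xdp_queues)        -> CQ_XDP
 */
#include <stdint.h>

enum example_cq_type { EXAMPLE_CQ_RX, EXAMPLE_CQ_TX, EXAMPLE_CQ_XDP, EXAMPLE_CQ_INVALID };

static enum example_cq_type example_classify_cq(uint16_t qidx, uint16_t rx_queues,
						uint16_t tx_queues, uint16_t xdp_queues)
{
	uint16_t non_xdp_queues = rx_queues + tx_queues;

	if (qidx < rx_queues)
		return EXAMPLE_CQ_RX;		/* stack and XDP share these RQs */
	if (qidx < non_xdp_queues)
		return EXAMPLE_CQ_TX;		/* regular network-stack SQs */
	if (qidx < non_xdp_queues + xdp_queues)
		return EXAMPLE_CQ_XDP;		/* dedicated XDP_TX/REDIRECT SQs */
	return EXAMPLE_CQ_INVALID;
}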
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h (+4)

@@ -171,6 +171,8 @@ struct otx2_hw {
 	struct otx2_rss_info	rss_info;
 	u16			rx_queues;
 	u16			tx_queues;
+	u16			xdp_queues;
+	u16			tot_tx_queues;
 	u16			max_queues;
 	u16			pool_cnt;
 	u16			rqpool_cnt;
@@ -345,6 +347,7 @@ struct otx2_nic {
 	u64			flags;
 	u64			*cq_op_addr;
 
+	struct bpf_prog		*xdp_prog;
 	struct otx2_qset	qset;
 	struct otx2_hw		hw;
 	struct pci_dev		*pdev;
@@ -857,6 +860,7 @@ int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
 int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
 int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
 int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
 u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
 /* tc support */
 int otx2_init_tc(struct otx2_nic *nic);

drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c (+130, -5)

@@ -13,6 +13,8 @@
 #include <linux/if_vlan.h>
 #include <linux/iommu.h>
 #include <net/ip.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 
 #include "otx2_reg.h"
 #include "otx2_common.h"
@@ -48,9 +50,15 @@ static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
 
 static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
 {
+	struct otx2_nic *pf = netdev_priv(netdev);
 	bool if_up = netif_running(netdev);
 	int err = 0;
 
+	if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) {
+		netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+			    netdev->mtu);
+		return -EINVAL;
+	}
 	if (if_up)
 		otx2_stop(netdev);
 
@@ -1180,7 +1188,7 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
 	}
 
 	/* SQ */
-	for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+	for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
 		ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
 		val = otx2_atomic64_add((qidx << 44), ptr);
 		otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
@@ -1283,7 +1291,7 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
 	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
 	/* Free SQB pointers */
 	otx2_sq_free_sqbs(pf);
-	for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+	for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
 		sq = &qset->sq[qidx];
 		qmem_free(pf->dev, sq->sqe);
 		qmem_free(pf->dev, sq->tso_hdrs);
@@ -1332,7 +1340,7 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
 	 * so, aura count = pool count.
 	 */
 	hw->rqpool_cnt = hw->rx_queues;
-	hw->sqpool_cnt = hw->tx_queues;
+	hw->sqpool_cnt = hw->tot_tx_queues;
 	hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
 
 	pf->max_frs = pf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
@@ -1541,7 +1549,7 @@ int otx2_open(struct net_device *netdev)
 
 	netif_carrier_off(netdev);
 
-	pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues;
+	pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tot_tx_queues;
 	/* RQ and SQs are mapped to different CQs,
 	 * so find out max CQ IRQs (i.e CINTs) needed.
 	 */
@@ -1561,7 +1569,7 @@ int otx2_open(struct net_device *netdev)
 	if (!qset->cq)
 		goto err_free_mem;
 
-	qset->sq = kcalloc(pf->hw.tx_queues,
+	qset->sq = kcalloc(pf->hw.tot_tx_queues,
 			   sizeof(struct otx2_snd_queue), GFP_KERNEL);
 	if (!qset->sq)
 		goto err_free_mem;
@@ -1582,11 +1590,20 @@ int otx2_open(struct net_device *netdev)
 		/* RQ0 & SQ0 are mapped to CINT0 and so on..
 		 * 'cq_ids[0]' points to RQ's CQ and
 		 * 'cq_ids[1]' points to SQ's CQ and
+		 * 'cq_ids[2]' points to XDP's CQ and
 		 */
 		cq_poll->cq_ids[CQ_RX] =
 			(qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
 		cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
 				      qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
+		if (pf->xdp_prog)
+			cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ?
+						  (qidx + pf->hw.rx_queues +
+						  pf->hw.tx_queues) :
+						  CINT_INVALID_CQ;
+		else
+			cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
+
 		cq_poll->dev = (void *)pf;
 		netif_napi_add(netdev, &cq_poll->napi,
 			       otx2_napi_handler, NAPI_POLL_WEIGHT);
@@ -2291,6 +2308,111 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf,
 	return 0;
 }
 
+static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
+			    int qidx)
+{
+	struct page *page;
+	u64 dma_addr;
+	int err = 0;
+
+	dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data),
+				     offset_in_page(xdpf->data), xdpf->len,
+				     DMA_TO_DEVICE);
+	if (dma_mapping_error(pf->dev, dma_addr))
+		return -ENOMEM;
+
+	err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
+	if (!err) {
+		otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
+		page = virt_to_page(xdpf->data);
+		put_page(page);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int otx2_xdp_xmit(struct net_device *netdev, int n,
+			 struct xdp_frame **frames, u32 flags)
+{
+	struct otx2_nic *pf = netdev_priv(netdev);
+	int qidx = smp_processor_id();
+	struct otx2_snd_queue *sq;
+	int drops = 0, i;
+
+	if (!netif_running(netdev))
+		return -ENETDOWN;
+
+	qidx += pf->hw.tx_queues;
+	sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL;
+
+	/* Abort xmit if xdp queue is not */
+	if (unlikely(!sq))
+		return -ENXIO;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	for (i = 0; i < n; i++) {
+		struct xdp_frame *xdpf = frames[i];
+		int err;
+
+		err = otx2_xdp_xmit_tx(pf, xdpf, qidx);
+		if (err)
+			drops++;
+	}
+	return n - drops;
+}
+
+static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
+{
+	struct net_device *dev = pf->netdev;
+	bool if_up = netif_running(pf->netdev);
+	struct bpf_prog *old_prog;
+
+	if (prog && dev->mtu > MAX_XDP_MTU) {
+		netdev_warn(dev, "Jumbo frames not yet supported with XDP\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (if_up)
+		otx2_stop(pf->netdev);
+
+	old_prog = xchg(&pf->xdp_prog, prog);
+
+	if (old_prog)
+		bpf_prog_put(old_prog);
+
+	if (pf->xdp_prog)
+		bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1);
+
+	/* Network stack and XDP shared same rx queues.
+	 * Use separate tx queues for XDP and network stack.
+	 */
+	if (pf->xdp_prog)
+		pf->hw.xdp_queues = pf->hw.rx_queues;
+	else
+		pf->hw.xdp_queues = 0;
+
+	pf->hw.tot_tx_queues += pf->hw.xdp_queues;
+
+	if (if_up)
+		otx2_open(pf->netdev);
+
+	return 0;
+}
+
+static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
+{
+	struct otx2_nic *pf = netdev_priv(netdev);
+
+	switch (xdp->command) {
+	case XDP_SETUP_PROG:
+		return otx2_xdp_setup(pf, xdp->prog);
+	default:
+		return -EINVAL;
+	}
+}
+
 static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
 				   int req_perm)
 {
@@ -2358,6 +2480,8 @@ static const struct net_device_ops otx2_netdev_ops = {
 	.ndo_set_vf_mac		= otx2_set_vf_mac,
 	.ndo_set_vf_vlan	= otx2_set_vf_vlan,
 	.ndo_get_vf_config	= otx2_get_vf_config,
+	.ndo_bpf		= otx2_xdp,
+	.ndo_xdp_xmit		= otx2_xdp_xmit,
 	.ndo_setup_tc		= otx2_setup_tc,
 	.ndo_set_vf_trust	= otx2_ndo_set_vf_trust,
 };
@@ -2499,6 +2623,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	hw->pdev = pdev;
 	hw->rx_queues = qcount;
 	hw->tx_queues = qcount;
+	hw->tot_tx_queues = qcount;
 	hw->max_queues = qcount;
 
 	num_vec = pci_msix_vec_count(pdev);
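Once the driver exposes .ndo_bpf, a program such as the sketch near the top of this page can be attached in native (driver) mode from userspace. The loader below is an illustrative example using libbpf (1.0-style APIs assumed); the interface name eth0 and the object/program names are placeholders, not part of this commit. XDP_REDIRECT toward this interface then flows through the new .ndo_xdp_xmit hook, which hands frames to the per-CPU XDP send queues added above.

/* Illustrative libbpf loader; names are placeholders, not part of this commit. */
#include <net/if.h>
#include <linux/if_link.h>	/* XDP_FLAGS_DRV_MODE */
#include <bpf/libbpf.h>
#include <bpf/bpf.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int ifindex, prog_fd;

	ifindex = if_nametoindex("eth0");		/* placeholder ifname */
	if (!ifindex)
		return 1;

	obj = bpf_object__open_file("xdp_drop_udp9.o", NULL);
	if (!obj)
		return 1;
	if (bpf_object__load(obj))
		return 1;

	prog = bpf_object__find_program_by_name(obj, "xdp_drop_udp9");
	if (!prog)
		return 1;
	prog_fd = bpf_program__fd(prog);

	/* Native/driver mode; the request reaches otx2_xdp() as XDP_SETUP_PROG. */
	if (bpf_xdp_attach(ifindex, prog_fd, XDP_FLAGS_DRV_MODE, NULL))
		return 1;

	return 0;
}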
