@@ -13,6 +13,8 @@
 #include <linux/if_vlan.h>
 #include <linux/iommu.h>
 #include <net/ip.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 
 #include "otx2_reg.h"
 #include "otx2_common.h"
@@ -48,9 +50,15 @@ static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
 
 static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
 {
+	struct otx2_nic *pf = netdev_priv(netdev);
 	bool if_up = netif_running(netdev);
 	int err = 0;
 
+	if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) {
+		netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+			    netdev->mtu);
+		return -EINVAL;
+	}
 	if (if_up)
 		otx2_stop(netdev);
 
@@ -1180,7 +1188,7 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
 	}
 
 	/* SQ */
-	for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+	for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
 		ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
 		val = otx2_atomic64_add((qidx << 44), ptr);
 		otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
@@ -1283,7 +1291,7 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
 	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
 	/* Free SQB pointers */
 	otx2_sq_free_sqbs(pf);
-	for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+	for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
 		sq = &qset->sq[qidx];
 		qmem_free(pf->dev, sq->sqe);
 		qmem_free(pf->dev, sq->tso_hdrs);
@@ -1332,7 +1340,7 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
 	 * so, aura count = pool count.
 	 */
 	hw->rqpool_cnt = hw->rx_queues;
-	hw->sqpool_cnt = hw->tx_queues;
+	hw->sqpool_cnt = hw->tot_tx_queues;
 	hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
 
 	pf->max_frs = pf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
@@ -1541,7 +1549,7 @@ int otx2_open(struct net_device *netdev)
 
 	netif_carrier_off(netdev);
 
-	pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues;
+	pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tot_tx_queues;
 	/* RQ and SQs are mapped to different CQs,
 	 * so find out max CQ IRQs (i.e CINTs) needed.
 	 */
@@ -1561,7 +1569,7 @@ int otx2_open(struct net_device *netdev)
 	if (!qset->cq)
 		goto err_free_mem;
 
-	qset->sq = kcalloc(pf->hw.tx_queues,
+	qset->sq = kcalloc(pf->hw.tot_tx_queues,
 			   sizeof(struct otx2_snd_queue), GFP_KERNEL);
 	if (!qset->sq)
 		goto err_free_mem;
@@ -1582,11 +1590,20 @@ int otx2_open(struct net_device *netdev)
 		/* RQ0 & SQ0 are mapped to CINT0 and so on..
 		 * 'cq_ids[0]' points to RQ's CQ and
 		 * 'cq_ids[1]' points to SQ's CQ and
+		 * 'cq_ids[2]' points to XDP's CQ and
 		 */
 		cq_poll->cq_ids[CQ_RX] =
 			(qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
 		cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
 				      qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
+		if (pf->xdp_prog)
+			cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ?
+						  (qidx + pf->hw.rx_queues +
+						   pf->hw.tx_queues) :
+						  CINT_INVALID_CQ;
+		else
+			cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
+
 		cq_poll->dev = (void *)pf;
 		netif_napi_add(netdev, &cq_poll->napi,
 			       otx2_napi_handler, NAPI_POLL_WEIGHT);
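The mapping above lays the completion queues out back to back: RX CQs occupy ids [0, rx_queues), the regular TX CQs follow at rx_queues + qidx, and the new XDP TX CQs sit after both. A small sketch of that index arithmetic for reference (illustration only, not driver code; the helper name is made up for this note):

static inline int example_xdp_cq_id(int qidx, int rx_queues, int tx_queues,
				    int xdp_queues)
{
	/* XDP CQs are appended after all RX and regular TX CQs; CINTs at or
	 * beyond xdp_queues get no XDP CQ (CINT_INVALID_CQ in the driver).
	 */
	if (qidx >= xdp_queues)
		return -1;
	return qidx + rx_queues + tx_queues;
}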
@@ -2291,6 +2308,111 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf,
 	return 0;
 }
 
+static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
+			    int qidx)
+{
+	struct page *page;
+	u64 dma_addr;
+	int err = 0;
+
+	dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data),
+				     offset_in_page(xdpf->data), xdpf->len,
+				     DMA_TO_DEVICE);
+	if (dma_mapping_error(pf->dev, dma_addr))
+		return -ENOMEM;
+
+	err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
+	if (!err) {
+		otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
+		page = virt_to_page(xdpf->data);
+		put_page(page);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int otx2_xdp_xmit(struct net_device *netdev, int n,
+			 struct xdp_frame **frames, u32 flags)
+{
+	struct otx2_nic *pf = netdev_priv(netdev);
+	int qidx = smp_processor_id();
+	struct otx2_snd_queue *sq;
+	int drops = 0, i;
+
+	if (!netif_running(netdev))
+		return -ENETDOWN;
+
+	qidx += pf->hw.tx_queues;
+	sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL;
+
+	/* Abort xmit if xdp queue is not initialized */
+	if (unlikely(!sq))
+		return -ENXIO;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	for (i = 0; i < n; i++) {
+		struct xdp_frame *xdpf = frames[i];
+		int err;
+
+		err = otx2_xdp_xmit_tx(pf, xdpf, qidx);
+		if (err)
+			drops++;
+	}
+	return n - drops;
+}
+
+static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
+{
+	struct net_device *dev = pf->netdev;
+	bool if_up = netif_running(pf->netdev);
+	struct bpf_prog *old_prog;
+
+	if (prog && dev->mtu > MAX_XDP_MTU) {
+		netdev_warn(dev, "Jumbo frames not yet supported with XDP\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (if_up)
+		otx2_stop(pf->netdev);
+
+	old_prog = xchg(&pf->xdp_prog, prog);
+
+	if (old_prog)
+		bpf_prog_put(old_prog);
+
+	if (pf->xdp_prog)
+		bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1);
+
+	/* Network stack and XDP share the same rx queues.
+	 * Use separate tx queues for XDP and network stack.
+	 */
+	if (pf->xdp_prog)
+		pf->hw.xdp_queues = pf->hw.rx_queues;
+	else
+		pf->hw.xdp_queues = 0;
+
+	pf->hw.tot_tx_queues += pf->hw.xdp_queues;
+
+	if (if_up)
+		otx2_open(pf->netdev);
+
+	return 0;
+}
+
+static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
+{
+	struct otx2_nic *pf = netdev_priv(netdev);
+
+	switch (xdp->command) {
+	case XDP_SETUP_PROG:
+		return otx2_xdp_setup(pf, xdp->prog);
+	default:
+		return -EINVAL;
+	}
+}
+
 static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
 				   int req_perm)
 {
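For context, otx2_xdp() above is entered for XDP_SETUP_PROG, while the otx2_xdp_xmit()/otx2_xdp_xmit_tx() path only runs when an XDP program redirects frames to this device. A minimal program that exercises that redirect path might look like the sketch below; it is an illustration only, assuming the usual libbpf headers, and the program and map names (xdp_redirect_otx2, tx_port) are hypothetical, not part of this patch:

/* Hypothetical test program: redirect every received frame through a
 * devmap, which drives the egress device's ndo_xdp_xmit() callback.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} tx_port SEC(".maps");

SEC("xdp")
int xdp_redirect_otx2(struct xdp_md *ctx)
{
	/* Slot 0 is expected to hold the egress ifindex (filled in from
	 * user space); with flags 0, a failed lookup returns XDP_ABORTED.
	 */
	return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";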
@@ -2358,6 +2480,8 @@ static const struct net_device_ops otx2_netdev_ops = {
 	.ndo_set_vf_mac		= otx2_set_vf_mac,
 	.ndo_set_vf_vlan	= otx2_set_vf_vlan,
 	.ndo_get_vf_config	= otx2_get_vf_config,
+	.ndo_bpf		= otx2_xdp,
+	.ndo_xdp_xmit		= otx2_xdp_xmit,
 	.ndo_setup_tc		= otx2_setup_tc,
 	.ndo_set_vf_trust	= otx2_ndo_set_vf_trust,
 };
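With .ndo_bpf and .ndo_xdp_xmit registered, user space can request a native ("driver mode") attach on the otx2 netdev, which lands in otx2_xdp() above. A rough user-space sketch using libbpf (v0.8 or newer assumed); the function name, interface name and object path are placeholders and not part of this commit:

#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>

/* Load the first program from 'obj_path' and attach it in native XDP
 * mode to 'ifname'; returns 0 on success, negative on failure.
 */
static int attach_xdp_native(const char *ifname, const char *obj_path)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int ifindex = if_nametoindex(ifname);

	if (!ifindex)
		return -1;

	obj = bpf_object__open_file(obj_path, NULL);
	if (libbpf_get_error(obj))
		return -1;
	if (bpf_object__load(obj))
		return -1;

	prog = bpf_object__next_program(obj, NULL);
	if (!prog)
		return -1;

	/* XDP_FLAGS_DRV_MODE asks for the driver's native hook
	 * (otx2_xdp -> otx2_xdp_setup) rather than generic XDP.
	 */
	return bpf_xdp_attach(ifindex, bpf_program__fd(prog),
			      XDP_FLAGS_DRV_MODE, NULL);
}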
@@ -2499,6 +2623,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	hw->pdev = pdev;
 	hw->rx_queues = qcount;
 	hw->tx_queues = qcount;
+	hw->tot_tx_queues = qcount;
 	hw->max_queues = qcount;
 
 	num_vec = pci_msix_vec_count(pdev);