 #define TSNEP_COALESCE_USECS_MAX     ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \
                                       ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)
 
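+/* TX ring entry types, kept in entry->type so that activate, unmap and poll
+ * can tell skb and XDP frames apart
+ */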
+#define TSNEP_TX_TYPE_SKB       BIT(0)
+#define TSNEP_TX_TYPE_SKB_FRAG  BIT(1)
+#define TSNEP_TX_TYPE_XDP_TX    BIT(2)
+#define TSNEP_TX_TYPE_XDP_NDO   BIT(3)
+
 static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
 {
         iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
@@ -306,10 +311,12 @@ static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
         struct tsnep_tx_entry *entry = &tx->entry[index];
 
         entry->properties = 0;
+        /* xdpf is union with skb */
         if (entry->skb) {
                 entry->properties = length & TSNEP_DESC_LENGTH_MASK;
                 entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
-                if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)
+                if ((entry->type & TSNEP_TX_TYPE_SKB) &&
+                    (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS))
                         entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
 
                 /* toggle user flag to prevent false acknowledge
@@ -378,15 +385,19 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
         for (i = 0; i < count; i++) {
                 entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];
 
-                if (i == 0) {
+                if (!i) {
                         len = skb_headlen(skb);
                         dma = dma_map_single(dmadev, skb->data, len,
                                              DMA_TO_DEVICE);
+
+                        entry->type = TSNEP_TX_TYPE_SKB;
                 } else {
                         len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
                         dma = skb_frag_dma_map(dmadev,
                                                &skb_shinfo(skb)->frags[i - 1],
                                                0, len, DMA_TO_DEVICE);
+
+                        entry->type = TSNEP_TX_TYPE_SKB_FRAG;
                 }
                 if (dma_mapping_error(dmadev, dma))
                         return -ENOMEM;
@@ -413,12 +424,13 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
                 entry = &tx->entry[(index + i) % TSNEP_RING_SIZE];
 
                 if (entry->len) {
-                        if (i == 0)
+                        if (entry->type & TSNEP_TX_TYPE_SKB)
                                 dma_unmap_single(dmadev,
                                                  dma_unmap_addr(entry, dma),
                                                  dma_unmap_len(entry, len),
                                                  DMA_TO_DEVICE);
-                        else
+                        else if (entry->type &
+                                 (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_XDP_NDO))
                                 dma_unmap_page(dmadev,
                                                dma_unmap_addr(entry, dma),
                                                dma_unmap_len(entry, len),
@@ -472,7 +484,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
 
         for (i = 0; i < count; i++)
                 tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length,
-                                  i == (count - 1));
+                                  i == count - 1);
         tx->write = (tx->write + count) % TSNEP_RING_SIZE;
 
         skb_tx_timestamp(skb);
@@ -490,6 +502,110 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
         return NETDEV_TX_OK;
 }
 
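+/* Map an XDP frame and its fragments to TX descriptors. ndo_xdp_xmit frames
+ * are DMA mapped, XDP_TX frames reuse the DMA address of their page pool
+ * page. Returns the total mapped length or -ENOMEM on mapping failure.
+ */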
+static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx,
+                            struct skb_shared_info *shinfo, int count, u32 type)
+{
+        struct device *dmadev = tx->adapter->dmadev;
+        struct tsnep_tx_entry *entry;
+        struct page *page;
+        skb_frag_t *frag;
+        unsigned int len;
+        int map_len = 0;
+        dma_addr_t dma;
+        void *data;
+        int i;
+
+        frag = NULL;
+        len = xdpf->len;
+        for (i = 0; i < count; i++) {
+                entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];
+                if (type & TSNEP_TX_TYPE_XDP_NDO) {
+                        data = unlikely(frag) ? skb_frag_address(frag) :
+                                                xdpf->data;
+                        dma = dma_map_single(dmadev, data, len, DMA_TO_DEVICE);
+                        if (dma_mapping_error(dmadev, dma))
+                                return -ENOMEM;
+
+                        entry->type = TSNEP_TX_TYPE_XDP_NDO;
+                } else {
+                        page = unlikely(frag) ? skb_frag_page(frag) :
+                                                virt_to_page(xdpf->data);
+                        dma = page_pool_get_dma_addr(page);
+                        if (unlikely(frag))
+                                dma += skb_frag_off(frag);
+                        else
+                                dma += sizeof(*xdpf) + xdpf->headroom;
+                        dma_sync_single_for_device(dmadev, dma, len,
+                                                   DMA_BIDIRECTIONAL);
+
+                        entry->type = TSNEP_TX_TYPE_XDP_TX;
+                }
+
+                entry->len = len;
+                dma_unmap_addr_set(entry, dma, dma);
+
+                entry->desc->tx = __cpu_to_le64(dma);
+
+                map_len += len;
+
+                if (i + 1 < count) {
+                        frag = &shinfo->frags[i];
+                        len = skb_frag_size(frag);
+                }
+        }
+
+        return map_len;
+}
+
+/* This function requires __netif_tx_lock is held by the caller. */
+static bool tsnep_xdp_xmit_frame_ring(struct xdp_frame *xdpf,
+                                      struct tsnep_tx *tx, u32 type)
+{
+        struct skb_shared_info *shinfo = xdp_get_shared_info_from_frame(xdpf);
+        struct tsnep_tx_entry *entry;
+        int count, length, retval, i;
+
+        count = 1;
+        if (unlikely(xdp_frame_has_frags(xdpf)))
+                count += shinfo->nr_frags;
+
+        /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS
+         * will be available for normal TX path and queue is stopped there if
+         * necessary
+         */
+        if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count))
+                return false;
+
+        entry = &tx->entry[tx->write];
+        entry->xdpf = xdpf;
+
+        retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type);
+        if (retval < 0) {
+                tsnep_tx_unmap(tx, tx->write, count);
+                entry->xdpf = NULL;
+
+                tx->dropped++;
+
+                return false;
+        }
+        length = retval;
+
+        for (i = 0; i < count; i++)
+                tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length,
+                                  i == count - 1);
+        tx->write = (tx->write + count) % TSNEP_RING_SIZE;
+
+        /* descriptor properties shall be valid before hardware is notified */
+        dma_wmb();
+
+        return true;
+}
+
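+/* Notify the hardware that new TX descriptors are ready for transmission */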
+static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx)
+{
+        iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);
+}
+
 static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
 {
         struct tsnep_tx_entry *entry;
@@ -517,12 +633,17 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
                 dma_rmb();
 
                 count = 1;
-                if (skb_shinfo(entry->skb)->nr_frags > 0)
+                if ((entry->type & TSNEP_TX_TYPE_SKB) &&
+                    skb_shinfo(entry->skb)->nr_frags > 0)
                         count += skb_shinfo(entry->skb)->nr_frags;
+                else if (!(entry->type & TSNEP_TX_TYPE_SKB) &&
+                         xdp_frame_has_frags(entry->xdpf))
+                        count += xdp_get_shared_info_from_frame(entry->xdpf)->nr_frags;
 
                 length = tsnep_tx_unmap(tx, tx->read, count);
 
-                if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+                if ((entry->type & TSNEP_TX_TYPE_SKB) &&
+                    (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
                     (__le32_to_cpu(entry->desc_wb->properties) &
                      TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
                         struct skb_shared_hwtstamps hwtstamps;
@@ -542,7 +663,11 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
                         skb_tstamp_tx(entry->skb, &hwtstamps);
                 }
 
-                napi_consume_skb(entry->skb, napi_budget);
+                if (entry->type & TSNEP_TX_TYPE_SKB)
+                        napi_consume_skb(entry->skb, napi_budget);
+                else
+                        xdp_return_frame_rx_napi(entry->xdpf);
+                /* xdpf is union with skb */
                 entry->skb = NULL;
 
                 tx->read = (tx->read + count) % TSNEP_RING_SIZE;
@@ -560,7 +685,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
 
         __netif_tx_unlock(nq);
 
-        return (budget != 0);
+        return budget != 0;
 }
 
 static bool tsnep_tx_pending(struct tsnep_tx *tx)
@@ -1316,6 +1441,55 @@ static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
         return ns_to_ktime(timestamp);
 }
 
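+/* Select the TX queue used for XDP transmit based on the current CPU */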
+static struct tsnep_tx *tsnep_xdp_get_tx(struct tsnep_adapter *adapter, u32 cpu)
+{
+        if (cpu >= TSNEP_MAX_QUEUES)
+                cpu &= TSNEP_MAX_QUEUES - 1;
+
+        while (cpu >= adapter->num_tx_queues)
+                cpu -= adapter->num_tx_queues;
+
+        return &adapter->tx[cpu];
+}
+
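+/* Transmit a batch of XDP frames (ndo_xdp_xmit) on the TX queue selected for
+ * the current CPU, holding the corresponding netdev queue lock
+ */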
+static int tsnep_netdev_xdp_xmit(struct net_device *dev, int n,
+                                 struct xdp_frame **xdp, u32 flags)
+{
+        struct tsnep_adapter *adapter = netdev_priv(dev);
+        u32 cpu = smp_processor_id();
+        struct netdev_queue *nq;
+        struct tsnep_tx *tx;
+        int nxmit;
+        bool xmit;
+
+        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+                return -EINVAL;
+
+        tx = tsnep_xdp_get_tx(adapter, cpu);
+        nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index);
+
+        __netif_tx_lock(nq, cpu);
+
+        for (nxmit = 0; nxmit < n; nxmit++) {
+                xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], tx,
+                                                 TSNEP_TX_TYPE_XDP_NDO);
+                if (!xmit)
+                        break;
+
+                /* avoid transmit queue timeout since we share it with the slow
+                 * path
+                 */
+                txq_trans_cond_update(nq);
+        }
+
+        if (flags & XDP_XMIT_FLUSH)
+                tsnep_xdp_xmit_flush(tx);
+
+        __netif_tx_unlock(nq);
+
+        return nxmit;
+}
+
 static const struct net_device_ops tsnep_netdev_ops = {
         .ndo_open = tsnep_netdev_open,
         .ndo_stop = tsnep_netdev_close,
@@ -1327,6 +1501,7 @@ static const struct net_device_ops tsnep_netdev_ops = {
         .ndo_set_features = tsnep_netdev_set_features,
         .ndo_get_tstamp = tsnep_netdev_get_tstamp,
         .ndo_setup_tc = tsnep_tc_setup,
+        .ndo_xdp_xmit = tsnep_netdev_xdp_xmit,
 };
 
 static int tsnep_mac_init(struct tsnep_adapter *adapter)
0 commit comments