Skip to content

Commit d24bc0b

Browse files
Gerhard Engleder authored; committed by davem330 (David S. Miller)
tsnep: Add XDP TX support
Implement ndo_xdp_xmit() for XDP TX support. Support for fragmented XDP frames is included. Also some braces and logic cleanups are done in normal TX path to keep both TX paths in sync. Signed-off-by: Gerhard Engleder <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 95337b9 commit d24bc0b

File tree

2 files changed

+189
-10
lines changed

2 files changed

+189
-10
lines changed

drivers/net/ethernet/engleder/tsnep.h

+5-1
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,11 @@ struct tsnep_tx_entry {
6565

6666
u32 properties;
6767

68-
struct sk_buff *skb;
68+
u32 type;
69+
union {
70+
struct sk_buff *skb;
71+
struct xdp_frame *xdpf;
72+
};
6973
size_t len;
7074
DEFINE_DMA_UNMAP_ADDR(dma);
7175
};

drivers/net/ethernet/engleder/tsnep_main.c

+184-9
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,11 @@
4343
#define TSNEP_COALESCE_USECS_MAX ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \
4444
ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)
4545

46+
#define TSNEP_TX_TYPE_SKB BIT(0)
47+
#define TSNEP_TX_TYPE_SKB_FRAG BIT(1)
48+
#define TSNEP_TX_TYPE_XDP_TX BIT(2)
49+
#define TSNEP_TX_TYPE_XDP_NDO BIT(3)
50+
4651
static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
4752
{
4853
iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
@@ -306,10 +311,12 @@ static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
306311
struct tsnep_tx_entry *entry = &tx->entry[index];
307312

308313
entry->properties = 0;
314+
/* xdpf is union with skb */
309315
if (entry->skb) {
310316
entry->properties = length & TSNEP_DESC_LENGTH_MASK;
311317
entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
312-
if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)
318+
if ((entry->type & TSNEP_TX_TYPE_SKB) &&
319+
(skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS))
313320
entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
314321

315322
/* toggle user flag to prevent false acknowledge
@@ -378,15 +385,19 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
378385
for (i = 0; i < count; i++) {
379386
entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];
380387

381-
if (i == 0) {
388+
if (!i) {
382389
len = skb_headlen(skb);
383390
dma = dma_map_single(dmadev, skb->data, len,
384391
DMA_TO_DEVICE);
392+
393+
entry->type = TSNEP_TX_TYPE_SKB;
385394
} else {
386395
len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
387396
dma = skb_frag_dma_map(dmadev,
388397
&skb_shinfo(skb)->frags[i - 1],
389398
0, len, DMA_TO_DEVICE);
399+
400+
entry->type = TSNEP_TX_TYPE_SKB_FRAG;
390401
}
391402
if (dma_mapping_error(dmadev, dma))
392403
return -ENOMEM;
@@ -413,12 +424,13 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
413424
entry = &tx->entry[(index + i) % TSNEP_RING_SIZE];
414425

415426
if (entry->len) {
416-
if (i == 0)
427+
if (entry->type & TSNEP_TX_TYPE_SKB)
417428
dma_unmap_single(dmadev,
418429
dma_unmap_addr(entry, dma),
419430
dma_unmap_len(entry, len),
420431
DMA_TO_DEVICE);
421-
else
432+
else if (entry->type &
433+
(TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_XDP_NDO))
422434
dma_unmap_page(dmadev,
423435
dma_unmap_addr(entry, dma),
424436
dma_unmap_len(entry, len),
@@ -472,7 +484,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
472484

473485
for (i = 0; i < count; i++)
474486
tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length,
475-
i == (count - 1));
487+
i == count - 1);
476488
tx->write = (tx->write + count) % TSNEP_RING_SIZE;
477489

478490
skb_tx_timestamp(skb);
@@ -490,6 +502,110 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
490502
return NETDEV_TX_OK;
491503
}
492504

505+
/* Map the linear part and all fragments of an XDP frame into consecutive
 * TX ring entries.
 *
 * For TSNEP_TX_TYPE_XDP_NDO every buffer is freshly DMA mapped with
 * dma_map_single(). For TSNEP_TX_TYPE_XDP_TX the pages originate from the
 * page pool, so the pre-existing DMA address is reused and only synced for
 * device.
 *
 * Returns the total mapped length in bytes, or -ENOMEM on mapping failure.
 */
static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx,
			    struct skb_shared_info *shinfo, int count, u32 type)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	struct page *page;
	skb_frag_t *frag = NULL;
	unsigned int len = xdpf->len;	/* first iteration maps the head */
	int map_len = 0;
	dma_addr_t dma;
	void *data;
	int i;

	for (i = 0; i < count; i++) {
		entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];

		if (type & TSNEP_TX_TYPE_XDP_NDO) {
			data = unlikely(frag) ? skb_frag_address(frag) :
						xdpf->data;
			dma = dma_map_single(dmadev, data, len, DMA_TO_DEVICE);
			if (dma_mapping_error(dmadev, dma))
				return -ENOMEM;

			entry->type = TSNEP_TX_TYPE_XDP_NDO;
		} else {
			page = unlikely(frag) ? skb_frag_page(frag) :
						virt_to_page(xdpf->data);
			dma = page_pool_get_dma_addr(page);
			/* head data sits behind the xdp_frame struct and the
			 * reserved headroom within the page
			 */
			if (unlikely(frag))
				dma += skb_frag_off(frag);
			else
				dma += sizeof(*xdpf) + xdpf->headroom;
			dma_sync_single_for_device(dmadev, dma, len,
						   DMA_BIDIRECTIONAL);

			entry->type = TSNEP_TX_TYPE_XDP_TX;
		}

		entry->len = len;
		dma_unmap_addr_set(entry, dma, dma);

		entry->desc->tx = __cpu_to_le64(dma);

		map_len += len;

		/* advance to the next fragment, if any */
		if (i + 1 < count) {
			frag = &shinfo->frags[i];
			len = skb_frag_size(frag);
		}
	}

	return map_len;
}
559+
560+
/* This function requires __netif_tx_lock is held by the caller. */
561+
static bool tsnep_xdp_xmit_frame_ring(struct xdp_frame *xdpf,
562+
struct tsnep_tx *tx, u32 type)
563+
{
564+
struct skb_shared_info *shinfo = xdp_get_shared_info_from_frame(xdpf);
565+
struct tsnep_tx_entry *entry;
566+
int count, length, retval, i;
567+
568+
count = 1;
569+
if (unlikely(xdp_frame_has_frags(xdpf)))
570+
count += shinfo->nr_frags;
571+
572+
/* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS
573+
* will be available for normal TX path and queue is stopped there if
574+
* necessary
575+
*/
576+
if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count))
577+
return false;
578+
579+
entry = &tx->entry[tx->write];
580+
entry->xdpf = xdpf;
581+
582+
retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type);
583+
if (retval < 0) {
584+
tsnep_tx_unmap(tx, tx->write, count);
585+
entry->xdpf = NULL;
586+
587+
tx->dropped++;
588+
589+
return false;
590+
}
591+
length = retval;
592+
593+
for (i = 0; i < count; i++)
594+
tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length,
595+
i == count - 1);
596+
tx->write = (tx->write + count) % TSNEP_RING_SIZE;
597+
598+
/* descriptor properties shall be valid before hardware is notified */
599+
dma_wmb();
600+
601+
return true;
602+
}
603+
604+
static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx)
605+
{
606+
iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);
607+
}
608+
493609
static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
494610
{
495611
struct tsnep_tx_entry *entry;
@@ -517,12 +633,17 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
517633
dma_rmb();
518634

519635
count = 1;
520-
if (skb_shinfo(entry->skb)->nr_frags > 0)
636+
if ((entry->type & TSNEP_TX_TYPE_SKB) &&
637+
skb_shinfo(entry->skb)->nr_frags > 0)
521638
count += skb_shinfo(entry->skb)->nr_frags;
639+
else if (!(entry->type & TSNEP_TX_TYPE_SKB) &&
640+
xdp_frame_has_frags(entry->xdpf))
641+
count += xdp_get_shared_info_from_frame(entry->xdpf)->nr_frags;
522642

523643
length = tsnep_tx_unmap(tx, tx->read, count);
524644

525-
if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
645+
if ((entry->type & TSNEP_TX_TYPE_SKB) &&
646+
(skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
526647
(__le32_to_cpu(entry->desc_wb->properties) &
527648
TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
528649
struct skb_shared_hwtstamps hwtstamps;
@@ -542,7 +663,11 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
542663
skb_tstamp_tx(entry->skb, &hwtstamps);
543664
}
544665

545-
napi_consume_skb(entry->skb, napi_budget);
666+
if (entry->type & TSNEP_TX_TYPE_SKB)
667+
napi_consume_skb(entry->skb, napi_budget);
668+
else
669+
xdp_return_frame_rx_napi(entry->xdpf);
670+
/* xdpf is union with skb */
546671
entry->skb = NULL;
547672

548673
tx->read = (tx->read + count) % TSNEP_RING_SIZE;
@@ -560,7 +685,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
560685

561686
__netif_tx_unlock(nq);
562687

563-
return (budget != 0);
688+
return budget != 0;
564689
}
565690

566691
static bool tsnep_tx_pending(struct tsnep_tx *tx)
@@ -1316,6 +1441,55 @@ static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
13161441
return ns_to_ktime(timestamp);
13171442
}
13181443

1444+
/* Pick a TX queue for XDP transmission based on the executing CPU.
 *
 * First fold the CPU id into the TSNEP_MAX_QUEUES range (power of two, so a
 * mask suffices), then reduce it below the actual number of TX queues.
 */
static struct tsnep_tx *tsnep_xdp_get_tx(struct tsnep_adapter *adapter, u32 cpu)
{
	if (cpu >= TSNEP_MAX_QUEUES)
		cpu &= TSNEP_MAX_QUEUES - 1;

	while (cpu >= adapter->num_tx_queues)
		cpu -= adapter->num_tx_queues;

	return &adapter->tx[cpu];
}
1454+
1455+
/* ndo_xdp_xmit() implementation: transmit up to @n XDP frames.
 *
 * The TX queue is selected by CPU and shared with the normal TX path, so it
 * is taken with __netif_tx_lock(). Returns the number of frames queued;
 * remaining frames are left for the caller to free.
 */
static int tsnep_netdev_xdp_xmit(struct net_device *dev, int n,
				 struct xdp_frame **xdp, u32 flags)
{
	struct tsnep_adapter *adapter = netdev_priv(dev);
	u32 cpu = smp_processor_id();
	struct netdev_queue *nq;
	struct tsnep_tx *tx;
	int nxmit;
	bool xmit;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	tx = tsnep_xdp_get_tx(adapter, cpu);
	nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index);

	__netif_tx_lock(nq, cpu);

	for (nxmit = 0; nxmit < n; nxmit++) {
		xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], tx,
						 TSNEP_TX_TYPE_XDP_NDO);
		if (!xmit)
			break;

		/* avoid transmit queue timeout since we share it with the slow
		 * path
		 */
		txq_trans_cond_update(nq);
	}

	if (flags & XDP_XMIT_FLUSH)
		tsnep_xdp_xmit_flush(tx);

	__netif_tx_unlock(nq);

	return nxmit;
}
1492+
13191493
static const struct net_device_ops tsnep_netdev_ops = {
13201494
.ndo_open = tsnep_netdev_open,
13211495
.ndo_stop = tsnep_netdev_close,
@@ -1327,6 +1501,7 @@ static const struct net_device_ops tsnep_netdev_ops = {
13271501
.ndo_set_features = tsnep_netdev_set_features,
13281502
.ndo_get_tstamp = tsnep_netdev_get_tstamp,
13291503
.ndo_setup_tc = tsnep_tc_setup,
1504+
.ndo_xdp_xmit = tsnep_netdev_xdp_xmit,
13301505
};
13311506

13321507
static int tsnep_mac_init(struct tsnep_adapter *adapter)

0 commit comments

Comments
 (0)