17
17
#include <linux/prefetch.h>
18
18
#include <linux/irq.h>
19
19
#include <linux/iommu.h>
20
+ #include <linux/bpf.h>
21
+ #include <linux/filter.h>
20
22
21
23
#include "nic_reg.h"
22
24
#include "nic.h"
@@ -397,8 +399,10 @@ static void nicvf_request_sqs(struct nicvf *nic)
397
399
398
400
if (nic -> rx_queues > MAX_RCV_QUEUES_PER_QS )
399
401
rx_queues = nic -> rx_queues - MAX_RCV_QUEUES_PER_QS ;
400
- if (nic -> tx_queues > MAX_SND_QUEUES_PER_QS )
401
- tx_queues = nic -> tx_queues - MAX_SND_QUEUES_PER_QS ;
402
+
403
+ tx_queues = nic -> tx_queues + nic -> xdp_tx_queues ;
404
+ if (tx_queues > MAX_SND_QUEUES_PER_QS )
405
+ tx_queues = tx_queues - MAX_SND_QUEUES_PER_QS ;
402
406
403
407
/* Set no of Rx/Tx queues in each of the SQsets */
404
408
for (sqs = 0 ; sqs < nic -> sqs_count ; sqs ++ ) {
@@ -496,6 +500,43 @@ static int nicvf_init_resources(struct nicvf *nic)
496
500
return 0 ;
497
501
}
498
502
503
/* Run the attached XDP program on a received packet described by @cqe_rx.
 *
 * Return: true if the packet was consumed by XDP and must not be passed
 * to the network stack; false if normal stack processing should continue.
 * In this initial implementation every XDP action (PASS/TX/ABORTED/DROP)
 * falls through to the stack, so the function always returns false.
 *
 * NOTE(review): the word offsets 3 and 7 below index raw u64 words of the
 * CQE_RX descriptor to pull out the first receive-buffer length and DMA
 * address — confirm against the cqe_rx_t layout in nic.h. The caller
 * (nicvf_rcv_pkt_handler) only invokes this for single-buffer packets
 * (rb_cnt == 1), so looking at rb0 alone is presumably sufficient.
 */
static inline bool nicvf_xdp_rx(struct nicvf *nic,
				struct bpf_prog *prog,
				struct cqe_rx_t *cqe_rx)
{
	struct xdp_buff xdp;
	u32 action;
	u16 len;
	u64 dma_addr, cpu_addr;

	/* Retrieve packet buffer's DMA address and length */
	len = *((u16 *)((void *)cqe_rx + (3 * sizeof(u64))));
	dma_addr = *((u64 *)((void *)cqe_rx + (7 * sizeof(u64))));

	/* Translate IOVA to physical address; bail out (let the stack
	 * handle the packet) if the translation fails.
	 */
	cpu_addr = nicvf_iova_to_phys(nic, dma_addr);
	if (!cpu_addr)
		return false;

	xdp.data = phys_to_virt(cpu_addr);
	xdp.data_end = xdp.data + len;

	/* BPF program must run under RCU read-side protection */
	rcu_read_lock();
	action = bpf_prog_run_xdp(prog, &xdp);
	rcu_read_unlock();

	switch (action) {
	case XDP_PASS:
	case XDP_TX:
	case XDP_ABORTED:
	case XDP_DROP:
		/* Pass on all packets to network stack */
		return false;
	default:
		bpf_warn_invalid_xdp_action(action);
	}
	return false;
}
539
+
499
540
static void nicvf_snd_pkt_handler (struct net_device * netdev ,
500
541
struct cqe_send_t * cqe_tx ,
501
542
int budget , int * subdesc_cnt ,
@@ -599,6 +640,11 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
599
640
return ;
600
641
}
601
642
643
+ /* For XDP, ignore pkts spanning multiple pages */
644
+ if (nic -> xdp_prog && (cqe_rx -> rb_cnt == 1 ))
645
+ if (nicvf_xdp_rx (snic , nic -> xdp_prog , cqe_rx ))
646
+ return ;
647
+
602
648
skb = nicvf_get_rcv_skb (snic , cqe_rx );
603
649
if (!skb ) {
604
650
netdev_dbg (nic -> netdev , "Packet not received\n" );
@@ -1529,6 +1575,117 @@ static int nicvf_set_features(struct net_device *netdev,
1529
1575
return 0 ;
1530
1576
}
1531
1577
1578
+ static void nicvf_set_xdp_queues (struct nicvf * nic , bool bpf_attached )
1579
+ {
1580
+ u8 cq_count , txq_count ;
1581
+
1582
+ /* Set XDP Tx queue count same as Rx queue count */
1583
+ if (!bpf_attached )
1584
+ nic -> xdp_tx_queues = 0 ;
1585
+ else
1586
+ nic -> xdp_tx_queues = nic -> rx_queues ;
1587
+
1588
+ /* If queue count > MAX_CMP_QUEUES_PER_QS, then additional qsets
1589
+ * needs to be allocated, check how many.
1590
+ */
1591
+ txq_count = nic -> xdp_tx_queues + nic -> tx_queues ;
1592
+ cq_count = max (nic -> rx_queues , txq_count );
1593
+ if (cq_count > MAX_CMP_QUEUES_PER_QS ) {
1594
+ nic -> sqs_count = roundup (cq_count , MAX_CMP_QUEUES_PER_QS );
1595
+ nic -> sqs_count = (nic -> sqs_count / MAX_CMP_QUEUES_PER_QS ) - 1 ;
1596
+ } else {
1597
+ nic -> sqs_count = 0 ;
1598
+ }
1599
+
1600
+ /* Set primary Qset's resources */
1601
+ nic -> qs -> rq_cnt = min_t (u8 , nic -> rx_queues , MAX_RCV_QUEUES_PER_QS );
1602
+ nic -> qs -> sq_cnt = min_t (u8 , txq_count , MAX_SND_QUEUES_PER_QS );
1603
+ nic -> qs -> cq_cnt = max_t (u8 , nic -> qs -> rq_cnt , nic -> qs -> sq_cnt );
1604
+
1605
+ /* Update stack */
1606
+ nicvf_set_real_num_queues (nic -> netdev , nic -> tx_queues , nic -> rx_queues );
1607
+ }
1608
+
1609
+ static int nicvf_xdp_setup (struct nicvf * nic , struct bpf_prog * prog )
1610
+ {
1611
+ struct net_device * dev = nic -> netdev ;
1612
+ bool if_up = netif_running (nic -> netdev );
1613
+ struct bpf_prog * old_prog ;
1614
+ bool bpf_attached = false;
1615
+
1616
+ /* For now just support only the usual MTU sized frames */
1617
+ if (prog && (dev -> mtu > 1500 )) {
1618
+ netdev_warn (dev , "Jumbo frames not yet supported with XDP, current MTU %d.\n" ,
1619
+ dev -> mtu );
1620
+ return - EOPNOTSUPP ;
1621
+ }
1622
+
1623
+ if (prog && prog -> xdp_adjust_head )
1624
+ return - EOPNOTSUPP ;
1625
+
1626
+ /* ALL SQs attached to CQs i.e same as RQs, are treated as
1627
+ * XDP Tx queues and more Tx queues are allocated for
1628
+ * network stack to send pkts out.
1629
+ *
1630
+ * No of Tx queues are either same as Rx queues or whatever
1631
+ * is left in max no of queues possible.
1632
+ */
1633
+ if ((nic -> rx_queues + nic -> tx_queues ) > nic -> max_queues ) {
1634
+ netdev_warn (dev ,
1635
+ "Failed to attach BPF prog, RXQs + TXQs > Max %d\n" ,
1636
+ nic -> max_queues );
1637
+ return - ENOMEM ;
1638
+ }
1639
+
1640
+ if (if_up )
1641
+ nicvf_stop (nic -> netdev );
1642
+
1643
+ old_prog = xchg (& nic -> xdp_prog , prog );
1644
+ /* Detach old prog, if any */
1645
+ if (old_prog )
1646
+ bpf_prog_put (old_prog );
1647
+
1648
+ if (nic -> xdp_prog ) {
1649
+ /* Attach BPF program */
1650
+ nic -> xdp_prog = bpf_prog_add (nic -> xdp_prog , nic -> rx_queues - 1 );
1651
+ if (!IS_ERR (nic -> xdp_prog ))
1652
+ bpf_attached = true;
1653
+ }
1654
+
1655
+ /* Calculate Tx queues needed for XDP and network stack */
1656
+ nicvf_set_xdp_queues (nic , bpf_attached );
1657
+
1658
+ if (if_up ) {
1659
+ /* Reinitialize interface, clean slate */
1660
+ nicvf_open (nic -> netdev );
1661
+ netif_trans_update (nic -> netdev );
1662
+ }
1663
+
1664
+ return 0 ;
1665
+ }
1666
+
1667
+ static int nicvf_xdp (struct net_device * netdev , struct netdev_xdp * xdp )
1668
+ {
1669
+ struct nicvf * nic = netdev_priv (netdev );
1670
+
1671
+ /* To avoid checks while retrieving buffer address from CQE_RX,
1672
+ * do not support XDP for T88 pass1.x silicons which are anyway
1673
+ * not in use widely.
1674
+ */
1675
+ if (pass1_silicon (nic -> pdev ))
1676
+ return - EOPNOTSUPP ;
1677
+
1678
+ switch (xdp -> command ) {
1679
+ case XDP_SETUP_PROG :
1680
+ return nicvf_xdp_setup (nic , xdp -> prog );
1681
+ case XDP_QUERY_PROG :
1682
+ xdp -> prog_attached = !!nic -> xdp_prog ;
1683
+ return 0 ;
1684
+ default :
1685
+ return - EINVAL ;
1686
+ }
1687
+ }
1688
+
1532
1689
static const struct net_device_ops nicvf_netdev_ops = {
1533
1690
.ndo_open = nicvf_open ,
1534
1691
.ndo_stop = nicvf_stop ,
@@ -1539,6 +1696,7 @@ static const struct net_device_ops nicvf_netdev_ops = {
1539
1696
.ndo_tx_timeout = nicvf_tx_timeout ,
1540
1697
.ndo_fix_features = nicvf_fix_features ,
1541
1698
.ndo_set_features = nicvf_set_features ,
1699
+ .ndo_xdp = nicvf_xdp ,
1542
1700
};
1543
1701
1544
1702
static int nicvf_probe (struct pci_dev * pdev , const struct pci_device_id * ent )
0 commit comments