@@ -1432,6 +1432,11 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
 	}
 }
 
+static bool mtk_page_pool_enabled(struct mtk_eth *eth)
+{
+	return !eth->hwlro;
+}
+
 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
 					      struct xdp_rxq_info *xdp_q,
 					      int id, int size)
@@ -1494,11 +1499,52 @@ static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
 		skb_free_frag(data);
 }
 
+static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
+		       struct xdp_buff *xdp, struct net_device *dev)
+{
+	struct bpf_prog *prog;
+	u32 act = XDP_PASS;
+
+	rcu_read_lock();
+
+	prog = rcu_dereference(eth->prog);
+	if (!prog)
+		goto out;
+
+	act = bpf_prog_run_xdp(prog, xdp);
+	switch (act) {
+	case XDP_PASS:
+		goto out;
+	case XDP_REDIRECT:
+		if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
+			act = XDP_DROP;
+			break;
+		}
+		goto out;
+	default:
+		bpf_warn_invalid_xdp_action(dev, prog, act);
+		fallthrough;
+	case XDP_ABORTED:
+		trace_xdp_exception(dev, prog, act);
+		fallthrough;
+	case XDP_DROP:
+		break;
+	}
+
+	page_pool_put_full_page(ring->page_pool,
+				virt_to_head_page(xdp->data), true);
+out:
+	rcu_read_unlock();
+
+	return act;
+}
+
 static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		       struct mtk_eth *eth)
 {
 	struct dim_sample dim_sample = {};
 	struct mtk_rx_ring *ring;
+	bool xdp_flush = false;
 	int idx;
 	struct sk_buff *skb;
 	u8 *data, *new_data;
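
[Annotation, not part of the patch] mtk_xdp_run() reads eth->prog under rcu_read_lock() in the RX hot path, while mtk_xdp_setup() (later in this patch) swaps the pointer under RTNL. A minimal sketch of that reader/writer pairing; the "struct cfg" holder and function names are hypothetical, only the RCU/BPF calls mirror the patch:

struct cfg {
	struct bpf_prog __rcu *prog;
};

/* hot path: runs once per received frame, never blocks */
static u32 reader_path(struct cfg *c, struct xdp_buff *xdp)
{
	struct bpf_prog *prog;
	u32 act = XDP_PASS;

	rcu_read_lock();
	prog = rcu_dereference(c->prog);	/* NULL when no program is attached */
	if (prog)
		act = bpf_prog_run_xdp(prog, xdp);
	rcu_read_unlock();

	return act;
}

/* control path: caller holds RTNL, matching lockdep_rtnl_is_held() */
static void writer_path(struct cfg *c, struct bpf_prog *new_prog)
{
	struct bpf_prog *old_prog;

	old_prog = rcu_replace_pointer(c->prog, new_prog, lockdep_rtnl_is_held());
	if (old_prog)
		bpf_prog_put(old_prog);	/* drop the reference to the old program */
}
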
@@ -1507,9 +1553,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 
 	while (done < budget) {
 		unsigned int pktlen, *rxdcsum;
-		u32 hash, reason, reserve_len;
 		struct net_device *netdev;
 		dma_addr_t dma_addr;
+		u32 hash, reason;
 		int mac = 0;
 
 		ring = mtk_get_rx_ring(eth);
@@ -1539,15 +1585,49 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
 			goto release_desc;
 
+		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
+
 		/* alloc new buffer */
 		if (ring->page_pool) {
+			struct page *page = virt_to_head_page(data);
+			struct xdp_buff xdp;
+			u32 ret;
+
 			new_data = mtk_page_pool_get_buff(ring->page_pool,
 							  &dma_addr,
 							  GFP_ATOMIC);
 			if (unlikely(!new_data)) {
 				netdev->stats.rx_dropped++;
 				goto release_desc;
 			}
+
+			dma_sync_single_for_cpu(eth->dma_dev,
+				page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
+				pktlen, page_pool_get_dma_dir(ring->page_pool));
+
+			xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
+			xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
+					 false);
+			xdp_buff_clear_frags_flag(&xdp);
+
+			ret = mtk_xdp_run(eth, ring, &xdp, netdev);
+			if (ret == XDP_REDIRECT)
+				xdp_flush = true;
+
+			if (ret != XDP_PASS)
+				goto skip_rx;
+
+			skb = build_skb(data, PAGE_SIZE);
+			if (unlikely(!skb)) {
+				page_pool_put_full_page(ring->page_pool,
+							page, true);
+				netdev->stats.rx_dropped++;
+				goto skip_rx;
+			}
+
+			skb_reserve(skb, xdp.data - xdp.data_hard_start);
+			skb_put(skb, xdp.data_end - xdp.data);
+			skb_mark_for_recycle(skb);
 		} else {
 			if (ring->frag_size <= PAGE_SIZE)
 				new_data = napi_alloc_frag(ring->frag_size);
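
[Annotation, not part of the patch] In page-pool mode each RX buffer is laid out as headroom, packet data, then the skb_shared_info tail, and the BPF program may move the data pointer (e.g. via bpf_xdp_adjust_head()). The skb geometry is therefore recovered from the xdp_buff after mtk_xdp_run() rather than from fixed offsets; a reduced sketch of that arithmetic, assuming MTK_PP_HEADROOM (defined elsewhere in the driver) is the headroom reserved when the buffer was allocated:

	/* buffer: [ headroom | data ... data_end | struct skb_shared_info ] */
	skb = build_skb(data, PAGE_SIZE);
	skb_reserve(skb, xdp.data - xdp.data_hard_start);	/* headroom left after the program ran */
	skb_put(skb, xdp.data_end - xdp.data);			/* payload length after the program ran */
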
@@ -1571,27 +1651,20 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 
 			dma_unmap_single(eth->dma_dev, trxd.rxd1,
 					 ring->buf_size, DMA_FROM_DEVICE);
-		}
 
-		/* receive data */
-		skb = build_skb(data, ring->frag_size);
-		if (unlikely(!skb)) {
-			mtk_rx_put_buff(ring, data, true);
-			netdev->stats.rx_dropped++;
-			goto skip_rx;
-		}
+			skb = build_skb(data, ring->frag_size);
+			if (unlikely(!skb)) {
+				netdev->stats.rx_dropped++;
+				skb_free_frag(data);
+				goto skip_rx;
+			}
 
-		if (ring->page_pool) {
-			reserve_len = MTK_PP_HEADROOM;
-			skb_mark_for_recycle(skb);
-		} else {
-			reserve_len = NET_SKB_PAD + NET_IP_ALIGN;
+			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+			skb_put(skb, pktlen);
 		}
-		skb_reserve(skb, reserve_len);
 
-		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
 		skb->dev = netdev;
-		skb_put(skb, pktlen);
+		bytes += skb->len;
 
 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
 			rxdcsum = &trxd.rxd3;
@@ -1603,7 +1676,6 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		else
 			skb_checksum_none_assert(skb);
 		skb->protocol = eth_type_trans(skb, netdev);
-		bytes += pktlen;
 
 		hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
 		if (hash != MTK_RXD4_FOE_ENTRY) {
@@ -1666,6 +1738,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 			  &dim_sample);
 	net_dim(&eth->rx_dim, dim_sample);
 
+	if (xdp_flush)
+		xdp_do_flush_map();
+
 	return done;
 }
 
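[Annotation, not part of the patch] xdp_do_redirect() only enqueues the frame on its destination (another device, a devmap/cpumap, or an AF_XDP socket); xdp_do_flush_map() is what actually drains those queues. Keeping a per-poll xdp_flush flag and flushing once after the RX loop batches that cost over the whole NAPI budget instead of paying it for every redirected frame.
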
@@ -2011,7 +2086,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
 	if (!ring->data)
 		return -ENOMEM;
 
-	if (!eth->hwlro) {
+	if (mtk_page_pool_enabled(eth)) {
 		struct page_pool *pp;
 
 		pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
@@ -2750,6 +2825,48 @@ static int mtk_stop(struct net_device *dev)
 	return 0;
 }
 
+static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
+			 struct netlink_ext_ack *extack)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+	struct bpf_prog *old_prog;
+	bool need_update;
+
+	if (eth->hwlro) {
+		NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
+		return -EOPNOTSUPP;
+	}
+
+	if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
+		NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
+		return -EOPNOTSUPP;
+	}
+
+	need_update = !!eth->prog != !!prog;
+	if (netif_running(dev) && need_update)
+		mtk_stop(dev);
+
+	old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
+	if (old_prog)
+		bpf_prog_put(old_prog);
+
+	if (netif_running(dev) && need_update)
+		return mtk_open(dev);
+
+	return 0;
+}
+
+static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+	switch (xdp->command) {
+	case XDP_SETUP_PROG:
+		return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
+	default:
+		return -EINVAL;
+	}
+}
+
 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
 {
 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
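
[Annotation, not part of the patch] Wiring mtk_xdp() into .ndo_bpf (last hunk below) is what lets userspace attach a program, e.g. "ip link set dev eth0 xdp obj prog.o" or the libbpf sketch below (assuming libbpf >= 1.0; the object file and interface names are hypothetical):

#include <bpf/libbpf.h>
#include <net/if.h>

static int attach_xdp(const char *ifname, const char *obj_path)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int ifindex = if_nametoindex(ifname);	/* e.g. "eth0" (hypothetical) */

	if (!ifindex)
		return -1;

	obj = bpf_object__open_file(obj_path, NULL);	/* e.g. "xdp_prog.o" (hypothetical) */
	if (!obj || bpf_object__load(obj))
		return -1;

	prog = bpf_object__next_program(obj, NULL);	/* first program in the object */
	if (!prog)
		return -1;

	/* ends up in XDP_SETUP_PROG -> mtk_xdp_setup() in the driver */
	return bpf_xdp_attach(ifindex, bpf_program__fd(prog), 0, NULL);
}
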
@@ -3045,6 +3162,12 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu)
 	struct mtk_eth *eth = mac->hw;
 	u32 mcr_cur, mcr_new;
 
+	if (rcu_access_pointer(eth->prog) &&
+	    length > MTK_PP_MAX_BUF_SIZE) {
+		netdev_err(dev, "Invalid MTU for XDP mode\n");
+		return -EINVAL;
+	}
+
 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
 		mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
 		mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
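
[Annotation, not part of the patch] This MTU check (and the matching one in mtk_xdp_setup() above) exists because XDP mode uses exactly one page-pool buffer per frame: the frags flag is cleared on the xdp_buff in mtk_poll_rx, so the frame plus overhead must fit in a single page. Assuming the driver header defines the limit along these lines (the exact definition lives in mtk_eth_soc.h):

	/* roughly: page minus reserved headroom minus the shared-info tail */
	MTK_PP_MAX_BUF_SIZE == PAGE_SIZE
			       - MTK_PP_HEADROOM
			       - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
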
@@ -3372,6 +3495,7 @@ static const struct net_device_ops mtk_netdev_ops = {
 	.ndo_poll_controller	= mtk_poll_controller,
 #endif
 	.ndo_setup_tc		= mtk_eth_setup_tc,
+	.ndo_bpf		= mtk_xdp,
 };
 
 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
0 commit comments