@@ -2154,6 +2154,124 @@ static const struct bpf_func_proto bpf_skb_change_type_proto = {
2154
2154
.arg2_type = ARG_ANYTHING ,
2155
2155
};
2156
2156
2157
+ static u32 bpf_skb_net_base_len (const struct sk_buff * skb )
2158
+ {
2159
+ switch (skb -> protocol ) {
2160
+ case htons (ETH_P_IP ):
2161
+ return sizeof (struct iphdr );
2162
+ case htons (ETH_P_IPV6 ):
2163
+ return sizeof (struct ipv6hdr );
2164
+ default :
2165
+ return ~0U ;
2166
+ }
2167
+ }
2168
+
2169
+ static int bpf_skb_net_grow (struct sk_buff * skb , u32 len_diff )
2170
+ {
2171
+ u32 off = skb_mac_header_len (skb ) + bpf_skb_net_base_len (skb );
2172
+ int ret ;
2173
+
2174
+ ret = skb_cow (skb , len_diff );
2175
+ if (unlikely (ret < 0 ))
2176
+ return ret ;
2177
+
2178
+ ret = bpf_skb_net_hdr_push (skb , off , len_diff );
2179
+ if (unlikely (ret < 0 ))
2180
+ return ret ;
2181
+
2182
+ if (skb_is_gso (skb )) {
2183
+ /* Due to header grow, MSS needs to be downgraded. */
2184
+ skb_shinfo (skb )-> gso_size -= len_diff ;
2185
+ /* Header must be checked, and gso_segs recomputed. */
2186
+ skb_shinfo (skb )-> gso_type |= SKB_GSO_DODGY ;
2187
+ skb_shinfo (skb )-> gso_segs = 0 ;
2188
+ }
2189
+
2190
+ return 0 ;
2191
+ }
2192
+
2193
+ static int bpf_skb_net_shrink (struct sk_buff * skb , u32 len_diff )
2194
+ {
2195
+ u32 off = skb_mac_header_len (skb ) + bpf_skb_net_base_len (skb );
2196
+ int ret ;
2197
+
2198
+ ret = skb_unclone (skb , GFP_ATOMIC );
2199
+ if (unlikely (ret < 0 ))
2200
+ return ret ;
2201
+
2202
+ ret = bpf_skb_net_hdr_pop (skb , off , len_diff );
2203
+ if (unlikely (ret < 0 ))
2204
+ return ret ;
2205
+
2206
+ if (skb_is_gso (skb )) {
2207
+ /* Due to header shrink, MSS can be upgraded. */
2208
+ skb_shinfo (skb )-> gso_size += len_diff ;
2209
+ /* Header must be checked, and gso_segs recomputed. */
2210
+ skb_shinfo (skb )-> gso_type |= SKB_GSO_DODGY ;
2211
+ skb_shinfo (skb )-> gso_segs = 0 ;
2212
+ }
2213
+
2214
+ return 0 ;
2215
+ }
2216
+
2217
+ static u32 __bpf_skb_max_len (const struct sk_buff * skb )
2218
+ {
2219
+ return skb -> dev -> mtu + skb -> dev -> hard_header_len ;
2220
+ }
2221
+
2222
+ static int bpf_skb_adjust_net (struct sk_buff * skb , s32 len_diff )
2223
+ {
2224
+ bool trans_same = skb -> transport_header == skb -> network_header ;
2225
+ u32 len_cur , len_diff_abs = abs (len_diff );
2226
+ u32 len_min = bpf_skb_net_base_len (skb );
2227
+ u32 len_max = __bpf_skb_max_len (skb );
2228
+ __be16 proto = skb -> protocol ;
2229
+ bool shrink = len_diff < 0 ;
2230
+ int ret ;
2231
+
2232
+ if (unlikely (len_diff_abs > 0xfffU ))
2233
+ return - EFAULT ;
2234
+ if (unlikely (proto != htons (ETH_P_IP ) &&
2235
+ proto != htons (ETH_P_IPV6 )))
2236
+ return - ENOTSUPP ;
2237
+
2238
+ len_cur = skb -> len - skb_network_offset (skb );
2239
+ if (skb_transport_header_was_set (skb ) && !trans_same )
2240
+ len_cur = skb_network_header_len (skb );
2241
+ if ((shrink && (len_diff_abs >= len_cur ||
2242
+ len_cur - len_diff_abs < len_min )) ||
2243
+ (!shrink && (skb -> len + len_diff_abs > len_max &&
2244
+ !skb_is_gso (skb ))))
2245
+ return - ENOTSUPP ;
2246
+
2247
+ ret = shrink ? bpf_skb_net_shrink (skb , len_diff_abs ) :
2248
+ bpf_skb_net_grow (skb , len_diff_abs );
2249
+
2250
+ bpf_compute_data_end (skb );
2251
+ return 0 ;
2252
+ }
2253
+
2254
+ BPF_CALL_4 (bpf_skb_adjust_room , struct sk_buff * , skb , s32 , len_diff ,
2255
+ u32 , mode , u64 , flags )
2256
+ {
2257
+ if (unlikely (flags ))
2258
+ return - EINVAL ;
2259
+ if (likely (mode == BPF_ADJ_ROOM_NET ))
2260
+ return bpf_skb_adjust_net (skb , len_diff );
2261
+
2262
+ return - ENOTSUPP ;
2263
+ }
2264
+
2265
/* Verifier prototype for bpf_skb_adjust_room(skb, len_diff, mode, flags):
 * not GPL-restricted, integer return, ctx pointer plus three scalar
 * arguments (validated inside the helper itself, not by the verifier).
 */
static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
	.func		= bpf_skb_adjust_room,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};
2274
+
2157
2275
static u32 __bpf_skb_min_len (const struct sk_buff * skb )
2158
2276
{
2159
2277
u32 min_len = skb_network_offset (skb );
@@ -2166,11 +2284,6 @@ static u32 __bpf_skb_min_len(const struct sk_buff *skb)
2166
2284
return min_len ;
2167
2285
}
2168
2286
2169
- static u32 __bpf_skb_max_len (const struct sk_buff * skb )
2170
- {
2171
- return skb -> dev -> mtu + skb -> dev -> hard_header_len ;
2172
- }
2173
-
2174
2287
static int bpf_skb_grow_rcsum (struct sk_buff * skb , unsigned int new_len )
2175
2288
{
2176
2289
unsigned int old_len = skb -> len ;
@@ -2307,6 +2420,7 @@ bool bpf_helper_changes_pkt_data(void *func)
2307
2420
func == bpf_skb_change_proto ||
2308
2421
func == bpf_skb_change_head ||
2309
2422
func == bpf_skb_change_tail ||
2423
+ func == bpf_skb_adjust_room ||
2310
2424
func == bpf_skb_pull_data ||
2311
2425
func == bpf_clone_redirect ||
2312
2426
func == bpf_l3_csum_replace ||
@@ -2849,6 +2963,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
2849
2963
return & bpf_skb_change_proto_proto ;
2850
2964
case BPF_FUNC_skb_change_type :
2851
2965
return & bpf_skb_change_type_proto ;
2966
+ case BPF_FUNC_skb_adjust_room :
2967
+ return & bpf_skb_adjust_room_proto ;
2852
2968
case BPF_FUNC_skb_change_tail :
2853
2969
return & bpf_skb_change_tail_proto ;
2854
2970
case BPF_FUNC_skb_get_tunnel_key :
0 commit comments