
TOMOYO Linux Cross Reference
Linux/net/core/gro.c


// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>
#include <linux/skbuff_ref.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);

/**
 *      dev_add_offload - register offload handlers
 *      @po: protocol offload declaration
 *
 *      Add protocol offload handlers to the networking stack. The passed
 *      &packet_offload is linked into kernel lists and may not be freed until
 *      it has been removed from the kernel lists.
 *
 *      This call does not sleep, therefore it cannot guarantee that
 *      all CPUs that are in the middle of receiving packets will see
 *      the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
        struct packet_offload *elem;

        spin_lock(&offload_lock);
        list_for_each_entry(elem, &net_hotdata.offload_base, list) {
                if (po->priority < elem->priority)
                        break;
        }
        list_add_rcu(&po->list, elem->list.prev);
        spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
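
/* Illustrative sketch (not part of gro.c): registering an offload.  This
 * mirrors how the IPv4 stack wires up its handlers at boot in
 * net/ipv4/af_inet.c; the "my_" names below are hypothetical.
 */
static struct packet_offload my_packet_offload __read_mostly = {
        .type = cpu_to_be16(ETH_P_IP),
        .callbacks = {
                .gro_receive  = inet_gro_receive,
                .gro_complete = inet_gro_complete,
        },
};

static int __init my_offload_init(void)
{
        dev_add_offload(&my_packet_offload);
        return 0;
}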

/**
 *      __dev_remove_offload     - remove offload handler
 *      @po: packet offload declaration
 *
 *      Remove a protocol offload handler that was previously added to the
 *      kernel offload handlers by dev_add_offload(). The passed &packet_offload
 *      is removed from the kernel lists and can be freed or reused once this
 *      function returns.
 *
 *      The packet type might still be in use by receivers
 *      and must not be freed until after all the CPUs have gone
 *      through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
        struct list_head *head = &net_hotdata.offload_base;
        struct packet_offload *po1;

        spin_lock(&offload_lock);

        list_for_each_entry(po1, head, list) {
                if (po == po1) {
                        list_del_rcu(&po->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_offload: %p not found\n", po);
out:
        spin_unlock(&offload_lock);
}

/**
 *      dev_remove_offload       - remove packet offload handler
 *      @po: packet offload declaration
 *
 *      Remove a packet offload handler that was previously added to the kernel
 *      offload handlers by dev_add_offload(). The passed &packet_offload is
 *      removed from the kernel lists and can be freed or reused once this
 *      function returns.
 *
 *      This call sleeps to guarantee that no CPU is looking at the packet
 *      type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
        __dev_remove_offload(po);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
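
/* Illustrative sketch (not part of gro.c): a module that registered an
 * offload pairs it with dev_remove_offload() on unload.  Because
 * dev_remove_offload() calls synchronize_net(), the structure may be
 * freed or reused as soon as it returns.  Continues the hypothetical
 * example above:
 */
static void __exit my_offload_exit(void)
{
        dev_remove_offload(&my_packet_offload);
        /* my_packet_offload can now safely be freed or reused */
}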


int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
        struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
        unsigned int offset = skb_gro_offset(skb);
        unsigned int headlen = skb_headlen(skb);
        unsigned int len = skb_gro_len(skb);
        unsigned int delta_truesize;
        unsigned int new_truesize;
        struct sk_buff *lp;
        int segs;

        /* Do not splice page-pool-based packets with non-page-pool
         * packets. This can result in reference count issues as page
         * pool pages will not decrement the reference count and will
         * instead be immediately returned to the pool or have their
         * frag count decremented.
         */
        if (p->pp_recycle != skb->pp_recycle)
                return -ETOOMANYREFS;

        if (unlikely(p->len + len >= netif_get_gro_max_size(p->dev, p) ||
                     NAPI_GRO_CB(skb)->flush))
                return -E2BIG;

        if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
                if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
                    (p->protocol == htons(ETH_P_IPV6) &&
                     skb_headroom(p) < sizeof(struct hop_jumbo_hdr)) ||
                    p->encapsulation)
                        return -E2BIG;
        }

        segs = NAPI_GRO_CB(skb)->count;
        lp = NAPI_GRO_CB(p)->last;
        pinfo = skb_shinfo(lp);

        if (headlen <= offset) {
                skb_frag_t *frag;
                skb_frag_t *frag2;
                int i = skbinfo->nr_frags;
                int nr_frags = pinfo->nr_frags + i;

                if (nr_frags > MAX_SKB_FRAGS)
                        goto merge;

                offset -= headlen;
                pinfo->nr_frags = nr_frags;
                skbinfo->nr_frags = 0;

                frag = pinfo->frags + nr_frags;
                frag2 = skbinfo->frags + i;
                do {
                        *--frag = *--frag2;
                } while (--i);

                skb_frag_off_add(frag, offset);
                skb_frag_size_sub(frag, offset);

                /* all fragments' truesize: remove (head size + sk_buff) */
                new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
                delta_truesize = skb->truesize - new_truesize;

                skb->truesize = new_truesize;
                skb->len -= skb->data_len;
                skb->data_len = 0;

                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
                goto done;
        } else if (skb->head_frag) {
                int nr_frags = pinfo->nr_frags;
                skb_frag_t *frag = pinfo->frags + nr_frags;
                struct page *page = virt_to_head_page(skb->head);
                unsigned int first_size = headlen - offset;
                unsigned int first_offset;

                if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
                        goto merge;

                first_offset = skb->data -
                               (unsigned char *)page_address(page) +
                               offset;

                pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

                skb_frag_fill_page_desc(frag, page, first_offset, first_size);

                memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
                /* We don't need to clear skbinfo->nr_frags here */

                new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
                delta_truesize = skb->truesize - new_truesize;
                skb->truesize = new_truesize;
                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
                goto done;
        }

merge:
        /* sk ownership - if any - completely transferred to the aggregated packet */
        skb->destructor = NULL;
        skb->sk = NULL;
        delta_truesize = skb->truesize;
        if (offset > headlen) {
                unsigned int eat = offset - headlen;

                skb_frag_off_add(&skbinfo->frags[0], eat);
                skb_frag_size_sub(&skbinfo->frags[0], eat);
                skb->data_len -= eat;
                skb->len -= eat;
                offset = headlen;
        }

        __skb_pull(skb, offset);

        if (NAPI_GRO_CB(p)->last == p)
                skb_shinfo(p)->frag_list = skb;
        else
                NAPI_GRO_CB(p)->last->next = skb;
        NAPI_GRO_CB(p)->last = skb;
        __skb_header_release(skb);
        lp = p;

done:
        NAPI_GRO_CB(p)->count += segs;
        p->data_len += len;
        p->truesize += delta_truesize;
        p->len += len;
        if (lp != p) {
                lp->data_len += len;
                lp->truesize += delta_truesize;
                lp->len += len;
        }
        NAPI_GRO_CB(skb)->same_flow = 1;
        return 0;
}
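
/* Illustrative sketch (not part of gro.c): skb_gro_receive() is called from
 * a protocol's ->gro_receive handler once a same-flow packet has been
 * identified, roughly as tcp_gro_receive() does.  Heavily simplified and
 * hypothetical:
 */
static struct sk_buff *my_proto_gro_receive(struct list_head *head,
                                            struct sk_buff *skb)
{
        struct sk_buff *p;

        list_for_each_entry(p, head, list) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;
                /* Merge skb into p; a non-zero return (e.g. -E2BIG) means
                 * the aggregate is full, so hand p back for flushing.
                 */
                if (skb_gro_receive(p, skb))
                        return p;
                return NULL;    /* merged; skb now rides along with p */
        }
        return NULL;            /* no match; dev_gro_receive() holds skb */
}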

int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
{
        if (unlikely(p->len + skb->len >= 65536))
                return -E2BIG;

        if (NAPI_GRO_CB(p)->last == p)
                skb_shinfo(p)->frag_list = skb;
        else
                NAPI_GRO_CB(p)->last->next = skb;

        skb_pull(skb, skb_gro_offset(skb));

        NAPI_GRO_CB(p)->last = skb;
        NAPI_GRO_CB(p)->count++;
        p->data_len += skb->len;

        /* sk ownership - if any - completely transferred to the aggregated packet */
        skb->destructor = NULL;
        skb->sk = NULL;
        p->truesize += skb->truesize;
        p->len += skb->len;

        NAPI_GRO_CB(skb)->same_flow = 1;

        return 0;
}


static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
        struct list_head *head = &net_hotdata.offload_base;
        struct packet_offload *ptype;
        __be16 type = skb->protocol;
        int err = -ENOENT;

        BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

        if (NAPI_GRO_CB(skb)->count == 1) {
                skb_shinfo(skb)->gso_size = 0;
                goto out;
        }

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, head, list) {
                if (ptype->type != type || !ptype->callbacks.gro_complete)
                        continue;

                err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
                                         ipv6_gro_complete, inet_gro_complete,
                                         skb, 0);
                break;
        }
        rcu_read_unlock();

        if (err) {
                WARN_ON(&ptype->list == head);
                kfree_skb(skb);
                return;
        }

out:
        gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
                                   bool flush_old)
{
        struct list_head *head = &napi->gro_hash[index].list;
        struct sk_buff *skb, *p;

        list_for_each_entry_safe_reverse(skb, p, head, list) {
                if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
                        return;
                skb_list_del_init(skb);
                napi_gro_complete(napi, skb);
                napi->gro_hash[index].count--;
        }

        if (!napi->gro_hash[index].count)
                __clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age,
 * youngest packets at the head.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
        unsigned long bitmask = napi->gro_bitmask;
        unsigned int i, base = ~0U;

        while ((i = ffs(bitmask)) != 0) {
                bitmask >>= i;
                base += i;
                __napi_gro_flush_chain(napi, base, flush_old);
        }
}
EXPORT_SYMBOL(napi_gro_flush);
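
/* Illustrative sketch (not part of gro.c): the ffs() loop above visits each
 * set bit of the bucket bitmask exactly once.  ffs() returns the 1-based
 * index of the lowest set bit (0 if none), so shifting right by its return
 * value and accumulating into 'base' (which starts at ~0U, i.e. -1) yields
 * the absolute bit index.  For bitmask = 0b1010: ffs() -> 2, base = 1
 * (bucket 1); then ffs() -> 2 again, base = 3 (bucket 3); then done.
 * A hypothetical stand-alone walk of the same shape:
 */
static void visit_buckets(unsigned long bitmask)
{
        unsigned int i, base = ~0U;

        while ((i = ffs(bitmask)) != 0) {
                bitmask >>= i;  /* consume up to and including that bit */
                base += i;      /* absolute index of the bit just found */
                pr_info("bucket %u is non-empty\n", base);
        }
}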

static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb,
                                             const struct sk_buff *p,
                                             unsigned long diffs)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
        struct tc_skb_ext *skb_ext;
        struct tc_skb_ext *p_ext;

        skb_ext = skb_ext_find(skb, TC_SKB_EXT);
        p_ext = skb_ext_find(p, TC_SKB_EXT);

        diffs |= (!!p_ext) ^ (!!skb_ext);
        if (!diffs && unlikely(skb_ext))
                diffs |= p_ext->chain ^ skb_ext->chain;
#endif
        return diffs;
}

static void gro_list_prepare(const struct list_head *head,
                             const struct sk_buff *skb)
{
        unsigned int maclen = skb->dev->hard_header_len;
        u32 hash = skb_get_hash_raw(skb);
        struct sk_buff *p;

        list_for_each_entry(p, head, list) {
                unsigned long diffs;

                if (hash != skb_get_hash_raw(p)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }

                diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
                diffs |= p->vlan_all ^ skb->vlan_all;
                diffs |= skb_metadata_differs(p, skb);
                if (maclen == ETH_HLEN)
                        diffs |= compare_ether_header(skb_mac_header(p),
                                                      skb_mac_header(skb));
                else if (!diffs)
                        diffs = memcmp(skb_mac_header(p),
                                       skb_mac_header(skb),
                                       maclen);

                /* In the most common scenarios 'slow_gro' is 0; otherwise
                 * we are already on some slower paths, so either skip all
                 * the infrequent tests altogether or avoid trying too hard
                 * to skip each of them individually.
                 */
                if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
                        diffs |= p->sk != skb->sk;
                        diffs |= skb_metadata_dst_cmp(p, skb);
                        diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

                        diffs |= gro_list_prepare_tc_ext(skb, p, diffs);
                }

                NAPI_GRO_CB(p)->same_flow = !diffs;
        }
}

static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
        const struct skb_shared_info *pinfo;
        const skb_frag_t *frag0;
        unsigned int headlen;

        NAPI_GRO_CB(skb)->network_offset = 0;
        NAPI_GRO_CB(skb)->data_offset = 0;
        headlen = skb_headlen(skb);
        NAPI_GRO_CB(skb)->frag0 = skb->data;
        NAPI_GRO_CB(skb)->frag0_len = headlen;
        if (headlen)
                return;

        pinfo = skb_shinfo(skb);
        frag0 = &pinfo->frags[0];

        if (pinfo->nr_frags && !PageHighMem(skb_frag_page(frag0)) &&
            (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
                NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
                NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
                                                    skb_frag_size(frag0),
                                                    skb->end - skb->tail);
        }
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
        struct skb_shared_info *pinfo = skb_shinfo(skb);

        BUG_ON(skb->end - skb->tail < grow);

        memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

        skb->data_len -= grow;
        skb->tail += grow;

        skb_frag_off_add(&pinfo->frags[0], grow);
        skb_frag_size_sub(&pinfo->frags[0], grow);

        if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
                skb_frag_unref(skb, 0);
                memmove(pinfo->frags, pinfo->frags + 1,
                        --pinfo->nr_frags * sizeof(pinfo->frags[0]));
        }
}

static void gro_try_pull_from_frag0(struct sk_buff *skb)
{
        int grow = skb_gro_offset(skb) - skb_headlen(skb);

        if (grow > 0)
                gro_pull_from_frag0(skb, grow);
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
        struct sk_buff *oldest;

        oldest = list_last_entry(head, struct sk_buff, list);

        /* We are called only when the chain already holds at least
         * MAX_GRO_SKBS entries, so an empty list is impossible.
         */
        if (WARN_ON_ONCE(!oldest))
                return;

        /* Do not adjust napi->gro_hash[].count, caller is adding a new
         * SKB to the chain.
         */
        skb_list_del_init(oldest);
        napi_gro_complete(napi, oldest);
}

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
        u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
        struct gro_list *gro_list = &napi->gro_hash[bucket];
        struct list_head *head = &net_hotdata.offload_base;
        struct packet_offload *ptype;
        __be16 type = skb->protocol;
        struct sk_buff *pp = NULL;
        enum gro_result ret;
        int same_flow;

        if (netif_elide_gro(skb->dev))
                goto normal;

        gro_list_prepare(&gro_list->list, skb);

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, head, list) {
                if (ptype->type == type && ptype->callbacks.gro_receive)
                        goto found_ptype;
        }
        rcu_read_unlock();
        goto normal;

found_ptype:
        skb_set_network_header(skb, skb_gro_offset(skb));
        skb_reset_mac_len(skb);
        BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
        BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
                                        sizeof(u32))); /* Avoid slow unaligned accesses */
        *(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
        NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
        NAPI_GRO_CB(skb)->count = 1;
        if (unlikely(skb_is_gso(skb))) {
                NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
                /* Only support TCP and non-DODGY users. */
                if (!skb_is_gso_tcp(skb) ||
                    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
                        NAPI_GRO_CB(skb)->flush = 1;
        }

        /* Set up for GRO checksum validation */
        switch (skb->ip_summed) {
        case CHECKSUM_COMPLETE:
                NAPI_GRO_CB(skb)->csum = skb->csum;
                NAPI_GRO_CB(skb)->csum_valid = 1;
                break;
        case CHECKSUM_UNNECESSARY:
                NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
                break;
        }

        pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
                                ipv6_gro_receive, inet_gro_receive,
                                &gro_list->list, skb);

        rcu_read_unlock();

        if (PTR_ERR(pp) == -EINPROGRESS) {
                ret = GRO_CONSUMED;
                goto ok;
        }

        same_flow = NAPI_GRO_CB(skb)->same_flow;
        ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

        if (pp) {
                skb_list_del_init(pp);
                napi_gro_complete(napi, pp);
                gro_list->count--;
        }

        if (same_flow)
                goto ok;

        if (NAPI_GRO_CB(skb)->flush)
                goto normal;

        if (unlikely(gro_list->count >= MAX_GRO_SKBS))
                gro_flush_oldest(napi, &gro_list->list);
        else
                gro_list->count++;

        /* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */
        gro_try_pull_from_frag0(skb);
        NAPI_GRO_CB(skb)->age = jiffies;
        NAPI_GRO_CB(skb)->last = skb;
        if (!skb_is_gso(skb))
                skb_shinfo(skb)->gso_size = skb_gro_len(skb);
        list_add(&skb->list, &gro_list->list);
        ret = GRO_HELD;
ok:
        if (gro_list->count) {
                if (!test_bit(bucket, &napi->gro_bitmask))
                        __set_bit(bucket, &napi->gro_bitmask);
        } else if (test_bit(bucket, &napi->gro_bitmask)) {
                __clear_bit(bucket, &napi->gro_bitmask);
        }

        return ret;

normal:
        ret = GRO_NORMAL;
        gro_try_pull_from_frag0(skb);
        goto ok;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
        struct list_head *offload_head = &net_hotdata.offload_base;
        struct packet_offload *ptype;

        list_for_each_entry_rcu(ptype, offload_head, list) {
                if (ptype->type != type || !ptype->callbacks.gro_receive)
                        continue;
                return ptype;
        }
        return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);
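
/* Illustrative sketch (not part of gro.c): encapsulation offloads use this
 * lookup to dispatch to the inner protocol's handler, much like
 * eth_gro_receive() in net/ethernet/eth.c.  GRO receive runs under the
 * rcu_read_lock() taken in dev_gro_receive(), so no extra locking is
 * needed here.  Simplified and hypothetical:
 */
static struct sk_buff *my_tunnel_gro_receive(struct list_head *head,
                                             struct sk_buff *skb,
                                             __be16 inner_proto)
{
        struct packet_offload *ptype;

        ptype = gro_find_receive_by_type(inner_proto);
        if (!ptype) {
                NAPI_GRO_CB(skb)->flush = 1;    /* no handler: don't hold */
                return NULL;
        }
        return ptype->callbacks.gro_receive(head, skb);
}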

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
        struct list_head *offload_head = &net_hotdata.offload_base;
        struct packet_offload *ptype;

        list_for_each_entry_rcu(ptype, offload_head, list) {
                if (ptype->type != type || !ptype->callbacks.gro_complete)
                        continue;
                return ptype;
        }
        return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
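
/* Illustrative sketch (not part of gro.c): the completion side mirrors the
 * receive side, finishing the inner protocol's headers once aggregation is
 * done (compare eth_gro_complete()).  Hypothetical helper:
 */
static int my_tunnel_gro_complete(struct sk_buff *skb, int nhoff,
                                  __be16 inner_proto)
{
        struct packet_offload *ptype;
        int err = -ENOENT;

        rcu_read_lock();
        ptype = gro_find_complete_by_type(inner_proto);
        if (ptype)
                err = ptype->callbacks.gro_complete(skb, nhoff);
        rcu_read_unlock();

        return err;
}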

static gro_result_t napi_skb_finish(struct napi_struct *napi,
                                    struct sk_buff *skb,
                                    gro_result_t ret)
{
        switch (ret) {
        case GRO_NORMAL:
                gro_normal_one(napi, skb, 1);
                break;

        case GRO_MERGED_FREE:
                if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
                        napi_skb_free_stolen_head(skb);
                else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
                        __kfree_skb(skb);
                else
                        __napi_kfree_skb(skb, SKB_CONSUMED);
                break;

        case GRO_HELD:
        case GRO_MERGED:
        case GRO_CONSUMED:
                break;
        }

        return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
        gro_result_t ret;

        skb_mark_napi_id(skb, napi);
        trace_napi_gro_receive_entry(skb);

        skb_gro_reset_offset(skb, 0);

        ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
        trace_napi_gro_receive_exit(ret);

        return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
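
/* Illustrative sketch (not part of gro.c): a NAPI driver typically feeds
 * each completed RX buffer to napi_gro_receive() from its poll routine.
 * my_driver_next_rx_skb() is a hypothetical helper that builds an skb for
 * the next RX completion:
 */
static int my_driver_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;

        while (work_done < budget) {
                struct sk_buff *skb = my_driver_next_rx_skb(napi);

                if (!skb)
                        break;
                skb->protocol = eth_type_trans(skb, napi->dev);
                napi_gro_receive(napi, skb);
                work_done++;
        }
        if (work_done < budget)
                napi_complete_done(napi, work_done);
        return work_done;
}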

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
        if (unlikely(skb->pfmemalloc)) {
                consume_skb(skb);
                return;
        }
        __skb_pull(skb, skb_headlen(skb));
        /* restore the reserve we had after netdev_alloc_skb_ip_align() */
        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
        __vlan_hwaccel_clear_tag(skb);
        skb->dev = napi->dev;
        skb->skb_iif = 0;

        /* eth_type_trans() assumes pkt_type is PACKET_HOST */
        skb->pkt_type = PACKET_HOST;

        skb->encapsulation = 0;
        skb_shinfo(skb)->gso_type = 0;
        skb_shinfo(skb)->gso_size = 0;
        if (unlikely(skb->slow_gro)) {
                skb_orphan(skb);
                skb_ext_reset(skb);
                nf_reset_ct(skb);
                skb->slow_gro = 0;
        }

        napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
        struct sk_buff *skb = napi->skb;

        if (!skb) {
                skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
                if (skb) {
                        napi->skb = skb;
                        skb_mark_napi_id(skb, napi);
                }
        }
        return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
                                      struct sk_buff *skb,
                                      gro_result_t ret)
{
        switch (ret) {
        case GRO_NORMAL:
        case GRO_HELD:
                __skb_push(skb, ETH_HLEN);
                skb->protocol = eth_type_trans(skb, skb->dev);
                if (ret == GRO_NORMAL)
                        gro_normal_one(napi, skb, 1);
                break;

        case GRO_MERGED_FREE:
                if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
                        napi_skb_free_stolen_head(skb);
                else
                        napi_reuse_skb(napi, skb);
                break;

        case GRO_MERGED:
        case GRO_CONSUMED:
                break;
        }

        return ret;
}

/* The upper GRO stack assumes the network header starts at gro_offset=0.
 * Drivers may call both napi_gro_frags() and napi_gro_receive(), so we
 * copy the Ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
        struct sk_buff *skb = napi->skb;
        const struct ethhdr *eth;
        unsigned int hlen = sizeof(*eth);

        napi->skb = NULL;

        skb_reset_mac_header(skb);
        skb_gro_reset_offset(skb, hlen);

        if (unlikely(!skb_gro_may_pull(skb, hlen))) {
                eth = skb_gro_header_slow(skb, hlen, 0);
                if (unlikely(!eth)) {
                        net_warn_ratelimited("%s: dropping impossible skb from %s\n",
                                             __func__, napi->dev->name);
                        napi_reuse_skb(napi, skb);
                        return NULL;
                }
        } else {
                eth = (const struct ethhdr *)skb->data;

                if (NAPI_GRO_CB(skb)->frag0 != skb->data)
                        gro_pull_from_frag0(skb, hlen);

                NAPI_GRO_CB(skb)->frag0 += hlen;
                NAPI_GRO_CB(skb)->frag0_len -= hlen;
        }
        __skb_pull(skb, hlen);

        /*
         * This works because the only protocols we care about don't require
         * special handling.
         * We'll fix it up properly in napi_frags_finish()
         */
        skb->protocol = eth->h_proto;

        return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
        gro_result_t ret;
        struct sk_buff *skb = napi_frags_skb(napi);

        trace_napi_gro_frags_entry(skb);

        ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
        trace_napi_gro_frags_exit(ret);

        return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
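
/* Illustrative sketch (not part of gro.c): drivers that receive directly
 * into pages use the napi_get_frags()/napi_gro_frags() pair instead of
 * building an skb themselves (mlx4/mlx5 follow this pattern).  Names and
 * the truesize estimate below are hypothetical:
 */
static void my_driver_rx_frag(struct napi_struct *napi, struct page *page,
                              unsigned int offset, unsigned int len)
{
        struct sk_buff *skb = napi_get_frags(napi);

        if (unlikely(!skb))
                return;         /* drop: allocation failed */

        skb_fill_page_desc(skb, 0, page, offset, len);
        skb->len += len;
        skb->data_len += len;
        skb->truesize += PAGE_SIZE;     /* driver-specific accounting */

        /* napi_gro_frags() pulls the Ethernet header out of frag0 and
         * runs the packet through dev_gro_receive() above.
         */
        napi_gro_frags(napi);
}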

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
        __wsum wsum;
        __sum16 sum;

        wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

        /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
        sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
        /* See comments in __skb_checksum_complete(). */
        if (likely(!sum)) {
                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
                    !skb->csum_complete_sw)
                        netdev_rx_csum_fault(skb->dev, skb);
        }

        NAPI_GRO_CB(skb)->csum = wsum;
        NAPI_GRO_CB(skb)->csum_valid = 1;

        return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
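
/* Illustrative sketch (not part of gro.c): protocol gro_receive handlers
 * rarely call __skb_gro_checksum_complete() directly; they go through the
 * skb_gro_checksum_validate() helper from <net/gro.h>, which falls back to
 * this function when the device provided no usable checksum.  This closely
 * mirrors tcp4_gro_receive() in net/ipv4/tcp_offload.c:
 */
static struct sk_buff *my_tcp4_gro_receive(struct list_head *head,
                                           struct sk_buff *skb)
{
        /* Don't bother verifying checksum if we're going to flush anyway. */
        if (!NAPI_GRO_CB(skb)->flush &&
            skb_gro_checksum_validate(skb, IPPROTO_TCP,
                                      inet_gro_compute_pseudo)) {
                NAPI_GRO_CB(skb)->flush = 1;    /* bad checksum: don't merge */
                return NULL;
        }
        return tcp_gro_receive(head, skb);
}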