Linux/net/xfrm/xfrm_device.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/gso.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
                                  unsigned int hsize)
{
        struct xfrm_offload *xo = xfrm_offload(skb);

        skb_reset_mac_len(skb);
        if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header -= x->props.header_len;

        pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
}

static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
                                    unsigned int hsize)
{
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header = skb->network_header + hsize;

        skb_reset_mac_len(skb);
        pskb_pull(skb, skb->mac_len + x->props.header_len);
}

static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
                                  unsigned int hsize)
{
        struct xfrm_offload *xo = xfrm_offload(skb);
        int phlen = 0;

        if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header = skb->network_header + hsize;

        skb_reset_mac_len(skb);
        if (x->sel.family != AF_INET6) {
                phlen = IPV4_BEET_PHMAXLEN;
                if (x->outer_mode.family == AF_INET6)
                        phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr);
        }

        pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen));
}

/* Adjust pointers into the packet when IPsec is done at layer2 */
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
        switch (x->outer_mode.encap) {
        case XFRM_MODE_TUNNEL:
                if (x->outer_mode.family == AF_INET)
                        return __xfrm_mode_tunnel_prep(x, skb,
                                                       sizeof(struct iphdr));
                if (x->outer_mode.family == AF_INET6)
                        return __xfrm_mode_tunnel_prep(x, skb,
                                                       sizeof(struct ipv6hdr));
                break;
        case XFRM_MODE_TRANSPORT:
                if (x->outer_mode.family == AF_INET)
                        return __xfrm_transport_prep(x, skb,
                                                     sizeof(struct iphdr));
                if (x->outer_mode.family == AF_INET6)
                        return __xfrm_transport_prep(x, skb,
                                                     sizeof(struct ipv6hdr));
                break;
        case XFRM_MODE_BEET:
                if (x->outer_mode.family == AF_INET)
                        return __xfrm_mode_beet_prep(x, skb,
                                                     sizeof(struct iphdr));
                if (x->outer_mode.family == AF_INET6)
                        return __xfrm_mode_beet_prep(x, skb,
                                                     sizeof(struct ipv6hdr));
                break;
        case XFRM_MODE_ROUTEOPTIMIZATION:
        case XFRM_MODE_IN_TRIGGER:
                break;
        }
}

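/*
 * Worked example (illustrative): for an IPv4 ESP tunnel-mode SA,
 * xfrm_outer_mode_prep() calls __xfrm_mode_tunnel_prep() with
 * hsize == sizeof(struct iphdr); for a GSO skb this sets the
 * transport header to network_header + 20 and then pulls
 * skb->mac_len + x->props.header_len bytes, where props.header_len
 * accounts for the outer IP and ESP headers added by this state.
 */
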
static inline bool xmit_xfrm_check_overflow(struct sk_buff *skb)
{
        struct xfrm_offload *xo = xfrm_offload(skb);
        __u32 seq = xo->seq.low;

        seq += skb_shinfo(skb)->gso_segs;
        if (unlikely(seq < xo->seq.low))
                return true;

        return false;
}

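/*
 * Example: with xo->seq.low == 0xfffffffd and gso_segs == 8, the
 * 32-bit sum wraps to 0x00000005, which is smaller than the starting
 * value, so xmit_xfrm_check_overflow() returns true and
 * validate_xmit_xfrm() below falls back to software segmentation for
 * this skb instead of handing it to the device as a single GSO skb.
 */
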
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
        int err;
        unsigned long flags;
        struct xfrm_state *x;
        struct softnet_data *sd;
        struct sk_buff *skb2, *nskb, *pskb = NULL;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct net_device *dev = skb->dev;
        struct sec_path *sp;

        if (!xo || (xo->flags & XFRM_XMIT))
                return skb;

        if (!(features & NETIF_F_HW_ESP))
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

        sp = skb_sec_path(skb);
        x = sp->xvec[sp->len - 1];
        if (xo->flags & XFRM_GRO || x->xso.dir == XFRM_DEV_OFFLOAD_IN)
                return skb;

        /* The packet was sent to the HW IPsec packet offload engine,
         * but to the wrong device. Drop the packet so it won't skip
         * the XFRM stack.
         */
        if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET && x->xso.dev != dev) {
                kfree_skb(skb);
                dev_core_stats_tx_dropped_inc(dev);
                return NULL;
        }

        /* This skb was already validated on the upper/virtual dev */
        if ((x->xso.dev != dev) && (x->xso.real_dev == dev))
                return skb;

        local_irq_save(flags);
        sd = this_cpu_ptr(&softnet_data);
        err = !skb_queue_empty(&sd->xfrm_backlog);
        local_irq_restore(flags);

        if (err) {
                *again = true;
                return skb;
        }

        if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) ||
                                unlikely(xmit_xfrm_check_overflow(skb)))) {
                struct sk_buff *segs;

                /* Packet got rerouted, fixup features and segment it. */
                esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP);

                segs = skb_gso_segment(skb, esp_features);
                if (IS_ERR(segs)) {
                        kfree_skb(skb);
                        dev_core_stats_tx_dropped_inc(dev);
                        return NULL;
                } else {
                        consume_skb(skb);
                        skb = segs;
                }
        }

        if (!skb->next) {
                esp_features |= skb->dev->gso_partial_features;
                xfrm_outer_mode_prep(x, skb);

                xo->flags |= XFRM_DEV_RESUME;

                err = x->type_offload->xmit(x, skb, esp_features);
                if (err) {
                        if (err == -EINPROGRESS)
                                return NULL;

                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        kfree_skb(skb);
                        return NULL;
                }

                skb_push(skb, skb->data - skb_mac_header(skb));

                return skb;
        }

        skb_list_walk_safe(skb, skb2, nskb) {
                esp_features |= skb->dev->gso_partial_features;
                skb_mark_not_on_list(skb2);

                xo = xfrm_offload(skb2);
                xo->flags |= XFRM_DEV_RESUME;

                xfrm_outer_mode_prep(x, skb2);

                err = x->type_offload->xmit(x, skb2, esp_features);
                if (!err) {
                        skb2->next = nskb;
                } else if (err != -EINPROGRESS) {
                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        skb2->next = nskb;
                        kfree_skb_list(skb2);
                        return NULL;
                } else {
                        if (skb == skb2)
                                skb = nskb;
                        else
                                pskb->next = nskb;

                        continue;
                }

                skb_push(skb2, skb2->data - skb_mac_header(skb2));
                pskb = skb2;
        }

        return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);

int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
                       struct xfrm_user_offload *xuo,
                       struct netlink_ext_ack *extack)
{
        int err;
        struct dst_entry *dst;
        struct net_device *dev;
        struct xfrm_dev_offload *xso = &x->xso;
        xfrm_address_t *saddr;
        xfrm_address_t *daddr;
        bool is_packet_offload;

        if (!x->type_offload) {
                NL_SET_ERR_MSG(extack, "Type doesn't support offload");
                return -EINVAL;
        }

        if (xuo->flags &
            ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND | XFRM_OFFLOAD_PACKET)) {
                NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
                return -EINVAL;
        }

        if ((xuo->flags & XFRM_OFFLOAD_INBOUND && x->dir == XFRM_SA_DIR_OUT) ||
            (!(xuo->flags & XFRM_OFFLOAD_INBOUND) && x->dir == XFRM_SA_DIR_IN)) {
                NL_SET_ERR_MSG(extack, "Mismatched SA and offload direction");
                return -EINVAL;
        }

        is_packet_offload = xuo->flags & XFRM_OFFLOAD_PACKET;

        /* We don't yet support TFC padding. */
        if (x->tfcpad) {
                NL_SET_ERR_MSG(extack, "TFC padding can't be offloaded");
                return -EINVAL;
        }

        dev = dev_get_by_index(net, xuo->ifindex);
        if (!dev) {
                if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
                        saddr = &x->props.saddr;
                        daddr = &x->id.daddr;
                } else {
                        saddr = &x->id.daddr;
                        daddr = &x->props.saddr;
                }

                dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
                                        x->props.family,
                                        xfrm_smark_get(0, x));
                if (IS_ERR(dst))
                        return (is_packet_offload) ? -EINVAL : 0;

                dev = dst->dev;

                dev_hold(dev);
                dst_release(dst);
        }

        if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
                xso->dev = NULL;
                dev_put(dev);
                return (is_packet_offload) ? -EINVAL : 0;
        }

        if (!is_packet_offload && x->props.flags & XFRM_STATE_ESN &&
            !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
                NL_SET_ERR_MSG(extack, "Device doesn't support offload with ESN");
                xso->dev = NULL;
                dev_put(dev);
                return -EINVAL;
        }

        xso->dev = dev;
        netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC);
        xso->real_dev = dev;

        if (xuo->flags & XFRM_OFFLOAD_INBOUND)
                xso->dir = XFRM_DEV_OFFLOAD_IN;
        else
                xso->dir = XFRM_DEV_OFFLOAD_OUT;

        if (is_packet_offload)
                xso->type = XFRM_DEV_OFFLOAD_PACKET;
        else
                xso->type = XFRM_DEV_OFFLOAD_CRYPTO;

        err = dev->xfrmdev_ops->xdo_dev_state_add(x, extack);
        if (err) {
                xso->dev = NULL;
                xso->dir = 0;
                xso->real_dev = NULL;
                netdev_put(dev, &xso->dev_tracker);
                xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;

                /* The user explicitly requested packet offload mode and
                 * configured a policy in addition to the XFRM state. So be
                 * civil to users, and return an error instead of taking the
                 * fallback path.
                 *
                 * This WARN_ON() serves as documentation for driver authors
                 * not to return -EOPNOTSUPP in packet offload mode.
                 */
                WARN_ON(err == -EOPNOTSUPP && is_packet_offload);
                if (err != -EOPNOTSUPP || is_packet_offload) {
                        NL_SET_ERR_MSG_WEAK(extack, "Device failed to offload this state");
                        return err;
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);

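/*
 * Illustrative driver-side sketch (hypothetical names prefixed
 * "sample_"): the xdo_dev_state_add() signature follows from the
 * dev->xfrmdev_ops->xdo_dev_state_add(x, extack) call above; the
 * delete callback's signature is assumed here. A real driver would
 * program the SA (keys, SPI, replay state) into hardware. From user
 * space, such an offload is typically requested with iproute2, e.g.
 * "ip xfrm state add ... offload dev eth0 dir out" (crypto mode) or
 * "... offload packet dev eth0 dir out" (packet mode).
 */
static int sample_xdo_dev_state_add(struct xfrm_state *x,
                                    struct netlink_ext_ack *extack)
{
        /* Hypothetical capability check: this sketch only offloads ESP. */
        if (x->id.proto != IPPROTO_ESP) {
                NL_SET_ERR_MSG(extack, "Only ESP can be offloaded");
                return -EINVAL;
        }

        /* Programming of keys/SPI into hardware is omitted here. */
        return 0;
}

static void sample_xdo_dev_state_delete(struct xfrm_state *x)
{
        /* Removal of the SA from hardware is omitted here. */
}
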
int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
                        struct xfrm_user_offload *xuo, u8 dir,
                        struct netlink_ext_ack *extack)
{
        struct xfrm_dev_offload *xdo = &xp->xdo;
        struct net_device *dev;
        int err;

        if (!xuo->flags || xuo->flags & ~XFRM_OFFLOAD_PACKET) {
                /* Only packet offload mode is supported, which means the
                 * user must set the XFRM_OFFLOAD_PACKET bit.
                 */
                NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
                return -EINVAL;
        }

        dev = dev_get_by_index(net, xuo->ifindex);
        if (!dev)
                return -EINVAL;

        if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_policy_add) {
                xdo->dev = NULL;
                dev_put(dev);
                NL_SET_ERR_MSG(extack, "Policy offload is not supported");
                return -EINVAL;
        }

        xdo->dev = dev;
        netdev_tracker_alloc(dev, &xdo->dev_tracker, GFP_ATOMIC);
        xdo->real_dev = dev;
        xdo->type = XFRM_DEV_OFFLOAD_PACKET;
        switch (dir) {
        case XFRM_POLICY_IN:
                xdo->dir = XFRM_DEV_OFFLOAD_IN;
                break;
        case XFRM_POLICY_OUT:
                xdo->dir = XFRM_DEV_OFFLOAD_OUT;
                break;
        case XFRM_POLICY_FWD:
                xdo->dir = XFRM_DEV_OFFLOAD_FWD;
                break;
        default:
                xdo->dev = NULL;
                netdev_put(dev, &xdo->dev_tracker);
                NL_SET_ERR_MSG(extack, "Unrecognized offload direction");
                return -EINVAL;
        }

        err = dev->xfrmdev_ops->xdo_dev_policy_add(xp, extack);
        if (err) {
                xdo->dev = NULL;
                xdo->real_dev = NULL;
                xdo->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
                xdo->dir = 0;
                netdev_put(dev, &xdo->dev_tracker);
                NL_SET_ERR_MSG_WEAK(extack, "Device failed to offload this policy");
                return err;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_policy_add);

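/*
 * Illustrative, hypothetical xdo_dev_policy_add() counterpart for
 * packet offload; the (xp, extack) signature follows from the call
 * above. By the time the driver is called, xp->xdo.dir has already
 * been set from the policy direction.
 */
static int sample_xdo_dev_policy_add(struct xfrm_policy *xp,
                                     struct netlink_ext_ack *extack)
{
        /* Hypothetical restriction: this sketch does not offload
         * forward policies.
         */
        if (xp->xdo.dir == XFRM_DEV_OFFLOAD_FWD) {
                NL_SET_ERR_MSG(extack, "FWD policies are not offloaded");
                return -EINVAL;
        }

        /* Installation of the policy rule in hardware is omitted here. */
        return 0;
}
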
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
        int mtu;
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
        struct net_device *dev = x->xso.dev;

        if (!x->type_offload ||
            (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED && x->encap))
                return false;

        if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET ||
            ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
             !xdst->child->xfrm)) {
                mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
                if (skb->len <= mtu)
                        goto ok;

                if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
                        goto ok;
        }

        return false;

ok:
        if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
                return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

        return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);

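/*
 * Illustrative, hypothetical xdo_dev_offload_ok() callback as invoked
 * from xfrm_dev_offload_ok() above: returning false makes the stack
 * fall back to software crypto for packets the hardware cannot
 * handle.
 */
static bool sample_xdo_dev_offload_ok(struct sk_buff *skb,
                                      struct xfrm_state *x)
{
        /* Hypothetical limitation: only plain IPv4/IPv6 payloads. */
        if (skb->protocol != htons(ETH_P_IP) &&
            skb->protocol != htons(ETH_P_IPV6))
                return false;

        return true;
}
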
void xfrm_dev_resume(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        int ret = NETDEV_TX_BUSY;
        struct netdev_queue *txq;
        struct softnet_data *sd;
        unsigned long flags;

        rcu_read_lock();
        txq = netdev_core_pick_tx(dev, skb, NULL);

        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_frozen_or_stopped(txq))
                skb = dev_hard_start_xmit(skb, dev, txq, &ret);
        HARD_TX_UNLOCK(dev, txq);

        if (!dev_xmit_complete(ret)) {
                local_irq_save(flags);
                sd = this_cpu_ptr(&softnet_data);
                skb_queue_tail(&sd->xfrm_backlog, skb);
                raise_softirq_irqoff(NET_TX_SOFTIRQ);
                local_irq_restore(flags);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);

void xfrm_dev_backlog(struct softnet_data *sd)
{
        struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
        struct sk_buff_head list;
        struct sk_buff *skb;

        if (skb_queue_empty(xfrm_backlog))
                return;

        __skb_queue_head_init(&list);

        spin_lock(&xfrm_backlog->lock);
        skb_queue_splice_init(xfrm_backlog, &list);
        spin_unlock(&xfrm_backlog->lock);

        while (!skb_queue_empty(&list)) {
                skb = __skb_dequeue(&list);
                xfrm_dev_resume(skb);
        }
}
#endif

static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
        if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
            !(dev->features & NETIF_F_HW_ESP))
                return NOTIFY_BAD;

        if ((dev->features & NETIF_F_HW_ESP) &&
            (!(dev->xfrmdev_ops &&
               dev->xfrmdev_ops->xdo_dev_state_add &&
               dev->xfrmdev_ops->xdo_dev_state_delete)))
                return NOTIFY_BAD;
#else
        if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
                return NOTIFY_BAD;
#endif

        return NOTIFY_DONE;
}

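#ifdef CONFIG_XFRM_OFFLOAD
/*
 * Illustrative, hypothetical probe-time wiring that satisfies
 * xfrm_api_check() above: NETIF_F_HW_ESP requires xfrmdev_ops with at
 * least xdo_dev_state_add and xdo_dev_state_delete, and
 * NETIF_F_HW_ESP_TX_CSUM is only valid together with NETIF_F_HW_ESP.
 * The sample_* callbacks are the sketches defined earlier in this
 * file.
 */
static const struct xfrmdev_ops sample_xfrmdev_ops = {
        .xdo_dev_state_add      = sample_xdo_dev_state_add,
        .xdo_dev_state_delete   = sample_xdo_dev_state_delete,
        .xdo_dev_offload_ok     = sample_xdo_dev_offload_ok,
        .xdo_dev_policy_add     = sample_xdo_dev_policy_add,
};

static void __maybe_unused sample_xfrm_offload_setup(struct net_device *dev)
{
        dev->xfrmdev_ops  = &sample_xfrmdev_ops;
        dev->features    |= NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM;
        dev->hw_features |= NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM;
}
#endif
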
static int xfrm_dev_down(struct net_device *dev)
{
        if (dev->features & NETIF_F_HW_ESP) {
                xfrm_dev_state_flush(dev_net(dev), dev, true);
                xfrm_dev_policy_flush(dev_net(dev), dev, true);
        }

        return NOTIFY_DONE;
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_REGISTER:
                return xfrm_api_check(dev);

        case NETDEV_FEAT_CHANGE:
                return xfrm_api_check(dev);

        case NETDEV_DOWN:
        case NETDEV_UNREGISTER:
                return xfrm_dev_down(dev);
        }
        return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
        .notifier_call  = xfrm_dev_event,
};

void __init xfrm_dev_init(void)
{
        register_netdevice_notifier(&xfrm_dev_notifier);
}