TOMOYO Linux Cross Reference
Linux/net/ipv4/esp4_offload.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

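/* GRO receive handler for ESP: parse the SPI and sequence number and,
 * unless the device already decrypted the packet (CRYPTO_DONE), look up
 * the receiving xfrm state and attach it to the secpath. The packet is
 * then handed to xfrm_input(); ERR_PTR(-EINPROGRESS) tells the GRO layer
 * that the skb has been consumed. On errors the header pull is undone
 * and the flow is flushed out of GRO.
 */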
static struct sk_buff *esp4_gro_receive(struct list_head *head,
                                        struct sk_buff *skb)
{
        int offset = skb_gro_offset(skb);
        struct xfrm_offload *xo;
        struct xfrm_state *x;
        int encap_type = 0;
        __be32 seq;
        __be32 spi;

        if (!pskb_pull(skb, offset))
                return NULL;

        if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
                goto out;

        xo = xfrm_offload(skb);
        if (!xo || !(xo->flags & CRYPTO_DONE)) {
                struct sec_path *sp = secpath_set(skb);

                if (!sp)
                        goto out;

                if (sp->len == XFRM_MAX_DEPTH)
                        goto out_reset;

                x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
                                      (xfrm_address_t *)&ip_hdr(skb)->daddr,
                                      spi, IPPROTO_ESP, AF_INET);

                if (unlikely(x && x->dir && x->dir != XFRM_SA_DIR_IN)) {
                        /* non-offload path will record the error and audit log */
                        xfrm_state_put(x);
                        x = NULL;
                }

                if (!x)
                        goto out_reset;

                skb->mark = xfrm_smark_get(skb->mark, x);

                sp->xvec[sp->len++] = x;
                sp->olen++;

                xo = xfrm_offload(skb);
                if (!xo)
                        goto out_reset;
        }

        xo->flags |= XFRM_GRO;

        if (NAPI_GRO_CB(skb)->proto == IPPROTO_UDP)
                encap_type = UDP_ENCAP_ESPINUDP;

        XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
        XFRM_SPI_SKB_CB(skb)->seq = seq;

        /* We don't need to handle errors from xfrm_input; it does all
         * the error handling and frees the resources on error. */
        xfrm_input(skb, IPPROTO_ESP, spi, encap_type);

        return ERR_PTR(-EINPROGRESS);
out_reset:
        secpath_reset(skb);
out:
        skb_push(skb, offset);
        NAPI_GRO_CB(skb)->same_flow = 0;
        NAPI_GRO_CB(skb)->flush = 1;

        return NULL;
}

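/* Prepend the ESP header for a GSO packet: fill in the SPI and the
 * low-order sequence number, mark the outer protocol as ESP and
 * remember the inner protocol for the segmentation callbacks.
 */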
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct iphdr *iph = ip_hdr(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        int proto = iph->protocol;

        skb_push(skb, -skb_network_offset(skb));
        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        esph->spi = x->id.spi;
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        xo->proto = proto;
}

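/* Tunnel mode: segment the inner packet as a plain IPv4 or IPv6 frame. */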
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
                                                struct sk_buff *skb,
                                                netdev_features_t features)
{
        __be16 type = x->inner_mode.family == AF_INET6 ? htons(ETH_P_IPV6)
                                                       : htons(ETH_P_IP);

        return skb_eth_gso_segment(skb, features, type);
}

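/* Transport mode: step over the ESP header and let the inner transport
 * protocol's offload handler do the segmentation.
 */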
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
                                                   struct sk_buff *skb,
                                                   netdev_features_t features)
{
        const struct net_offload *ops;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct xfrm_offload *xo = xfrm_offload(skb);

        skb->transport_header += x->props.header_len;
        ops = rcu_dereference(inet_offloads[xo->proto]);
        if (likely(ops && ops->callbacks.gso_segment))
                segs = ops->callbacks.gso_segment(skb, features);

        return segs;
}

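/* BEET mode: adjust the transport header for the optional pseudo header
 * (IPv4) or the extension header chain (IPv6) before delegating to the
 * inner protocol's gso_segment callback.
 */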
static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
                                              struct sk_buff *skb,
                                              netdev_features_t features)
{
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        const struct net_offload *ops;
        u8 proto = xo->proto;

        skb->transport_header += x->props.header_len;

        if (x->sel.family != AF_INET6) {
                if (proto == IPPROTO_BEETPH) {
                        struct ip_beet_phdr *ph =
                                (struct ip_beet_phdr *)skb->data;

                        skb->transport_header += ph->hdrlen * 8;
                        proto = ph->nexthdr;
                } else {
                        skb->transport_header -= IPV4_BEET_PHMAXLEN;
                }
        } else {
                __be16 frag;

                skb->transport_header +=
                        ipv6_skip_exthdr(skb, 0, &proto, &frag);
                if (proto == IPPROTO_TCP)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
        }

        if (proto == IPPROTO_IPV6)
                skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;

        __skb_pull(skb, skb_transport_offset(skb));
        ops = rcu_dereference(inet_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_segment))
                segs = ops->callbacks.gso_segment(skb, features);

        return segs;
}

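/* Dispatch segmentation according to the encapsulation mode of the SA. */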
static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
                                                    struct sk_buff *skb,
                                                    netdev_features_t features)
{
        switch (x->outer_mode.encap) {
        case XFRM_MODE_TUNNEL:
                return xfrm4_tunnel_gso_segment(x, skb, features);
        case XFRM_MODE_TRANSPORT:
                return xfrm4_transport_gso_segment(x, skb, features);
        case XFRM_MODE_BEET:
                return xfrm4_beet_gso_segment(x, skb, features);
        }

        return ERR_PTR(-EOPNOTSUPP);
}

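/* GSO callback for ESP: validate the packet against the offloaded state,
 * strip the ESP header and IV, and mask out the features the device
 * cannot handle in hardware before segmenting by outer mode.
 */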
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        struct xfrm_state *x;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sec_path *sp;

        if (!xo)
                return ERR_PTR(-EINVAL);

        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
                return ERR_PTR(-EINVAL);

        sp = skb_sec_path(skb);
        x = sp->xvec[sp->len - 1];
        aead = x->data;
        esph = ip_esp_hdr(skb);

        if (esph->spi != x->id.spi)
                return ERR_PTR(-EINVAL);

        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
                return ERR_PTR(-EINVAL);

        __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

        skb->encap_hdr_csum = 1;

        if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
             !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
                                            NETIF_F_SCTP_CRC);
        else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
                 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
                esp_features = features & ~(NETIF_F_CSUM_MASK |
                                            NETIF_F_SCTP_CRC);

        xo->flags |= XFRM_GSO_SEGMENT;

        return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

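/* Tail of the ESP receive path for GRO/hardware-offloaded packets: make
 * sure the ESP header and IV are in the linear area, then let
 * esp_input_done2() strip the padding and trailer.
 */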
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
        struct crypto_aead *aead = x->data;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
                return -EINVAL;

        if (!(xo->flags & CRYPTO_DONE))
                skb->ip_summed = CHECKSUM_NONE;

        return esp_input_done2(skb, 0);
}

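/* Transmit an ESP packet via the offload path: compute padding and
 * trailer sizes, build the ESP header, advance the sequence counter
 * (by the number of GSO segments where needed) and either pass the skb
 * to the device for hardware crypto or fall back to software
 * encryption via esp_output_tail().
 */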
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
        int err;
        int alen;
        int blksize;
        struct xfrm_offload *xo;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct esp_info esp;
        bool hw_offload = true;
        __u32 seq;
        int encap_type = 0;

        esp.inplace = true;

        xo = xfrm_offload(skb);

        if (!xo)
                return -EINVAL;

        if ((!(features & NETIF_F_HW_ESP) &&
             !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
            x->xso.dev != skb->dev) {
                xo->flags |= CRYPTO_FALLBACK;
                hw_offload = false;
        }

        esp.proto = xo->proto;

        /* skb is pure payload to encrypt */

        aead = x->data;
        alen = crypto_aead_authsize(aead);

        esp.tfclen = 0;
        /* XXX: Add support for tfc padding here. */

        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
        esp.plen = esp.clen - skb->len - esp.tfclen;
        esp.tailen = esp.tfclen + esp.plen + alen;

        esp.esph = ip_esp_hdr(skb);

        if (x->encap)
                encap_type = x->encap->encap_type;

        if (!hw_offload || !skb_is_gso(skb) || (hw_offload && encap_type == UDP_ENCAP_ESPINUDP)) {
                esp.nfrags = esp_output_head(x, skb, &esp);
                if (esp.nfrags < 0)
                        return esp.nfrags;
        }

        seq = xo->seq.low;

        esph = esp.esph;
        esph->spi = x->id.spi;

        skb_push(skb, -skb_network_offset(skb));

        if (xo->flags & XFRM_GSO_SEGMENT) {
                esph->seq_no = htonl(seq);

                if (!skb_is_gso(skb))
                        xo->seq.low++;
                else
                        xo->seq.low += skb_shinfo(skb)->gso_segs;
        }

        if (xo->seq.low < seq)
                xo->seq.hi++;

        esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

        if (hw_offload && encap_type == UDP_ENCAP_ESPINUDP) {
                /* In the XFRM stack, the encapsulation protocol is written
                 * into iphdr->protocol through *skb_mac_header(skb) (see
                 * esp_output_udp_encap()), because skb->mac_header points to
                 * iphdr->protocol there (see xfrm4_tunnel_encap_add()).
                 * In esp_xmit(), however, skb->mac_header does not point to
                 * iphdr->protocol, so the protocol field needs to be
                 * corrected here.
                 */
                ip_hdr(skb)->protocol = IPPROTO_UDP;

                esph->seq_no = htonl(seq);
        }

        ip_hdr(skb)->tot_len = htons(skb->len);
        ip_send_check(ip_hdr(skb));

        if (hw_offload) {
                if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
                        return -ENOMEM;

                xo = xfrm_offload(skb);
                if (!xo)
                        return -EINVAL;

                xo->flags |= XFRM_XMIT;
                return 0;
        }

        err = esp_output_tail(x, skb, &esp);
        if (err)
                return err;

        secpath_reset(skb);

        if (skb_needs_linearize(skb, skb->dev->features) &&
            __skb_linearize(skb))
                return -ENOMEM;
        return 0;
}

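/* Register the GRO/GSO callbacks with the inet offload layer and the
 * ESP-specific hooks with the xfrm type offload infrastructure.
 */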
static const struct net_offload esp4_offload = {
        .callbacks = {
                .gro_receive = esp4_gro_receive,
                .gso_segment = esp4_gso_segment,
        },
};

static const struct xfrm_type_offload esp_type_offload = {
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .input_tail     = esp_input_tail,
        .xmit           = esp_xmit,
        .encap          = esp4_gso_encap,
};

static int __init esp4_offload_init(void)
{
        if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
                pr_info("%s: can't add xfrm type offload\n", __func__);
                return -EAGAIN;
        }

        return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
        xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
        inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");
