~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/net/l2tp/l2tp_core.c

Version: ~ [ linux-6.11.5 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.58 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.114 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.169 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.228 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.284 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.322 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 // SPDX-License-Identifier: GPL-2.0-only
  2 /* L2TP core.
  3  *
  4  * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
  5  *
  6  * This file contains some code of the original L2TPv2 pppol2tp
  7  * driver, which has the following copyright:
  8  *
  9  * Authors:     Martijn van Oosterhout <kleptog@svana.org>
 10  *              James Chapman (jchapman@katalix.com)
 11  * Contributors:
 12  *              Michal Ostrowski <mostrows@speakeasy.net>
 13  *              Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
 14  *              David S. Miller (davem@redhat.com)
 15  */
 16 
 17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 18 
 19 #include <linux/module.h>
 20 #include <linux/string.h>
 21 #include <linux/list.h>
 22 #include <linux/rculist.h>
 23 #include <linux/uaccess.h>
 24 
 25 #include <linux/kernel.h>
 26 #include <linux/spinlock.h>
 27 #include <linux/kthread.h>
 28 #include <linux/sched.h>
 29 #include <linux/slab.h>
 30 #include <linux/errno.h>
 31 #include <linux/jiffies.h>
 32 
 33 #include <linux/netdevice.h>
 34 #include <linux/net.h>
 35 #include <linux/inetdevice.h>
 36 #include <linux/skbuff.h>
 37 #include <linux/init.h>
 38 #include <linux/in.h>
 39 #include <linux/ip.h>
 40 #include <linux/udp.h>
 41 #include <linux/l2tp.h>
 42 #include <linux/sort.h>
 43 #include <linux/file.h>
 44 #include <linux/nsproxy.h>
 45 #include <net/net_namespace.h>
 46 #include <net/netns/generic.h>
 47 #include <net/dst.h>
 48 #include <net/ip.h>
 49 #include <net/udp.h>
 50 #include <net/udp_tunnel.h>
 51 #include <net/inet_common.h>
 52 #include <net/xfrm.h>
 53 #include <net/protocol.h>
 54 #include <net/inet6_connection_sock.h>
 55 #include <net/inet_ecn.h>
 56 #include <net/ip6_route.h>
 57 #include <net/ip6_checksum.h>
 58 
 59 #include <asm/byteorder.h>
 60 #include <linux/atomic.h>
 61 
 62 #include "l2tp_core.h"
 63 
 64 #define CREATE_TRACE_POINTS
 65 #include "trace.h"
 66 
 67 #define L2TP_DRV_VERSION        "V2.0"
 68 
 69 /* L2TP header constants */
 70 #define L2TP_HDRFLAG_T     0x8000
 71 #define L2TP_HDRFLAG_L     0x4000
 72 #define L2TP_HDRFLAG_S     0x0800
 73 #define L2TP_HDRFLAG_O     0x0200
 74 #define L2TP_HDRFLAG_P     0x0100
 75 
 76 #define L2TP_HDR_VER_MASK  0x000F
 77 #define L2TP_HDR_VER_2     0x0002
 78 #define L2TP_HDR_VER_3     0x0003
 79 
 80 /* L2TPv3 default L2-specific sublayer */
 81 #define L2TP_SLFLAG_S      0x40000000
 82 #define L2TP_SL_SEQ_MASK   0x00ffffff
 83 
 84 #define L2TP_HDR_SIZE_MAX               14
 85 
 86 /* Default trace flags */
 87 #define L2TP_DEFAULT_DEBUG_FLAGS        0
 88 
 89 #define L2TP_DEPTH_NESTING              2
 90 #if L2TP_DEPTH_NESTING == SINGLE_DEPTH_NESTING
 91 #error "L2TP requires its own lockdep subclass"
 92 #endif
 93 
 94 /* Private data stored for received packets in the skb.
 95  */
/* Private data stored for received packets in the skb.
 */
struct l2tp_skb_cb {
        u32                     ns;        /* L2TP sequence number parsed from the header */
        u16                     has_seq;   /* non-zero if the packet carried sequence numbers */
        u16                     length;    /* payload length, used for stats accounting */
        unsigned long           expires;   /* jiffies deadline for reorder-queue expiry */
};

/* L2TP's skb control block lives after the inet area so both can coexist */
#define L2TP_SKB_CB(skb)        ((struct l2tp_skb_cb *)&(skb)->cb[sizeof(struct inet_skb_parm)])
104 
/* workqueue used for deferred tunnel/session work */
static struct workqueue_struct *l2tp_wq;

/* per-net private data for this module */
static unsigned int l2tp_net_id;
struct l2tp_net {
        /* Lock for write access to l2tp_tunnel_idr */
        spinlock_t l2tp_tunnel_idr_lock;
        struct idr l2tp_tunnel_idr;        /* tunnel_id -> struct l2tp_tunnel */
        /* Lock for write access to l2tp_v[23]_session_idr/htable */
        spinlock_t l2tp_session_idr_lock;
        struct idr l2tp_v2_session_idr;    /* (tunnel_id << 16 | session_id) -> session */
        struct idr l2tp_v3_session_idr;    /* session_id -> session */
        /* buckets for v3 sessions whose IDs collide across tunnels */
        struct hlist_head l2tp_v3_session_htable[16];
};
119 
120 static inline u32 l2tp_v2_session_key(u16 tunnel_id, u16 session_id)
121 {
122         return ((u32)tunnel_id) << 16 | session_id;
123 }
124 
125 static inline unsigned long l2tp_v3_session_hashkey(struct sock *sk, u32 session_id)
126 {
127         return ((unsigned long)sk) + session_id;
128 }
129 
130 #if IS_ENABLED(CONFIG_IPV6)
131 static bool l2tp_sk_is_v6(struct sock *sk)
132 {
133         return sk->sk_family == PF_INET6 &&
134                !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
135 }
136 #endif
137 
138 static inline struct l2tp_net *l2tp_pernet(const struct net *net)
139 {
140         return net_generic(net, l2tp_net_id);
141 }
142 
/* Final teardown when the tunnel refcount hits zero. Only releases the
 * socket reference; the tunnel structure itself is freed later by the
 * socket destructor.
 */
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
        trace_free_tunnel(tunnel);
        sock_put(tunnel->sock);
        /* the tunnel is freed in the socket destructor */
}
149 
150 static void l2tp_session_free(struct l2tp_session *session)
151 {
152         trace_free_session(session);
153         if (session->tunnel)
154                 l2tp_tunnel_dec_refcount(session->tunnel);
155         kfree_rcu(session, rcu);
156 }
157 
158 struct l2tp_tunnel *l2tp_sk_to_tunnel(struct sock *sk)
159 {
160         struct l2tp_tunnel *tunnel = sk->sk_user_data;
161 
162         if (tunnel)
163                 if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
164                         return NULL;
165 
166         return tunnel;
167 }
168 EXPORT_SYMBOL_GPL(l2tp_sk_to_tunnel);
169 
/* Take an additional reference on a tunnel; the caller must already
 * hold one valid reference.
 */
void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
{
        refcount_inc(&tunnel->ref_count);
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_inc_refcount);
175 
176 void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
177 {
178         if (refcount_dec_and_test(&tunnel->ref_count))
179                 l2tp_tunnel_free(tunnel);
180 }
181 EXPORT_SYMBOL_GPL(l2tp_tunnel_dec_refcount);
182 
/* Take an additional reference on a session; the caller must already
 * hold one valid reference.
 */
void l2tp_session_inc_refcount(struct l2tp_session *session)
{
        refcount_inc(&session->ref_count);
}
EXPORT_SYMBOL_GPL(l2tp_session_inc_refcount);
188 
189 void l2tp_session_dec_refcount(struct l2tp_session *session)
190 {
191         if (refcount_dec_and_test(&session->ref_count))
192                 l2tp_session_free(session);
193 }
194 EXPORT_SYMBOL_GPL(l2tp_session_dec_refcount);
195 
/* Lookup a tunnel. A new reference is held on the returned tunnel.
 * Returns NULL if the ID is unknown or the tunnel is being destroyed.
 */
struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
{
        const struct l2tp_net *pn = l2tp_pernet(net);
        struct l2tp_tunnel *tunnel;

        rcu_read_lock_bh();
        tunnel = idr_find(&pn->l2tp_tunnel_idr, tunnel_id);
        /* inc_not_zero skips tunnels whose last ref has already gone */
        if (tunnel && refcount_inc_not_zero(&tunnel->ref_count)) {
                rcu_read_unlock_bh();
                return tunnel;
        }
        rcu_read_unlock_bh();

        return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
213 
/* Lookup the nth tunnel (zero-based) in this net, used by management
 * dump interfaces. A new reference is held on the returned tunnel.
 */
struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
{
        struct l2tp_net *pn = l2tp_pernet(net);
        unsigned long tunnel_id, tmp;
        struct l2tp_tunnel *tunnel;
        int count = 0;

        rcu_read_lock_bh();
        idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
                /* skip entries already on their way to destruction */
                if (tunnel && ++count > nth &&
                    refcount_inc_not_zero(&tunnel->ref_count)) {
                        rcu_read_unlock_bh();
                        return tunnel;
                }
        }
        rcu_read_unlock_bh();

        return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth);
234 
/* Lookup a v3 session by session ID. Duplicate session IDs (permitted
 * for UDP encap) are disambiguated by the tunnel socket sk via the
 * per-net collision hash. A new reference is held on the returned
 * session; returns NULL if no match is found.
 */
struct l2tp_session *l2tp_v3_session_get(const struct net *net, struct sock *sk, u32 session_id)
{
        const struct l2tp_net *pn = l2tp_pernet(net);
        struct l2tp_session *session;

        rcu_read_lock_bh();
        session = idr_find(&pn->l2tp_v3_session_idr, session_id);
        /* fast path: the ID is unique (session not in the collision hash) */
        if (session && !hash_hashed(&session->hlist) &&
            refcount_inc_not_zero(&session->ref_count)) {
                rcu_read_unlock_bh();
                return session;
        }

        /* If we get here and session is non-NULL, the session_id
         * collides with one in another tunnel. If sk is non-NULL,
         * find the session matching sk.
         */
        if (session && sk) {
                unsigned long key = l2tp_v3_session_hashkey(sk, session->session_id);

                hash_for_each_possible_rcu(pn->l2tp_v3_session_htable, session,
                                           hlist, key) {
                        /* session->tunnel may be NULL if another thread is in
                         * l2tp_session_register and has added an item to
                         * l2tp_v3_session_htable but hasn't yet added the
                         * session to its tunnel's session_list.
                         */
                        struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);

                        if (tunnel && tunnel->sock == sk &&
                            refcount_inc_not_zero(&session->ref_count)) {
                                rcu_read_unlock_bh();
                                return session;
                        }
                }
        }
        rcu_read_unlock_bh();

        return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_v3_session_get);
276 
/* Lookup a v2 session by (tunnel ID, session ID). A new reference is
 * held on the returned session; returns NULL if no live match exists.
 */
struct l2tp_session *l2tp_v2_session_get(const struct net *net, u16 tunnel_id, u16 session_id)
{
        u32 session_key = l2tp_v2_session_key(tunnel_id, session_id);
        const struct l2tp_net *pn = l2tp_pernet(net);
        struct l2tp_session *session;

        rcu_read_lock_bh();
        session = idr_find(&pn->l2tp_v2_session_idr, session_key);
        /* inc_not_zero refuses sessions already being torn down */
        if (session && refcount_inc_not_zero(&session->ref_count)) {
                rcu_read_unlock_bh();
                return session;
        }
        rcu_read_unlock_bh();

        return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_v2_session_get);
294 
295 struct l2tp_session *l2tp_session_get(const struct net *net, struct sock *sk, int pver,
296                                       u32 tunnel_id, u32 session_id)
297 {
298         if (pver == L2TP_HDR_VER_2)
299                 return l2tp_v2_session_get(net, tunnel_id, session_id);
300         else
301                 return l2tp_v3_session_get(net, sk, session_id);
302 }
303 EXPORT_SYMBOL_GPL(l2tp_session_get);
304 
/* Lookup the nth session (zero-based) in a tunnel's session list, used
 * by management dump interfaces. A new reference is held on the
 * returned session.
 */
struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth)
{
        struct l2tp_session *session;
        int count = 0;

        rcu_read_lock_bh();
        list_for_each_entry_rcu(session, &tunnel->session_list, list) {
                if (++count > nth) {
                        l2tp_session_inc_refcount(session);
                        rcu_read_unlock_bh();
                        return session;
                }
        }
        rcu_read_unlock_bh();

        return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
323 
/* Lookup a session by interface name.
 * This is very inefficient but is only used by management interfaces.
 * A new reference is held on the returned session.
 */
struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
                                                const char *ifname)
{
        struct l2tp_net *pn = l2tp_pernet(net);
        unsigned long tunnel_id, tmp;
        struct l2tp_session *session;
        struct l2tp_tunnel *tunnel;

        rcu_read_lock_bh();
        /* linear scan over every session of every tunnel in this net */
        idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
                if (tunnel) {
                        list_for_each_entry_rcu(session, &tunnel->session_list, list) {
                                if (!strcmp(session->ifname, ifname)) {
                                        l2tp_session_inc_refcount(session);
                                        rcu_read_unlock_bh();

                                        return session;
                                }
                        }
                }
        }
        rcu_read_unlock_bh();

        return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
353 
/* Link a session onto a collision list. Takes a reference on the
 * session on behalf of the list; released in l2tp_session_collision_del.
 */
static void l2tp_session_coll_list_add(struct l2tp_session_coll_list *clist,
                                       struct l2tp_session *session)
{
        l2tp_session_inc_refcount(session);
        /* a session must belong to at most one collision list */
        WARN_ON_ONCE(session->coll_list);
        session->coll_list = clist;
        spin_lock(&clist->lock);
        list_add(&session->clist, &clist->list);
        spin_unlock(&clist->lock);
}
364 
/* Handle registration of a v3 session whose ID already exists in the
 * IDR. Duplicates are only permitted when the existing session is in a
 * UDP-encap tunnel: both sessions are then tracked on a shared
 * collision list and published in l2tp_v3_session_htable so lookups
 * can disambiguate by socket. Returns 0 on success, -EEXIST if the
 * duplicate is not allowed, or -ENOMEM.
 * Called with pn->l2tp_session_idr_lock held.
 */
static int l2tp_session_collision_add(struct l2tp_net *pn,
                                      struct l2tp_session *session1,
                                      struct l2tp_session *session2)
{
        struct l2tp_session_coll_list *clist;

        lockdep_assert_held(&pn->l2tp_session_idr_lock);

        if (!session2)
                return -EEXIST;

        /* If existing session is in IP-encap tunnel, refuse new session */
        if (session2->tunnel->encap == L2TP_ENCAPTYPE_IP)
                return -EEXIST;

        clist = session2->coll_list;
        if (!clist) {
                /* First collision. Allocate list to manage the collided sessions
                 * and add the existing session to the list.
                 */
                clist = kmalloc(sizeof(*clist), GFP_ATOMIC);
                if (!clist)
                        return -ENOMEM;

                spin_lock_init(&clist->lock);
                INIT_LIST_HEAD(&clist->list);
                refcount_set(&clist->ref_count, 1);
                l2tp_session_coll_list_add(clist, session2);
        }

        /* If existing session isn't already in the session hlist, add it. */
        if (!hash_hashed(&session2->hlist))
                hash_add_rcu(pn->l2tp_v3_session_htable, &session2->hlist,
                             session2->hlist_key);

        /* Add new session to the hlist and collision list */
        hash_add_rcu(pn->l2tp_v3_session_htable, &session1->hlist,
                     session1->hlist_key);
        refcount_inc(&clist->ref_count);
        l2tp_session_coll_list_add(clist, session1);

        return 0;
}
408 
/* Unpublish a v3 session that may be on a collision list: remove it
 * from the session hash and either promote a surviving duplicate into
 * the IDR slot or clear the slot entirely.
 * Called with pn->l2tp_session_idr_lock held.
 */
static void l2tp_session_collision_del(struct l2tp_net *pn,
                                       struct l2tp_session *session)
{
        struct l2tp_session_coll_list *clist = session->coll_list;
        unsigned long session_key = session->session_id;
        struct l2tp_session *session2;

        lockdep_assert_held(&pn->l2tp_session_idr_lock);

        hash_del_rcu(&session->hlist);

        if (clist) {
                /* Remove session from its collision list. If there
                 * are other sessions with the same ID, replace this
                 * session's IDR entry with that session, otherwise
                 * remove the IDR entry. If this is the last session,
                 * the collision list data is freed.
                 */
                spin_lock(&clist->lock);
                list_del_init(&session->clist);
                session2 = list_first_entry_or_null(&clist->list, struct l2tp_session, clist);
                if (session2) {
                        void *old = idr_replace(&pn->l2tp_v3_session_idr, session2, session_key);

                        WARN_ON_ONCE(IS_ERR_VALUE(old));
                } else {
                        void *removed = idr_remove(&pn->l2tp_v3_session_idr, session_key);

                        WARN_ON_ONCE(removed != session);
                }
                session->coll_list = NULL;
                spin_unlock(&clist->lock);
                /* drop the list's ref from l2tp_session_collision_add */
                if (refcount_dec_and_test(&clist->ref_count))
                        kfree(clist);
                /* drop the ref taken in l2tp_session_coll_list_add */
                l2tp_session_dec_refcount(session);
        }
}
446 
/* Register a session with its tunnel and make it visible to lookups.
 * The session ID is first reserved in the relevant per-net IDR with a
 * NULL placeholder; the session pointer is only published (idr_replace)
 * once the session is fully linked to its tunnel, so concurrent RCU
 * readers never observe a half-initialised session.
 * Returns 0 on success or a negative errno.
 */
int l2tp_session_register(struct l2tp_session *session,
                          struct l2tp_tunnel *tunnel)
{
        struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
        struct l2tp_session *other_session = NULL;
        u32 session_key;
        int err;

        spin_lock_bh(&tunnel->list_lock);
        spin_lock_bh(&pn->l2tp_session_idr_lock);

        /* tunnel is closing: refuse new sessions */
        if (!tunnel->acpt_newsess) {
                err = -ENODEV;
                goto out;
        }

        if (tunnel->version == L2TP_HDR_VER_3) {
                session_key = session->session_id;
                err = idr_alloc_u32(&pn->l2tp_v3_session_idr, NULL,
                                    &session_key, session_key, GFP_ATOMIC);
                /* IP encap expects session IDs to be globally unique, while
                 * UDP encap doesn't. This isn't per the RFC, which says that
                 * sessions are identified only by the session ID, but is to
                 * support existing userspace which depends on it.
                 */
                if (err == -ENOSPC && tunnel->encap == L2TP_ENCAPTYPE_UDP) {
                        other_session = idr_find(&pn->l2tp_v3_session_idr,
                                                 session_key);
                        err = l2tp_session_collision_add(pn, session,
                                                         other_session);
                }
        } else {
                session_key = l2tp_v2_session_key(tunnel->tunnel_id,
                                                  session->session_id);
                err = idr_alloc_u32(&pn->l2tp_v2_session_idr, NULL,
                                    &session_key, session_key, GFP_ATOMIC);
        }

        if (err) {
                /* -ENOSPC from idr_alloc_u32 means the ID is taken */
                if (err == -ENOSPC)
                        err = -EEXIST;
                goto out;
        }

        /* the session holds a tunnel ref until it is deregistered */
        l2tp_tunnel_inc_refcount(tunnel);
        WRITE_ONCE(session->tunnel, tunnel);
        list_add_rcu(&session->list, &tunnel->session_list);

        if (tunnel->version == L2TP_HDR_VER_3) {
                /* colliding sessions are published via the hash table */
                if (!other_session)
                        idr_replace(&pn->l2tp_v3_session_idr, session, session_key);
        } else {
                idr_replace(&pn->l2tp_v2_session_idr, session, session_key);
        }

out:
        spin_unlock_bh(&pn->l2tp_session_idr_lock);
        spin_unlock_bh(&tunnel->list_lock);

        if (!err)
                trace_register_session(session);

        return err;
}
EXPORT_SYMBOL_GPL(l2tp_session_register);
512 
513 /*****************************************************************************
514  * Receive data handling
515  *****************************************************************************/
516 
/* Queue a skb in order. We come here only if the skb has an L2TP sequence
 * number. The reorder queue is kept sorted by ns: insert before the
 * first queued skb with a greater ns, otherwise append at the tail.
 */
static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
        struct sk_buff *skbp;
        struct sk_buff *tmp;
        u32 ns = L2TP_SKB_CB(skb)->ns;

        spin_lock_bh(&session->reorder_q.lock);
        skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
                if (L2TP_SKB_CB(skbp)->ns > ns) {
                        /* found a later packet: this one arrived out of order */
                        __skb_queue_before(&session->reorder_q, skbp, skb);
                        atomic_long_inc(&session->stats.rx_oos_packets);
                        goto out;
                }
        }

        __skb_queue_tail(&session->reorder_q, skb);

out:
        spin_unlock_bh(&session->reorder_q.lock);
}
540 
/* Dequeue a single skb: account tunnel/session stats, advance the
 * expected receive sequence number (nr) if the packet carried one, and
 * hand the skb to the session's receive handler (or free it if no
 * handler is installed).
 */
static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
        struct l2tp_tunnel *tunnel = session->tunnel;
        int length = L2TP_SKB_CB(skb)->length;

        /* We're about to requeue the skb, so return resources
         * to its current owner (a socket receive buffer).
         */
        skb_orphan(skb);

        atomic_long_inc(&tunnel->stats.rx_packets);
        atomic_long_add(length, &tunnel->stats.rx_bytes);
        atomic_long_inc(&session->stats.rx_packets);
        atomic_long_add(length, &session->stats.rx_bytes);

        if (L2TP_SKB_CB(skb)->has_seq) {
                /* Bump our Nr */
                session->nr++;
                session->nr &= session->nr_max;     /* wrap at the seq-space limit */
                trace_session_seqnum_update(session);
        }

        /* call private receive handler */
        if (session->recv_skb)
                (*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
        else
                kfree_skb(skb);
}
571 
/* Dequeue skbs from the session's reorder_q, subject to packet order.
 * Skbs that have been in the queue for too long are simply discarded.
 */
static void l2tp_recv_dequeue(struct l2tp_session *session)
{
        struct sk_buff *skb;
        struct sk_buff *tmp;

        /* If the pkt at the head of the queue has the nr that we
         * expect to send up next, dequeue it and any other
         * in-sequence packets behind it.
         */
start:
        spin_lock_bh(&session->reorder_q.lock);
        skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
                struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);

                /* If the packet has been pending on the queue for too long, discard it */
                if (time_after(jiffies, cb->expires)) {
                        atomic_long_inc(&session->stats.rx_seq_discards);
                        atomic_long_inc(&session->stats.rx_errors);
                        trace_session_pkt_expired(session, cb->ns);
                        /* flag that nr must be resynced to the next packet */
                        session->reorder_skip = 1;
                        __skb_unlink(skb, &session->reorder_q);
                        kfree_skb(skb);
                        continue;
                }

                if (cb->has_seq) {
                        if (session->reorder_skip) {
                                /* resync nr after an expired/skipped packet */
                                session->reorder_skip = 0;
                                session->nr = cb->ns;
                                trace_session_seqnum_reset(session);
                        }
                        /* stop at the first gap in the sequence */
                        if (cb->ns != session->nr)
                                goto out;
                }
                __skb_unlink(skb, &session->reorder_q);

                /* Process the skb. We release the queue lock while we
                 * do so to let other contexts process the queue.
                 */
                spin_unlock_bh(&session->reorder_q.lock);
                l2tp_recv_dequeue_skb(session, skb);
                /* the lock was dropped, so restart from the queue head */
                goto start;
        }

out:
        spin_unlock_bh(&session->reorder_q.lock);
}
622 
623 static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
624 {
625         u32 nws;
626 
627         if (nr >= session->nr)
628                 nws = nr - session->nr;
629         else
630                 nws = (session->nr_max + 1) - (session->nr - nr);
631 
632         return nws < session->nr_window_size;
633 }
634 
/* If packet has sequence numbers, queue it if acceptable. Returns 0 if
 * acceptable, else non-zero.
 */
static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
{
        struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);

        if (!l2tp_seq_check_rx_window(session, cb->ns)) {
                /* Packet sequence number is outside allowed window.
                 * Discard it.
                 */
                trace_session_pkt_outside_rx_window(session, cb->ns);
                goto discard;
        }

        if (session->reorder_timeout != 0) {
                /* Packet reordering enabled. Add skb to session's
                 * reorder queue, in order of ns.
                 */
                l2tp_recv_queue_skb(session, skb);
                goto out;
        }

        /* Packet reordering disabled. Discard out-of-sequence packets, while
         * tracking the number if in-sequence packets after the first OOS packet
         * is seen. After nr_oos_count_max in-sequence packets, reset the
         * sequence number to re-enable packet reception.
         */
        if (cb->ns == session->nr) {
                /* in sequence: accept */
                skb_queue_tail(&session->reorder_q, skb);
        } else {
                u32 nr_oos = cb->ns;
                u32 nr_next = (session->nr_oos + 1) & session->nr_max;

                /* count consecutive arrivals following the last OOS point */
                if (nr_oos == nr_next)
                        session->nr_oos_count++;
                else
                        session->nr_oos_count = 0;

                session->nr_oos = nr_oos;
                if (session->nr_oos_count > session->nr_oos_count_max) {
                        /* too many: resync nr on the next dequeue pass */
                        session->reorder_skip = 1;
                }
                if (!session->reorder_skip) {
                        atomic_long_inc(&session->stats.rx_seq_discards);
                        trace_session_pkt_oos(session, cb->ns);
                        goto discard;
                }
                skb_queue_tail(&session->reorder_q, skb);
        }

out:
        return 0;

discard:
        return 1;
}
692 
693 /* Do receive processing of L2TP data frames. We handle both L2TPv2
694  * and L2TPv3 data frames here.
695  *
696  * L2TPv2 Data Message Header
697  *
698  *  0                   1                   2                   3
699  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
700  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
701  * |T|L|x|x|S|x|O|P|x|x|x|x|  Ver  |          Length (opt)         |
702  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
703  * |           Tunnel ID           |           Session ID          |
704  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
705  * |             Ns (opt)          |             Nr (opt)          |
706  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
707  * |      Offset Size (opt)        |    Offset pad... (opt)
708  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
709  *
710  * Data frames are marked by T=0. All other fields are the same as
711  * those in L2TP control frames.
712  *
713  * L2TPv3 Data Message Header
714  *
715  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
716  * |                      L2TP Session Header                      |
717  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
718  * |                      L2-Specific Sublayer                     |
719  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
720  * |                        Tunnel Payload                      ...
721  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
722  *
723  * L2TPv3 Session Header Over IP
724  *
725  *  0                   1                   2                   3
726  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
727  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
728  * |                           Session ID                          |
729  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
730  * |               Cookie (optional, maximum 64 bits)...
731  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
732  *                                                                 |
733  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
734  *
735  * L2TPv3 L2-Specific Sublayer Format
736  *
737  *  0                   1                   2                   3
738  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
739  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
740  * |x|S|x|x|x|x|x|x|              Sequence Number                  |
741  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
742  *
743  * Cookie value and sublayer format are negotiated with the peer when
744  * the session is set up. Unlike L2TPv2, we do not need to parse the
745  * packet header to determine if optional fields are present.
746  *
747  * Caller must already have parsed the frame and determined that it is
748  * a data (not control) frame before coming here. Fields up to the
749  * session-id have already been parsed and ptr points to the data
750  * after the session-id.
751  */
void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
		      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
		      int length)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int offset;

	/* Parse and check optional cookie */
	if (session->peer_cookie_len > 0) {
		if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
			pr_debug_ratelimited("%s: cookie mismatch (%u/%u). Discarding.\n",
					     tunnel->name, tunnel->tunnel_id,
					     session->session_id);
			atomic_long_inc(&session->stats.rx_cookie_discards);
			goto discard;
		}
		ptr += session->peer_cookie_len;
	}

	/* Handle the optional sequence numbers. Sequence numbers are
	 * in different places for L2TPv2 and L2TPv3.
	 *
	 * If we are the LAC, enable/disable sequence numbers under
	 * the control of the LNS.  If no sequence numbers present but
	 * we were expecting them, discard frame.
	 */
	L2TP_SKB_CB(skb)->has_seq = 0;
	if (tunnel->version == L2TP_HDR_VER_2) {
		if (hdrflags & L2TP_HDRFLAG_S) {
			/* Store L2TP info in the skb */
			L2TP_SKB_CB(skb)->ns = ntohs(*(__be16 *)ptr);
			L2TP_SKB_CB(skb)->has_seq = 1;
			ptr += 2;
			/* Skip past nr in the header */
			ptr += 2;

		}
	} else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
		/* L2TPv3 default L2-specific sublayer: one 32-bit word.
		 * Bit 0x40000000 (S) indicates that a 24-bit sequence
		 * number is carried in the low-order bits.
		 */
		u32 l2h = ntohl(*(__be32 *)ptr);

		if (l2h & 0x40000000) {
			/* Store L2TP info in the skb */
			L2TP_SKB_CB(skb)->ns = l2h & 0x00ffffff;
			L2TP_SKB_CB(skb)->has_seq = 1;
		}
		ptr += 4;
	}

	if (L2TP_SKB_CB(skb)->has_seq) {
		/* Received a packet with sequence numbers. If we're the LAC,
		 * check if we are sending sequence numbers and if not,
		 * configure it so.
		 */
		if (!session->lns_mode && !session->send_seq) {
			trace_session_seqnum_lns_enable(session);
			session->send_seq = 1;
			l2tp_session_set_header_len(session, tunnel->version,
						    tunnel->encap);
		}
	} else {
		/* No sequence numbers.
		 * If user has configured mandatory sequence numbers, discard.
		 */
		if (session->recv_seq) {
			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
					     session->name);
			atomic_long_inc(&session->stats.rx_seq_discards);
			goto discard;
		}

		/* If we're the LAC and we're sending sequence numbers, the
		 * LNS has requested that we no longer send sequence numbers.
		 * If we're the LNS and we're sending sequence numbers, the
		 * LAC is broken. Discard the frame.
		 */
		if (!session->lns_mode && session->send_seq) {
			trace_session_seqnum_lns_disable(session);
			session->send_seq = 0;
			l2tp_session_set_header_len(session, tunnel->version,
						    tunnel->encap);
		} else if (session->send_seq) {
			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
					     session->name);
			atomic_long_inc(&session->stats.rx_seq_discards);
			goto discard;
		}
	}

	/* Session data offset is defined only for L2TPv2 and is
	 * indicated by an optional 16-bit value in the header.
	 */
	if (tunnel->version == L2TP_HDR_VER_2) {
		/* If offset bit set, skip it. */
		if (hdrflags & L2TP_HDRFLAG_O) {
			offset = ntohs(*(__be16 *)ptr);
			ptr += 2 + offset;
		}
	}

	/* Make sure everything parsed so far (optr..ptr) really is in the
	 * linear skb area before pulling it off the front of the packet.
	 */
	offset = ptr - optr;
	if (!pskb_may_pull(skb, offset))
		goto discard;

	__skb_pull(skb, offset);

	/* Prepare skb for adding to the session's reorder_q.  Hold
	 * packets for max reorder_timeout or 1 second if not
	 * reordering.
	 */
	L2TP_SKB_CB(skb)->length = length;
	L2TP_SKB_CB(skb)->expires = jiffies +
		(session->reorder_timeout ? session->reorder_timeout : HZ);

	/* Add packet to the session's receive queue. Reordering is done here, if
	 * enabled. Saved L2TP protocol info is stored in skb->cb[].
	 */
	if (L2TP_SKB_CB(skb)->has_seq) {
		if (l2tp_recv_data_seq(session, skb))
			goto discard;
	} else {
		/* No sequence numbers. Add the skb to the tail of the
		 * reorder queue. This ensures that it will be
		 * delivered after all previous sequenced skbs.
		 */
		skb_queue_tail(&session->reorder_q, skb);
	}

	/* Try to dequeue as many skbs from reorder_q as we can. */
	l2tp_recv_dequeue(session);

	return;

discard:
	atomic_long_inc(&session->stats.rx_errors);
	kfree_skb(skb);
}
888 EXPORT_SYMBOL_GPL(l2tp_recv_common);
889 
890 /* Drop skbs from the session's reorder_q
891  */
892 static void l2tp_session_queue_purge(struct l2tp_session *session)
893 {
894         struct sk_buff *skb = NULL;
895 
896         while ((skb = skb_dequeue(&session->reorder_q))) {
897                 atomic_long_inc(&session->stats.rx_errors);
898                 kfree_skb(skb);
899         }
900 }
901 
902 /* UDP encapsulation receive handler. See net/ipv4/udp.c for details. */
903 int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
904 {
905         struct l2tp_session *session = NULL;
906         struct l2tp_tunnel *tunnel = NULL;
907         struct net *net = sock_net(sk);
908         unsigned char *ptr, *optr;
909         u16 hdrflags;
910         u16 version;
911         int length;
912 
913         /* UDP has verified checksum */
914 
915         /* UDP always verifies the packet length. */
916         __skb_pull(skb, sizeof(struct udphdr));
917 
918         /* Short packet? */
919         if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX))
920                 goto pass;
921 
922         /* Point to L2TP header */
923         optr = skb->data;
924         ptr = skb->data;
925 
926         /* Get L2TP header flags */
927         hdrflags = ntohs(*(__be16 *)ptr);
928 
929         /* Get protocol version */
930         version = hdrflags & L2TP_HDR_VER_MASK;
931 
932         /* Get length of L2TP packet */
933         length = skb->len;
934 
935         /* If type is control packet, it is handled by userspace. */
936         if (hdrflags & L2TP_HDRFLAG_T)
937                 goto pass;
938 
939         /* Skip flags */
940         ptr += 2;
941 
942         if (version == L2TP_HDR_VER_2) {
943                 u16 tunnel_id, session_id;
944 
945                 /* If length is present, skip it */
946                 if (hdrflags & L2TP_HDRFLAG_L)
947                         ptr += 2;
948 
949                 /* Extract tunnel and session ID */
950                 tunnel_id = ntohs(*(__be16 *)ptr);
951                 ptr += 2;
952                 session_id = ntohs(*(__be16 *)ptr);
953                 ptr += 2;
954 
955                 session = l2tp_v2_session_get(net, tunnel_id, session_id);
956         } else {
957                 u32 session_id;
958 
959                 ptr += 2;       /* skip reserved bits */
960                 session_id = ntohl(*(__be32 *)ptr);
961                 ptr += 4;
962 
963                 session = l2tp_v3_session_get(net, sk, session_id);
964         }
965 
966         if (!session || !session->recv_skb) {
967                 if (session)
968                         l2tp_session_dec_refcount(session);
969 
970                 /* Not found? Pass to userspace to deal with */
971                 goto pass;
972         }
973 
974         tunnel = session->tunnel;
975 
976         /* Check protocol version */
977         if (version != tunnel->version)
978                 goto invalid;
979 
980         if (version == L2TP_HDR_VER_3 &&
981             l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
982                 l2tp_session_dec_refcount(session);
983                 goto invalid;
984         }
985 
986         l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
987         l2tp_session_dec_refcount(session);
988 
989         return 0;
990 
991 invalid:
992         atomic_long_inc(&tunnel->stats.rx_invalid);
993 
994 pass:
995         /* Put UDP header back */
996         __skb_push(skb, sizeof(struct udphdr));
997 
998         return 1;
999 }
1000 EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
1001 
1002 /* UDP encapsulation receive error handler. See net/ipv4/udp.c for details. */
1003 static void l2tp_udp_encap_err_recv(struct sock *sk, struct sk_buff *skb, int err,
1004                                     __be16 port, u32 info, u8 *payload)
1005 {
1006         sk->sk_err = err;
1007         sk_error_report(sk);
1008 
1009         if (ip_hdr(skb)->version == IPVERSION) {
1010                 if (inet_test_bit(RECVERR, sk))
1011                         return ip_icmp_error(sk, skb, err, port, info, payload);
1012 #if IS_ENABLED(CONFIG_IPV6)
1013         } else {
1014                 if (inet6_test_bit(RECVERR6, sk))
1015                         return ipv6_icmp_error(sk, skb, err, port, info, payload);
1016 #endif
1017         }
1018 }
1019 
1020 /************************************************************************
1021  * Transmit handling
1022  ***********************************************************************/
1023 
1024 /* Build an L2TP header for the session into the buffer provided.
1025  */
1026 static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
1027 {
1028         struct l2tp_tunnel *tunnel = session->tunnel;
1029         __be16 *bufp = buf;
1030         __be16 *optr = buf;
1031         u16 flags = L2TP_HDR_VER_2;
1032         u32 tunnel_id = tunnel->peer_tunnel_id;
1033         u32 session_id = session->peer_session_id;
1034 
1035         if (session->send_seq)
1036                 flags |= L2TP_HDRFLAG_S;
1037 
1038         /* Setup L2TP header. */
1039         *bufp++ = htons(flags);
1040         *bufp++ = htons(tunnel_id);
1041         *bufp++ = htons(session_id);
1042         if (session->send_seq) {
1043                 *bufp++ = htons(session->ns);
1044                 *bufp++ = 0;
1045                 session->ns++;
1046                 session->ns &= 0xffff;
1047                 trace_session_seqnum_update(session);
1048         }
1049 
1050         return bufp - optr;
1051 }
1052 
1053 static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
1054 {
1055         struct l2tp_tunnel *tunnel = session->tunnel;
1056         char *bufp = buf;
1057         char *optr = bufp;
1058 
1059         /* Setup L2TP header. The header differs slightly for UDP and
1060          * IP encapsulations. For UDP, there is 4 bytes of flags.
1061          */
1062         if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
1063                 u16 flags = L2TP_HDR_VER_3;
1064                 *((__be16 *)bufp) = htons(flags);
1065                 bufp += 2;
1066                 *((__be16 *)bufp) = 0;
1067                 bufp += 2;
1068         }
1069 
1070         *((__be32 *)bufp) = htonl(session->peer_session_id);
1071         bufp += 4;
1072         if (session->cookie_len) {
1073                 memcpy(bufp, &session->cookie[0], session->cookie_len);
1074                 bufp += session->cookie_len;
1075         }
1076         if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
1077                 u32 l2h = 0;
1078 
1079                 if (session->send_seq) {
1080                         l2h = 0x40000000 | session->ns;
1081                         session->ns++;
1082                         session->ns &= 0xffffff;
1083                         trace_session_seqnum_update(session);
1084                 }
1085 
1086                 *((__be32 *)bufp) = htonl(l2h);
1087                 bufp += 4;
1088         }
1089 
1090         return bufp - optr;
1091 }
1092 
1093 /* Queue the packet to IP for output: tunnel socket lock must be held */
1094 static int l2tp_xmit_queue(struct l2tp_tunnel *tunnel, struct sk_buff *skb, struct flowi *fl)
1095 {
1096         int err;
1097 
1098         skb->ignore_df = 1;
1099         skb_dst_drop(skb);
1100 #if IS_ENABLED(CONFIG_IPV6)
1101         if (l2tp_sk_is_v6(tunnel->sock))
1102                 err = inet6_csk_xmit(tunnel->sock, skb, NULL);
1103         else
1104 #endif
1105                 err = ip_queue_xmit(tunnel->sock, skb, fl);
1106 
1107         return err >= 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
1108 }
1109 
static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, unsigned int *len)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	unsigned int data_len = skb->len;
	struct sock *sk = tunnel->sock;
	int headroom, uhlen, udp_len;
	int ret = NET_XMIT_SUCCESS;
	struct inet_sock *inet;
	struct udphdr *uh;

	/* Check that there's enough headroom in the skb to insert IP,
	 * UDP and L2TP headers. If not enough, expand it to
	 * make room. Adjust truesize.
	 */
	uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(*uh) : 0;
	headroom = NET_SKB_PAD + sizeof(struct iphdr) + uhlen + session->hdr_len;
	if (skb_cow_head(skb, headroom)) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	/* Setup L2TP header */
	if (tunnel->version == L2TP_HDR_VER_2)
		l2tp_build_l2tpv2_header(session, __skb_push(skb, session->hdr_len));
	else
		l2tp_build_l2tpv3_header(session, __skb_push(skb, session->hdr_len));

	/* Reset skb netfilter state */
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
	nf_reset_ct(skb);

	/* L2TP uses its own lockdep subclass to avoid lockdep splats caused by
	 * nested socket calls on the same lockdep socket class. This can
	 * happen when data from a user socket is routed over l2tp, which uses
	 * another userspace socket.
	 */
	spin_lock_nested(&sk->sk_lock.slock, L2TP_DEPTH_NESTING);

	if (sock_owned_by_user(sk)) {
		/* Socket is busy in process context; drop rather than queue. */
		kfree_skb(skb);
		ret = NET_XMIT_DROP;
		goto out_unlock;
	}

	/* The user-space may change the connection status for the user-space
	 * provided socket at run time: we must check it under the socket lock
	 */
	if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
		kfree_skb(skb);
		ret = NET_XMIT_DROP;
		goto out_unlock;
	}

	/* Report transmitted length before we add encap header, which keeps
	 * statistics consistent for both UDP and IP encap tx/rx paths.
	 */
	*len = skb->len;

	inet = inet_sk(sk);
	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		/* Setup UDP header */
		__skb_push(skb, sizeof(*uh));
		skb_reset_transport_header(skb);
		uh = udp_hdr(skb);
		uh->source = inet->inet_sport;
		uh->dest = inet->inet_dport;
		/* UDP length field covers UDP header + L2TP header + payload */
		udp_len = uhlen + session->hdr_len + data_len;
		uh->len = htons(udp_len);

		/* Calculate UDP checksum if configured to do so */
#if IS_ENABLED(CONFIG_IPV6)
		if (l2tp_sk_is_v6(sk))
			udp6_set_csum(udp_get_no_check6_tx(sk),
				      skb, &inet6_sk(sk)->saddr,
				      &sk->sk_v6_daddr, udp_len);
		else
#endif
			udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
				     inet->inet_daddr, udp_len);
		break;

	case L2TP_ENCAPTYPE_IP:
		/* No transport header for plain IP encapsulation */
		break;
	}

	ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl);

out_unlock:
	spin_unlock(&sk->sk_lock.slock);

	return ret;
}
1204 
1205 /* If caller requires the skb to have a ppp header, the header must be
1206  * inserted in the skb data before calling this function.
1207  */
1208 int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb)
1209 {
1210         unsigned int len = 0;
1211         int ret;
1212 
1213         ret = l2tp_xmit_core(session, skb, &len);
1214         if (ret == NET_XMIT_SUCCESS) {
1215                 atomic_long_inc(&session->tunnel->stats.tx_packets);
1216                 atomic_long_add(len, &session->tunnel->stats.tx_bytes);
1217                 atomic_long_inc(&session->stats.tx_packets);
1218                 atomic_long_add(len, &session->stats.tx_bytes);
1219         } else {
1220                 atomic_long_inc(&session->tunnel->stats.tx_errors);
1221                 atomic_long_inc(&session->stats.tx_errors);
1222         }
1223         return ret;
1224 }
1225 EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1226 
1227 /*****************************************************************************
 * Tunnel and session create/destroy.
1229  *****************************************************************************/
1230 
1231 /* Tunnel socket destruct hook.
1232  * The tunnel context is deleted only when all session sockets have been
1233  * closed.
1234  */
1235 static void l2tp_tunnel_destruct(struct sock *sk)
1236 {
1237         struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
1238 
1239         if (!tunnel)
1240                 goto end;
1241 
1242         /* Disable udp encapsulation */
1243         switch (tunnel->encap) {
1244         case L2TP_ENCAPTYPE_UDP:
1245                 /* No longer an encapsulation socket. See net/ipv4/udp.c */
1246                 WRITE_ONCE(udp_sk(sk)->encap_type, 0);
1247                 udp_sk(sk)->encap_rcv = NULL;
1248                 udp_sk(sk)->encap_destroy = NULL;
1249                 break;
1250         case L2TP_ENCAPTYPE_IP:
1251                 break;
1252         }
1253 
1254         /* Remove hooks into tunnel socket */
1255         write_lock_bh(&sk->sk_callback_lock);
1256         sk->sk_destruct = tunnel->old_sk_destruct;
1257         sk->sk_user_data = NULL;
1258         write_unlock_bh(&sk->sk_callback_lock);
1259 
1260         /* Call the original destructor */
1261         if (sk->sk_destruct)
1262                 (*sk->sk_destruct)(sk);
1263 
1264         kfree_rcu(tunnel, rcu);
1265 end:
1266         return;
1267 }
1268 
1269 /* Remove an l2tp session from l2tp_core's lists. */
1270 static void l2tp_session_unhash(struct l2tp_session *session)
1271 {
1272         struct l2tp_tunnel *tunnel = session->tunnel;
1273 
1274         if (tunnel) {
1275                 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1276                 struct l2tp_session *removed = session;
1277 
1278                 spin_lock_bh(&tunnel->list_lock);
1279                 spin_lock_bh(&pn->l2tp_session_idr_lock);
1280 
1281                 /* Remove from the per-tunnel list */
1282                 list_del_init(&session->list);
1283 
1284                 /* Remove from per-net IDR */
1285                 if (tunnel->version == L2TP_HDR_VER_3) {
1286                         if (hash_hashed(&session->hlist))
1287                                 l2tp_session_collision_del(pn, session);
1288                         else
1289                                 removed = idr_remove(&pn->l2tp_v3_session_idr,
1290                                                      session->session_id);
1291                 } else {
1292                         u32 session_key = l2tp_v2_session_key(tunnel->tunnel_id,
1293                                                               session->session_id);
1294                         removed = idr_remove(&pn->l2tp_v2_session_idr,
1295                                              session_key);
1296                 }
1297                 WARN_ON_ONCE(removed && removed != session);
1298 
1299                 spin_unlock_bh(&pn->l2tp_session_idr_lock);
1300                 spin_unlock_bh(&tunnel->list_lock);
1301         }
1302 }
1303 
1304 /* When the tunnel is closed, all the attached sessions need to go too.
1305  */
1306 static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
1307 {
1308         struct l2tp_session *session;
1309 
1310         spin_lock_bh(&tunnel->list_lock);
1311         tunnel->acpt_newsess = false;
1312         for (;;) {
1313                 session = list_first_entry_or_null(&tunnel->session_list,
1314                                                    struct l2tp_session, list);
1315                 if (!session)
1316                         break;
1317                 l2tp_session_inc_refcount(session);
1318                 list_del_init(&session->list);
1319                 spin_unlock_bh(&tunnel->list_lock);
1320                 l2tp_session_delete(session);
1321                 spin_lock_bh(&tunnel->list_lock);
1322                 l2tp_session_dec_refcount(session);
1323         }
1324         spin_unlock_bh(&tunnel->list_lock);
1325 }
1326 
1327 /* Tunnel socket destroy hook for UDP encapsulation */
/* Tunnel socket destroy hook for UDP encapsulation */
static void l2tp_udp_encap_destroy(struct sock *sk)
{
	struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);

	if (!tunnel)
		return;

	l2tp_tunnel_delete(tunnel);
}
1335 
/* Remove the tunnel from the per-net tunnel IDR so lookups no longer
 * find it.
 */
static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel)
{
	struct l2tp_net *pn = l2tp_pernet(net);

	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
	idr_remove(&pn->l2tp_tunnel_idr, tunnel->tunnel_id);
	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
}
1344 
/* Workqueue tunnel deletion function.
 * Closes all sessions, releases a kernel-created tunnel socket, removes
 * the tunnel from the per-net IDR, then drops both the initial and the
 * workqueue references to the tunnel.
 */
static void l2tp_tunnel_del_work(struct work_struct *work)
{
	struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
						  del_work);
	struct sock *sk = tunnel->sock;
	struct socket *sock = sk->sk_socket;

	l2tp_tunnel_closeall(tunnel);

	/* If the tunnel socket was created within the kernel, use
	 * the sk API to release it here.
	 * (fd >= 0 means a userspace-managed socket, which userspace owns.)
	 */
	if (tunnel->fd < 0) {
		if (sock) {
			kernel_sock_shutdown(sock, SHUT_RDWR);
			sock_release(sock);
		}
	}

	l2tp_tunnel_remove(tunnel->l2tp_net, tunnel);
	/* drop initial ref */
	l2tp_tunnel_dec_refcount(tunnel);

	/* drop workqueue ref */
	l2tp_tunnel_dec_refcount(tunnel);
}
1372 
1373 /* Create a socket for the tunnel, if one isn't set up by
1374  * userspace. This is used for static tunnels where there is no
1375  * managing L2TP daemon.
1376  *
1377  * Since we don't want these sockets to keep a namespace alive by
1378  * themselves, we drop the socket's namespace refcount after creation.
1379  * These sockets are freed when the namespace exits using the pernet
1380  * exit hook.
1381  */
static int l2tp_tunnel_sock_create(struct net *net,
				   u32 tunnel_id,
				   u32 peer_tunnel_id,
				   struct l2tp_tunnel_cfg *cfg,
				   struct socket **sockp)
{
	int err = -EINVAL;
	struct socket *sock = NULL;
	struct udp_port_cfg udp_conf;

	switch (cfg->encap) {
	case L2TP_ENCAPTYPE_UDP:
		memset(&udp_conf, 0, sizeof(udp_conf));

#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			udp_conf.family = AF_INET6;
			memcpy(&udp_conf.local_ip6, cfg->local_ip6,
			       sizeof(udp_conf.local_ip6));
			memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
			       sizeof(udp_conf.peer_ip6));
			/* Zero UDP6 checksums may be explicitly allowed
			 * by configuration.
			 */
			udp_conf.use_udp6_tx_checksums =
			  !cfg->udp6_zero_tx_checksums;
			udp_conf.use_udp6_rx_checksums =
			  !cfg->udp6_zero_rx_checksums;
		} else
#endif
		{
			udp_conf.family = AF_INET;
			udp_conf.local_ip = cfg->local_ip;
			udp_conf.peer_ip = cfg->peer_ip;
			udp_conf.use_udp_checksums = cfg->use_udp_checksums;
		}

		udp_conf.local_udp_port = htons(cfg->local_udp_port);
		udp_conf.peer_udp_port = htons(cfg->peer_udp_port);

		err = udp_sock_create(net, &udp_conf, &sock);
		if (err < 0)
			goto out;

		break;

	case L2TP_ENCAPTYPE_IP:
#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			struct sockaddr_l2tpip6 ip6_addr = {0};

			err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
					       IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			/* Bind to the local address and our tunnel id */
			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *)&ip6_addr,
					  sizeof(ip6_addr));
			if (err < 0)
				goto out;

			/* Connect to the peer address and its tunnel id */
			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock,
					     (struct sockaddr *)&ip6_addr,
					     sizeof(ip6_addr), 0);
			if (err < 0)
				goto out;
		} else
#endif
		{
			struct sockaddr_l2tpip ip_addr = {0};

			err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
					       IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			/* Bind to the local address and our tunnel id */
			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->local_ip;
			ip_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *)&ip_addr,
					  sizeof(ip_addr));
			if (err < 0)
				goto out;

			/* Connect to the peer address and its tunnel id */
			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->peer_ip;
			ip_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock, (struct sockaddr *)&ip_addr,
					     sizeof(ip_addr), 0);
			if (err < 0)
				goto out;
		}
		break;

	default:
		goto out;
	}

out:
	*sockp = sock;
	/* On any failure, tear down whatever socket was created so far. */
	if (err < 0 && sock) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sock_release(sock);
		*sockp = NULL;
	}

	return err;
}
1495 
1496 int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
1497                        struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
1498 {
1499         struct l2tp_tunnel *tunnel = NULL;
1500         int err;
1501         enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
1502 
1503         if (cfg)
1504                 encap = cfg->encap;
1505 
1506         tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
1507         if (!tunnel) {
1508                 err = -ENOMEM;
1509                 goto err;
1510         }
1511 
1512         tunnel->version = version;
1513         tunnel->tunnel_id = tunnel_id;
1514         tunnel->peer_tunnel_id = peer_tunnel_id;
1515 
1516         tunnel->magic = L2TP_TUNNEL_MAGIC;
1517         sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
1518         spin_lock_init(&tunnel->list_lock);
1519         tunnel->acpt_newsess = true;
1520         INIT_LIST_HEAD(&tunnel->session_list);
1521 
1522         tunnel->encap = encap;
1523 
1524         refcount_set(&tunnel->ref_count, 1);
1525         tunnel->fd = fd;
1526 
1527         /* Init delete workqueue struct */
1528         INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
1529 
1530         err = 0;
1531 err:
1532         if (tunnelp)
1533                 *tunnelp = tunnel;
1534 
1535         return err;
1536 }
1537 EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1538 
1539 static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
1540                                 enum l2tp_encap_type encap)
1541 {
1542         if (!net_eq(sock_net(sk), net))
1543                 return -EINVAL;
1544 
1545         if (sk->sk_type != SOCK_DGRAM)
1546                 return -EPROTONOSUPPORT;
1547 
1548         if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
1549                 return -EPROTONOSUPPORT;
1550 
1551         if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
1552             (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
1553                 return -EPROTONOSUPPORT;
1554 
1555         if (sk->sk_user_data)
1556                 return -EBUSY;
1557 
1558         return 0;
1559 }
1560 
int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
			 struct l2tp_tunnel_cfg *cfg)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	u32 tunnel_id = tunnel->tunnel_id;
	struct socket *sock;
	struct sock *sk;
	int ret;

	/* Reserve the tunnel id in the per-net IDR with a NULL placeholder.
	 * The tunnel pointer is only published via idr_replace() below,
	 * once the socket is fully set up. -ENOSPC from a fixed-range
	 * allocation means the id is already taken.
	 */
	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
	ret = idr_alloc_u32(&pn->l2tp_tunnel_idr, NULL, &tunnel_id, tunnel_id,
			    GFP_ATOMIC);
	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
	if (ret)
		return ret == -ENOSPC ? -EEXIST : ret;

	if (tunnel->fd < 0) {
		/* Unmanaged tunnel: create a kernel socket. */
		ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
					      tunnel->peer_tunnel_id, cfg,
					      &sock);
		if (ret < 0)
			goto err;
	} else {
		/* Managed tunnel: adopt the userspace-provided socket. */
		sock = sockfd_lookup(tunnel->fd, &ret);
		if (!sock)
			goto err;
	}

	sk = sock->sk;
	lock_sock(sk);
	write_lock_bh(&sk->sk_callback_lock);
	ret = l2tp_validate_socket(sk, net, tunnel->encap);
	if (ret < 0)
		goto err_inval_sock;
	rcu_assign_sk_user_data(sk, tunnel);
	write_unlock_bh(&sk->sk_callback_lock);

	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
		/* Steer incoming UDP packets on this socket into
		 * l2tp_udp_encap_recv().
		 */
		struct udp_tunnel_sock_cfg udp_cfg = {
			.sk_user_data = tunnel,
			.encap_type = UDP_ENCAP_L2TPINUDP,
			.encap_rcv = l2tp_udp_encap_recv,
			.encap_err_rcv = l2tp_udp_encap_err_recv,
			.encap_destroy = l2tp_udp_encap_destroy,
		};

		setup_udp_tunnel_sock(net, sock, &udp_cfg);
	}

	/* Hook our destructor, keeping the original so that
	 * l2tp_tunnel_destruct() can chain to it.
	 */
	tunnel->old_sk_destruct = sk->sk_destruct;
	sk->sk_destruct = &l2tp_tunnel_destruct;
	sk->sk_allocation = GFP_ATOMIC;
	release_sock(sk);

	sock_hold(sk);
	tunnel->sock = sk;
	tunnel->l2tp_net = net;

	/* Publish the fully set-up tunnel in place of the placeholder. */
	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
	idr_replace(&pn->l2tp_tunnel_idr, tunnel, tunnel->tunnel_id);
	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);

	trace_register_tunnel(tunnel);

	if (tunnel->fd >= 0)
		sockfd_put(sock);

	return 0;

err_inval_sock:
	write_unlock_bh(&sk->sk_callback_lock);
	release_sock(sk);

	/* Kernel-created sockets are released; adopted fds only unpinned. */
	if (tunnel->fd < 0)
		sock_release(sock);
	else
		sockfd_put(sock);
err:
	l2tp_tunnel_remove(net, tunnel);
	return ret;
}
1642 EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
1643 
1644 /* This function is used by the netlink TUNNEL_DELETE command.
1645  */
1646 void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1647 {
1648         if (!test_and_set_bit(0, &tunnel->dead)) {
1649                 trace_delete_tunnel(tunnel);
1650                 l2tp_tunnel_inc_refcount(tunnel);
1651                 queue_work(l2tp_wq, &tunnel->del_work);
1652         }
1653 }
1654 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1655 
1656 void l2tp_session_delete(struct l2tp_session *session)
1657 {
1658         if (test_and_set_bit(0, &session->dead))
1659                 return;
1660 
1661         trace_delete_session(session);
1662         l2tp_session_unhash(session);
1663         l2tp_session_queue_purge(session);
1664         if (session->session_close)
1665                 (*session->session_close)(session);
1666 
1667         l2tp_session_dec_refcount(session);
1668 }
1669 EXPORT_SYMBOL_GPL(l2tp_session_delete);
1670 
1671 /* We come here whenever a session's send_seq, cookie_len or
1672  * l2specific_type parameters are set.
1673  */
1674 void l2tp_session_set_header_len(struct l2tp_session *session, int version,
1675                                  enum l2tp_encap_type encap)
1676 {
1677         if (version == L2TP_HDR_VER_2) {
1678                 session->hdr_len = 6;
1679                 if (session->send_seq)
1680                         session->hdr_len += 4;
1681         } else {
1682                 session->hdr_len = 4 + session->cookie_len;
1683                 session->hdr_len += l2tp_get_l2specific_len(session);
1684                 if (encap == L2TP_ENCAPTYPE_UDP)
1685                         session->hdr_len += 4;
1686         }
1687 }
1688 EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
1689 
1690 struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id,
1691                                          u32 peer_session_id, struct l2tp_session_cfg *cfg)
1692 {
1693         struct l2tp_session *session;
1694 
1695         session = kzalloc(sizeof(*session) + priv_size, GFP_KERNEL);
1696         if (session) {
1697                 session->magic = L2TP_SESSION_MAGIC;
1698 
1699                 session->session_id = session_id;
1700                 session->peer_session_id = peer_session_id;
1701                 session->nr = 0;
1702                 if (tunnel->version == L2TP_HDR_VER_2)
1703                         session->nr_max = 0xffff;
1704                 else
1705                         session->nr_max = 0xffffff;
1706                 session->nr_window_size = session->nr_max / 2;
1707                 session->nr_oos_count_max = 4;
1708 
1709                 /* Use NR of first received packet */
1710                 session->reorder_skip = 1;
1711 
1712                 sprintf(&session->name[0], "sess %u/%u",
1713                         tunnel->tunnel_id, session->session_id);
1714 
1715                 skb_queue_head_init(&session->reorder_q);
1716 
1717                 session->hlist_key = l2tp_v3_session_hashkey(tunnel->sock, session->session_id);
1718                 INIT_HLIST_NODE(&session->hlist);
1719                 INIT_LIST_HEAD(&session->clist);
1720                 INIT_LIST_HEAD(&session->list);
1721 
1722                 if (cfg) {
1723                         session->pwtype = cfg->pw_type;
1724                         session->send_seq = cfg->send_seq;
1725                         session->recv_seq = cfg->recv_seq;
1726                         session->lns_mode = cfg->lns_mode;
1727                         session->reorder_timeout = cfg->reorder_timeout;
1728                         session->l2specific_type = cfg->l2specific_type;
1729                         session->cookie_len = cfg->cookie_len;
1730                         memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
1731                         session->peer_cookie_len = cfg->peer_cookie_len;
1732                         memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
1733                 }
1734 
1735                 l2tp_session_set_header_len(session, tunnel->version, tunnel->encap);
1736 
1737                 refcount_set(&session->ref_count, 1);
1738 
1739                 return session;
1740         }
1741 
1742         return ERR_PTR(-ENOMEM);
1743 }
1744 EXPORT_SYMBOL_GPL(l2tp_session_create);
1745 
1746 /*****************************************************************************
1747  * Init and cleanup
1748  *****************************************************************************/
1749 
1750 static __net_init int l2tp_init_net(struct net *net)
1751 {
1752         struct l2tp_net *pn = net_generic(net, l2tp_net_id);
1753 
1754         idr_init(&pn->l2tp_tunnel_idr);
1755         spin_lock_init(&pn->l2tp_tunnel_idr_lock);
1756 
1757         idr_init(&pn->l2tp_v2_session_idr);
1758         idr_init(&pn->l2tp_v3_session_idr);
1759         spin_lock_init(&pn->l2tp_session_idr_lock);
1760 
1761         return 0;
1762 }
1763 
/* Per-netns teardown: delete every remaining tunnel, wait for all
 * deferred teardown to complete, then destroy the ID tables.
 */
static __net_exit void l2tp_exit_net(struct net *net)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel = NULL;
	unsigned long tunnel_id, tmp;

	/* l2tp_tunnel_delete() only marks the tunnel dead and queues work,
	 * so it is safe to call while walking the IDR under the RCU read
	 * lock.
	 */
	rcu_read_lock_bh();
	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
		if (tunnel)
			l2tp_tunnel_delete(tunnel);
	}
	rcu_read_unlock_bh();

	/* Wait for the queued tunnel deletion work to run... */
	if (l2tp_wq)
		flush_workqueue(l2tp_wq);
	/* ...and for any outstanding RCU callbacks to finish. */
	rcu_barrier();

	/* Tables must be empty by now; release their bookkeeping. */
	idr_destroy(&pn->l2tp_v2_session_idr);
	idr_destroy(&pn->l2tp_v3_session_idr);
	idr_destroy(&pn->l2tp_tunnel_idr);
}
1785 
/* Per-network-namespace hooks; .size makes the core allocate a
 * struct l2tp_net for each netns, retrievable via l2tp_net_id.
 */
static struct pernet_operations l2tp_net_ops = {
	.init = l2tp_init_net,
	.exit = l2tp_exit_net,
	.id   = &l2tp_net_id,
	.size = sizeof(struct l2tp_net),
};
1792 
1793 static int __init l2tp_init(void)
1794 {
1795         int rc = 0;
1796 
1797         rc = register_pernet_device(&l2tp_net_ops);
1798         if (rc)
1799                 goto out;
1800 
1801         l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
1802         if (!l2tp_wq) {
1803                 pr_err("alloc_workqueue failed\n");
1804                 unregister_pernet_device(&l2tp_net_ops);
1805                 rc = -ENOMEM;
1806                 goto out;
1807         }
1808 
1809         pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
1810 
1811 out:
1812         return rc;
1813 }
1814 
1815 static void __exit l2tp_exit(void)
1816 {
1817         unregister_pernet_device(&l2tp_net_ops);
1818         if (l2tp_wq) {
1819                 destroy_workqueue(l2tp_wq);
1820                 l2tp_wq = NULL;
1821         }
1822 }
1823 
/* Module entry/exit points and metadata. */
module_init(l2tp_init);
module_exit(l2tp_exit);

MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP core");
MODULE_LICENSE("GPL");
MODULE_VERSION(L2TP_DRV_VERSION);
1831 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php