TOMOYO Linux Cross Reference
Linux/net/sched/act_ct.c

  1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
  2 /* -
  3  * net/sched/act_ct.c  Connection Tracking action
  4  *
  5  * Authors:   Paul Blakey <paulb@mellanox.com>
  6  *            Yossi Kuperman <yossiku@mellanox.com>
  7  *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
  8  */
  9 
 10 #include <linux/module.h>
 11 #include <linux/init.h>
 12 #include <linux/kernel.h>
 13 #include <linux/skbuff.h>
 14 #include <linux/rtnetlink.h>
 15 #include <linux/pkt_cls.h>
 16 #include <linux/ip.h>
 17 #include <linux/ipv6.h>
 18 #include <linux/rhashtable.h>
 19 #include <net/netlink.h>
 20 #include <net/pkt_sched.h>
 21 #include <net/pkt_cls.h>
 22 #include <net/act_api.h>
 23 #include <net/ip.h>
 24 #include <net/ipv6_frag.h>
 25 #include <uapi/linux/tc_act/tc_ct.h>
 26 #include <net/tc_act/tc_ct.h>
 27 #include <net/tc_wrapper.h>
 28 
 29 #include <net/netfilter/nf_flow_table.h>
 30 #include <net/netfilter/nf_conntrack.h>
 31 #include <net/netfilter/nf_conntrack_core.h>
 32 #include <net/netfilter/nf_conntrack_zones.h>
 33 #include <net/netfilter/nf_conntrack_helper.h>
 34 #include <net/netfilter/nf_conntrack_acct.h>
 35 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
 36 #include <net/netfilter/nf_conntrack_act_ct.h>
 37 #include <net/netfilter/nf_conntrack_seqadj.h>
 38 #include <uapi/linux/netfilter/nf_nat.h>
 39 
 40 static struct workqueue_struct *act_ct_wq;
 41 static struct rhashtable zones_ht;
 42 static DEFINE_MUTEX(zones_mutex);
 43 
 44 struct zones_ht_key {
 45         struct net *net;
 46         u16 zone;
 47         /* Note: pad[] must be the last field. */
 48         u8  pad[];
 49 };
 50 
 51 struct tcf_ct_flow_table {
 52         struct rhash_head node; /* In zones tables */
 53 
 54         struct rcu_work rwork;
 55         struct nf_flowtable nf_ft;
 56         refcount_t ref;
 57         struct zones_ht_key key;
 58 
 59         bool dying;
 60 };
 61 
 62 static const struct rhashtable_params zones_params = {
 63         .head_offset = offsetof(struct tcf_ct_flow_table, node),
 64         .key_offset = offsetof(struct tcf_ct_flow_table, key),
 65         .key_len = offsetof(struct zones_ht_key, pad),
 66         .automatic_shrinking = true,
 67 };
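
The zero-length pad[] marker lets .key_len cover exactly the bytes that precede it, so trailing structure padding never leaks into the hash or the lookup comparison. Below is a minimal userspace sketch of the same offsetof() idiom (illustrative only; struct key, KEY_LEN and keys_equal() are made-up stand-ins, not kernel API):

#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct key {
        void *net;              /* in the kernel: struct net * */
        unsigned short zone;
        /* pad[] must stay last: it marks the end of the hashed bytes */
        unsigned char pad[];
};

/* Hash/compare only the bytes before pad[], never trailing padding. */
static const size_t KEY_LEN = offsetof(struct key, pad);

static int keys_equal(const struct key *a, const struct key *b)
{
        return memcmp(a, b, KEY_LEN) == 0;
}

int main(void)
{
        struct key a = { .net = NULL, .zone = 1 };
        struct key b = { .net = NULL, .zone = 1 };

        printf("key len %zu, equal %d\n", KEY_LEN, keys_equal(&a, &b));
        return 0;
}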
 68 
 69 static struct flow_action_entry *
 70 tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
 71 {
 72         int i = flow_action->num_entries++;
 73 
 74         return &flow_action->entries[i];
 75 }
 76 
 77 static void tcf_ct_add_mangle_action(struct flow_action *action,
 78                                      enum flow_action_mangle_base htype,
 79                                      u32 offset,
 80                                      u32 mask,
 81                                      u32 val)
 82 {
 83         struct flow_action_entry *entry;
 84 
 85         entry = tcf_ct_flow_table_flow_action_get_next(action);
 86         entry->id = FLOW_ACTION_MANGLE;
 87         entry->mangle.htype = htype;
 88         entry->mangle.mask = ~mask;
 89         entry->mangle.offset = offset;
 90         entry->mangle.val = val;
 91 }
 92 
 93 /* The following NAT helper functions check if the inverted reverse tuple
 94  * (target) is different from the current dir tuple - meaning NAT for ports
 95  * and/or IP is needed - and add the relevant mangle actions.
 96  */
 97 static void
 98 tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
 99                                       struct nf_conntrack_tuple target,
100                                       struct flow_action *action)
101 {
102         if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
103                 tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
104                                          offsetof(struct iphdr, saddr),
105                                          0xFFFFFFFF,
106                                          be32_to_cpu(target.src.u3.ip));
107         if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
108                 tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
109                                          offsetof(struct iphdr, daddr),
110                                          0xFFFFFFFF,
111                                          be32_to_cpu(target.dst.u3.ip));
112 }
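
These helpers diff the current-direction tuple against the inverted reply tuple and emit a rewrite op only for the fields NAT actually changed. A standalone sketch of that diff-then-emit shape (illustrative userspace C; struct tuple, emit() and add_nat_mangles() are hypothetical stand-ins for the flow_action machinery):

#include <stdio.h>
#include <stdint.h>

struct tuple { uint32_t ip; uint16_t port; };

/* Hypothetical rewrite op: replace the field at 'offset' with 'val'. */
static void emit(const char *what, unsigned int offset, uint32_t val)
{
        printf("mangle %s @%u -> 0x%x\n", what, offset, val);
}

/* Emit rewrites only for the fields NAT actually changed. */
static void add_nat_mangles(const struct tuple *cur, const struct tuple *target)
{
        if (target->ip != cur->ip)
                emit("saddr", 12, target->ip);  /* offsetof(struct iphdr, saddr) */
        if (target->port != cur->port)
                emit("sport", 0, target->port); /* offsetof(struct tcphdr, source) */
}

int main(void)
{
        struct tuple cur = { .ip = 0x0a000001, .port = 1234 };
        struct tuple tgt = { .ip = 0xc0a80001, .port = 1234 };

        add_nat_mangles(&cur, &tgt);    /* only the IP rewrite is emitted */
        return 0;
}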
113 
114 static void
115 tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
116                                    union nf_inet_addr *addr,
117                                    u32 offset)
118 {
119         int i;
120 
121         for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
122                 tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
123                                          i * sizeof(u32) + offset,
124                                          0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
125 }
126 
127 static void
128 tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
129                                       struct nf_conntrack_tuple target,
130                                       struct flow_action *action)
131 {
132         if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
133                 tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
134                                                    offsetof(struct ipv6hdr,
135                                                             saddr));
136         if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
137                 tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
138                                                    offsetof(struct ipv6hdr,
139                                                             daddr));
140 }
141 
142 static void
143 tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
144                                      struct nf_conntrack_tuple target,
145                                      struct flow_action *action)
146 {
147         __be16 target_src = target.src.u.tcp.port;
148         __be16 target_dst = target.dst.u.tcp.port;
149 
150         if (target_src != tuple->src.u.tcp.port)
151                 tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
152                                          offsetof(struct tcphdr, source),
153                                          0xFFFF, be16_to_cpu(target_src));
154         if (target_dst != tuple->dst.u.tcp.port)
155                 tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
156                                          offsetof(struct tcphdr, dest),
157                                          0xFFFF, be16_to_cpu(target_dst));
158 }
159 
160 static void
161 tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
162                                      struct nf_conntrack_tuple target,
163                                      struct flow_action *action)
164 {
165         __be16 target_src = target.src.u.udp.port;
166         __be16 target_dst = target.dst.u.udp.port;
167 
168         if (target_src != tuple->src.u.udp.port)
169                 tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
170                                          offsetof(struct udphdr, source),
171                                          0xFFFF, be16_to_cpu(target_src));
172         if (target_dst != tuple->dst.u.udp.port)
173                 tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
174                                          offsetof(struct udphdr, dest),
175                                          0xFFFF, be16_to_cpu(target_dst));
176 }
177 
178 static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
179                                               enum ip_conntrack_dir dir,
180                                               enum ip_conntrack_info ctinfo,
181                                               struct flow_action *action)
182 {
183         struct nf_conn_labels *ct_labels;
184         struct flow_action_entry *entry;
185         u32 *act_ct_labels;
186 
187         entry = tcf_ct_flow_table_flow_action_get_next(action);
188         entry->id = FLOW_ACTION_CT_METADATA;
189 #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
190         entry->ct_metadata.mark = READ_ONCE(ct->mark);
191 #endif
 192         /* aligns with the CT reference stored on the skb by nf_ct_set() */
193         entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;
194         entry->ct_metadata.orig_dir = dir == IP_CT_DIR_ORIGINAL;
195 
196         act_ct_labels = entry->ct_metadata.labels;
197         ct_labels = nf_ct_labels_find(ct);
198         if (ct_labels)
199                 memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
200         else
201                 memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
202 }
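
The cookie packs the conntrack pointer and the ctinfo value into a single word, mirroring what nf_ct_set() stores in skb->_nfct; this works because nf_conn objects are aligned well enough that the pointer's low bits are free. A minimal sketch of the tag-in-low-bits idiom, assuming 8-byte object alignment (INFO_MASK mirrors the kernel's 3-bit NFCT_INFOMASK; everything else here is made up):

#include <stdio.h>

#define INFO_MASK 7UL   /* low 3 bits carry state, as with NFCT_INFOMASK */

struct conn { int id; };

static unsigned long pack(const struct conn *c, unsigned long info)
{
        /* Valid only because the object is at least 8-byte aligned. */
        return (unsigned long)c | (info & INFO_MASK);
}

static struct conn *unpack(unsigned long cookie, unsigned long *info)
{
        *info = cookie & INFO_MASK;
        return (struct conn *)(cookie & ~INFO_MASK);
}

int main(void)
{
        static _Alignas(8) struct conn c = { .id = 42 };
        unsigned long info;
        unsigned long cookie = pack(&c, 2);    /* 2: some small ctinfo value */

        printf("ct %p id %d info %lu\n", (void *)unpack(cookie, &info),
               unpack(cookie, &info)->id, info);
        return 0;
}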
203 
204 static int tcf_ct_flow_table_add_action_nat(struct net *net,
205                                             struct nf_conn *ct,
206                                             enum ip_conntrack_dir dir,
207                                             struct flow_action *action)
208 {
209         const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
210         struct nf_conntrack_tuple target;
211 
212         if (!(ct->status & IPS_NAT_MASK))
213                 return 0;
214 
215         nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);
216 
217         switch (tuple->src.l3num) {
218         case NFPROTO_IPV4:
219                 tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
220                                                       action);
221                 break;
222         case NFPROTO_IPV6:
223                 tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
224                                                       action);
225                 break;
226         default:
227                 return -EOPNOTSUPP;
228         }
229 
230         switch (nf_ct_protonum(ct)) {
231         case IPPROTO_TCP:
232                 tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
233                 break;
234         case IPPROTO_UDP:
235                 tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
236                 break;
237         default:
238                 return -EOPNOTSUPP;
239         }
240 
241         return 0;
242 }
243 
244 static int tcf_ct_flow_table_fill_actions(struct net *net,
245                                           struct flow_offload *flow,
246                                           enum flow_offload_tuple_dir tdir,
247                                           struct nf_flow_rule *flow_rule)
248 {
249         struct flow_action *action = &flow_rule->rule->action;
250         int num_entries = action->num_entries;
251         struct nf_conn *ct = flow->ct;
252         enum ip_conntrack_info ctinfo;
253         enum ip_conntrack_dir dir;
254         int i, err;
255 
256         switch (tdir) {
257         case FLOW_OFFLOAD_DIR_ORIGINAL:
258                 dir = IP_CT_DIR_ORIGINAL;
259                 ctinfo = test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ?
260                         IP_CT_ESTABLISHED : IP_CT_NEW;
261                 if (ctinfo == IP_CT_ESTABLISHED)
262                         set_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
263                 break;
264         case FLOW_OFFLOAD_DIR_REPLY:
265                 dir = IP_CT_DIR_REPLY;
266                 ctinfo = IP_CT_ESTABLISHED_REPLY;
267                 break;
268         default:
269                 return -EOPNOTSUPP;
270         }
271 
272         err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
273         if (err)
274                 goto err_nat;
275 
276         tcf_ct_flow_table_add_action_meta(ct, dir, ctinfo, action);
277         return 0;
278 
279 err_nat:
280         /* Clear filled actions */
281         for (i = num_entries; i < action->num_entries; i++)
282                 memset(&action->entries[i], 0, sizeof(action->entries[i]));
283         action->num_entries = num_entries;
284 
285         return err;
286 }
287 
288 static bool tcf_ct_flow_is_outdated(const struct flow_offload *flow)
289 {
290         return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
291                test_bit(IPS_HW_OFFLOAD_BIT, &flow->ct->status) &&
292                !test_bit(NF_FLOW_HW_PENDING, &flow->flags) &&
293                !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
294 }
295 
296 static void tcf_ct_flow_table_get_ref(struct tcf_ct_flow_table *ct_ft);
297 
298 static void tcf_ct_nf_get(struct nf_flowtable *ft)
299 {
300         struct tcf_ct_flow_table *ct_ft =
301                 container_of(ft, struct tcf_ct_flow_table, nf_ft);
302 
303         tcf_ct_flow_table_get_ref(ct_ft);
304 }
305 
306 static void tcf_ct_flow_table_put(struct tcf_ct_flow_table *ct_ft);
307 
308 static void tcf_ct_nf_put(struct nf_flowtable *ft)
309 {
310         struct tcf_ct_flow_table *ct_ft =
311                 container_of(ft, struct tcf_ct_flow_table, nf_ft);
312 
313         tcf_ct_flow_table_put(ct_ft);
314 }
315 
316 static struct nf_flowtable_type flowtable_ct = {
317         .gc             = tcf_ct_flow_is_outdated,
318         .action         = tcf_ct_flow_table_fill_actions,
319         .get            = tcf_ct_nf_get,
320         .put            = tcf_ct_nf_put,
321         .owner          = THIS_MODULE,
322 };
323 
324 static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
325 {
326         struct zones_ht_key key = { .net = net, .zone = params->zone };
327         struct tcf_ct_flow_table *ct_ft;
328         int err = -ENOMEM;
329 
330         mutex_lock(&zones_mutex);
331         ct_ft = rhashtable_lookup_fast(&zones_ht, &key, zones_params);
332         if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
333                 goto out_unlock;
334 
335         ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
336         if (!ct_ft)
337                 goto err_alloc;
338         refcount_set(&ct_ft->ref, 1);
339 
340         ct_ft->key = key;
341         err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
342         if (err)
343                 goto err_insert;
344 
345         ct_ft->nf_ft.type = &flowtable_ct;
346         ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD |
347                               NF_FLOWTABLE_COUNTER;
348         err = nf_flow_table_init(&ct_ft->nf_ft);
349         if (err)
350                 goto err_init;
351         write_pnet(&ct_ft->nf_ft.net, net);
352 
353         __module_get(THIS_MODULE);
354 out_unlock:
355         params->ct_ft = ct_ft;
356         params->nf_ft = &ct_ft->nf_ft;
357         mutex_unlock(&zones_mutex);
358 
359         return 0;
360 
361 err_init:
362         rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
363 err_insert:
364         kfree(ct_ft);
365 err_alloc:
366         mutex_unlock(&zones_mutex);
367         return err;
368 }
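
tcf_ct_flow_table_get() is the classic lookup-or-create pattern: search under the mutex, take a reference only if the entry is still live (refcount_inc_not_zero()), otherwise allocate and insert. The "not zero" guard matters because a table entry can be found while its last reference is concurrently being dropped. A compressed userspace sketch of the same shape (illustrative; a single-slot "table" and a pthread mutex stand in for the rhashtable and zones_mutex):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct ft { atomic_int ref; int zone; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct ft *table;        /* stand-in for the zones rhashtable */

/* Take a reference only if the object is still live (ref > 0). */
static int get_ref_not_zero(struct ft *f)
{
        int old = atomic_load(&f->ref);

        while (old > 0)
                if (atomic_compare_exchange_weak(&f->ref, &old, old + 1))
                        return 1;
        return 0;
}

static struct ft *ft_get(int zone)
{
        struct ft *f;

        pthread_mutex_lock(&lock);
        f = table;
        if (f && f->zone == zone && get_ref_not_zero(f))
                goto out;                       /* reuse the existing table */

        f = calloc(1, sizeof(*f));              /* else create a new one */
        if (f) {
                atomic_store(&f->ref, 1);
                f->zone = zone;
                table = f;
        }
out:
        pthread_mutex_unlock(&lock);
        return f;
}

int main(void)
{
        struct ft *a = ft_get(1), *b = ft_get(1);

        return !(a && a == b && atomic_load(&a->ref) == 2);
}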
369 
370 static void tcf_ct_flow_table_get_ref(struct tcf_ct_flow_table *ct_ft)
371 {
372         refcount_inc(&ct_ft->ref);
373 }
374 
375 static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
376 {
377         struct tcf_ct_flow_table *ct_ft;
378         struct flow_block *block;
379 
380         ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
381                              rwork);
382         nf_flow_table_free(&ct_ft->nf_ft);
383 
384         block = &ct_ft->nf_ft.flow_block;
385         down_write(&ct_ft->nf_ft.flow_block_lock);
386         WARN_ON(!list_empty(&block->cb_list));
387         up_write(&ct_ft->nf_ft.flow_block_lock);
388         kfree(ct_ft);
389 
390         module_put(THIS_MODULE);
391 }
392 
393 static void tcf_ct_flow_table_put(struct tcf_ct_flow_table *ct_ft)
394 {
395         if (refcount_dec_and_test(&ct_ft->ref)) {
396                 rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
397                 INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
398                 queue_rcu_work(act_ct_wq, &ct_ft->rwork);
399         }
400 }
401 
402 static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry,
403                                  struct nf_conn_act_ct_ext *act_ct_ext, u8 dir)
404 {
405         entry->tuplehash[dir].tuple.xmit_type = FLOW_OFFLOAD_XMIT_TC;
406         entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir];
407 }
408 
409 static void tcf_ct_flow_ct_ext_ifidx_update(struct flow_offload *entry)
410 {
411         struct nf_conn_act_ct_ext *act_ct_ext;
412 
413         act_ct_ext = nf_conn_act_ct_ext_find(entry->ct);
414         if (act_ct_ext) {
415                 tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
416                 tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
417         }
418 }
419 
420 static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
421                                   struct nf_conn *ct,
422                                   bool tcp, bool bidirectional)
423 {
424         struct nf_conn_act_ct_ext *act_ct_ext;
425         struct flow_offload *entry;
426         int err;
427 
428         if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
429                 return;
430 
431         entry = flow_offload_alloc(ct);
432         if (!entry) {
433                 WARN_ON_ONCE(1);
434                 goto err_alloc;
435         }
436 
437         if (tcp) {
438                 ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
439                 ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
440         }
441         if (bidirectional)
442                 __set_bit(NF_FLOW_HW_BIDIRECTIONAL, &entry->flags);
443 
444         act_ct_ext = nf_conn_act_ct_ext_find(ct);
445         if (act_ct_ext) {
446                 tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
447                 tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
448         }
449 
450         err = flow_offload_add(&ct_ft->nf_ft, entry);
451         if (err)
452                 goto err_add;
453 
454         return;
455 
456 err_add:
457         flow_offload_free(entry);
458 err_alloc:
459         clear_bit(IPS_OFFLOAD_BIT, &ct->status);
460 }
461 
462 static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
463                                            struct nf_conn *ct,
464                                            enum ip_conntrack_info ctinfo)
465 {
466         bool tcp = false, bidirectional = true;
467 
468         switch (nf_ct_protonum(ct)) {
469         case IPPROTO_TCP:
470                 if ((ctinfo != IP_CT_ESTABLISHED &&
471                      ctinfo != IP_CT_ESTABLISHED_REPLY) ||
472                     !test_bit(IPS_ASSURED_BIT, &ct->status) ||
473                     ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
474                         return;
475 
476                 tcp = true;
477                 break;
478         case IPPROTO_UDP:
479                 if (!nf_ct_is_confirmed(ct))
480                         return;
481                 if (!test_bit(IPS_ASSURED_BIT, &ct->status))
482                         bidirectional = false;
483                 break;
484 #ifdef CONFIG_NF_CT_PROTO_GRE
485         case IPPROTO_GRE: {
486                 struct nf_conntrack_tuple *tuple;
487 
488                 if ((ctinfo != IP_CT_ESTABLISHED &&
489                      ctinfo != IP_CT_ESTABLISHED_REPLY) ||
490                     !test_bit(IPS_ASSURED_BIT, &ct->status) ||
491                     ct->status & IPS_NAT_MASK)
492                         return;
493 
494                 tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
495                 /* No support for GRE v1 */
496                 if (tuple->src.u.gre.key || tuple->dst.u.gre.key)
497                         return;
498                 break;
499         }
500 #endif
501         default:
502                 return;
503         }
504 
505         if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
506             ct->status & IPS_SEQ_ADJUST)
507                 return;
508 
509         tcf_ct_flow_table_add(ct_ft, ct, tcp, bidirectional);
510 }
511 
512 static bool
513 tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
514                                   struct flow_offload_tuple *tuple,
515                                   struct tcphdr **tcph)
516 {
517         struct flow_ports *ports;
518         unsigned int thoff;
519         struct iphdr *iph;
520         size_t hdrsize;
521         u8 ipproto;
522 
523         if (!pskb_network_may_pull(skb, sizeof(*iph)))
524                 return false;
525 
526         iph = ip_hdr(skb);
527         thoff = iph->ihl * 4;
528 
529         if (ip_is_fragment(iph) ||
530             unlikely(thoff != sizeof(struct iphdr)))
531                 return false;
532 
533         ipproto = iph->protocol;
534         switch (ipproto) {
535         case IPPROTO_TCP:
536                 hdrsize = sizeof(struct tcphdr);
537                 break;
538         case IPPROTO_UDP:
539                 hdrsize = sizeof(*ports);
540                 break;
541 #ifdef CONFIG_NF_CT_PROTO_GRE
542         case IPPROTO_GRE:
543                 hdrsize = sizeof(struct gre_base_hdr);
544                 break;
545 #endif
546         default:
547                 return false;
548         }
549 
550         if (iph->ttl <= 1)
551                 return false;
552 
553         if (!pskb_network_may_pull(skb, thoff + hdrsize))
554                 return false;
555 
556         switch (ipproto) {
557         case IPPROTO_TCP:
558                 *tcph = (void *)(skb_network_header(skb) + thoff);
559                 fallthrough;
560         case IPPROTO_UDP:
561                 ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
562                 tuple->src_port = ports->source;
563                 tuple->dst_port = ports->dest;
564                 break;
565         case IPPROTO_GRE: {
566                 struct gre_base_hdr *greh;
567 
568                 greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
569                 if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
570                         return false;
571                 break;
572         }
573         }
574 
575         iph = ip_hdr(skb);
576 
577         tuple->src_v4.s_addr = iph->saddr;
578         tuple->dst_v4.s_addr = iph->daddr;
579         tuple->l3proto = AF_INET;
580         tuple->l4proto = ipproto;
581 
582         return true;
583 }
584 
585 static bool
586 tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
587                                   struct flow_offload_tuple *tuple,
588                                   struct tcphdr **tcph)
589 {
590         struct flow_ports *ports;
591         struct ipv6hdr *ip6h;
592         unsigned int thoff;
593         size_t hdrsize;
594         u8 nexthdr;
595 
596         if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
597                 return false;
598 
599         ip6h = ipv6_hdr(skb);
600         thoff = sizeof(*ip6h);
601 
602         nexthdr = ip6h->nexthdr;
603         switch (nexthdr) {
604         case IPPROTO_TCP:
605                 hdrsize = sizeof(struct tcphdr);
606                 break;
607         case IPPROTO_UDP:
608                 hdrsize = sizeof(*ports);
609                 break;
610 #ifdef CONFIG_NF_CT_PROTO_GRE
611         case IPPROTO_GRE:
612                 hdrsize = sizeof(struct gre_base_hdr);
613                 break;
614 #endif
615         default:
616                 return false;
617         }
618 
619         if (ip6h->hop_limit <= 1)
620                 return false;
621 
622         if (!pskb_network_may_pull(skb, thoff + hdrsize))
623                 return false;
624 
625         switch (nexthdr) {
626         case IPPROTO_TCP:
627                 *tcph = (void *)(skb_network_header(skb) + thoff);
628                 fallthrough;
629         case IPPROTO_UDP:
630                 ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
631                 tuple->src_port = ports->source;
632                 tuple->dst_port = ports->dest;
633                 break;
634         case IPPROTO_GRE: {
635                 struct gre_base_hdr *greh;
636 
637                 greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
638                 if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
639                         return false;
640                 break;
641         }
642         }
643 
644         ip6h = ipv6_hdr(skb);
645 
646         tuple->src_v6 = ip6h->saddr;
647         tuple->dst_v6 = ip6h->daddr;
648         tuple->l3proto = AF_INET6;
649         tuple->l4proto = nexthdr;
650 
651         return true;
652 }
653 
654 static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
655                                      struct sk_buff *skb,
656                                      u8 family)
657 {
658         struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
659         struct flow_offload_tuple_rhash *tuplehash;
660         struct flow_offload_tuple tuple = {};
661         enum ip_conntrack_info ctinfo;
662         struct tcphdr *tcph = NULL;
663         bool force_refresh = false;
664         struct flow_offload *flow;
665         struct nf_conn *ct;
666         u8 dir;
667 
668         switch (family) {
669         case NFPROTO_IPV4:
670                 if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
671                         return false;
672                 break;
673         case NFPROTO_IPV6:
674                 if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
675                         return false;
676                 break;
677         default:
678                 return false;
679         }
680 
681         tuplehash = flow_offload_lookup(nf_ft, &tuple);
682         if (!tuplehash)
683                 return false;
684 
685         dir = tuplehash->tuple.dir;
686         flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
687         ct = flow->ct;
688 
689         if (dir == FLOW_OFFLOAD_DIR_REPLY &&
690             !test_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags)) {
 691                 /* Only offload the reply direction after the connection
 692                  * has become assured.
 693                  */
694                 if (test_bit(IPS_ASSURED_BIT, &ct->status))
695                         set_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags);
696                 else if (test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags))
 697                         /* If the flow_table flow has already been updated to
 698                          * the established state, then don't refresh.
 699                          */
700                         return false;
701                 force_refresh = true;
702         }
703 
704         if (tcph && (unlikely(tcph->fin || tcph->rst))) {
705                 flow_offload_teardown(flow);
706                 return false;
707         }
708 
709         if (dir == FLOW_OFFLOAD_DIR_ORIGINAL)
710                 ctinfo = test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ?
711                         IP_CT_ESTABLISHED : IP_CT_NEW;
712         else
713                 ctinfo = IP_CT_ESTABLISHED_REPLY;
714 
715         nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
716         tcf_ct_flow_ct_ext_ifidx_update(flow);
717         flow_offload_refresh(nf_ft, flow, force_refresh);
718         if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
719                 /* Process this flow in SW to allow promoting to ASSURED */
720                 return false;
721         }
722 
723         nf_conntrack_get(&ct->ct_general);
724         nf_ct_set(skb, ct, ctinfo);
725         if (nf_ft->flags & NF_FLOWTABLE_COUNTER)
726                 nf_ct_acct_update(ct, dir, skb->len);
727 
728         return true;
729 }
730 
731 static int tcf_ct_flow_tables_init(void)
732 {
733         return rhashtable_init(&zones_ht, &zones_params);
734 }
735 
736 static void tcf_ct_flow_tables_uninit(void)
737 {
738         rhashtable_destroy(&zones_ht);
739 }
740 
741 static struct tc_action_ops act_ct_ops;
742 
743 struct tc_ct_action_net {
744         struct tc_action_net tn; /* Must be first */
745 };
746 
747 /* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
748 static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
749                                    struct tcf_ct_params *p)
750 {
751         enum ip_conntrack_info ctinfo;
752         struct nf_conn *ct;
753 
754         ct = nf_ct_get(skb, &ctinfo);
755         if (!ct)
756                 return false;
757         if (!net_eq(net, read_pnet(&ct->ct_net)))
758                 goto drop_ct;
759         if (nf_ct_zone(ct)->id != p->zone)
760                 goto drop_ct;
761         if (p->helper) {
762                 struct nf_conn_help *help;
763 
764                 help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
765                 if (help && rcu_access_pointer(help->helper) != p->helper)
766                         goto drop_ct;
767         }
768 
769         /* Force conntrack entry direction. */
770         if ((p->ct_action & TCA_CT_ACT_FORCE) &&
771             CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
772                 if (nf_ct_is_confirmed(ct))
773                         nf_ct_kill(ct);
774 
775                 goto drop_ct;
776         }
777 
778         return true;
779 
780 drop_ct:
781         nf_ct_put(ct);
782         nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
783 
784         return false;
785 }
786 
787 static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
788 {
789         u8 family = NFPROTO_UNSPEC;
790 
791         switch (skb_protocol(skb, true)) {
792         case htons(ETH_P_IP):
793                 family = NFPROTO_IPV4;
794                 break;
795         case htons(ETH_P_IPV6):
796                 family = NFPROTO_IPV6;
797                 break;
798         default:
799                 break;
800         }
801 
802         return family;
803 }
804 
805 static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
806 {
807         unsigned int len;
808 
 809         len = skb_network_offset(skb) + sizeof(struct iphdr);
810         if (unlikely(skb->len < len))
811                 return -EINVAL;
812         if (unlikely(!pskb_may_pull(skb, len)))
813                 return -ENOMEM;
814 
815         *frag = ip_is_fragment(ip_hdr(skb));
816         return 0;
817 }
818 
819 static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
820 {
821         unsigned int flags = 0, len, payload_ofs = 0;
822         unsigned short frag_off;
823         int nexthdr;
824 
 825         len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
826         if (unlikely(skb->len < len))
827                 return -EINVAL;
828         if (unlikely(!pskb_may_pull(skb, len)))
829                 return -ENOMEM;
830 
831         nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
832         if (unlikely(nexthdr < 0))
833                 return -EPROTO;
834 
835         *frag = flags & IP6_FH_F_FRAG;
836         return 0;
837 }
838 
839 static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
840                                    u8 family, u16 zone, bool *defrag)
841 {
842         enum ip_conntrack_info ctinfo;
843         struct nf_conn *ct;
844         int err = 0;
845         bool frag;
846         u8 proto;
847         u16 mru;
848 
849         /* Previously seen (loopback)? Ignore. */
850         ct = nf_ct_get(skb, &ctinfo);
851         if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
852                 return 0;
853 
854         if (family == NFPROTO_IPV4)
855                 err = tcf_ct_ipv4_is_fragment(skb, &frag);
856         else
857                 err = tcf_ct_ipv6_is_fragment(skb, &frag);
858         if (err || !frag)
859                 return err;
860 
861         err = nf_ct_handle_fragments(net, skb, zone, family, &proto, &mru);
862         if (err)
863                 return err;
864 
865         *defrag = true;
866         tc_skb_cb(skb)->mru = mru;
867 
868         return 0;
869 }
870 
871 static void tcf_ct_params_free(struct tcf_ct_params *params)
872 {
873         if (params->helper) {
874 #if IS_ENABLED(CONFIG_NF_NAT)
875                 if (params->ct_action & TCA_CT_ACT_NAT)
876                         nf_nat_helper_put(params->helper);
877 #endif
878                 nf_conntrack_helper_put(params->helper);
879         }
880         if (params->ct_ft)
881                 tcf_ct_flow_table_put(params->ct_ft);
882         if (params->tmpl) {
883                 if (params->put_labels)
884                         nf_connlabels_put(nf_ct_net(params->tmpl));
885 
886                 nf_ct_put(params->tmpl);
887         }
888 
889         kfree(params);
890 }
891 
892 static void tcf_ct_params_free_rcu(struct rcu_head *head)
893 {
894         struct tcf_ct_params *params;
895 
896         params = container_of(head, struct tcf_ct_params, rcu);
897         tcf_ct_params_free(params);
898 }
899 
900 static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
901 {
902 #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
903         u32 new_mark;
904 
905         if (!mask)
906                 return;
907 
908         new_mark = mark | (READ_ONCE(ct->mark) & ~(mask));
909         if (READ_ONCE(ct->mark) != new_mark) {
910                 WRITE_ONCE(ct->mark, new_mark);
911                 if (nf_ct_is_confirmed(ct))
912                         nf_conntrack_event_cache(IPCT_MARK, ct);
913         }
914 #endif
915 }
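
The mark update keeps every bit outside the mask, new = mark | (old & ~mask), and only dirties the entry (generating a conntrack event) when the value really changes. A worked example of the merge (plain C, values chosen for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t old  = 0xAABBCCDD;
        uint32_t mark = 0x00001100;
        uint32_t mask = 0x0000FF00;     /* only byte 1 is owned by the action */
        uint32_t new  = mark | (old & ~mask);

        /* Prints 0xAABB11DD: byte 1 replaced, everything else preserved. */
        printf("0x%08X\n", new);
        return 0;
}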
916 
917 static void tcf_ct_act_set_labels(struct nf_conn *ct,
918                                   u32 *labels,
919                                   u32 *labels_m)
920 {
921 #if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
922         size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);
923 
924         if (!memchr_inv(labels_m, 0, labels_sz))
925                 return;
926 
927         nf_connlabels_replace(ct, labels, labels_m, 4);
928 #endif
929 }
930 
931 static int tcf_ct_act_nat(struct sk_buff *skb,
932                           struct nf_conn *ct,
933                           enum ip_conntrack_info ctinfo,
934                           int ct_action,
935                           struct nf_nat_range2 *range,
936                           bool commit)
937 {
938 #if IS_ENABLED(CONFIG_NF_NAT)
939         int err, action = 0;
940 
941         if (!(ct_action & TCA_CT_ACT_NAT))
942                 return NF_ACCEPT;
943         if (ct_action & TCA_CT_ACT_NAT_SRC)
944                 action |= BIT(NF_NAT_MANIP_SRC);
945         if (ct_action & TCA_CT_ACT_NAT_DST)
946                 action |= BIT(NF_NAT_MANIP_DST);
947 
948         err = nf_ct_nat(skb, ct, ctinfo, &action, range, commit);
949         if (err != NF_ACCEPT)
950                 return err & NF_VERDICT_MASK;
951 
952         if (action & BIT(NF_NAT_MANIP_SRC))
953                 tc_skb_cb(skb)->post_ct_snat = 1;
954         if (action & BIT(NF_NAT_MANIP_DST))
955                 tc_skb_cb(skb)->post_ct_dnat = 1;
956 
957         return err;
958 #else
959         return NF_ACCEPT;
960 #endif
961 }
962 
963 TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
964                                  struct tcf_result *res)
965 {
966         struct net *net = dev_net(skb->dev);
967         enum ip_conntrack_info ctinfo;
968         struct tcf_ct *c = to_ct(a);
969         struct nf_conn *tmpl = NULL;
970         struct nf_hook_state state;
971         bool cached, commit, clear;
972         int nh_ofs, err, retval;
973         struct tcf_ct_params *p;
974         bool add_helper = false;
975         bool skip_add = false;
976         bool defrag = false;
977         struct nf_conn *ct;
978         u8 family;
979 
980         p = rcu_dereference_bh(c->params);
981 
982         retval = READ_ONCE(c->tcf_action);
983         commit = p->ct_action & TCA_CT_ACT_COMMIT;
984         clear = p->ct_action & TCA_CT_ACT_CLEAR;
985         tmpl = p->tmpl;
986 
987         tcf_lastuse_update(&c->tcf_tm);
988         tcf_action_update_bstats(&c->common, skb);
989 
990         if (clear) {
991                 tc_skb_cb(skb)->post_ct = false;
992                 ct = nf_ct_get(skb, &ctinfo);
993                 if (ct) {
994                         nf_ct_put(ct);
995                         nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
996                 }
997 
998                 goto out_clear;
999         }
1000 
1001         family = tcf_ct_skb_nf_family(skb);
1002         if (family == NFPROTO_UNSPEC)
1003                 goto drop;
1004 
1005         /* The conntrack module expects to be working at L3.
1006          * We also try to pull the IPv4/6 header into the linear area.
1007          */
1008         nh_ofs = skb_network_offset(skb);
1009         skb_pull_rcsum(skb, nh_ofs);
1010         err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
1011         if (err)
1012                 goto out_frag;
1013 
1014         err = nf_ct_skb_network_trim(skb, family);
1015         if (err)
1016                 goto drop;
1017 
1018         /* If we are recirculating packets to match on ct fields and
1019          * committing with a separate ct action, then we don't need to
1020          * actually run the packet through conntrack twice unless it's for a
1021          * different zone.
1022          */
1023         cached = tcf_ct_skb_nfct_cached(net, skb, p);
1024         if (!cached) {
1025                 if (tcf_ct_flow_table_lookup(p, skb, family)) {
1026                         skip_add = true;
1027                         goto do_nat;
1028                 }
1029 
1030                 /* Associate skb with specified zone. */
1031                 if (tmpl) {
1032                         nf_conntrack_put(skb_nfct(skb));
1033                         nf_conntrack_get(&tmpl->ct_general);
1034                         nf_ct_set(skb, tmpl, IP_CT_NEW);
1035                 }
1036 
1037                 state.hook = NF_INET_PRE_ROUTING;
1038                 state.net = net;
1039                 state.pf = family;
1040                 err = nf_conntrack_in(skb, &state);
1041                 if (err != NF_ACCEPT)
1042                         goto nf_error;
1043         }
1044 
1045 do_nat:
1046         ct = nf_ct_get(skb, &ctinfo);
1047         if (!ct)
1048                 goto out_push;
1049         nf_ct_deliver_cached_events(ct);
1050         nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
1051 
1052         err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
1053         if (err != NF_ACCEPT)
1054                 goto nf_error;
1055 
1056         if (!nf_ct_is_confirmed(ct) && commit && p->helper && !nfct_help(ct)) {
1057                 err = __nf_ct_try_assign_helper(ct, p->tmpl, GFP_ATOMIC);
1058                 if (err)
1059                         goto drop;
1060                 add_helper = true;
1061                 if (p->ct_action & TCA_CT_ACT_NAT && !nfct_seqadj(ct)) {
1062                         if (!nfct_seqadj_ext_add(ct))
1063                                 goto drop;
1064                 }
1065         }
1066 
1067         if (nf_ct_is_confirmed(ct) ? ((!cached && !skip_add) || add_helper) : commit) {
1068                 err = nf_ct_helper(skb, ct, ctinfo, family);
1069                 if (err != NF_ACCEPT)
1070                         goto nf_error;
1071         }
1072 
1073         if (commit) {
1074                 tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
1075                 tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);
1076 
1077                 if (!nf_ct_is_confirmed(ct))
1078                         nf_conn_act_ct_ext_add(skb, ct, ctinfo);
1079 
1080                 /* This will take care of sending queued events
1081                  * even if the connection is already confirmed.
1082                  */
1083                 err = nf_conntrack_confirm(skb);
1084                 if (err != NF_ACCEPT)
1085                         goto nf_error;
1086 
1087                 /* The ct may be dropped if a clash has been resolved,
1088                  * so it's necessary to retrieve it from skb again to
1089                  * prevent UAF.
1090                  */
1091                 ct = nf_ct_get(skb, &ctinfo);
1092                 if (!ct)
1093                         skip_add = true;
1094         }
1095 
1096         if (!skip_add)
1097                 tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
1098 
1099 out_push:
1100         skb_push_rcsum(skb, nh_ofs);
1101 
1102         tc_skb_cb(skb)->post_ct = true;
1103         tc_skb_cb(skb)->zone = p->zone;
1104 out_clear:
1105         if (defrag)
1106                 qdisc_skb_cb(skb)->pkt_len = skb->len;
1107         return retval;
1108 
1109 out_frag:
1110         if (err != -EINPROGRESS)
1111                 tcf_action_inc_drop_qstats(&c->common);
1112         return TC_ACT_CONSUMED;
1113 
1114 drop:
1115         tcf_action_inc_drop_qstats(&c->common);
1116         return TC_ACT_SHOT;
1117 
1118 nf_error:
1119         /* Some verdicts store extra data in the upper bits, such
1120          * as an errno or a queue number.
1121          */
1122         switch (err & NF_VERDICT_MASK) {
1123         case NF_DROP:
1124                 goto drop;
1125         case NF_STOLEN:
1126                 tcf_action_inc_drop_qstats(&c->common);
1127                 return TC_ACT_CONSUMED;
1128         default:
1129                 DEBUG_NET_WARN_ON_ONCE(1);
1130                 goto drop;
1131         }
1132 }
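
A netfilter verdict carries the actual verdict in its low byte, with optional payload (an errno or a queue number) packed above it, which is why nf_error masks with NF_VERDICT_MASK instead of comparing err directly. A small demonstration (the #define values mirror uapi/linux/netfilter.h; the queued-packet scenario is invented for illustration):

#include <stdio.h>

#define NF_DROP          0
#define NF_ACCEPT        1
#define NF_STOLEN        2
#define NF_QUEUE         3
#define NF_VERDICT_MASK  0x000000ff
#define NF_VERDICT_QBITS 16

int main(void)
{
        /* A verdict with a queue number packed into the upper bits. */
        unsigned int err = (7u << NF_VERDICT_QBITS) | NF_QUEUE;

        switch (err & NF_VERDICT_MASK) {
        case NF_QUEUE:
                printf("queue %u\n", err >> NF_VERDICT_QBITS);
                break;
        case NF_DROP:
        case NF_STOLEN:
        default:
                printf("other\n");
        }
        return 0;
}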
1133 
1134 static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
1135         [TCA_CT_ACTION] = { .type = NLA_U16 },
1136         [TCA_CT_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_ct)),
1137         [TCA_CT_ZONE] = { .type = NLA_U16 },
1138         [TCA_CT_MARK] = { .type = NLA_U32 },
1139         [TCA_CT_MARK_MASK] = { .type = NLA_U32 },
1140         [TCA_CT_LABELS] = { .type = NLA_BINARY,
1141                             .len = 128 / BITS_PER_BYTE },
1142         [TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
1143                                  .len = 128 / BITS_PER_BYTE },
1144         [TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
1145         [TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
1146         [TCA_CT_NAT_IPV6_MIN] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
1147         [TCA_CT_NAT_IPV6_MAX] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
1148         [TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
1149         [TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
1150         [TCA_CT_HELPER_NAME] = { .type = NLA_STRING, .len = NF_CT_HELPER_NAME_LEN },
1151         [TCA_CT_HELPER_FAMILY] = { .type = NLA_U8 },
1152         [TCA_CT_HELPER_PROTO] = { .type = NLA_U8 },
1153 };
1154 
1155 static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
1156                                   struct tc_ct *parm,
1157                                   struct nlattr **tb,
1158                                   struct netlink_ext_ack *extack)
1159 {
1160         struct nf_nat_range2 *range;
1161 
1162         if (!(p->ct_action & TCA_CT_ACT_NAT))
1163                 return 0;
1164 
1165         if (!IS_ENABLED(CONFIG_NF_NAT)) {
1166                 NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
1167                 return -EOPNOTSUPP;
1168         }
1169 
1170         if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
1171                 return 0;
1172 
1173         if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
1174             (p->ct_action & TCA_CT_ACT_NAT_DST)) {
1175                 NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
1176                 return -EOPNOTSUPP;
1177         }
1178 
1179         range = &p->range;
1180         if (tb[TCA_CT_NAT_IPV4_MIN]) {
1181                 struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];
1182 
1183                 p->ipv4_range = true;
1184                 range->flags |= NF_NAT_RANGE_MAP_IPS;
1185                 range->min_addr.ip =
1186                         nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);
1187 
1188                 range->max_addr.ip = max_attr ?
1189                                      nla_get_in_addr(max_attr) :
1190                                      range->min_addr.ip;
1191         } else if (tb[TCA_CT_NAT_IPV6_MIN]) {
1192                 struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];
1193 
1194                 p->ipv4_range = false;
1195                 range->flags |= NF_NAT_RANGE_MAP_IPS;
1196                 range->min_addr.in6 =
1197                         nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);
1198 
1199                 range->max_addr.in6 = max_attr ?
1200                                       nla_get_in6_addr(max_attr) :
1201                                       range->min_addr.in6;
1202         }
1203 
1204         if (tb[TCA_CT_NAT_PORT_MIN]) {
1205                 range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
1206                 range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);
1207 
1208                 range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
1209                                        nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
1210                                        range->min_proto.all;
1211         }
1212 
1213         return 0;
1214 }
1215 
1216 static void tcf_ct_set_key_val(struct nlattr **tb,
1217                                void *val, int val_type,
1218                                void *mask, int mask_type,
1219                                int len)
1220 {
1221         if (!tb[val_type])
1222                 return;
1223         nla_memcpy(val, tb[val_type], len);
1224 
1225         if (!mask)
1226                 return;
1227 
1228         if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
1229                 memset(mask, 0xff, len);
1230         else
1231                 nla_memcpy(mask, tb[mask_type], len);
1232 }
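
tcf_ct_set_key_val() follows the usual netlink val/mask convention: a value attribute with no accompanying mask means all bits are significant, hence the memset to 0xff. A tiny sketch of that default (illustrative; this set_key_val() is a stand-in operating on raw buffers, not the nla_* API):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Netlink-style val/mask fetch: an absent mask means "all bits matter". */
static void set_key_val(const void *val_attr, const void *mask_attr,
                        uint32_t *val, uint32_t *mask)
{
        memcpy(val, val_attr, sizeof(*val));
        if (mask_attr)
                memcpy(mask, mask_attr, sizeof(*mask));
        else
                memset(mask, 0xff, sizeof(*mask));
}

int main(void)
{
        uint32_t v, m, in = 0x1234;

        set_key_val(&in, NULL, &v, &m);
        printf("val 0x%x mask 0x%x\n", v, m);   /* mask defaults to 0xffffffff */
        return 0;
}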
1233 
1234 static int tcf_ct_fill_params(struct net *net,
1235                               struct tcf_ct_params *p,
1236                               struct tc_ct *parm,
1237                               struct nlattr **tb,
1238                               struct netlink_ext_ack *extack)
1239 {
1240         struct nf_conntrack_zone zone;
1241         int err, family, proto, len;
1242         bool put_labels = false;
1243         struct nf_conn *tmpl;
1244         char *name;
1245 
1246         p->zone = NF_CT_DEFAULT_ZONE_ID;
1247 
1248         tcf_ct_set_key_val(tb,
1249                            &p->ct_action, TCA_CT_ACTION,
1250                            NULL, TCA_CT_UNSPEC,
1251                            sizeof(p->ct_action));
1252 
1253         if (p->ct_action & TCA_CT_ACT_CLEAR)
1254                 return 0;
1255 
1256         err = tcf_ct_fill_params_nat(p, parm, tb, extack);
1257         if (err)
1258                 return err;
1259 
1260         if (tb[TCA_CT_MARK]) {
1261                 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1262                         NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
1263                         return -EOPNOTSUPP;
1264                 }
1265                 tcf_ct_set_key_val(tb,
1266                                    &p->mark, TCA_CT_MARK,
1267                                    &p->mark_mask, TCA_CT_MARK_MASK,
1268                                    sizeof(p->mark));
1269         }
1270 
1271         if (tb[TCA_CT_LABELS]) {
1272                 unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
1273 
1274                 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1275                         NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
1276                         return -EOPNOTSUPP;
1277                 }
1278 
1279                 if (nf_connlabels_get(net, n_bits - 1)) {
1280                         NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
1281                         return -EOPNOTSUPP;
1282                 } else {
1283                         put_labels = true;
1284                 }
1285 
1286                 tcf_ct_set_key_val(tb,
1287                                    p->labels, TCA_CT_LABELS,
1288                                    p->labels_mask, TCA_CT_LABELS_MASK,
1289                                    sizeof(p->labels));
1290         }
1291 
1292         if (tb[TCA_CT_ZONE]) {
1293                 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1294                         NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled.");
1295                         return -EOPNOTSUPP;
1296                 }
1297 
1298                 tcf_ct_set_key_val(tb,
1299                                    &p->zone, TCA_CT_ZONE,
1300                                    NULL, TCA_CT_UNSPEC,
1301                                    sizeof(p->zone));
1302         }
1303 
1304         nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
1305         tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
1306         if (!tmpl) {
1307                 NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
1308                 return -ENOMEM;
1309         }
1310         p->tmpl = tmpl;
1311         if (tb[TCA_CT_HELPER_NAME]) {
1312                 name = nla_data(tb[TCA_CT_HELPER_NAME]);
1313                 len = nla_len(tb[TCA_CT_HELPER_NAME]);
1314                 if (len > 16 || name[len - 1] != '\0') {
1315                         NL_SET_ERR_MSG_MOD(extack, "Failed to parse helper name.");
1316                         err = -EINVAL;
1317                         goto err;
1318                 }
1319                 family = tb[TCA_CT_HELPER_FAMILY] ? nla_get_u8(tb[TCA_CT_HELPER_FAMILY]) : AF_INET;
1320                 proto = tb[TCA_CT_HELPER_PROTO] ? nla_get_u8(tb[TCA_CT_HELPER_PROTO]) : IPPROTO_TCP;
1321                 err = nf_ct_add_helper(tmpl, name, family, proto,
1322                                        p->ct_action & TCA_CT_ACT_NAT, &p->helper);
1323                 if (err) {
1324                         NL_SET_ERR_MSG_MOD(extack, "Failed to add helper");
1325                         goto err;
1326                 }
1327         }
1328 
1329         p->put_labels = put_labels;
1330 
1331         if (p->ct_action & TCA_CT_ACT_COMMIT)
1332                 __set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
1333         return 0;
1334 err:
1335         if (put_labels)
1336                 nf_connlabels_put(net);
1337 
1338         nf_ct_put(p->tmpl);
1339         p->tmpl = NULL;
1340         return err;
1341 }
1342 
1343 static int tcf_ct_init(struct net *net, struct nlattr *nla,
1344                        struct nlattr *est, struct tc_action **a,
1345                        struct tcf_proto *tp, u32 flags,
1346                        struct netlink_ext_ack *extack)
1347 {
1348         struct tc_action_net *tn = net_generic(net, act_ct_ops.net_id);
1349         bool bind = flags & TCA_ACT_FLAGS_BIND;
1350         struct tcf_ct_params *params = NULL;
1351         struct nlattr *tb[TCA_CT_MAX + 1];
1352         struct tcf_chain *goto_ch = NULL;
1353         struct tc_ct *parm;
1354         struct tcf_ct *c;
1355         int err, res = 0;
1356         u32 index;
1357 
1358         if (!nla) {
1359                 NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
1360                 return -EINVAL;
1361         }
1362 
1363         err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
1364         if (err < 0)
1365                 return err;
1366 
1367         if (!tb[TCA_CT_PARMS]) {
1368                 NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
1369                 return -EINVAL;
1370         }
1371         parm = nla_data(tb[TCA_CT_PARMS]);
1372         index = parm->index;
1373         err = tcf_idr_check_alloc(tn, &index, a, bind);
1374         if (err < 0)
1375                 return err;
1376 
1377         if (!err) {
1378                 err = tcf_idr_create_from_flags(tn, index, est, a,
1379                                                 &act_ct_ops, bind, flags);
1380                 if (err) {
1381                         tcf_idr_cleanup(tn, index);
1382                         return err;
1383                 }
1384                 res = ACT_P_CREATED;
1385         } else {
1386                 if (bind)
1387                         return ACT_P_BOUND;
1388 
1389                 if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
1390                         tcf_idr_release(*a, bind);
1391                         return -EEXIST;
1392                 }
1393         }
1394         err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
1395         if (err < 0)
1396                 goto cleanup;
1397 
1398         c = to_ct(*a);
1399 
1400         params = kzalloc(sizeof(*params), GFP_KERNEL);
1401         if (unlikely(!params)) {
1402                 err = -ENOMEM;
1403                 goto cleanup;
1404         }
1405 
1406         err = tcf_ct_fill_params(net, params, parm, tb, extack);
1407         if (err)
1408                 goto cleanup;
1409 
1410         err = tcf_ct_flow_table_get(net, params);
1411         if (err)
1412                 goto cleanup;
1413 
1414         spin_lock_bh(&c->tcf_lock);
1415         goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
1416         params = rcu_replace_pointer(c->params, params,
1417                                      lockdep_is_held(&c->tcf_lock));
1418         spin_unlock_bh(&c->tcf_lock);
1419 
1420         if (goto_ch)
1421                 tcf_chain_put_by_act(goto_ch);
1422         if (params)
1423                 call_rcu(&params->rcu, tcf_ct_params_free_rcu);
1424 
1425         return res;
1426 
1427 cleanup:
1428         if (goto_ch)
1429                 tcf_chain_put_by_act(goto_ch);
1430         if (params)
1431                 tcf_ct_params_free(params);
1432         tcf_idr_release(*a, bind);
1433         return err;
1434 }
1435 
1436 static void tcf_ct_cleanup(struct tc_action *a)
1437 {
1438         struct tcf_ct_params *params;
1439         struct tcf_ct *c = to_ct(a);
1440 
1441         params = rcu_dereference_protected(c->params, 1);
1442         if (params)
1443                 call_rcu(&params->rcu, tcf_ct_params_free_rcu);
1444 }
1445 
1446 static int tcf_ct_dump_key_val(struct sk_buff *skb,
1447                                void *val, int val_type,
1448                                void *mask, int mask_type,
1449                                int len)
1450 {
1451         int err;
1452 
1453         if (mask && !memchr_inv(mask, 0, len))
1454                 return 0;
1455 
1456         err = nla_put(skb, val_type, len, val);
1457         if (err)
1458                 return err;
1459 
1460         if (mask_type != TCA_CT_UNSPEC) {
1461                 err = nla_put(skb, mask_type, len, mask);
1462                 if (err)
1463                         return err;
1464         }
1465 
1466         return 0;
1467 }
1468 
1469 static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
1470 {
1471         struct nf_nat_range2 *range = &p->range;
1472 
1473         if (!(p->ct_action & TCA_CT_ACT_NAT))
1474                 return 0;
1475 
1476         if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
1477                 return 0;
1478 
1479         if (range->flags & NF_NAT_RANGE_MAP_IPS) {
1480                 if (p->ipv4_range) {
1481                         if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
1482                                             range->min_addr.ip))
1483                                 return -1;
1484                         if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
1485                                             range->max_addr.ip))
1486                                 return -1;
1487                 } else {
1488                         if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
1489                                              &range->min_addr.in6))
1490                                 return -1;
1491                         if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
1492                                              &range->max_addr.in6))
1493                                 return -1;
1494                 }
1495         }
1496 
1497         if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
1498                 if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
1499                                  range->min_proto.all))
1500                         return -1;
1501                 if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
1502                                  range->max_proto.all))
1503                         return -1;
1504         }
1505 
1506         return 0;
1507 }
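/*
 * Editorial annotation: NAT attributes are dumped only when both
 * TCA_CT_ACT_NAT and at least one direction bit (SRC or DST) are set,
 * and then only the pieces of nf_nat_range2 whose range->flags bits
 * are present: address bounds for NF_NAT_RANGE_MAP_IPS, port bounds
 * for NF_NAT_RANGE_PROTO_SPECIFIED.
 */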
1508 
1509 static int tcf_ct_dump_helper(struct sk_buff *skb, struct nf_conntrack_helper *helper)
1510 {
1511         if (!helper)
1512                 return 0;
1513 
1514         if (nla_put_string(skb, TCA_CT_HELPER_NAME, helper->name) ||
1515             nla_put_u8(skb, TCA_CT_HELPER_FAMILY, helper->tuple.src.l3num) ||
1516             nla_put_u8(skb, TCA_CT_HELPER_PROTO, helper->tuple.dst.protonum))
1517                 return -1;
1518 
1519         return 0;
1520 }
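/*
 * Editorial annotation: a configured conntrack helper is exported as
 * its name plus the L3 family (tuple.src.l3num) and L4 protocol
 * (tuple.dst.protonum) it was registered for.
 */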
1521 
1522 static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
1523                               int bind, int ref)
1524 {
1525         unsigned char *b = skb_tail_pointer(skb);
1526         struct tcf_ct *c = to_ct(a);
1527         struct tcf_ct_params *p;
1528 
1529         struct tc_ct opt = {
1530                 .index   = c->tcf_index,
1531                 .refcnt  = refcount_read(&c->tcf_refcnt) - ref,
1532                 .bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
1533         };
1534         struct tcf_t t;
1535 
1536         spin_lock_bh(&c->tcf_lock);
1537         p = rcu_dereference_protected(c->params,
1538                                       lockdep_is_held(&c->tcf_lock));
1539         opt.action = c->tcf_action;
1540 
1541         if (tcf_ct_dump_key_val(skb,
1542                                 &p->ct_action, TCA_CT_ACTION,
1543                                 NULL, TCA_CT_UNSPEC,
1544                                 sizeof(p->ct_action)))
1545                 goto nla_put_failure;
1546 
1547         if (p->ct_action & TCA_CT_ACT_CLEAR)
1548                 goto skip_dump;
1549 
1550         if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
1551             tcf_ct_dump_key_val(skb,
1552                                 &p->mark, TCA_CT_MARK,
1553                                 &p->mark_mask, TCA_CT_MARK_MASK,
1554                                 sizeof(p->mark)))
1555                 goto nla_put_failure;
1556 
1557         if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
1558             tcf_ct_dump_key_val(skb,
1559                                 p->labels, TCA_CT_LABELS,
1560                                 p->labels_mask, TCA_CT_LABELS_MASK,
1561                                 sizeof(p->labels)))
1562                 goto nla_put_failure;
1563 
1564         if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
1565             tcf_ct_dump_key_val(skb,
1566                                 &p->zone, TCA_CT_ZONE,
1567                                 NULL, TCA_CT_UNSPEC,
1568                                 sizeof(p->zone)))
1569                 goto nla_put_failure;
1570 
1571         if (tcf_ct_dump_nat(skb, p))
1572                 goto nla_put_failure;
1573 
1574         if (tcf_ct_dump_helper(skb, p->helper))
1575                 goto nla_put_failure;
1576 
1577 skip_dump:
1578         if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
1579                 goto nla_put_failure;
1580 
1581         tcf_tm_dump(&t, &c->tcf_tm);
1582         if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
1583                 goto nla_put_failure;
1584         spin_unlock_bh(&c->tcf_lock);
1585 
1586         return skb->len;
1587 nla_put_failure:
1588         spin_unlock_bh(&c->tcf_lock);
1589         nlmsg_trim(skb, b);
1590         return -1;
1591 }
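/*
 * Editorial annotation: the dump takes tcf_lock so opt and params form
 * one consistent snapshot. A clear action (TCA_CT_ACT_CLEAR) carries
 * no conntrack metadata, so mark/labels/zone/NAT/helper are skipped;
 * on any nla_put failure the partially written attributes are rewound
 * with nlmsg_trim() back to the tail pointer saved on entry.
 */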
1592 
1593 static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
1594                              u64 drops, u64 lastuse, bool hw)
1595 {
1596         struct tcf_ct *c = to_ct(a);
1597 
1598         tcf_action_update_stats(a, bytes, packets, drops, hw);
1599         c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
1600 }
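/*
 * Editorial annotation: driver-reported stats are folded into the
 * software counters; lastuse is advanced with max_t() so hardware and
 * software updates can only move it forward.
 */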
1601 
1602 static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data,
1603                                     u32 *index_inc, bool bind,
1604                                     struct netlink_ext_ack *extack)
1605 {
1606         if (bind) {
1607                 struct flow_action_entry *entry = entry_data;
1608 
1609                 if (tcf_ct_helper(act))
1610                         return -EOPNOTSUPP;
1611 
1612                 entry->id = FLOW_ACTION_CT;
1613                 entry->ct.action = tcf_ct_action(act);
1614                 entry->ct.zone = tcf_ct_zone(act);
1615                 entry->ct.flow_table = tcf_ct_ft(act);
1616                 *index_inc = 1;
1617         } else {
1618                 struct flow_offload_action *fl_action = entry_data;
1619 
1620                 fl_action->id = FLOW_ACTION_CT;
1621         }
1622 
1623         return 0;
1624 }
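/*
 * Editorial annotation: on bind the action is translated into a
 * FLOW_ACTION_CT entry for drivers; helper-enabled actions cannot be
 * offloaded and fail with -EOPNOTSUPP. The !bind branch only labels
 * fl_action->id for offload bookkeeping.
 *
 * Illustrative sketch (editorial; example_parse_ct_entry() is a
 * hypothetical driver helper, not part of act_ct.c): the shape of a
 * consumer of the entry filled in above.
 */
static int example_parse_ct_entry(const struct flow_action_entry *entry)
{
        if (entry->id != FLOW_ACTION_CT)
                return -EOPNOTSUPP;
        /* entry->ct.zone: conntrack zone to operate in
         * entry->ct.action: TCA_CT_ACT_* bits (commit, NAT, ...)
         * entry->ct.flow_table: software table to sync state with
         */
        return 0;
}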
1625 
1626 static struct tc_action_ops act_ct_ops = {
1627         .kind           =       "ct",
1628         .id             =       TCA_ID_CT,
1629         .owner          =       THIS_MODULE,
1630         .act            =       tcf_ct_act,
1631         .dump           =       tcf_ct_dump,
1632         .init           =       tcf_ct_init,
1633         .cleanup        =       tcf_ct_cleanup,
1634         .stats_update   =       tcf_stats_update,
1635         .offload_act_setup =    tcf_ct_offload_act_setup,
1636         .size           =       sizeof(struct tcf_ct),
1637 };
1638 MODULE_ALIAS_NET_ACT("ct");
1639 
1640 static __net_init int ct_init_net(struct net *net)
1641 {
1642         struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id);
1643 
1644         return tc_action_net_init(net, &tn->tn, &act_ct_ops);
1645 }
1646 
1647 static void __net_exit ct_exit_net(struct list_head *net_list)
1648 {
1649         tc_action_net_exit(net_list, act_ct_ops.net_id);
1650 }
1651 
1652 static struct pernet_operations ct_net_ops = {
1653         .init = ct_init_net,
1654         .exit_batch = ct_exit_net,
1655         .id   = &act_ct_ops.net_id,
1656         .size = sizeof(struct tc_ct_action_net),
1657 };
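/*
 * Editorial annotation: per-netns state is a plain tc_action_net;
 * .exit_batch lets multiple exiting namespaces be cleaned up in one
 * pass instead of once per namespace.
 */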
1658 
1659 static int __init ct_init_module(void)
1660 {
1661         int err;
1662 
1663         act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
1664         if (!act_ct_wq)
1665                 return -ENOMEM;
1666 
1667         err = tcf_ct_flow_tables_init();
1668         if (err)
1669                 goto err_tbl_init;
1670 
1671         err = tcf_register_action(&act_ct_ops, &ct_net_ops);
1672         if (err)
1673                 goto err_register;
1674 
1675         static_branch_inc(&tcf_frag_xmit_count);
1676 
1677         return 0;
1678 
1679 err_register:
1680         tcf_ct_flow_tables_uninit();
1681 err_tbl_init:
1682         destroy_workqueue(act_ct_wq);
1683         return err;
1684 }
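/*
 * Editorial annotation: init builds its dependencies bottom-up
 * (ordered workqueue, flow-table infrastructure, action registration)
 * and the error labels unwind them in exact reverse order. The
 * tcf_frag_xmit_count static key enables the fragment-aware transmit
 * path used to re-fragment packets this action defragmented.
 */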
1685 
1686 static void __exit ct_cleanup_module(void)
1687 {
1688         static_branch_dec(&tcf_frag_xmit_count);
1689         tcf_unregister_action(&act_ct_ops, &ct_net_ops);
1690         tcf_ct_flow_tables_uninit();
1691         destroy_workqueue(act_ct_wq);
1692 }
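/*
 * Editorial annotation: teardown mirrors init in reverse, so the
 * workqueue is destroyed only after nothing can queue work onto it.
 */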
1693 
1694 module_init(ct_init_module);
1695 module_exit(ct_cleanup_module);
1696 MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
1697 MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
1698 MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
1699 MODULE_DESCRIPTION("Connection tracking action");
1700 MODULE_LICENSE("GPL v2");
1701 
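/*
 * Illustrative usage (editorial; the device name and chain layout are
 * placeholders): the ct action is normally attached from userspace
 * with tc(8) and the flower classifier, e.g.
 *
 *   tc filter add dev eth0 ingress proto ip flower \
 *           ct_state -trk \
 *           action ct zone 1 pipe action goto chain 1
 *
 *   tc filter add dev eth0 ingress proto ip chain 1 flower \
 *           ct_state +trk+est action pass
 *
 * The first rule sends untracked traffic through conntrack (zone 1);
 * the second passes packets that belong to an established flow.
 */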
