~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/net/netfilter/nft_set_pipapo.c

Version: ~ [ linux-6.11.5 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.58 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.114 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.169 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.228 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.284 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.322 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 // SPDX-License-Identifier: GPL-2.0-only
  2 
  3 /* PIPAPO: PIle PAcket POlicies: set for arbitrary concatenations of ranges
  4  *
  5  * Copyright (c) 2019-2020 Red Hat GmbH
  6  *
  7  * Author: Stefano Brivio <sbrivio@redhat.com>
  8  */
  9 
 10 /**
 11  * DOC: Theory of Operation
 12  *
 13  *
 14  * Problem
 15  * -------
 16  *
 17  * Match packet bytes against entries composed of ranged or non-ranged packet
 18  * field specifiers, mapping them to arbitrary references. For example:
 19  *
 20  * ::
 21  *
 22  *               --- fields --->
 23  *      |    [net],[port],[net]... => [reference]
 24  *   entries [net],[port],[net]... => [reference]
 25  *      |    [net],[port],[net]... => [reference]
 26  *      V    ...
 27  *
 28  * where [net] fields can be IP ranges or netmasks, and [port] fields are port
 29  * ranges. Arbitrary packet fields can be matched.
 30  *
 31  *
 32  * Algorithm Overview
 33  * ------------------
 34  *
 35  * This algorithm is loosely inspired by [Ligatti 2010], and fundamentally
 36  * relies on the consideration that every contiguous range in a space of b bits
 37  * can be converted into b * 2 netmasks, from Theorem 3 in [Rottenstreich 2010],
 38  * as also illustrated in Section 9 of [Kogan 2014].
 39  *
 40  * Classification against a number of entries, that require matching given bits
 41  * of a packet field, is performed by grouping those bits in sets of arbitrary
 42  * size, and classifying packet bits one group at a time.
 43  *
 44  * Example:
 45  *   to match the source port (16 bits) of a packet, we can divide those 16 bits
 46  *   in 4 groups of 4 bits each. Given the entry:
 47  *      0000 0001 0101 1001
 48  *   and a packet with source port:
 49  *      0000 0001 1010 1001
 50  *   first and second groups match, but the third doesn't. We conclude that the
 51  *   packet doesn't match the given entry.
 52  *
 53  * Translate the set to a sequence of lookup tables, one per field. Each table
 54  * has two dimensions: bit groups to be matched for a single packet field, and
 55  * all the possible values of said groups (buckets). Input entries are
 56  * represented as one or more rules, depending on the number of composing
 57  * netmasks for the given field specifier, and a group match is indicated as a
 58  * set bit, with number corresponding to the rule index, in all the buckets
 59  * whose value matches the entry for a given group.
 60  *
 61  * Rules are mapped between fields through an array of x, n pairs, with each
 62  * item mapping a matched rule to one or more rules. The position of the pair in
 63  * the array indicates the matched rule to be mapped to the next field, x
 64  * indicates the first rule index in the next field, and n the amount of
 65  * next-field rules the current rule maps to.
 66  *
 67  * The mapping array for the last field maps to the desired references.
 68  *
 69  * To match, we perform table lookups using the values of grouped packet bits,
 70  * and use a sequence of bitwise operations to progressively evaluate rule
 71  * matching.
 72  *
 73  * A stand-alone, reference implementation, also including notes about possible
 74  * future optimisations, is available at:
 75  *    https://pipapo.lameexcu.se/
 76  *
 77  * Insertion
 78  * ---------
 79  *
 80  * - For each packet field:
 81  *
 82  *   - divide the b packet bits we want to classify into groups of size t,
 83  *     obtaining ceil(b / t) groups
 84  *
 85  *      Example: match on destination IP address, with t = 4: 32 bits, 8 groups
 86  *      of 4 bits each
 87  *
 88  *   - allocate a lookup table with one column ("bucket") for each possible
 89  *     value of a group, and with one row for each group
 90  *
 91  *      Example: 8 groups, 2^4 buckets:
 92  *
 93  * ::
 94  *
 95  *                     bucket
 96  *      group  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
 97  *        0
 98  *        1
 99  *        2
100  *        3
101  *        4
102  *        5
103  *        6
104  *        7
105  *
106  *   - map the bits we want to classify for the current field, for a given
107  *     entry, to a single rule for non-ranged and netmask set items, and to one
108  *     or multiple rules for ranges. Ranges are expanded to composing netmasks
109  *     by pipapo_expand().
110  *
111  *      Example: 2 entries, 10.0.0.5:1024 and 192.168.1.0-192.168.2.1:2048
112  *      - rule #0: 10.0.0.5
113  *      - rule #1: 192.168.1.0/24
114  *      - rule #2: 192.168.2.0/31
115  *
116  *   - insert references to the rules in the lookup table, selecting buckets
117  *     according to bit values of a rule in the given group. This is done by
118  *     pipapo_insert().
119  *
120  *      Example: given:
121  *      - rule #0: 10.0.0.5 mapping to buckets
122  *        < 0 10  0 0   0 0  0 5 >
123  *      - rule #1: 192.168.1.0/24 mapping to buckets
124  *        < 12 0  10 8  0 1  < 0..15 > < 0..15 > >
125  *      - rule #2: 192.168.2.0/31 mapping to buckets
126  *        < 12 0  10 8  0 2  0 < 0..1 > >
127  *
128  *      these bits are set in the lookup table:
129  *
130  * ::
131  *
132  *                     bucket
133  *      group  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
134  *        0    0                                              1,2
135  *        1   1,2                                      0
136  *        2    0                                      1,2
137  *        3    0                              1,2
138  *        4  0,1,2
139  *        5    0   1   2
140  *        6  0,1,2 1   1   1   1   1   1   1   1   1   1   1   1   1   1   1
141  *        7   1,2 1,2  1   1   1  0,1  1   1   1   1   1   1   1   1   1   1
142  *
143  *   - if this is not the last field in the set, fill a mapping array that maps
144  *     rules from the lookup table to rules belonging to the same entry in
145  *     the next lookup table, done by pipapo_map().
146  *
147  *     Note that as rules map to contiguous ranges of rules, given how netmask
148  *     expansion and insertion is performed, &union nft_pipapo_map_bucket stores
149  *     this information as pairs of first rule index, rule count.
150  *
151  *      Example: 2 entries, 10.0.0.5:1024 and 192.168.1.0-192.168.2.1:2048,
152  *      given lookup table #0 for field 0 (see example above):
153  *
154  * ::
155  *
156  *                     bucket
157  *      group  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
158  *        0    0                                              1,2
159  *        1   1,2                                      0
160  *        2    0                                      1,2
161  *        3    0                              1,2
162  *        4  0,1,2
163  *        5    0   1   2
164  *        6  0,1,2 1   1   1   1   1   1   1   1   1   1   1   1   1   1   1
165  *        7   1,2 1,2  1   1   1  0,1  1   1   1   1   1   1   1   1   1   1
166  *
167  *      and lookup table #1 for field 1 with:
168  *      - rule #0: 1024 mapping to buckets
169  *        < 0  0  4  0 >
170  *      - rule #1: 2048 mapping to buckets
171  *        < 0  0  5  0 >
172  *
173  * ::
174  *
175  *                     bucket
176  *      group  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
177  *        0   0,1
178  *        1   0,1
179  *        2                    0   1
180  *        3   0,1
181  *
182  *      we need to map rules for 10.0.0.5 in lookup table #0 (rule #0) to 1024
183  *      in lookup table #1 (rule #0) and rules for 192.168.1.0-192.168.2.1
184  *      (rules #1, #2) to 2048 in lookup table #2 (rule #1):
185  *
186  * ::
187  *
188  *       rule indices in current field: 0    1    2
189  *       map to rules in next field:    0    1    1
190  *
191  *   - if this is the last field in the set, fill a mapping array that maps
192  *     rules from the last lookup table to element pointers, also done by
193  *     pipapo_map().
194  *
195  *     Note that, in this implementation, we have two elements (start, end) for
196  *     each entry. The pointer to the end element is stored in this array, and
197  *     the pointer to the start element is linked from it.
198  *
199  *      Example: entry 10.0.0.5:1024 has a corresponding &struct nft_pipapo_elem
200  *      pointer, 0x66, and element for 192.168.1.0-192.168.2.1:2048 is at 0x42.
201  *      From the rules of lookup table #1 as mapped above:
202  *
203  * ::
204  *
205  *       rule indices in last field:    0    1
206  *       map to elements:             0x66  0x42
207  *
208  *
209  * Matching
210  * --------
211  *
212  * We use a result bitmap, with the size of a single lookup table bucket, to
213  * represent the matching state that applies at every algorithm step. This is
214  * done by pipapo_lookup().
215  *
216  * - For each packet field:
217  *
218  *   - start with an all-ones result bitmap (res_map in pipapo_lookup())
219  *
220  *   - perform a lookup into the table corresponding to the current field,
221  *     for each group, and at every group, AND the current result bitmap with
222  *     the value from the lookup table bucket
223  *
224  * ::
225  *
226  *      Example: 192.168.1.5 < 12 0  10 8  0 1  0 5 >, with lookup table from
227  *      insertion examples.
228  *      Lookup table buckets are at least 3 bits wide, we'll assume 8 bits for
229  *      convenience in this example. Initial result bitmap is 0xff, the steps
230  *      below show the value of the result bitmap after each group is processed:
231  *
232  *                     bucket
233  *      group  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
234  *        0    0                                              1,2
235  *        result bitmap is now: 0xff & 0x6 [bucket 12] = 0x6
236  *
237  *        1   1,2                                      0
238  *        result bitmap is now: 0x6 & 0x6 [bucket 0] = 0x6
239  *
240  *        2    0                                      1,2
241  *        result bitmap is now: 0x6 & 0x6 [bucket 10] = 0x6
242  *
243  *        3    0                              1,2
244  *        result bitmap is now: 0x6 & 0x6 [bucket 8] = 0x6
245  *
246  *        4  0,1,2
247  *        result bitmap is now: 0x6 & 0x7 [bucket 0] = 0x6
248  *
249  *        5    0   1   2
250  *        result bitmap is now: 0x6 & 0x2 [bucket 1] = 0x2
251  *
252  *        6  0,1,2 1   1   1   1   1   1   1   1   1   1   1   1   1   1   1
253  *        result bitmap is now: 0x2 & 0x7 [bucket 0] = 0x2
254  *
255  *        7   1,2 1,2  1   1   1  0,1  1   1   1   1   1   1   1   1   1   1
256  *        final result bitmap for this field is: 0x2 & 0x3 [bucket 5] = 0x2
257  *
258  *   - at the next field, start with a new, all-zeroes result bitmap. For each
259  *     bit set in the previous result bitmap, fill the new result bitmap
260  *     (fill_map in pipapo_lookup()) with the rule indices from the
261  *     corresponding buckets of the mapping field for this field, done by
262  *     pipapo_refill()
263  *
264  *      Example: with mapping table from insertion examples, with the current
265  *      result bitmap from the previous example, 0x02:
266  *
267  * ::
268  *
269  *       rule indices in current field: 0    1    2
270  *       map to rules in next field:    0    1    1
271  *
272  *      the new result bitmap will be 0x02: rule 1 was set, and rule 1 will be
273  *      set.
274  *
275  *      We can now extend this example to cover the second iteration of the step
276  *      above (lookup and AND bitmap): assuming the port field is
277  *      2048 < 0  0  5  0 >, with starting result bitmap 0x2, and lookup table
278  *      for "port" field from pre-computation example:
279  *
280  * ::
281  *
282  *                     bucket
283  *      group  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
284  *        0   0,1
285  *        1   0,1
286  *        2                    0   1
287  *        3   0,1
288  *
289  *       operations are: 0x2 & 0x3 [bucket 0] & 0x3 [bucket 0] & 0x2 [bucket 5]
290  *       & 0x3 [bucket 0], resulting bitmap is 0x2.
291  *
292  *   - if this is the last field in the set, look up the value from the mapping
293  *     array corresponding to the final result bitmap
294  *
295  *      Example: 0x2 resulting bitmap from 192.168.1.5:2048, mapping array for
296  *      last field from insertion example:
297  *
298  * ::
299  *
300  *       rule indices in last field:    0    1
301  *       map to elements:             0x66  0x42
302  *
303  *      the matching element is at 0x42.
304  *
305  *
306  * References
307  * ----------
308  *
309  * [Ligatti 2010]
310  *      A Packet-classification Algorithm for Arbitrary Bitmask Rules, with
311  *      Automatic Time-space Tradeoffs
312  *      Jay Ligatti, Josh Kuhn, and Chris Gage.
313  *      Proceedings of the IEEE International Conference on Computer
314  *      Communication Networks (ICCCN), August 2010.
315  *      https://www.cse.usf.edu/~ligatti/papers/grouper-conf.pdf
316  *
317  * [Rottenstreich 2010]
318  *      Worst-Case TCAM Rule Expansion
319  *      Ori Rottenstreich and Isaac Keslassy.
320  *      2010 Proceedings IEEE INFOCOM, San Diego, CA, 2010.
321  *      http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.212.4592&rep=rep1&type=pdf
322  *
323  * [Kogan 2014]
324  *      SAX-PAC (Scalable And eXpressive PAcket Classification)
325  *      Kirill Kogan, Sergey Nikolenko, Ori Rottenstreich, William Culhane,
326  *      and Patrick Eugster.
327  *      Proceedings of the 2014 ACM conference on SIGCOMM, August 2014.
328  *      https://www.sigcomm.org/sites/default/files/ccr/papers/2014/August/2619239-2626294.pdf
329  */
330 
331 #include <linux/kernel.h>
332 #include <linux/init.h>
333 #include <linux/module.h>
334 #include <linux/netlink.h>
335 #include <linux/netfilter.h>
336 #include <linux/netfilter/nf_tables.h>
337 #include <net/netfilter/nf_tables_core.h>
338 #include <uapi/linux/netfilter/nf_tables.h>
339 #include <linux/bitmap.h>
340 #include <linux/bitops.h>
341 
342 #include "nft_set_pipapo_avx2.h"
343 #include "nft_set_pipapo.h"
344 
345 /**
346  * pipapo_refill() - For each set bit, set bits from selected mapping table item
347  * @map:        Bitmap to be scanned for set bits
348  * @len:        Length of bitmap in longs
349  * @rules:      Number of rules in field
350  * @dst:        Destination bitmap
351  * @mt:         Mapping table containing bit set specifiers
352  * @match_only: Find a single bit and return, don't fill
353  *
354  * Iteration over set bits with __builtin_ctzl(): Daniel Lemire, public domain.
355  *
356  * For each bit set in map, select the bucket from mapping table with index
357  * corresponding to the position of the bit set. Use start bit and amount of
358  * bits specified in bucket to fill region in dst.
359  *
360  * Return: -1 on no match, bit position on 'match_only', 0 otherwise.
361  */
362 int pipapo_refill(unsigned long *map, unsigned int len, unsigned int rules,
363                   unsigned long *dst,
364                   const union nft_pipapo_map_bucket *mt, bool match_only)
365 {
366         unsigned long bitset;
367         unsigned int k;
368         int ret = -1;
369 
370         for (k = 0; k < len; k++) {
371                 bitset = map[k];
372                 while (bitset) {
373                         unsigned long t = bitset & -bitset;
374                         int r = __builtin_ctzl(bitset);
375                         int i = k * BITS_PER_LONG + r;
376 
377                         if (unlikely(i >= rules)) {
378                                 map[k] = 0;
379                                 return -1;
380                         }
381 
382                         if (match_only) {
383                                 bitmap_clear(map, i, 1);
384                                 return i;
385                         }
386 
387                         ret = 0;
388 
389                         bitmap_set(dst, mt[i].to, mt[i].n);
390 
391                         bitset ^= t;
392                 }
393                 map[k] = 0;
394         }
395 
396         return ret;
397 }
398 
399 /**
400  * nft_pipapo_lookup() - Lookup function
401  * @net:        Network namespace
402  * @set:        nftables API set representation
403  * @key:        nftables API element representation containing key data
404  * @ext:        nftables API extension pointer, filled with matching reference
405  *
406  * For more details, see DOC: Theory of Operation.
407  *
408  * Return: true on match, false otherwise.
409  */
410 bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
411                        const u32 *key, const struct nft_set_ext **ext)
412 {
413         struct nft_pipapo *priv = nft_set_priv(set);
414         struct nft_pipapo_scratch *scratch;
415         unsigned long *res_map, *fill_map;
416         u8 genmask = nft_genmask_cur(net);
417         const struct nft_pipapo_match *m;
418         const struct nft_pipapo_field *f;
419         const u8 *rp = (const u8 *)key;
420         bool map_index;
421         int i;
422 
423         local_bh_disable();
424 
425         m = rcu_dereference(priv->match);
426 
427         if (unlikely(!m || !*raw_cpu_ptr(m->scratch)))
428                 goto out;
429 
430         scratch = *raw_cpu_ptr(m->scratch);
431 
432         map_index = scratch->map_index;
433 
434         res_map  = scratch->map + (map_index ? m->bsize_max : 0);
435         fill_map = scratch->map + (map_index ? 0 : m->bsize_max);
436 
437         pipapo_resmap_init(m, res_map);
438 
439         nft_pipapo_for_each_field(f, i, m) {
440                 bool last = i == m->field_count - 1;
441                 int b;
442 
443                 /* For each bit group: select lookup table bucket depending on
444                  * packet bytes value, then AND bucket value
445                  */
446                 if (likely(f->bb == 8))
447                         pipapo_and_field_buckets_8bit(f, res_map, rp);
448                 else
449                         pipapo_and_field_buckets_4bit(f, res_map, rp);
450                 NFT_PIPAPO_GROUP_BITS_ARE_8_OR_4;
451 
452                 rp += f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f);
453 
454                 /* Now populate the bitmap for the next field, unless this is
455                  * the last field, in which case return the matched 'ext'
456                  * pointer if any.
457                  *
458                  * Now res_map contains the matching bitmap, and fill_map is the
459                  * bitmap for the next field.
460                  */
461 next_match:
462                 b = pipapo_refill(res_map, f->bsize, f->rules, fill_map, f->mt,
463                                   last);
464                 if (b < 0) {
465                         scratch->map_index = map_index;
466                         local_bh_enable();
467 
468                         return false;
469                 }
470 
471                 if (last) {
472                         *ext = &f->mt[b].e->ext;
473                         if (unlikely(nft_set_elem_expired(*ext) ||
474                                      !nft_set_elem_active(*ext, genmask)))
475                                 goto next_match;
476 
477                         /* Last field: we're just returning the key without
478                          * filling the initial bitmap for the next field, so the
479                          * current inactive bitmap is clean and can be reused as
480                          * *next* bitmap (not initial) for the next packet.
481                          */
482                         scratch->map_index = map_index;
483                         local_bh_enable();
484 
485                         return true;
486                 }
487 
488                 /* Swap bitmap indices: res_map is the initial bitmap for the
489                  * next field, and fill_map is guaranteed to be all-zeroes at
490                  * this point.
491                  */
492                 map_index = !map_index;
493                 swap(res_map, fill_map);
494 
495                 rp += NFT_PIPAPO_GROUPS_PADDING(f);
496         }
497 
498 out:
499         local_bh_enable();
500         return false;
501 }
502 
503 /**
504  * pipapo_get() - Get matching element reference given key data
505  * @net:        Network namespace
506  * @set:        nftables API set representation
507  * @m:          storage containing active/existing elements
508  * @data:       Key data to be matched against existing elements
509  * @genmask:    If set, check that element is active in given genmask
510  * @tstamp:     timestamp to check for expired elements
511  * @gfp:        the type of memory to allocate (see kmalloc).
512  *
513  * This is essentially the same as the lookup function, except that it matches
514  * key data against the uncommitted copy and doesn't use preallocated maps for
515  * bitmap results.
516  *
517  * Return: pointer to &struct nft_pipapo_elem on match, error pointer otherwise.
518  */
519 static struct nft_pipapo_elem *pipapo_get(const struct net *net,
520                                           const struct nft_set *set,
521                                           const struct nft_pipapo_match *m,
522                                           const u8 *data, u8 genmask,
523                                           u64 tstamp, gfp_t gfp)
524 {
525         struct nft_pipapo_elem *ret = ERR_PTR(-ENOENT);
526         unsigned long *res_map, *fill_map = NULL;
527         const struct nft_pipapo_field *f;
528         int i;
529 
530         if (m->bsize_max == 0)
531                 return ret;
532 
533         res_map = kmalloc_array(m->bsize_max, sizeof(*res_map), gfp);
534         if (!res_map) {
535                 ret = ERR_PTR(-ENOMEM);
536                 goto out;
537         }
538 
539         fill_map = kcalloc(m->bsize_max, sizeof(*res_map), gfp);
540         if (!fill_map) {
541                 ret = ERR_PTR(-ENOMEM);
542                 goto out;
543         }
544 
545         pipapo_resmap_init(m, res_map);
546 
547         nft_pipapo_for_each_field(f, i, m) {
548                 bool last = i == m->field_count - 1;
549                 int b;
550 
551                 /* For each bit group: select lookup table bucket depending on
552                  * packet bytes value, then AND bucket value
553                  */
554                 if (f->bb == 8)
555                         pipapo_and_field_buckets_8bit(f, res_map, data);
556                 else if (f->bb == 4)
557                         pipapo_and_field_buckets_4bit(f, res_map, data);
558                 else
559                         BUG();
560 
561                 data += f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f);
562 
563                 /* Now populate the bitmap for the next field, unless this is
564                  * the last field, in which case return the matched 'ext'
565                  * pointer if any.
566                  *
567                  * Now res_map contains the matching bitmap, and fill_map is the
568                  * bitmap for the next field.
569                  */
570 next_match:
571                 b = pipapo_refill(res_map, f->bsize, f->rules, fill_map, f->mt,
572                                   last);
573                 if (b < 0)
574                         goto out;
575 
576                 if (last) {
577                         if (__nft_set_elem_expired(&f->mt[b].e->ext, tstamp))
578                                 goto next_match;
579                         if ((genmask &&
580                              !nft_set_elem_active(&f->mt[b].e->ext, genmask)))
581                                 goto next_match;
582 
583                         ret = f->mt[b].e;
584                         goto out;
585                 }
586 
587                 data += NFT_PIPAPO_GROUPS_PADDING(f);
588 
589                 /* Swap bitmap indices: fill_map will be the initial bitmap for
590                  * the next field (i.e. the new res_map), and res_map is
591                  * guaranteed to be all-zeroes at this point, ready to be filled
592                  * according to the next mapping table.
593                  */
594                 swap(res_map, fill_map);
595         }
596 
597 out:
598         kfree(fill_map);
599         kfree(res_map);
600         return ret;
601 }
602 
603 /**
604  * nft_pipapo_get() - Get matching element reference given key data
605  * @net:        Network namespace
606  * @set:        nftables API set representation
607  * @elem:       nftables API element representation containing key data
608  * @flags:      Unused
609  */
610 static struct nft_elem_priv *
611 nft_pipapo_get(const struct net *net, const struct nft_set *set,
612                const struct nft_set_elem *elem, unsigned int flags)
613 {
614         struct nft_pipapo *priv = nft_set_priv(set);
615         struct nft_pipapo_match *m = rcu_dereference(priv->match);
616         struct nft_pipapo_elem *e;
617 
618         e = pipapo_get(net, set, m, (const u8 *)elem->key.val.data,
619                        nft_genmask_cur(net), get_jiffies_64(),
620                        GFP_ATOMIC);
621         if (IS_ERR(e))
622                 return ERR_CAST(e);
623 
624         return &e->priv;
625 }
626 
627 /**
628  * pipapo_realloc_mt() - Reallocate mapping table if needed upon resize
629  * @f:          Field containing mapping table
630  * @old_rules:  Amount of existing mapped rules
631  * @rules:      Amount of new rules to map
632  *
633  * Return: 0 on success, negative error code on failure.
634  */
635 static int pipapo_realloc_mt(struct nft_pipapo_field *f,
636                              unsigned int old_rules, unsigned int rules)
637 {
638         union nft_pipapo_map_bucket *new_mt = NULL, *old_mt = f->mt;
639         const unsigned int extra = PAGE_SIZE / sizeof(*new_mt);
640         unsigned int rules_alloc = rules;
641 
642         might_sleep();
643 
644         if (unlikely(rules == 0))
645                 goto out_free;
646 
647         /* growing and enough space left, no action needed */
648         if (rules > old_rules && f->rules_alloc > rules)
649                 return 0;
650 
651         /* downsize and extra slack has not grown too large */
652         if (rules < old_rules) {
653                 unsigned int remove = f->rules_alloc - rules;
654 
655                 if (remove < (2u * extra))
656                         return 0;
657         }
658 
659         /* If set needs more than one page of memory for rules then
660          * allocate another extra page to avoid frequent reallocation.
661          */
662         if (rules > extra &&
663             check_add_overflow(rules, extra, &rules_alloc))
664                 return -EOVERFLOW;
665 
666         new_mt = kvmalloc_array(rules_alloc, sizeof(*new_mt), GFP_KERNEL_ACCOUNT);
667         if (!new_mt)
668                 return -ENOMEM;
669 
670         if (old_mt)
671                 memcpy(new_mt, old_mt, min(old_rules, rules) * sizeof(*new_mt));
672 
673         if (rules > old_rules) {
674                 memset(new_mt + old_rules, 0,
675                        (rules - old_rules) * sizeof(*new_mt));
676         }
677 out_free:
678         f->rules_alloc = rules_alloc;
679         f->mt = new_mt;
680 
681         kvfree(old_mt);
682 
683         return 0;
684 }
685 
686 /**
687  * pipapo_resize() - Resize lookup or mapping table, or both
688  * @f:          Field containing lookup and mapping tables
689  * @old_rules:  Previous amount of rules in field
690  * @rules:      New amount of rules
691  *
692  * Increase, decrease or maintain tables size depending on new amount of rules,
693  * and copy data over. In case the new size is smaller, throw away data for
694  * highest-numbered rules.
695  *
696  * Return: 0 on success, -ENOMEM on allocation failure.
697  */
698 static int pipapo_resize(struct nft_pipapo_field *f,
699                          unsigned int old_rules, unsigned int rules)
700 {
701         long *new_lt = NULL, *new_p, *old_lt = f->lt, *old_p;
702         unsigned int new_bucket_size, copy;
703         int group, bucket, err;
704 
705         if (rules >= NFT_PIPAPO_RULE0_MAX)
706                 return -ENOSPC;
707 
708         new_bucket_size = DIV_ROUND_UP(rules, BITS_PER_LONG);
709 #ifdef NFT_PIPAPO_ALIGN
710         new_bucket_size = roundup(new_bucket_size,
711                                   NFT_PIPAPO_ALIGN / sizeof(*new_lt));
712 #endif
713 
714         if (new_bucket_size == f->bsize)
715                 goto mt;
716 
717         if (new_bucket_size > f->bsize)
718                 copy = f->bsize;
719         else
720                 copy = new_bucket_size;
721 
722         new_lt = kvzalloc(f->groups * NFT_PIPAPO_BUCKETS(f->bb) *
723                           new_bucket_size * sizeof(*new_lt) +
724                           NFT_PIPAPO_ALIGN_HEADROOM,
725                           GFP_KERNEL);
726         if (!new_lt)
727                 return -ENOMEM;
728 
729         new_p = NFT_PIPAPO_LT_ALIGN(new_lt);
730         old_p = NFT_PIPAPO_LT_ALIGN(old_lt);
731 
732         for (group = 0; group < f->groups; group++) {
733                 for (bucket = 0; bucket < NFT_PIPAPO_BUCKETS(f->bb); bucket++) {
734                         memcpy(new_p, old_p, copy * sizeof(*new_p));
735                         new_p += copy;
736                         old_p += copy;
737 
738                         if (new_bucket_size > f->bsize)
739                                 new_p += new_bucket_size - f->bsize;
740                         else
741                                 old_p += f->bsize - new_bucket_size;
742                 }
743         }
744 
745 mt:
746         err = pipapo_realloc_mt(f, old_rules, rules);
747         if (err) {
748                 kvfree(new_lt);
749                 return err;
750         }
751 
752         if (new_lt) {
753                 f->bsize = new_bucket_size;
754                 f->lt = new_lt;
755                 kvfree(old_lt);
756         }
757 
758         return 0;
759 }
760 
761 /**
762  * pipapo_bucket_set() - Set rule bit in bucket given group and group value
763  * @f:          Field containing lookup table
764  * @rule:       Rule index
765  * @group:      Group index
766  * @v:          Value of bit group
767  */
768 static void pipapo_bucket_set(struct nft_pipapo_field *f, int rule, int group,
769                               int v)
770 {
771         unsigned long *pos;
772 
773         pos = NFT_PIPAPO_LT_ALIGN(f->lt);
774         pos += f->bsize * NFT_PIPAPO_BUCKETS(f->bb) * group;
775         pos += f->bsize * v;
776 
777         __set_bit(rule, pos);
778 }
779 
780 /**
781  * pipapo_lt_4b_to_8b() - Switch lookup table group width from 4 bits to 8 bits
782  * @old_groups: Number of current groups
783  * @bsize:      Size of one bucket, in longs
784  * @old_lt:     Pointer to the current lookup table
785  * @new_lt:     Pointer to the new, pre-allocated lookup table
786  *
787  * Each bucket with index b in the new lookup table, belonging to group g, is
788  * filled with the bit intersection between:
789  * - bucket with index given by the upper 4 bits of b, from group g, and
790  * - bucket with index given by the lower 4 bits of b, from group g + 1
791  *
792  * That is, given buckets from the new lookup table N(x, y) and the old lookup
793  * table O(x, y), with x bucket index, and y group index:
794  *
795  *      N(b, g) := O(b / 16, g) & O(b % 16, g + 1)
796  *
797  * This ensures equivalence of the matching results on lookup. Two examples in
798  * pictures:
799  *
800  *              bucket
801  *  group  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 ... 254 255
802  *    0                ^
803  *    1                |                                                 ^
804  *   ...             ( & )                                               |
805  *                  /     \                                              |
806  *                 /       \                                         .-( & )-.
807  *                /  bucket \                                        |       |
808  *      group  0 / 1   2   3 \ 4   5   6   7   8   9  10  11  12  13 |14  15 |
809  *        0     /             \                                      |       |
810  *        1                    \                                     |       |
811  *        2                                                          |     --'
812  *        3                                                          '-
813  *       ...
814  */
815 static void pipapo_lt_4b_to_8b(int old_groups, int bsize,
816                                unsigned long *old_lt, unsigned long *new_lt)
817 {
818         int g, b, i;
819 
820         for (g = 0; g < old_groups / 2; g++) {
821                 int src_g0 = g * 2, src_g1 = g * 2 + 1;
822 
823                 for (b = 0; b < NFT_PIPAPO_BUCKETS(8); b++) {
824                         int src_b0 = b / NFT_PIPAPO_BUCKETS(4);
825                         int src_b1 = b % NFT_PIPAPO_BUCKETS(4);
826                         int src_i0 = src_g0 * NFT_PIPAPO_BUCKETS(4) + src_b0;
827                         int src_i1 = src_g1 * NFT_PIPAPO_BUCKETS(4) + src_b1;
828 
829                         for (i = 0; i < bsize; i++) {
830                                 *new_lt = old_lt[src_i0 * bsize + i] &
831                                           old_lt[src_i1 * bsize + i];
832                                 new_lt++;
833                         }
834                 }
835         }
836 }
837 
838 /**
839  * pipapo_lt_8b_to_4b() - Switch lookup table group width from 8 bits to 4 bits
840  * @old_groups: Number of current groups
841  * @bsize:      Size of one bucket, in longs
842  * @old_lt:     Pointer to the current lookup table
843  * @new_lt:     Pointer to the new, pre-allocated lookup table
844  *
845  * Each bucket with index b in the new lookup table, belonging to group g, is
846  * filled with the bit union of:
847  * - all the buckets with index such that the upper four bits of the lower byte
848  *   equal b, from group g, with g odd
849  * - all the buckets with index such that the lower four bits equal b, from
850  *   group g, with g even
851  *
852  * That is, given buckets from the new lookup table N(x, y) and the old lookup
853  * table O(x, y), with x bucket index, and y group index:
854  *
855  *      - with g odd:  N(b, g) := U(O(x, g) for each x : x = (b & 0xf0) >> 4)
856  *      - with g even: N(b, g) := U(O(x, g) for each x : x = b & 0x0f)
857  *
858  * where U() denotes the arbitrary union operation (binary OR of n terms). This
859  * ensures equivalence of the matching results on lookup.
860  */
861 static void pipapo_lt_8b_to_4b(int old_groups, int bsize,
862                                unsigned long *old_lt, unsigned long *new_lt)
863 {
864         int g, b, bsrc, i;
865 
866         memset(new_lt, 0, old_groups * 2 * NFT_PIPAPO_BUCKETS(4) * bsize *
867                           sizeof(unsigned long));
868 
869         for (g = 0; g < old_groups * 2; g += 2) {
870                 int src_g = g / 2;
871 
872                 for (b = 0; b < NFT_PIPAPO_BUCKETS(4); b++) {
873                         for (bsrc = NFT_PIPAPO_BUCKETS(8) * src_g;
874                              bsrc < NFT_PIPAPO_BUCKETS(8) * (src_g + 1);
875                              bsrc++) {
876                                 if (((bsrc & 0xf0) >> 4) != b)
877                                         continue;
878 
879                                 for (i = 0; i < bsize; i++)
880                                         new_lt[i] |= old_lt[bsrc * bsize + i];
881                         }
882 
883                         new_lt += bsize;
884                 }
885 
886                 for (b = 0; b < NFT_PIPAPO_BUCKETS(4); b++) {
887                         for (bsrc = NFT_PIPAPO_BUCKETS(8) * src_g;
888                              bsrc < NFT_PIPAPO_BUCKETS(8) * (src_g + 1);
889                              bsrc++) {
890                                 if ((bsrc & 0x0f) != b)
891                                         continue;
892 
893                                 for (i = 0; i < bsize; i++)
894                                         new_lt[i] |= old_lt[bsrc * bsize + i];
895                         }
896 
897                         new_lt += bsize;
898                 }
899         }
900 }
901 
902 /**
903  * pipapo_lt_bits_adjust() - Adjust group size for lookup table if needed
904  * @f:          Field containing lookup table
905  */
906 static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
907 {
908         unsigned int groups, bb;
909         unsigned long *new_lt;
910         size_t lt_size;
911 
912         lt_size = f->groups * NFT_PIPAPO_BUCKETS(f->bb) * f->bsize *
913                   sizeof(*f->lt);
914 
915         if (f->bb == NFT_PIPAPO_GROUP_BITS_SMALL_SET &&
916             lt_size > NFT_PIPAPO_LT_SIZE_HIGH) {
917                 groups = f->groups * 2;
918                 bb = NFT_PIPAPO_GROUP_BITS_LARGE_SET;
919 
920                 lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize *
921                           sizeof(*f->lt);
922         } else if (f->bb == NFT_PIPAPO_GROUP_BITS_LARGE_SET &&
923                    lt_size < NFT_PIPAPO_LT_SIZE_LOW) {
924                 groups = f->groups / 2;
925                 bb = NFT_PIPAPO_GROUP_BITS_SMALL_SET;
926 
927                 lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize *
928                           sizeof(*f->lt);
929 
930                 /* Don't increase group width if the resulting lookup table size
931                  * would exceed the upper size threshold for a "small" set.
932                  */
933                 if (lt_size > NFT_PIPAPO_LT_SIZE_HIGH)
934                         return;
935         } else {
936                 return;
937         }
938 
939         new_lt = kvzalloc(lt_size + NFT_PIPAPO_ALIGN_HEADROOM, GFP_KERNEL_ACCOUNT);
940         if (!new_lt)
941                 return;
942 
943         NFT_PIPAPO_GROUP_BITS_ARE_8_OR_4;
944         if (f->bb == 4 && bb == 8) {
945                 pipapo_lt_4b_to_8b(f->groups, f->bsize,
946                                    NFT_PIPAPO_LT_ALIGN(f->lt),
947                                    NFT_PIPAPO_LT_ALIGN(new_lt));
948         } else if (f->bb == 8 && bb == 4) {
949                 pipapo_lt_8b_to_4b(f->groups, f->bsize,
950                                    NFT_PIPAPO_LT_ALIGN(f->lt),
951                                    NFT_PIPAPO_LT_ALIGN(new_lt));
952         } else {
953                 BUG();
954         }
955 
956         f->groups = groups;
957         f->bb = bb;
958         kvfree(f->lt);
959         f->lt = new_lt;
960 }
961 
962 /**
963  * pipapo_insert() - Insert new rule in field given input key and mask length
964  * @f:          Field containing lookup table
965  * @k:          Input key for classification, without nftables padding
966  * @mask_bits:  Length of mask; matches field length for non-ranged entry
967  *
968  * Insert a new rule reference in lookup buckets corresponding to k and
969  * mask_bits.
970  *
971  * Return: 1 on success (one rule inserted), negative error code on failure.
972  */
973 static int pipapo_insert(struct nft_pipapo_field *f, const uint8_t *k,
974                          int mask_bits)
975 {
976         unsigned int rule = f->rules, group, ret, bit_offset = 0;
977 
978         ret = pipapo_resize(f, f->rules, f->rules + 1);
979         if (ret)
980                 return ret;
981 
982         f->rules++;
983 
984         for (group = 0; group < f->groups; group++) {
985                 int i, v;
986                 u8 mask;
987 
988                 v = k[group / (BITS_PER_BYTE / f->bb)];
989                 v &= GENMASK(BITS_PER_BYTE - bit_offset - 1, 0);
990                 v >>= (BITS_PER_BYTE - bit_offset) - f->bb;
991 
992                 bit_offset += f->bb;
993                 bit_offset %= BITS_PER_BYTE;
994 
995                 if (mask_bits >= (group + 1) * f->bb) {
996                         /* Not masked */
997                         pipapo_bucket_set(f, rule, group, v);
998                 } else if (mask_bits <= group * f->bb) {
999                         /* Completely masked */
1000                         for (i = 0; i < NFT_PIPAPO_BUCKETS(f->bb); i++)
1001                                 pipapo_bucket_set(f, rule, group, i);
1002                 } else {
1003                         /* The mask limit falls on this group */
1004                         mask = GENMASK(f->bb - 1, 0);
1005                         mask >>= mask_bits - group * f->bb;
1006                         for (i = 0; i < NFT_PIPAPO_BUCKETS(f->bb); i++) {
1007                                 if ((i & ~mask) == (v & ~mask))
1008                                         pipapo_bucket_set(f, rule, group, i);
1009                         }
1010                 }
1011         }
1012 
1013         pipapo_lt_bits_adjust(f);
1014 
1015         return 1;
1016 }
1017 
1018 /**
1019  * pipapo_step_diff() - Check if setting @step bit in netmask would change it
1020  * @base:       Mask we are expanding
1021  * @step:       Step bit for given expansion step
1022  * @len:        Total length of mask space (set and unset bits), bytes
1023  *
1024  * Convenience function for mask expansion.
1025  *
1026  * Return: true if step bit changes mask (i.e. isn't set), false otherwise.
1027  */
1028 static bool pipapo_step_diff(u8 *base, int step, int len)
1029 {
1030         /* Network order, byte-addressed */
1031 #ifdef __BIG_ENDIAN__
1032         return !(BIT(step % BITS_PER_BYTE) & base[step / BITS_PER_BYTE]);
1033 #else
1034         return !(BIT(step % BITS_PER_BYTE) &
1035                  base[len - 1 - step / BITS_PER_BYTE]);
1036 #endif
1037 }
1038 
1039 /**
1040  * pipapo_step_after_end() - Check if mask exceeds range end with given step
1041  * @base:       Mask we are expanding
1042  * @end:        End of range
1043  * @step:       Step bit for given expansion step, highest bit to be set
1044  * @len:        Total length of mask space (set and unset bits), bytes
1045  *
1046  * Convenience function for mask expansion.
1047  *
1048  * Return: true if mask exceeds range setting step bits, false otherwise.
1049  */
1050 static bool pipapo_step_after_end(const u8 *base, const u8 *end, int step,
1051                                   int len)
1052 {
1053         u8 tmp[NFT_PIPAPO_MAX_BYTES];
1054         int i;
1055 
1056         memcpy(tmp, base, len);
1057 
1058         /* Network order, byte-addressed */
1059         for (i = 0; i <= step; i++)
1060 #ifdef __BIG_ENDIAN__
1061                 tmp[i / BITS_PER_BYTE] |= BIT(i % BITS_PER_BYTE);
1062 #else
1063                 tmp[len - 1 - i / BITS_PER_BYTE] |= BIT(i % BITS_PER_BYTE);
1064 #endif
1065 
1066         return memcmp(tmp, end, len) > 0;
1067 }
1068 
1069 /**
1070  * pipapo_base_sum() - Sum step bit to given len-sized netmask base with carry
1071  * @base:       Netmask base
1072  * @step:       Step bit to sum
1073  * @len:        Netmask length, bytes
1074  */
1075 static void pipapo_base_sum(u8 *base, int step, int len)
1076 {
1077         bool carry = false;
1078         int i;
1079 
1080         /* Network order, byte-addressed */
1081 #ifdef __BIG_ENDIAN__
1082         for (i = step / BITS_PER_BYTE; i < len; i++) {
1083 #else
1084         for (i = len - 1 - step / BITS_PER_BYTE; i >= 0; i--) {
1085 #endif
1086                 if (carry)
1087                         base[i]++;
1088                 else
1089                         base[i] += 1 << (step % BITS_PER_BYTE);
1090 
1091                 if (base[i])
1092                         break;
1093 
1094                 carry = true;
1095         }
1096 }
1097 
1098 /**
1099  * pipapo_expand() - Expand to composing netmasks, insert into lookup table
1100  * @f:          Field containing lookup table
1101  * @start:      Start of range
1102  * @end:        End of range
1103  * @len:        Length of value in bits
1104  *
1105  * Expand range to composing netmasks and insert corresponding rule references
1106  * in lookup buckets.
1107  *
1108  * Return: number of inserted rules on success, negative error code on failure.
1109  */
1110 static int pipapo_expand(struct nft_pipapo_field *f,
1111                          const u8 *start, const u8 *end, int len)
1112 {
1113         int step, masks = 0, bytes = DIV_ROUND_UP(len, BITS_PER_BYTE);
1114         u8 base[NFT_PIPAPO_MAX_BYTES];
1115 
1116         memcpy(base, start, bytes);
1117         while (memcmp(base, end, bytes) <= 0) {
1118                 int err;
1119 
1120                 step = 0;
1121                 while (pipapo_step_diff(base, step, bytes)) {
1122                         if (pipapo_step_after_end(base, end, step, bytes))
1123                                 break;
1124 
1125                         step++;
1126                         if (step >= len) {
1127                                 if (!masks) {
1128                                         err = pipapo_insert(f, base, 0);
1129                                         if (err < 0)
1130                                                 return err;
1131                                         masks = 1;
1132                                 }
1133                                 goto out;
1134                         }
1135                 }
1136 
1137                 err = pipapo_insert(f, base, len - step);
1138 
1139                 if (err < 0)
1140                         return err;
1141 
1142                 masks++;
1143                 pipapo_base_sum(base, step, bytes);
1144         }
1145 out:
1146         return masks;
1147 }
1148 
1149 /**
1150  * pipapo_map() - Insert rules in mapping tables, mapping them between fields
1151  * @m:          Matching data, including mapping table
1152  * @map:        Table of rule maps: array of first rule and amount of rules
1153  *              in next field a given rule maps to, for each field
1154  * @e:          For last field, nft_set_ext pointer matching rules map to
1155  */
1156 static void pipapo_map(struct nft_pipapo_match *m,
1157                        union nft_pipapo_map_bucket map[NFT_PIPAPO_MAX_FIELDS],
1158                        struct nft_pipapo_elem *e)
1159 {
1160         struct nft_pipapo_field *f;
1161         int i, j;
1162 
1163         for (i = 0, f = m->f; i < m->field_count - 1; i++, f++) {
1164                 for (j = 0; j < map[i].n; j++) {
1165                         f->mt[map[i].to + j].to = map[i + 1].to;
1166                         f->mt[map[i].to + j].n = map[i + 1].n;
1167                 }
1168         }
1169 
1170         /* Last field: map to ext instead of mapping to next field */
1171         for (j = 0; j < map[i].n; j++)
1172                 f->mt[map[i].to + j].e = e;
1173 }
1174 
1175 /**
1176  * pipapo_free_scratch() - Free per-CPU map at original (not aligned) address
1177  * @m:          Matching data
1178  * @cpu:        CPU number
1179  */
1180 static void pipapo_free_scratch(const struct nft_pipapo_match *m, unsigned int cpu)
1181 {
1182         struct nft_pipapo_scratch *s;
1183         void *mem;
1184 
1185         s = *per_cpu_ptr(m->scratch, cpu);
1186         if (!s)
1187                 return;
1188 
1189         mem = s;
1190         mem -= s->align_off;
1191         kfree(mem);
1192 }
1193 
1194 /**
1195  * pipapo_realloc_scratch() - Reallocate scratch maps for partial match results
1196  * @clone:      Copy of matching data with pending insertions and deletions
1197  * @bsize_max:  Maximum bucket size, scratch maps cover two buckets
1198  *
1199  * Return: 0 on success, -ENOMEM on failure.
1200  */
1201 static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
1202                                   unsigned long bsize_max)
1203 {
1204         int i;
1205 
1206         for_each_possible_cpu(i) {
1207                 struct nft_pipapo_scratch *scratch;
1208 #ifdef NFT_PIPAPO_ALIGN
1209                 void *scratch_aligned;
1210                 u32 align_off;
1211 #endif
1212                 scratch = kzalloc_node(struct_size(scratch, map,
1213                                                    bsize_max * 2) +
1214                                        NFT_PIPAPO_ALIGN_HEADROOM,
1215                                        GFP_KERNEL_ACCOUNT, cpu_to_node(i));
1216                 if (!scratch) {
1217                         /* On failure, there's no need to undo previous
1218                          * allocations: this means that some scratch maps have
1219                          * a bigger allocated size now (this is only called on
1220                          * insertion), but the extra space won't be used by any
1221                          * CPU as new elements are not inserted and m->bsize_max
1222                          * is not updated.
1223                          */
1224                         return -ENOMEM;
1225                 }
1226 
1227                 pipapo_free_scratch(clone, i);
1228 
1229 #ifdef NFT_PIPAPO_ALIGN
1230                 /* Align &scratch->map (not the struct itself): the extra
1231                  * %NFT_PIPAPO_ALIGN_HEADROOM bytes passed to kzalloc_node()
1232                  * above guarantee we can waste up to those bytes in order
1233                  * to align the map field regardless of its offset within
1234                  * the struct.
1235                  */
1236                 BUILD_BUG_ON(offsetof(struct nft_pipapo_scratch, map) > NFT_PIPAPO_ALIGN_HEADROOM);
1237 
1238                 scratch_aligned = NFT_PIPAPO_LT_ALIGN(&scratch->map);
1239                 scratch_aligned -= offsetof(struct nft_pipapo_scratch, map);
1240                 align_off = scratch_aligned - (void *)scratch;
1241 
1242                 scratch = scratch_aligned;
1243                 scratch->align_off = align_off;
1244 #endif
1245                 *per_cpu_ptr(clone->scratch, i) = scratch;
1246         }
1247 
1248         return 0;
1249 }
1250 
1251 static bool nft_pipapo_transaction_mutex_held(const struct nft_set *set)
1252 {
1253 #ifdef CONFIG_PROVE_LOCKING
1254         const struct net *net = read_pnet(&set->net);
1255 
1256         return lockdep_is_held(&nft_pernet(net)->commit_mutex);
1257 #else
1258         return true;
1259 #endif
1260 }
1261 
1262 static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old);
1263 
1264 /**
1265  * pipapo_maybe_clone() - Build clone for pending data changes, if not existing
1266  * @set:        nftables API set representation
1267  *
1268  * Return: newly created or existing clone, if any. NULL on allocation failure
1269  */
1270 static struct nft_pipapo_match *pipapo_maybe_clone(const struct nft_set *set)
1271 {
1272         struct nft_pipapo *priv = nft_set_priv(set);
1273         struct nft_pipapo_match *m;
1274 
1275         if (priv->clone)
1276                 return priv->clone;
1277 
1278         m = rcu_dereference_protected(priv->match,
1279                                       nft_pipapo_transaction_mutex_held(set));
1280         priv->clone = pipapo_clone(m);
1281 
1282         return priv->clone;
1283 }
1284 
1285 /**
1286  * nft_pipapo_insert() - Validate and insert ranged elements
1287  * @net:        Network namespace
1288  * @set:        nftables API set representation
1289  * @elem:       nftables API element representation containing key data
1290  * @elem_priv:  Filled with pointer to &struct nft_set_ext in inserted element
1291  *
1292  * Return: 0 on success, error pointer on failure.
1293  */
1294 static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
1295                              const struct nft_set_elem *elem,
1296                              struct nft_elem_priv **elem_priv)
1297 {
1298         const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
1299         union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
1300         const u8 *start = (const u8 *)elem->key.val.data, *end;
1301         struct nft_pipapo_match *m = pipapo_maybe_clone(set);
1302         u8 genmask = nft_genmask_next(net);
1303         struct nft_pipapo_elem *e, *dup;
1304         u64 tstamp = nft_net_tstamp(net);
1305         struct nft_pipapo_field *f;
1306         const u8 *start_p, *end_p;
1307         int i, bsize_max, err = 0;
1308 
1309         if (!m)
1310                 return -ENOMEM;
1311 
1312         if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END))
1313                 end = (const u8 *)nft_set_ext_key_end(ext)->data;
1314         else
1315                 end = start;
1316 
1317         dup = pipapo_get(net, set, m, start, genmask, tstamp, GFP_KERNEL);
1318         if (!IS_ERR(dup)) {
1319                 /* Check if we already have the same exact entry */
1320                 const struct nft_data *dup_key, *dup_end;
1321 
1322                 dup_key = nft_set_ext_key(&dup->ext);
1323                 if (nft_set_ext_exists(&dup->ext, NFT_SET_EXT_KEY_END))
1324                         dup_end = nft_set_ext_key_end(&dup->ext);
1325                 else
1326                         dup_end = dup_key;
1327 
1328                 if (!memcmp(start, dup_key->data, sizeof(*dup_key->data)) &&
1329                     !memcmp(end, dup_end->data, sizeof(*dup_end->data))) {
1330                         *elem_priv = &dup->priv;
1331                         return -EEXIST;
1332                 }
1333 
1334                 return -ENOTEMPTY;
1335         }
1336 
1337         if (PTR_ERR(dup) == -ENOENT) {
1338                 /* Look for partially overlapping entries */
1339                 dup = pipapo_get(net, set, m, end, nft_genmask_next(net), tstamp,
1340                                  GFP_KERNEL);
1341         }
1342 
1343         if (PTR_ERR(dup) != -ENOENT) {
1344                 if (IS_ERR(dup))
1345                         return PTR_ERR(dup);
1346                 *elem_priv = &dup->priv;
1347                 return -ENOTEMPTY;
1348         }
1349 
1350         /* Validate */
1351         start_p = start;
1352         end_p = end;
1353 
1354         /* some helpers return -1, or 0 >= for valid rule pos,
1355          * so we cannot support more than INT_MAX rules at this time.
1356          */
1357         BUILD_BUG_ON(NFT_PIPAPO_RULE0_MAX > INT_MAX);
1358 
1359         nft_pipapo_for_each_field(f, i, m) {
1360                 if (f->rules >= NFT_PIPAPO_RULE0_MAX)
1361                         return -ENOSPC;
1362 
1363                 if (memcmp(start_p, end_p,
1364                            f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f)) > 0)
1365                         return -EINVAL;
1366 
1367                 start_p += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
1368                 end_p += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
1369         }
1370 
1371         /* Insert */
1372         bsize_max = m->bsize_max;
1373 
1374         nft_pipapo_for_each_field(f, i, m) {
1375                 int ret;
1376 
1377                 rulemap[i].to = f->rules;
1378 
1379                 ret = memcmp(start, end,
1380                              f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f));
1381                 if (!ret)
1382                         ret = pipapo_insert(f, start, f->groups * f->bb);
1383                 else
1384                         ret = pipapo_expand(f, start, end, f->groups * f->bb);
1385 
1386                 if (ret < 0)
1387                         return ret;
1388 
1389                 if (f->bsize > bsize_max)
1390                         bsize_max = f->bsize;
1391 
1392                 rulemap[i].n = ret;
1393 
1394                 start += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
1395                 end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
1396         }
1397 
1398         if (!*get_cpu_ptr(m->scratch) || bsize_max > m->bsize_max) {
1399                 put_cpu_ptr(m->scratch);
1400 
1401                 err = pipapo_realloc_scratch(m, bsize_max);
1402                 if (err)
1403                         return err;
1404 
1405                 m->bsize_max = bsize_max;
1406         } else {
1407                 put_cpu_ptr(m->scratch);
1408         }
1409 
1410         e = nft_elem_priv_cast(elem->priv);
1411         *elem_priv = &e->priv;
1412 
1413         pipapo_map(m, rulemap, e);
1414 
1415         return 0;
1416 }
1417 
1418 /**
1419  * pipapo_clone() - Clone matching data to create new working copy
1420  * @old:        Existing matching data
1421  *
1422  * Return: copy of matching data passed as 'old' or NULL.
1423  */
1424 static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
1425 {
1426         struct nft_pipapo_field *dst, *src;
1427         struct nft_pipapo_match *new;
1428         int i;
1429 
1430         new = kmalloc(struct_size(new, f, old->field_count), GFP_KERNEL_ACCOUNT);
1431         if (!new)
1432                 return NULL;
1433 
1434         new->field_count = old->field_count;
1435         new->bsize_max = old->bsize_max;
1436 
1437         new->scratch = alloc_percpu(*new->scratch);
1438         if (!new->scratch)
1439                 goto out_scratch;
1440 
1441         for_each_possible_cpu(i)
1442                 *per_cpu_ptr(new->scratch, i) = NULL;
1443 
1444         if (pipapo_realloc_scratch(new, old->bsize_max))
1445                 goto out_scratch_realloc;
1446 
1447         rcu_head_init(&new->rcu);
1448 
1449         src = old->f;
1450         dst = new->f;
1451 
1452         for (i = 0; i < old->field_count; i++) {
1453                 unsigned long *new_lt;
1454 
1455                 memcpy(dst, src, offsetof(struct nft_pipapo_field, lt));
1456 
1457                 new_lt = kvzalloc(src->groups * NFT_PIPAPO_BUCKETS(src->bb) *
1458                                   src->bsize * sizeof(*dst->lt) +
1459                                   NFT_PIPAPO_ALIGN_HEADROOM,
1460                                   GFP_KERNEL_ACCOUNT);
1461                 if (!new_lt)
1462                         goto out_lt;
1463 
1464                 dst->lt = new_lt;
1465 
1466                 memcpy(NFT_PIPAPO_LT_ALIGN(new_lt),
1467                        NFT_PIPAPO_LT_ALIGN(src->lt),
1468                        src->bsize * sizeof(*dst->lt) *
1469                        src->groups * NFT_PIPAPO_BUCKETS(src->bb));
1470 
1471                 if (src->rules > 0) {
1472                         dst->mt = kvmalloc_array(src->rules_alloc,
1473                                                  sizeof(*src->mt),
1474                                                  GFP_KERNEL_ACCOUNT);
1475                         if (!dst->mt)
1476                                 goto out_mt;
1477 
1478                         memcpy(dst->mt, src->mt, src->rules * sizeof(*src->mt));
1479                 } else {
1480                         dst->mt = NULL;
1481                         dst->rules_alloc = 0;
1482                 }
1483 
1484                 src++;
1485                 dst++;
1486         }
1487 
1488         return new;
1489 
1490 out_mt:
1491         kvfree(dst->lt);
1492 out_lt:
1493         for (dst--; i > 0; i--) {
1494                 kvfree(dst->mt);
1495                 kvfree(dst->lt);
1496                 dst--;
1497         }
1498 out_scratch_realloc:
1499         for_each_possible_cpu(i)
1500                 pipapo_free_scratch(new, i);
1501 out_scratch:
1502         free_percpu(new->scratch);
1503         kfree(new);
1504 
1505         return NULL;
1506 }
1507 
1508 /**
1509  * pipapo_rules_same_key() - Get number of rules originated from the same entry
1510  * @f:          Field containing mapping table
1511  * @first:      Index of first rule in set of rules mapping to same entry
1512  *
1513  * Using the fact that all rules in a field that originated from the same entry
1514  * will map to the same set of rules in the next field, or to the same element
1515  * reference, return the cardinality of the set of rules that originated from
1516  * the same entry as the rule with index @first, @first rule included.
1517  *
1518  * In pictures:
1519  *                              rules
1520  *      field #0                0    1    2    3    4
1521  *              map to:         0    1   2-4  2-4  5-9
1522  *                              .    .    .......   . ...
1523  *                              |    |    |    | \   \
1524  *                              |    |    |    |  \   \
1525  *                              |    |    |    |   \   \
1526  *                              '    '    '    '    '   \
1527  *      in field #1             0    1    2    3    4    5 ...
1528  *
1529  * if this is called for rule 2 on field #0, it will return 3, as also rules 2
1530  * and 3 in field 0 map to the same set of rules (2, 3, 4) in the next field.
1531  *
1532  * For the last field in a set, we can rely on associated entries to map to the
1533  * same element references.
1534  *
1535  * Return: Number of rules that originated from the same entry as @first.
1536  */
1537 static unsigned int pipapo_rules_same_key(struct nft_pipapo_field *f, unsigned int first)
1538 {
1539         struct nft_pipapo_elem *e = NULL; /* Keep gcc happy */
1540         unsigned int r;
1541 
1542         for (r = first; r < f->rules; r++) {
1543                 if (r != first && e != f->mt[r].e)
1544                         return r - first;
1545 
1546                 e = f->mt[r].e;
1547         }
1548 
1549         if (r != first)
1550                 return r - first;
1551 
1552         return 0;
1553 }
1554 
1555 /**
1556  * pipapo_unmap() - Remove rules from mapping tables, renumber remaining ones
1557  * @mt:         Mapping array
1558  * @rules:      Original amount of rules in mapping table
1559  * @start:      First rule index to be removed
1560  * @n:          Amount of rules to be removed
1561  * @to_offset:  First rule index, in next field, this group of rules maps to
1562  * @is_last:    If this is the last field, delete reference from mapping array
1563  *
1564  * This is used to unmap rules from the mapping table for a single field,
1565  * maintaining consistency and compactness for the existing ones.
1566  *
1567  * In pictures: let's assume that we want to delete rules 2 and 3 from the
1568  * following mapping array:
1569  *
1570  *                 rules
1571  *               0      1      2      3      4
1572  *      map to:  4-10   4-10   11-15  11-15  16-18
1573  *
1574  * the result will be:
1575  *
1576  *                 rules
1577  *               0      1      2
1578  *      map to:  4-10   4-10   11-13
1579  *
1580  * for fields before the last one. In case this is the mapping table for the
1581  * last field in a set, and rules map to pointers to &struct nft_pipapo_elem:
1582  *
1583  *                      rules
1584  *                        0      1      2      3      4
1585  *  element pointers:  0x42   0x42   0x33   0x33   0x44
1586  *
1587  * the result will be:
1588  *
1589  *                      rules
1590  *                        0      1      2
1591  *  element pointers:  0x42   0x42   0x44
1592  */
1593 static void pipapo_unmap(union nft_pipapo_map_bucket *mt, unsigned int rules,
1594                          unsigned int start, unsigned int n,
1595                          unsigned int to_offset, bool is_last)
1596 {
1597         int i;
1598 
1599         memmove(mt + start, mt + start + n, (rules - start - n) * sizeof(*mt));
1600         memset(mt + rules - n, 0, n * sizeof(*mt));
1601 
1602         if (is_last)
1603                 return;
1604 
1605         for (i = start; i < rules - n; i++)
1606                 mt[i].to -= to_offset;
1607 }
1608 
1609 /**
1610  * pipapo_drop() - Delete entry from lookup and mapping tables, given rule map
1611  * @m:          Matching data
1612  * @rulemap:    Table of rule maps, arrays of first rule and amount of rules
1613  *              in next field a given entry maps to, for each field
1614  *
1615  * For each rule in lookup table buckets mapping to this set of rules, drop
1616  * all bits set in lookup table mapping. In pictures, assuming we want to drop
1617  * rules 0 and 1 from this lookup table:
1618  *
1619  *                     bucket
1620  *      group  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
1621  *        0    0                                              1,2
1622  *        1   1,2                                      0
1623  *        2    0                                      1,2
1624  *        3    0                              1,2
1625  *        4  0,1,2
1626  *        5    0   1   2
1627  *        6  0,1,2 1   1   1   1   1   1   1   1   1   1   1   1   1   1   1
1628  *        7   1,2 1,2  1   1   1  0,1  1   1   1   1   1   1   1   1   1   1
1629  *
1630  * rule 2 becomes rule 0, and the result will be:
1631  *
1632  *                     bucket
1633  *      group  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
1634  *        0                                                    0
1635  *        1    0
1636  *        2                                            0
1637  *        3                                    0
1638  *        4    0
1639  *        5            0
1640  *        6    0
1641  *        7    0   0
1642  *
1643  * once this is done, call unmap() to drop all the corresponding rule references
1644  * from mapping tables.
1645  */
1646 static void pipapo_drop(struct nft_pipapo_match *m,
1647                         union nft_pipapo_map_bucket rulemap[])
1648 {
1649         struct nft_pipapo_field *f;
1650         int i;
1651 
1652         nft_pipapo_for_each_field(f, i, m) {
1653                 int g;
1654 
1655                 for (g = 0; g < f->groups; g++) {
1656                         unsigned long *pos;
1657                         int b;
1658 
1659                         pos = NFT_PIPAPO_LT_ALIGN(f->lt) + g *
1660                               NFT_PIPAPO_BUCKETS(f->bb) * f->bsize;
1661 
1662                         for (b = 0; b < NFT_PIPAPO_BUCKETS(f->bb); b++) {
1663                                 bitmap_cut(pos, pos, rulemap[i].to,
1664                                            rulemap[i].n,
1665                                            f->bsize * BITS_PER_LONG);
1666 
1667                                 pos += f->bsize;
1668                         }
1669                 }
1670 
1671                 pipapo_unmap(f->mt, f->rules, rulemap[i].to, rulemap[i].n,
1672                              rulemap[i + 1].n, i == m->field_count - 1);
1673                 if (pipapo_resize(f, f->rules, f->rules - rulemap[i].n)) {
1674                         /* We can ignore this, a failure to shrink tables down
1675                          * doesn't make tables invalid.
1676                          */
1677                         ;
1678                 }
1679                 f->rules -= rulemap[i].n;
1680 
1681                 pipapo_lt_bits_adjust(f);
1682         }
1683 }
1684 
1685 static void nft_pipapo_gc_deactivate(struct net *net, struct nft_set *set,
1686                                      struct nft_pipapo_elem *e)
1687 
1688 {
1689         nft_setelem_data_deactivate(net, set, &e->priv);
1690 }
1691 
1692 /**
1693  * pipapo_gc() - Drop expired entries from set, destroy start and end elements
1694  * @set:        nftables API set representation
1695  * @m:          Matching data
1696  */
1697 static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
1698 {
1699         struct nft_pipapo *priv = nft_set_priv(set);
1700         struct net *net = read_pnet(&set->net);
1701         unsigned int rules_f0, first_rule = 0;
1702         u64 tstamp = nft_net_tstamp(net);
1703         struct nft_pipapo_elem *e;
1704         struct nft_trans_gc *gc;
1705 
1706         gc = nft_trans_gc_alloc(set, 0, GFP_KERNEL);
1707         if (!gc)
1708                 return;
1709 
1710         while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
1711                 union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
1712                 const struct nft_pipapo_field *f;
1713                 unsigned int i, start, rules_fx;
1714 
1715                 start = first_rule;
1716                 rules_fx = rules_f0;
1717 
1718                 nft_pipapo_for_each_field(f, i, m) {
1719                         rulemap[i].to = start;
1720                         rulemap[i].n = rules_fx;
1721 
1722                         if (i < m->field_count - 1) {
1723                                 rules_fx = f->mt[start].n;
1724                                 start = f->mt[start].to;
1725                         }
1726                 }
1727 
1728                 /* Pick the last field, and its last index */
1729                 f--;
1730                 i--;
1731                 e = f->mt[rulemap[i].to].e;
1732 
1733                 /* synchronous gc never fails, there is no need to set on
1734                  * NFT_SET_ELEM_DEAD_BIT.
1735                  */
1736                 if (__nft_set_elem_expired(&e->ext, tstamp)) {
1737                         gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
1738                         if (!gc)
1739                                 return;
1740 
1741                         nft_pipapo_gc_deactivate(net, set, e);
1742                         pipapo_drop(m, rulemap);
1743                         nft_trans_gc_elem_add(gc, e);
1744 
1745                         /* And check again current first rule, which is now the
1746                          * first we haven't checked.
1747                          */
1748                 } else {
1749                         first_rule += rules_f0;
1750                 }
1751         }
1752 
1753         gc = nft_trans_gc_catchall_sync(gc);
1754         if (gc) {
1755                 nft_trans_gc_queue_sync_done(gc);
1756                 priv->last_gc = jiffies;
1757         }
1758 }
1759 
1760 /**
1761  * pipapo_free_fields() - Free per-field tables contained in matching data
1762  * @m:          Matching data
1763  */
1764 static void pipapo_free_fields(struct nft_pipapo_match *m)
1765 {
1766         struct nft_pipapo_field *f;
1767         int i;
1768 
1769         nft_pipapo_for_each_field(f, i, m) {
1770                 kvfree(f->lt);
1771                 kvfree(f->mt);
1772         }
1773 }
1774 
1775 static void pipapo_free_match(struct nft_pipapo_match *m)
1776 {
1777         int i;
1778 
1779         for_each_possible_cpu(i)
1780                 pipapo_free_scratch(m, i);
1781 
1782         free_percpu(m->scratch);
1783         pipapo_free_fields(m);
1784 
1785         kfree(m);
1786 }
1787 
1788 /**
1789  * pipapo_reclaim_match - RCU callback to free fields from old matching data
1790  * @rcu:        RCU head
1791  */
1792 static void pipapo_reclaim_match(struct rcu_head *rcu)
1793 {
1794         struct nft_pipapo_match *m;
1795 
1796         m = container_of(rcu, struct nft_pipapo_match, rcu);
1797         pipapo_free_match(m);
1798 }
1799 
1800 /**
1801  * nft_pipapo_commit() - Replace lookup data with current working copy
1802  * @set:        nftables API set representation
1803  *
1804  * While at it, check if we should perform garbage collection on the working
1805  * copy before committing it for lookup, and don't replace the table if the
1806  * working copy doesn't have pending changes.
1807  *
1808  * We also need to create a new working copy for subsequent insertions and
1809  * deletions.
1810  */
1811 static void nft_pipapo_commit(struct nft_set *set)
1812 {
1813         struct nft_pipapo *priv = nft_set_priv(set);
1814         struct nft_pipapo_match *old;
1815 
1816         if (!priv->clone)
1817                 return;
1818 
1819         if (time_after_eq(jiffies, priv->last_gc + nft_set_gc_interval(set)))
1820                 pipapo_gc(set, priv->clone);
1821 
1822         old = rcu_replace_pointer(priv->match, priv->clone,
1823                                   nft_pipapo_transaction_mutex_held(set));
1824         priv->clone = NULL;
1825 
1826         if (old)
1827                 call_rcu(&old->rcu, pipapo_reclaim_match);
1828 }
1829 
1830 static void nft_pipapo_abort(const struct nft_set *set)
1831 {
1832         struct nft_pipapo *priv = nft_set_priv(set);
1833 
1834         if (!priv->clone)
1835                 return;
1836         pipapo_free_match(priv->clone);
1837         priv->clone = NULL;
1838 }
1839 
1840 /**
1841  * nft_pipapo_activate() - Mark element reference as active given key, commit
1842  * @net:        Network namespace
1843  * @set:        nftables API set representation
1844  * @elem_priv:  nftables API element representation containing key data
1845  *
1846  * On insertion, elements are added to a copy of the matching data currently
1847  * in use for lookups, and not directly inserted into current lookup data. Both
1848  * nft_pipapo_insert() and nft_pipapo_activate() are called once for each
1849  * element, hence we can't purpose either one as a real commit operation.
1850  */
1851 static void nft_pipapo_activate(const struct net *net,
1852                                 const struct nft_set *set,
1853                                 struct nft_elem_priv *elem_priv)
1854 {
1855         struct nft_pipapo_elem *e = nft_elem_priv_cast(elem_priv);
1856 
1857         nft_clear(net, &e->ext);
1858 }
1859 
1860 /**
1861  * nft_pipapo_deactivate() - Search for element and make it inactive
1862  * @net:        Network namespace
1863  * @set:        nftables API set representation
1864  * @elem:       nftables API element representation containing key data
1865  *
1866  * Return: deactivated element if found, NULL otherwise.
1867  */
1868 static struct nft_elem_priv *
1869 nft_pipapo_deactivate(const struct net *net, const struct nft_set *set,
1870                       const struct nft_set_elem *elem)
1871 {
1872         struct nft_pipapo_match *m = pipapo_maybe_clone(set);
1873         struct nft_pipapo_elem *e;
1874 
1875         /* removal must occur on priv->clone, if we are low on memory
1876          * we have no choice and must fail the removal request.
1877          */
1878         if (!m)
1879                 return NULL;
1880 
1881         e = pipapo_get(net, set, m, (const u8 *)elem->key.val.data,
1882                        nft_genmask_next(net), nft_net_tstamp(net), GFP_KERNEL);
1883         if (IS_ERR(e))
1884                 return NULL;
1885 
1886         nft_set_elem_change_active(net, set, &e->ext);
1887 
1888         return &e->priv;
1889 }
1890 
1891 /**
1892  * nft_pipapo_flush() - make element inactive
1893  * @net:        Network namespace
1894  * @set:        nftables API set representation
1895  * @elem_priv:  nftables API element representation containing key data
1896  *
1897  * This is functionally the same as nft_pipapo_deactivate(), with a slightly
1898  * different interface, and it's also called once for each element in a set
1899  * being flushed, so we can't implement, strictly speaking, a flush operation,
1900  * which would otherwise be as simple as allocating an empty copy of the
1901  * matching data.
1902  *
1903  * Note that we could in theory do that, mark the set as flushed, and ignore
1904  * subsequent calls, but we would leak all the elements after the first one,
1905  * because they wouldn't then be freed as result of API calls.
1906  *
1907  * Return: true if element was found and deactivated.
1908  */
1909 static void nft_pipapo_flush(const struct net *net, const struct nft_set *set,
1910                              struct nft_elem_priv *elem_priv)
1911 {
1912         struct nft_pipapo_elem *e = nft_elem_priv_cast(elem_priv);
1913 
1914         nft_set_elem_change_active(net, set, &e->ext);
1915 }
1916 
1917 /**
1918  * pipapo_get_boundaries() - Get byte interval for associated rules
1919  * @f:          Field including lookup table
1920  * @first_rule: First rule (lowest index)
1921  * @rule_count: Number of associated rules
1922  * @left:       Byte expression for left boundary (start of range)
1923  * @right:      Byte expression for right boundary (end of range)
1924  *
1925  * Given the first rule and amount of rules that originated from the same entry,
1926  * build the original range associated with the entry, and calculate the length
1927  * of the originating netmask.
1928  *
1929  * In pictures:
1930  *
1931  *                     bucket
1932  *      group  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
1933  *        0                                                   1,2
1934  *        1   1,2
1935  *        2                                           1,2
1936  *        3                                   1,2
1937  *        4   1,2
1938  *        5        1   2
1939  *        6   1,2  1   1   1   1   1   1   1   1   1   1   1   1   1   1   1
1940  *        7   1,2 1,2  1   1   1   1   1   1   1   1   1   1   1   1   1   1
1941  *
1942  * this is the lookup table corresponding to the IPv4 range
1943  * 192.168.1.0-192.168.2.1, which was expanded to the two composing netmasks,
1944  * rule #1: 192.168.1.0/24, and rule #2: 192.168.2.0/31.
1945  *
1946  * This function fills @left and @right with the byte values of the leftmost
1947  * and rightmost bucket indices for the lowest and highest rule indices,
1948  * respectively. If @first_rule is 1 and @rule_count is 2, we obtain, in
1949  * nibbles:
1950  *   left:  < 12, 0, 10, 8, 0, 1, 0, 0 >
1951  *   right: < 12, 0, 10, 8, 0, 2, 2, 1 >
1952  * corresponding to bytes:
1953  *   left:  < 192, 168, 1, 0 >
1954  *   right: < 192, 168, 2, 1 >
1955  * with mask length irrelevant here, unused on return, as the range is already
1956  * defined by its start and end points. The mask length is relevant for a single
1957  * ranged entry instead: if @first_rule is 1 and @rule_count is 1, we ignore
1958  * rule 2 above: @left becomes < 192, 168, 1, 0 >, @right becomes
1959  * < 192, 168, 1, 255 >, and the mask length, calculated from the distances
1960  * between leftmost and rightmost bucket indices for each group, would be 24.
1961  *
1962  * Return: mask length, in bits.
1963  */
1964 static int pipapo_get_boundaries(struct nft_pipapo_field *f, int first_rule,
1965                                  int rule_count, u8 *left, u8 *right)
1966 {
1967         int g, mask_len = 0, bit_offset = 0;
1968         u8 *l = left, *r = right;
1969 
1970         for (g = 0; g < f->groups; g++) {
1971                 int b, x0, x1;
1972 
1973                 x0 = -1;
1974                 x1 = -1;
1975                 for (b = 0; b < NFT_PIPAPO_BUCKETS(f->bb); b++) {
1976                         unsigned long *pos;
1977 
1978                         pos = NFT_PIPAPO_LT_ALIGN(f->lt) +
1979                               (g * NFT_PIPAPO_BUCKETS(f->bb) + b) * f->bsize;
1980                         if (test_bit(first_rule, pos) && x0 == -1)
1981                                 x0 = b;
1982                         if (test_bit(first_rule + rule_count - 1, pos))
1983                                 x1 = b;
1984                 }
1985 
1986                 *l |= x0 << (BITS_PER_BYTE - f->bb - bit_offset);
1987                 *r |= x1 << (BITS_PER_BYTE - f->bb - bit_offset);
1988 
1989                 bit_offset += f->bb;
1990                 if (bit_offset >= BITS_PER_BYTE) {
1991                         bit_offset %= BITS_PER_BYTE;
1992                         l++;
1993                         r++;
1994                 }
1995 
1996                 if (x1 - x0 == 0)
1997                         mask_len += 4;
1998                 else if (x1 - x0 == 1)
1999                         mask_len += 3;
2000                 else if (x1 - x0 == 3)
2001                         mask_len += 2;
2002                 else if (x1 - x0 == 7)
2003                         mask_len += 1;
2004         }
2005 
2006         return mask_len;
2007 }
2008 
2009 /**
2010  * pipapo_match_field() - Match rules against byte ranges
2011  * @f:          Field including the lookup table
2012  * @first_rule: First of associated rules originating from same entry
2013  * @rule_count: Amount of associated rules
2014  * @start:      Start of range to be matched
2015  * @end:        End of range to be matched
2016  *
2017  * Return: true on match, false otherwise.
2018  */
2019 static bool pipapo_match_field(struct nft_pipapo_field *f,
2020                                int first_rule, int rule_count,
2021                                const u8 *start, const u8 *end)
2022 {
2023         u8 right[NFT_PIPAPO_MAX_BYTES] = { 0 };
2024         u8 left[NFT_PIPAPO_MAX_BYTES] = { 0 };
2025 
2026         pipapo_get_boundaries(f, first_rule, rule_count, left, right);
2027 
2028         return !memcmp(start, left,
2029                        f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f)) &&
2030                !memcmp(end, right, f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f));
2031 }
2032 
2033 /**
2034  * nft_pipapo_remove() - Remove element given key, commit
2035  * @net:        Network namespace
2036  * @set:        nftables API set representation
2037  * @elem_priv:  nftables API element representation containing key data
2038  *
2039  * Similarly to nft_pipapo_activate(), this is used as commit operation by the
2040  * API, but it's called once per element in the pending transaction, so we can't
2041  * implement this as a single commit operation. Closest we can get is to remove
2042  * the matched element here, if any, and commit the updated matching data.
2043  */
2044 static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
2045                               struct nft_elem_priv *elem_priv)
2046 {
2047         struct nft_pipapo *priv = nft_set_priv(set);
2048         struct nft_pipapo_match *m = priv->clone;
2049         unsigned int rules_f0, first_rule = 0;
2050         struct nft_pipapo_elem *e;
2051         const u8 *data;
2052 
2053         e = nft_elem_priv_cast(elem_priv);
2054         data = (const u8 *)nft_set_ext_key(&e->ext);
2055 
2056         while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
2057                 union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
2058                 const u8 *match_start, *match_end;
2059                 struct nft_pipapo_field *f;
2060                 int i, start, rules_fx;
2061 
2062                 match_start = data;
2063 
2064                 if (nft_set_ext_exists(&e->ext, NFT_SET_EXT_KEY_END))
2065                         match_end = (const u8 *)nft_set_ext_key_end(&e->ext)->data;
2066                 else
2067                         match_end = data;
2068 
2069                 start = first_rule;
2070                 rules_fx = rules_f0;
2071 
2072                 nft_pipapo_for_each_field(f, i, m) {
2073                         bool last = i == m->field_count - 1;
2074 
2075                         if (!pipapo_match_field(f, start, rules_fx,
2076                                                 match_start, match_end))
2077                                 break;
2078 
2079                         rulemap[i].to = start;
2080                         rulemap[i].n = rules_fx;
2081 
2082                         rules_fx = f->mt[start].n;
2083                         start = f->mt[start].to;
2084 
2085                         match_start += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
2086                         match_end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
2087 
2088                         if (last && f->mt[rulemap[i].to].e == e) {
2089                                 pipapo_drop(m, rulemap);
2090                                 return;
2091                         }
2092                 }
2093 
2094                 first_rule += rules_f0;
2095         }
2096 
2097         WARN_ON_ONCE(1); /* elem_priv not found */
2098 }
2099 
2100 /**
2101  * nft_pipapo_do_walk() - Walk over elements in m
2102  * @ctx:        nftables API context
2103  * @set:        nftables API set representation
2104  * @m:          matching data pointing to key mapping array
2105  * @iter:       Iterator
2106  *
2107  * As elements are referenced in the mapping array for the last field, directly
2108  * scan that array: there's no need to follow rule mappings from the first
2109  * field. @m is protected either by RCU read lock or by transaction mutex.
2110  */
2111 static void nft_pipapo_do_walk(const struct nft_ctx *ctx, struct nft_set *set,
2112                                const struct nft_pipapo_match *m,
2113                                struct nft_set_iter *iter)
2114 {
2115         const struct nft_pipapo_field *f;
2116         unsigned int i, r;
2117 
2118         for (i = 0, f = m->f; i < m->field_count - 1; i++, f++)
2119                 ;
2120 
2121         for (r = 0; r < f->rules; r++) {
2122                 struct nft_pipapo_elem *e;
2123 
2124                 if (r < f->rules - 1 && f->mt[r + 1].e == f->mt[r].e)
2125                         continue;
2126 
2127                 if (iter->count < iter->skip)
2128                         goto cont;
2129 
2130                 e = f->mt[r].e;
2131 
2132                 iter->err = iter->fn(ctx, set, iter, &e->priv);
2133                 if (iter->err < 0)
2134                         return;
2135 
2136 cont:
2137                 iter->count++;
2138         }
2139 }
2140 
2141 /**
2142  * nft_pipapo_walk() - Walk over elements
2143  * @ctx:        nftables API context
2144  * @set:        nftables API set representation
2145  * @iter:       Iterator
2146  *
2147  * Test if destructive action is needed or not, clone active backend if needed
2148  * and call the real function to work on the data.
2149  */
2150 static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
2151                             struct nft_set_iter *iter)
2152 {
2153         struct nft_pipapo *priv = nft_set_priv(set);
2154         const struct nft_pipapo_match *m;
2155 
2156         switch (iter->type) {
2157         case NFT_ITER_UPDATE:
2158                 m = pipapo_maybe_clone(set);
2159                 if (!m) {
2160                         iter->err = -ENOMEM;
2161                         return;
2162                 }
2163 
2164                 nft_pipapo_do_walk(ctx, set, m, iter);
2165                 break;
2166         case NFT_ITER_READ:
2167                 rcu_read_lock();
2168                 m = rcu_dereference(priv->match);
2169                 nft_pipapo_do_walk(ctx, set, m, iter);
2170                 rcu_read_unlock();
2171                 break;
2172         default:
2173                 iter->err = -EINVAL;
2174                 WARN_ON_ONCE(1);
2175                 break;
2176         }
2177 }
2178 
2179 /**
2180  * nft_pipapo_privsize() - Return the size of private data for the set
2181  * @nla:        netlink attributes, ignored as size doesn't depend on them
2182  * @desc:       Set description, ignored as size doesn't depend on it
2183  *
2184  * Return: size of private data for this set implementation, in bytes
2185  */
2186 static u64 nft_pipapo_privsize(const struct nlattr * const nla[],
2187                                const struct nft_set_desc *desc)
2188 {
2189         return sizeof(struct nft_pipapo);
2190 }
2191 
2192 /**
2193  * nft_pipapo_estimate() - Set size, space and lookup complexity
2194  * @desc:       Set description, element count and field description used
2195  * @features:   Flags: NFT_SET_INTERVAL needs to be there
2196  * @est:        Storage for estimation data
2197  *
2198  * Return: true if set description is compatible, false otherwise
2199  */
2200 static bool nft_pipapo_estimate(const struct nft_set_desc *desc, u32 features,
2201                                 struct nft_set_estimate *est)
2202 {
2203         if (!(features & NFT_SET_INTERVAL) ||
2204             desc->field_count < NFT_PIPAPO_MIN_FIELDS)
2205                 return false;
2206 
2207         est->size = pipapo_estimate_size(desc);
2208         if (!est->size)
2209                 return false;
2210 
2211         est->lookup = NFT_SET_CLASS_O_LOG_N;
2212 
2213         est->space = NFT_SET_CLASS_O_N;
2214 
2215         return true;
2216 }
2217 
2218 /**
2219  * nft_pipapo_init() - Initialise data for a set instance
2220  * @set:        nftables API set representation
2221  * @desc:       Set description
2222  * @nla:        netlink attributes
2223  *
2224  * Validate number and size of fields passed as NFTA_SET_DESC_CONCAT netlink
2225  * attributes, initialise internal set parameters, current instance of matching
2226  * data and a copy for subsequent insertions.
2227  *
2228  * Return: 0 on success, negative error code on failure.
2229  */
2230 static int nft_pipapo_init(const struct nft_set *set,
2231                            const struct nft_set_desc *desc,
2232                            const struct nlattr * const nla[])
2233 {
2234         struct nft_pipapo *priv = nft_set_priv(set);
2235         struct nft_pipapo_match *m;
2236         struct nft_pipapo_field *f;
2237         int err, i, field_count;
2238 
2239         BUILD_BUG_ON(offsetof(struct nft_pipapo_elem, priv) != 0);
2240 
2241         field_count = desc->field_count ? : 1;
2242 
2243         BUILD_BUG_ON(NFT_PIPAPO_MAX_FIELDS > 255);
2244         BUILD_BUG_ON(NFT_PIPAPO_MAX_FIELDS != NFT_REG32_COUNT);
2245 
2246         if (field_count > NFT_PIPAPO_MAX_FIELDS)
2247                 return -EINVAL;
2248 
2249         m = kmalloc(struct_size(m, f, field_count), GFP_KERNEL);
2250         if (!m)
2251                 return -ENOMEM;
2252 
2253         m->field_count = field_count;
2254         m->bsize_max = 0;
2255 
2256         m->scratch = alloc_percpu(struct nft_pipapo_scratch *);
2257         if (!m->scratch) {
2258                 err = -ENOMEM;
2259                 goto out_scratch;
2260         }
2261         for_each_possible_cpu(i)
2262                 *per_cpu_ptr(m->scratch, i) = NULL;
2263 
2264         rcu_head_init(&m->rcu);
2265 
2266         nft_pipapo_for_each_field(f, i, m) {
2267                 unsigned int len = desc->field_len[i] ? : set->klen;
2268 
2269                 /* f->groups is u8 */
2270                 BUILD_BUG_ON((NFT_PIPAPO_MAX_BYTES *
2271                               BITS_PER_BYTE / NFT_PIPAPO_GROUP_BITS_LARGE_SET) >= 256);
2272 
2273                 f->bb = NFT_PIPAPO_GROUP_BITS_INIT;
2274                 f->groups = len * NFT_PIPAPO_GROUPS_PER_BYTE(f);
2275 
2276                 priv->width += round_up(len, sizeof(u32));
2277 
2278                 f->bsize = 0;
2279                 f->rules = 0;
2280                 f->rules_alloc = 0;
2281                 f->lt = NULL;
2282                 f->mt = NULL;
2283         }
2284 
2285         rcu_assign_pointer(priv->match, m);
2286 
2287         return 0;
2288 
2289 out_scratch:
2290         kfree(m);
2291 
2292         return err;
2293 }
2294 
2295 /**
2296  * nft_set_pipapo_match_destroy() - Destroy elements from key mapping array
2297  * @ctx:        context
2298  * @set:        nftables API set representation
2299  * @m:          matching data pointing to key mapping array
2300  */
2301 static void nft_set_pipapo_match_destroy(const struct nft_ctx *ctx,
2302                                          const struct nft_set *set,
2303                                          struct nft_pipapo_match *m)
2304 {
2305         struct nft_pipapo_field *f;
2306         unsigned int i, r;
2307 
2308         for (i = 0, f = m->f; i < m->field_count - 1; i++, f++)
2309                 ;
2310 
2311         for (r = 0; r < f->rules; r++) {
2312                 struct nft_pipapo_elem *e;
2313 
2314                 if (r < f->rules - 1 && f->mt[r + 1].e == f->mt[r].e)
2315                         continue;
2316 
2317                 e = f->mt[r].e;
2318 
2319                 nf_tables_set_elem_destroy(ctx, set, &e->priv);
2320         }
2321 }
2322 
2323 /**
2324  * nft_pipapo_destroy() - Free private data for set and all committed elements
2325  * @ctx:        context
2326  * @set:        nftables API set representation
2327  */
2328 static void nft_pipapo_destroy(const struct nft_ctx *ctx,
2329                                const struct nft_set *set)
2330 {
2331         struct nft_pipapo *priv = nft_set_priv(set);
2332         struct nft_pipapo_match *m;
2333 
2334         m = rcu_dereference_protected(priv->match, true);
2335 
2336         if (priv->clone) {
2337                 nft_set_pipapo_match_destroy(ctx, set, priv->clone);
2338                 pipapo_free_match(priv->clone);
2339                 priv->clone = NULL;
2340         } else {
2341                 nft_set_pipapo_match_destroy(ctx, set, m);
2342         }
2343 
2344         pipapo_free_match(m);
2345 }
2346 
2347 /**
2348  * nft_pipapo_gc_init() - Initialise garbage collection
2349  * @set:        nftables API set representation
2350  *
2351  * Instead of actually setting up a periodic work for garbage collection, as
2352  * this operation requires a swap of matching data with the working copy, we'll
2353  * do that opportunistically with other commit operations if the interval is
2354  * elapsed, so we just need to set the current jiffies timestamp here.
2355  */
2356 static void nft_pipapo_gc_init(const struct nft_set *set)
2357 {
2358         struct nft_pipapo *priv = nft_set_priv(set);
2359 
2360         priv->last_gc = jiffies;
2361 }
2362 
2363 const struct nft_set_type nft_set_pipapo_type = {
2364         .features       = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT |
2365                           NFT_SET_TIMEOUT,
2366         .ops            = {
2367                 .lookup         = nft_pipapo_lookup,
2368                 .insert         = nft_pipapo_insert,
2369                 .activate       = nft_pipapo_activate,
2370                 .deactivate     = nft_pipapo_deactivate,
2371                 .flush          = nft_pipapo_flush,
2372                 .remove         = nft_pipapo_remove,
2373                 .walk           = nft_pipapo_walk,
2374                 .get            = nft_pipapo_get,
2375                 .privsize       = nft_pipapo_privsize,
2376                 .estimate       = nft_pipapo_estimate,
2377                 .init           = nft_pipapo_init,
2378                 .destroy        = nft_pipapo_destroy,
2379                 .gc_init        = nft_pipapo_gc_init,
2380                 .commit         = nft_pipapo_commit,
2381                 .abort          = nft_pipapo_abort,
2382                 .elemsize       = offsetof(struct nft_pipapo_elem, ext),
2383         },
2384 };
2385 
2386 #if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
2387 const struct nft_set_type nft_set_pipapo_avx2_type = {
2388         .features       = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT |
2389                           NFT_SET_TIMEOUT,
2390         .ops            = {
2391                 .lookup         = nft_pipapo_avx2_lookup,
2392                 .insert         = nft_pipapo_insert,
2393                 .activate       = nft_pipapo_activate,
2394                 .deactivate     = nft_pipapo_deactivate,
2395                 .flush          = nft_pipapo_flush,
2396                 .remove         = nft_pipapo_remove,
2397                 .walk           = nft_pipapo_walk,
2398                 .get            = nft_pipapo_get,
2399                 .privsize       = nft_pipapo_privsize,
2400                 .estimate       = nft_pipapo_avx2_estimate,
2401                 .init           = nft_pipapo_init,
2402                 .destroy        = nft_pipapo_destroy,
2403                 .gc_init        = nft_pipapo_gc_init,
2404                 .commit         = nft_pipapo_commit,
2405                 .abort          = nft_pipapo_abort,
2406                 .elemsize       = offsetof(struct nft_pipapo_elem, ext),
2407         },
2408 };
2409 #endif
2410 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php