TOMOYO Linux Cross Reference
Linux/fs/bcachefs/eytzinger.c

// SPDX-License-Identifier: GPL-2.0

#include "eytzinger.h"

/**
 * is_aligned - is this pointer & size okay for word-wide copying?
 * @base: pointer to data
 * @size: size of each element
 * @align: required alignment (typically 4 or 8)
 *
 * Returns true if elements can be copied using word loads and stores.
 * The size must be a multiple of the alignment, and the base address must
 * be aligned as well unless CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set.
 *
 * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
 * to "if ((a | b) & mask)", so we do that by hand.
 */
__attribute_const__ __always_inline
static bool is_aligned(const void *base, size_t size, unsigned char align)
{
        unsigned char lsbits = (unsigned char)size;

        (void)base;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        lsbits |= (unsigned char)(uintptr_t)base;
#endif
        return (lsbits & (align - 1)) == 0;
}
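
/*
 * For illustration: with align == 8, a 24-byte element at an 8-byte-aligned
 * base passes and the 64-bit swap below can be used; a 20-byte element
 * fails the size check for 8 but still passes for align == 4, so the sort
 * falls back to 32-bit swaps, and anything else to byte-at-a-time swaps.
 */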

/**
 * swap_words_32 - swap two elements in 32-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 4)
 *
 * Exchange the two objects in memory.  This exploits base+index addressing,
 * which basically all CPUs have, to minimize loop overhead computations.
 *
 * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
 * bottom of the loop, even though the zero flag is still valid from the
 * subtract (since the intervening mov instructions don't alter the flags).
 * Gcc 8.1.0 doesn't have that problem.
 */
static void swap_words_32(void *a, void *b, size_t n)
{
        do {
                u32 t = *(u32 *)(a + (n -= 4));
                *(u32 *)(a + n) = *(u32 *)(b + n);
                *(u32 *)(b + n) = t;
        } while (n);
}

/**
 * swap_words_64 - swap two elements in 64-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 8)
 *
 * Exchange the two objects in memory.  This exploits base+index
 * addressing, which basically all CPUs have, to minimize loop overhead
 * computations.
 *
 * We'd like to use 64-bit loads if possible.  If they're not, emulating
 * one requires base+index+4 addressing which x86 has but most other
 * processors do not.  If CONFIG_64BIT, we definitely have 64-bit loads,
 * but it's possible to have 64-bit loads without 64-bit pointers (e.g.
 * x32 ABI).  Are there any cases the kernel needs to worry about?
 */
static void swap_words_64(void *a, void *b, size_t n)
{
        do {
#ifdef CONFIG_64BIT
                u64 t = *(u64 *)(a + (n -= 8));
                *(u64 *)(a + n) = *(u64 *)(b + n);
                *(u64 *)(b + n) = t;
#else
                /* Use two 32-bit transfers to avoid base+index+4 addressing */
                u32 t = *(u32 *)(a + (n -= 4));
                *(u32 *)(a + n) = *(u32 *)(b + n);
                *(u32 *)(b + n) = t;

                t = *(u32 *)(a + (n -= 4));
                *(u32 *)(a + n) = *(u32 *)(b + n);
                *(u32 *)(b + n) = t;
#endif
        } while (n);
}

/**
 * swap_bytes - swap two elements a byte at a time
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size
 *
 * This is the fallback if alignment doesn't allow using larger chunks.
 */
static void swap_bytes(void *a, void *b, size_t n)
{
        do {
                char t = ((char *)a)[--n];
                ((char *)a)[n] = ((char *)b)[n];
                ((char *)b)[n] = t;
        } while (n);
}

/*
 * The values are arbitrary as long as they can't be confused with
 * a pointer, but small integers make for the smallest compare
 * instructions.
 */
#define SWAP_WORDS_64 (swap_r_func_t)0
#define SWAP_WORDS_32 (swap_r_func_t)1
#define SWAP_BYTES    (swap_r_func_t)2
#define SWAP_WRAPPER  (swap_r_func_t)3

struct wrapper {
        cmp_func_t cmp;
        swap_func_t swap_func;
};
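
/*
 * Note: when SWAP_WRAPPER (and the matching _CMP_WRAPPER below) is used,
 * priv does not carry caller data; it points to a struct wrapper holding
 * the plain, non-reentrant cmp_func_t/swap_func_t callbacks that
 * eytzinger0_sort() was given.
 */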

/*
 * The function pointer is last to make tail calls most efficient if the
 * compiler decides not to inline this function.
 */
static void do_swap(void *a, void *b, size_t size, swap_r_func_t swap_func, const void *priv)
{
        if (swap_func == SWAP_WRAPPER) {
                ((const struct wrapper *)priv)->swap_func(a, b, (int)size);
                return;
        }

        if (swap_func == SWAP_WORDS_64)
                swap_words_64(a, b, size);
        else if (swap_func == SWAP_WORDS_32)
                swap_words_32(a, b, size);
        else if (swap_func == SWAP_BYTES)
                swap_bytes(a, b, size);
        else
                swap_func(a, b, (int)size, priv);
}

#define _CMP_WRAPPER ((cmp_r_func_t)0L)

static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
{
        if (cmp == _CMP_WRAPPER)
                return ((const struct wrapper *)priv)->cmp(a, b);
        return cmp(a, b, priv);
}

static inline int eytzinger0_do_cmp(void *base, size_t n, size_t size,
                         cmp_r_func_t cmp_func, const void *priv,
                         size_t l, size_t r)
{
        return do_cmp(base + inorder_to_eytzinger0(l, n) * size,
                      base + inorder_to_eytzinger0(r, n) * size,
                      cmp_func, priv);
}

static inline void eytzinger0_do_swap(void *base, size_t n, size_t size,
                           swap_r_func_t swap_func, const void *priv,
                           size_t l, size_t r)
{
        do_swap(base + inorder_to_eytzinger0(l, n) * size,
                base + inorder_to_eytzinger0(r, n) * size,
                size, swap_func, priv);
}
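
/*
 * For illustration: l and r above are in-order ranks; inorder_to_eytzinger0()
 * maps each rank to its slot in the breadth-first (eytzinger) layout, where
 * slot i has children at 2*i + 1 and 2*i + 2.  For a full tree of n == 7,
 * ranks 0..6 land at slots 3, 1, 4, 0, 5, 2, 6: the median rank 3 becomes
 * the root at slot 0.
 */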

void eytzinger0_sort_r(void *base, size_t n, size_t size,
                       cmp_r_func_t cmp_func,
                       swap_r_func_t swap_func,
                       const void *priv)
{
        int i, j, k;

        /* called from 'sort' without swap function, let's pick the default */
        if (swap_func == SWAP_WRAPPER && !((struct wrapper *)priv)->swap_func)
                swap_func = NULL;

        if (!swap_func) {
                if (is_aligned(base, size, 8))
                        swap_func = SWAP_WORDS_64;
                else if (is_aligned(base, size, 4))
                        swap_func = SWAP_WORDS_32;
                else
                        swap_func = SWAP_BYTES;
        }

        /* heapify */
        for (i = n / 2 - 1; i >= 0; --i) {
                /* Find the sift-down path all the way to the leaves. */
                for (j = i; k = j * 2 + 1, k + 1 < n;)
                        j = eytzinger0_do_cmp(base, n, size, cmp_func, priv, k, k + 1) > 0 ? k : k + 1;

                /* Special case for the last leaf with no sibling. */
                if (j * 2 + 2 == n)
                        j = j * 2 + 1;

                /* Backtrack to the correct location. */
                while (j != i && eytzinger0_do_cmp(base, n, size, cmp_func, priv, i, j) >= 0)
                        j = (j - 1) / 2;

                /* Shift the element into its correct place. */
                for (k = j; j != i;) {
                        j = (j - 1) / 2;
                        eytzinger0_do_swap(base, n, size, swap_func, priv, j, k);
                }
        }

        /* sort */
        for (i = n - 1; i > 0; --i) {
                eytzinger0_do_swap(base, n, size, swap_func, priv, 0, i);

                /* Find the sift-down path all the way to the leaves. */
                for (j = 0; k = j * 2 + 1, k + 1 < i;)
                        j = eytzinger0_do_cmp(base, n, size, cmp_func, priv, k, k + 1) > 0 ? k : k + 1;

                /* Special case for the last leaf with no sibling. */
                if (j * 2 + 2 == i)
                        j = j * 2 + 1;

                /* Backtrack to the correct location. */
                while (j && eytzinger0_do_cmp(base, n, size, cmp_func, priv, 0, j) >= 0)
                        j = (j - 1) / 2;

                /* Shift the element into its correct place. */
                for (k = j; j;) {
                        j = (j - 1) / 2;
                        eytzinger0_do_swap(base, n, size, swap_func, priv, j, k);
                }
        }
}
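
/*
 * Note: the function above is an ordinary heapsort over in-order ranks
 * 0..n-1; each rank is translated to its physical slot by
 * inorder_to_eytzinger0() only when comparing or swapping.  On return the
 * array is therefore sorted with respect to in-order traversal, so
 * eytzinger0_for_each() (as used by the test below) visits the elements
 * in ascending order.
 */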

void eytzinger0_sort(void *base, size_t n, size_t size,
                     cmp_func_t cmp_func,
                     swap_func_t swap_func)
{
        struct wrapper w = {
                .cmp  = cmp_func,
                .swap_func = swap_func,
        };

        return eytzinger0_sort_r(base, n, size, _CMP_WRAPPER, SWAP_WRAPPER, &w);
}
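
/*
 * Minimal usage sketch (illustrative; the comparator and helper below are
 * hypothetical, mirroring the disabled test at the end of this file): the
 * reentrant entry point takes a priv pointer, here used to select ascending
 * vs. descending order.
 */
#if 0
static int cmp_u32_dir(const void *a, const void *b, const void *priv)
{
        u32 _a = *(const u32 *)a;
        u32 _b = *(const u32 *)b;
        int r = _a < _b ? -1 : _a > _b ? 1 : 0;

        return *(const bool *)priv ? -r : r;
}

static void sort_u32_example(u32 *arr, size_t n, bool descending)
{
        /* NULL swap_func: eytzinger0_sort_r() picks a default via is_aligned() */
        eytzinger0_sort_r(arr, n, sizeof(*arr), cmp_u32_dir, NULL, &descending);
}
#endif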

#if 0
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/ktime.h>

static u64 cmp_count;

static int mycmp(const void *a, const void *b)
{
        u32 _a = *(u32 *)a;
        u32 _b = *(u32 *)b;

        cmp_count++;
        if (_a < _b)
                return -1;
        else if (_a > _b)
                return 1;
        else
                return 0;
}

static int test(void)
{
        size_t N, i;
        ktime_t start, end;
        s64 delta;
        u32 *arr;

        for (N = 10000; N <= 100000; N += 10000) {
                arr = kmalloc_array(N, sizeof(u32), GFP_KERNEL);
                if (!arr)
                        return -ENOMEM;
                cmp_count = 0;

                for (i = 0; i < N; i++)
                        arr[i] = get_random_u32();

                start = ktime_get();
                eytzinger0_sort(arr, N, sizeof(u32), mycmp, NULL);
                end = ktime_get();

                delta = ktime_us_delta(end, start);
                printk(KERN_INFO "time: %lld\n", delta);
                printk(KERN_INFO "comparisons: %llu\n", cmp_count);

                /* verify that in-order traversal yields ascending values */
                u32 prev = 0;

                eytzinger0_for_each(i, N) {
                        if (prev > arr[i])
                                goto err;
                        prev = arr[i];
                }

                kfree(arr);
        }
        return 0;

err:
        kfree(arr);
        return -1;
}
#endif
