Linux/tools/testing/selftests/bpf/prog_tests/ringbuf.c

// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <linux/compiler.h>
#include <asm/barrier.h>
#include <test_progs.h>
#include <sys/mman.h>
#include <sys/epoll.h>
#include <time.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/sysinfo.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>

#include "test_ringbuf.lskel.h"
#include "test_ringbuf_n.lskel.h"
#include "test_ringbuf_map_key.lskel.h"
#include "test_ringbuf_write.lskel.h"

#define EDONE 7777
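
/* EDONE is an arbitrary positive sentinel that doesn't collide with real
 * errno values: callbacks return -EDONE once the expected final sample has
 * been seen, and ring_buffer__poll() propagates a negative callback return
 * value straight back to the caller.
 */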

static int duration = 0;

struct sample {
        int pid;
        int seq;
        long value;
        char comm[16];
};

static int sample_cnt;

static void atomic_inc(int *cnt)
{
        __atomic_add_fetch(cnt, 1, __ATOMIC_SEQ_CST);
}

static int atomic_xchg(int *cnt, int val)
{
        return __atomic_exchange_n(cnt, val, __ATOMIC_SEQ_CST);
}
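
/* sample_cnt is shared between the main thread and the background poll
 * thread, so it is only ever accessed through the compiler's __atomic
 * builtins above; SEQ_CST ordering keeps the cross-thread assertions simple.
 */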

static int process_sample(void *ctx, void *data, size_t len)
{
        struct sample *s = data;

        atomic_inc(&sample_cnt);

        switch (s->seq) {
        case 0:
                CHECK(s->value != 333, "sample1_value", "exp %ld, got %ld\n",
                      333L, s->value);
                return 0;
        case 1:
                CHECK(s->value != 777, "sample2_value", "exp %ld, got %ld\n",
                      777L, s->value);
                return -EDONE;
        default:
                /* we don't care about the rest */
                return 0;
        }
}
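
/* Note that the data pointer handed to the callback points directly into the
 * ring buffer's memory-mapped data pages and is only valid for the duration
 * of the callback; a consumer that needs a sample later must copy it out.
 */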

static struct test_ringbuf_map_key_lskel *skel_map_key;
static struct test_ringbuf_lskel *skel;
static struct ring_buffer *ringbuf;

static void trigger_samples(void)
{
        skel->bss->dropped = 0;
        skel->bss->total = 0;
        skel->bss->discarded = 0;

        /* trigger exactly two samples */
        skel->bss->value = 333;
        syscall(__NR_getpgid);
        skel->bss->value = 777;
        syscall(__NR_getpgid);
}
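
/* The BPF program attaches to the getpgid() syscall and filters on our PID,
 * so each syscall(__NR_getpgid) above emits records carrying the current
 * skel->bss->value; per the counters checked below, one round produces two
 * submitted samples plus one discarded record.
 */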

static void *poll_thread(void *input)
{
        long timeout = (long)input;

        return (void *)(long)ring_buffer__poll(ringbuf, timeout);
}

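/* Check that a maliciously rewritten consumer position cannot trick the
 * kernel into handing out reservations that would overlap data it considers
 * already consumed: with cons_pos forced ahead of prod_pos, the expectation
 * is that both reservation attempts in the BPF program fail and are counted
 * as discarded.
 */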
static void ringbuf_write_subtest(void)
{
        struct test_ringbuf_write_lskel *skel;
        int page_size = getpagesize();
        size_t *mmap_ptr;
        int err, rb_fd;

        skel = test_ringbuf_write_lskel__open();
        if (!ASSERT_OK_PTR(skel, "skel_open"))
                return;

        skel->maps.ringbuf.max_entries = 0x4000;

        err = test_ringbuf_write_lskel__load(skel);
        if (!ASSERT_OK(err, "skel_load"))
                goto cleanup;

        rb_fd = skel->maps.ringbuf.map_fd;

        mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
        if (!ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos"))
                goto cleanup;
        /* corrupt the consumer position: move it past the (still zero)
         * producer position before any sample has been produced
         */
        *mmap_ptr = 0x3000;
        ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");

        skel->bss->pid = getpid();

        ringbuf = ring_buffer__new(rb_fd, process_sample, NULL, NULL);
        if (!ASSERT_OK_PTR(ringbuf, "ringbuf_new"))
                goto cleanup;

        err = test_ringbuf_write_lskel__attach(skel);
        if (!ASSERT_OK(err, "skel_attach"))
                goto cleanup_ringbuf;

        skel->bss->discarded = 0;
        skel->bss->passed = 0;

        /* trigger exactly two samples */
        syscall(__NR_getpgid);
        syscall(__NR_getpgid);

        ASSERT_EQ(skel->bss->discarded, 2, "discarded");
        ASSERT_EQ(skel->bss->passed, 0, "passed");

        test_ringbuf_write_lskel__detach(skel);
cleanup_ringbuf:
        ring_buffer__free(ringbuf);
cleanup:
        test_ringbuf_write_lskel__destroy(skel);
}

static void ringbuf_subtest(void)
{
        const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample);
        pthread_t thread;
        long bg_ret = -1;
        int err, cnt, rb_fd;
        int page_size = getpagesize();
        void *mmap_ptr, *tmp_ptr;
        struct ring *ring;
        int map_fd;
        unsigned long avail_data, ring_size, cons_pos, prod_pos;

        skel = test_ringbuf_lskel__open();
        if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
                return;

        skel->maps.ringbuf.max_entries = page_size;

        err = test_ringbuf_lskel__load(skel);
        if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
                goto cleanup;

        rb_fd = skel->maps.ringbuf.map_fd;
        /* good read/write cons_pos */
        mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
        ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos");
        tmp_ptr = mremap(mmap_ptr, page_size, 2 * page_size, MREMAP_MAYMOVE);
        if (!ASSERT_ERR_PTR(tmp_ptr, "rw_extend"))
                goto cleanup;
        ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect");
        ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");

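/* A BPF ringbuf map's mmap layout is: page 0 holds the consumer position
 * (the only page userspace may map writable), page 1 holds the kernel-owned
 * producer position, and the data pages start at offset 2 * page_size. The
 * checks below verify that mapping producer/data pages writable, or making
 * anything executable, is rejected.
 */
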
        /* bad writeable prod_pos */
        mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, page_size);
        err = -errno;
        ASSERT_ERR_PTR(mmap_ptr, "wr_prod_pos");
        ASSERT_EQ(err, -EPERM, "wr_prod_pos_err");

        /* bad writeable data pages */
        mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
        err = -errno;
        ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_one");
        ASSERT_EQ(err, -EPERM, "wr_data_page_one_err");
        mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 3 * page_size);
        ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_two");
        mmap_ptr = mmap(NULL, 2 * page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
        ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_all");

        /* good read-only pages */
        mmap_ptr = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED, rb_fd, 0);
        if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
                goto cleanup;

        ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_WRITE), "write_protect");
        ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_EXEC), "exec_protect");
        ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "ro_remap");
        ASSERT_OK(munmap(mmap_ptr, 4 * page_size), "unmap_ro");

        /* good read-only pages with initial offset */
        mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, page_size);
        if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
                goto cleanup;

        ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_protect");
        ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_protect");
        ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 3 * page_size, MREMAP_MAYMOVE), "ro_remap");
        ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro");

        /* only trigger BPF program for current process */
        skel->bss->pid = getpid();

        ringbuf = ring_buffer__new(skel->maps.ringbuf.map_fd,
                                   process_sample, NULL, NULL);
        if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))
                goto cleanup;

        err = test_ringbuf_lskel__attach(skel);
        if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err))
                goto cleanup;

        trigger_samples();

        ring = ring_buffer__ring(ringbuf, 0);
        if (!ASSERT_OK_PTR(ring, "ring_buffer__ring_idx_0"))
                goto cleanup;

        map_fd = ring__map_fd(ring);
        ASSERT_EQ(map_fd, skel->maps.ringbuf.map_fd, "ring_map_fd");

        /* 2 submitted + 1 discarded records */
        CHECK(skel->bss->avail_data != 3 * rec_sz,
              "err_avail_size", "exp %ld, got %ld\n",
              3L * rec_sz, skel->bss->avail_data);
        CHECK(skel->bss->ring_size != page_size,
              "err_ring_size", "exp %ld, got %ld\n",
              (long)page_size, skel->bss->ring_size);
        CHECK(skel->bss->cons_pos != 0,
              "err_cons_pos", "exp %ld, got %ld\n",
              0L, skel->bss->cons_pos);
        CHECK(skel->bss->prod_pos != 3 * rec_sz,
              "err_prod_pos", "exp %ld, got %ld\n",
              3L * rec_sz, skel->bss->prod_pos);

        /* verify getting this data directly via the ring object yields the same
         * results
         */
        avail_data = ring__avail_data_size(ring);
        ASSERT_EQ(avail_data, 3 * rec_sz, "ring_avail_size");
        ring_size = ring__size(ring);
        ASSERT_EQ(ring_size, page_size, "ring_ring_size");
        cons_pos = ring__consumer_pos(ring);
        ASSERT_EQ(cons_pos, 0, "ring_cons_pos");
        prod_pos = ring__producer_pos(ring);
        ASSERT_EQ(prod_pos, 3 * rec_sz, "ring_prod_pos");
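
        /* The expected values follow directly from position arithmetic:
         * discarded records still occupy ring space (header included), so
         * until the consumer advances, available data is
         * prod_pos - cons_pos = 3 * rec_sz.
         */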

        /* poll for samples */
        err = ring_buffer__poll(ringbuf, -1);

        /* -EDONE is used as an indicator that we are done */
        if (CHECK(err != -EDONE, "err_done", "done err: %d\n", err))
                goto cleanup;
        cnt = atomic_xchg(&sample_cnt, 0);
        CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt);

        /* we expect extra polling to return nothing */
        err = ring_buffer__poll(ringbuf, 0);
        if (CHECK(err != 0, "extra_samples", "poll result: %d\n", err))
                goto cleanup;
        cnt = atomic_xchg(&sample_cnt, 0);
        CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

        CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
              0L, skel->bss->dropped);
        CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
              2L, skel->bss->total);
        CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
              1L, skel->bss->discarded);

        /* now validate consumer position is updated and returned */
        trigger_samples();
        CHECK(skel->bss->cons_pos != 3 * rec_sz,
              "err_cons_pos", "exp %ld, got %ld\n",
              3L * rec_sz, skel->bss->cons_pos);
        err = ring_buffer__poll(ringbuf, -1);
        CHECK(err <= 0, "poll_err", "err %d\n", err);
        cnt = atomic_xchg(&sample_cnt, 0);
        CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt);

        /* start poll in background w/ long timeout */
        err = pthread_create(&thread, NULL, poll_thread, (void *)(long)10000);
        if (CHECK(err, "bg_poll", "pthread_create failed: %d\n", err))
                goto cleanup;

        /* turn off notifications now */
        skel->bss->flags = BPF_RB_NO_WAKEUP;

        /* give the background thread a bit of time */
        usleep(50000);
        trigger_samples();
        /* sleeping arbitrarily is bad, but there is no better way to know
         * that epoll_wait() **DID NOT** unblock in the background thread
         */
        usleep(50000);
        /* background poll should still be blocked */
        err = pthread_tryjoin_np(thread, (void **)&bg_ret);
        if (CHECK(err != EBUSY, "try_join", "err %d\n", err))
                goto cleanup;

        /* BPF side did everything right */
        CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
              0L, skel->bss->dropped);
        CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
              2L, skel->bss->total);
        CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
              1L, skel->bss->discarded);
        cnt = atomic_xchg(&sample_cnt, 0);
        CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);
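
        /* Notification modes recap: by default the kernel sends epoll
         * notifications adaptively, only when the consumer appears to have
         * caught up; BPF_RB_NO_WAKEUP suppresses the wakeup for a commit,
         * while BPF_RB_FORCE_WAKEUP (used below) sends one unconditionally.
         */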

        /* clear flags to return to "adaptive" notification mode */
        skel->bss->flags = 0;

        /* produce new samples, no notification should be triggered, because
         * consumer is now behind
         */
        trigger_samples();

        /* background poll should still be blocked */
        err = pthread_tryjoin_np(thread, (void **)&bg_ret);
        if (CHECK(err != EBUSY, "try_join", "err %d\n", err))
                goto cleanup;

        /* still no samples, because consumer is behind */
        cnt = atomic_xchg(&sample_cnt, 0);
        CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

        skel->bss->dropped = 0;
        skel->bss->total = 0;
        skel->bss->discarded = 0;

        skel->bss->value = 333;
        syscall(__NR_getpgid);
        /* now force notifications */
        skel->bss->flags = BPF_RB_FORCE_WAKEUP;
        skel->bss->value = 777;
        syscall(__NR_getpgid);

        /* now we should get a pending notification */
        usleep(50000);
        err = pthread_tryjoin_np(thread, (void **)&bg_ret);
        if (CHECK(err, "join_bg", "err %d\n", err))
                goto cleanup;

        if (CHECK(bg_ret <= 0, "bg_ret", "epoll_wait result: %ld\n", bg_ret))
                goto cleanup;

        /* due to timing variations, there could still be non-notified
         * samples, so consume them here to collect all the samples
         */
        err = ring_buffer__consume(ringbuf);
        CHECK(err < 0, "rb_consume", "failed: %d\n", err);

        /* also consume using ring__consume to make sure it works the same */
        err = ring__consume(ring);
        ASSERT_GE(err, 0, "ring_consume");

        /* 3 rounds, 2 samples each */
        cnt = atomic_xchg(&sample_cnt, 0);
        CHECK(cnt != 6, "cnt", "exp %d samples, got %d\n", 6, cnt);

        /* BPF side did everything right */
        CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
              0L, skel->bss->dropped);
        CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
              2L, skel->bss->total);
        CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
              1L, skel->bss->discarded);

        test_ringbuf_lskel__detach(skel);
cleanup:
        ring_buffer__free(ringbuf);
        test_ringbuf_lskel__destroy(skel);
}

/*
 * Test ring_buffer__consume_n() by producing N_TOT_SAMPLES samples in the ring
 * buffer, via getpgid(), and consuming them in chunks of N_SAMPLES.
 */
#define N_TOT_SAMPLES   32
#define N_SAMPLES       4

/* Sample value to verify the callback validity */
#define SAMPLE_VALUE    42L

static int process_n_sample(void *ctx, void *data, size_t len)
{
        struct sample *s = data;

        ASSERT_EQ(s->value, SAMPLE_VALUE, "sample_value");

        return 0;
}

static void ringbuf_n_subtest(void)
{
        struct test_ringbuf_n_lskel *skel_n;
        int err, i;

        skel_n = test_ringbuf_n_lskel__open();
        if (!ASSERT_OK_PTR(skel_n, "test_ringbuf_n_lskel__open"))
                return;

        skel_n->maps.ringbuf.max_entries = getpagesize();
        skel_n->bss->pid = getpid();

        err = test_ringbuf_n_lskel__load(skel_n);
        if (!ASSERT_OK(err, "test_ringbuf_n_lskel__load"))
                goto cleanup;

        ringbuf = ring_buffer__new(skel_n->maps.ringbuf.map_fd,
                                   process_n_sample, NULL, NULL);
        if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
                goto cleanup;

        err = test_ringbuf_n_lskel__attach(skel_n);
        if (!ASSERT_OK(err, "test_ringbuf_n_lskel__attach"))
                goto cleanup_ringbuf;

        /* Produce N_TOT_SAMPLES samples in the ring buffer by calling getpgid() */
        skel_n->bss->value = SAMPLE_VALUE;
        for (i = 0; i < N_TOT_SAMPLES; i++)
                syscall(__NR_getpgid);

        /* Consume all samples from the ring buffer in batches of N_SAMPLES;
         * ring_buffer__consume_n() returns the number of records consumed,
         * which must be exactly N_SAMPLES on every iteration here
         */
        for (i = 0; i < N_TOT_SAMPLES; i += err) {
                err = ring_buffer__consume_n(ringbuf, N_SAMPLES);
                if (!ASSERT_EQ(err, N_SAMPLES, "rb_consume"))
                        goto cleanup_ringbuf;
        }

cleanup_ringbuf:
        ring_buffer__free(ringbuf);
cleanup:
        test_ringbuf_n_lskel__destroy(skel_n);
}

static int process_map_key_sample(void *ctx, void *data, size_t len)
{
        struct sample *s;
        int err, val;

        s = data;
        switch (s->seq) {
        case 1:
                ASSERT_EQ(s->value, 42, "sample_value");
                err = bpf_map_lookup_elem(skel_map_key->maps.hash_map.map_fd,
                                          s, &val);
                ASSERT_OK(err, "hash_map bpf_map_lookup_elem");
                ASSERT_EQ(val, 1, "hash_map val");
                return -EDONE;
        default:
                return 0;
        }
}
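
/* The map_key BPF program uses memory reserved in the ring buffer directly
 * as the key for a hash map update; looking the same sample up here from the
 * consumer callback verifies that such a key round-trips intact.
 */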

static void ringbuf_map_key_subtest(void)
{
        int err;

        skel_map_key = test_ringbuf_map_key_lskel__open();
        if (!ASSERT_OK_PTR(skel_map_key, "test_ringbuf_map_key_lskel__open"))
                return;

        skel_map_key->maps.ringbuf.max_entries = getpagesize();
        skel_map_key->bss->pid = getpid();

        err = test_ringbuf_map_key_lskel__load(skel_map_key);
        if (!ASSERT_OK(err, "test_ringbuf_map_key_lskel__load"))
                goto cleanup;

        ringbuf = ring_buffer__new(skel_map_key->maps.ringbuf.map_fd,
                                   process_map_key_sample, NULL, NULL);
        if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
                goto cleanup;

        err = test_ringbuf_map_key_lskel__attach(skel_map_key);
        if (!ASSERT_OK(err, "test_ringbuf_map_key_lskel__attach"))
                goto cleanup_ringbuf;

        syscall(__NR_getpgid);
        ASSERT_EQ(skel_map_key->bss->seq, 1, "skel_map_key->bss->seq");
        err = ring_buffer__poll(ringbuf, -1);
        ASSERT_EQ(err, -EDONE, "ring_buffer__poll");

cleanup_ringbuf:
        ring_buffer__free(ringbuf);
cleanup:
        test_ringbuf_map_key_lskel__destroy(skel_map_key);
}

void test_ringbuf(void)
{
        if (test__start_subtest("ringbuf"))
                ringbuf_subtest();
        if (test__start_subtest("ringbuf_n"))
                ringbuf_n_subtest();
        if (test__start_subtest("ringbuf_map_key"))
                ringbuf_map_key_subtest();
        if (test__start_subtest("ringbuf_write"))
                ringbuf_write_subtest();
}
