TOMOYO Linux Cross Reference
Linux/block/blk-mq-cpumap.c

Diff markup

Differences between /block/blk-mq-cpumap.c (Version linux-6.11.5) and /block/blk-mq-cpumap.c (Version linux-4.4.302)


/block/blk-mq-cpumap.c, version linux-6.11.5:

// SPDX-License-Identifier: GPL-2.0
/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/group_cpus.h>

#include "blk.h"
#include "blk-mq.h"

void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{
        const struct cpumask *masks;
        unsigned int queue, cpu;

        masks = group_cpus_evenly(qmap->nr_queues);
        if (!masks) {
                for_each_possible_cpu(cpu)
                        qmap->mq_map[cpu] = qmap->queue_offset;
                return;
        }

        for (queue = 0; queue < qmap->nr_queues; queue++) {
                for_each_cpu(cpu, &masks[queue])
                        qmap->mq_map[cpu] = qmap->queue_offset + queue;
        }
        kfree(masks);
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);

/**
 * blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index
 * @qmap: CPU to hardware queue map.
 * @index: hardware queue index.
 *
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 */
int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
{
        int i;

        for_each_possible_cpu(i) {
                if (index == qmap->mq_map[i])
                        return cpu_to_node(i);
        }

        return NUMA_NO_NODE;
}

/block/blk-mq-cpumap.c, version linux-4.4.302:

/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"

static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
                              const int cpu)
{
        return cpu * nr_queues / nr_cpus;
}

static int get_first_sibling(unsigned int cpu)
{
        unsigned int ret;

        ret = cpumask_first(topology_sibling_cpumask(cpu));
        if (ret < nr_cpu_ids)
                return ret;

        return cpu;
}

int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
                            const struct cpumask *online_mask)
{
        unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
        cpumask_var_t cpus;

        if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
                return 1;

        cpumask_clear(cpus);
        nr_cpus = nr_uniq_cpus = 0;
        for_each_cpu(i, online_mask) {
                nr_cpus++;
                first_sibling = get_first_sibling(i);
                if (!cpumask_test_cpu(first_sibling, cpus))
                        nr_uniq_cpus++;
                cpumask_set_cpu(i, cpus);
        }

        queue = 0;
        for_each_possible_cpu(i) {
                if (!cpumask_test_cpu(i, online_mask)) {
                        map[i] = 0;
                        continue;
                }

                /*
                 * Easy case - we have equal or more hardware queues. Or
                 * there are no thread siblings to take into account. Do
                 * 1:1 if enough, or sequential mapping if less.
                 */
                if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) {
                        map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue);
                        queue++;
                        continue;
                }

                /*
                 * Less than nr_cpus queues, and we have some number of
                 * threads per core. Map sibling threads to the same
                 * queue.
                 */
                first_sibling = get_first_sibling(i);
                if (first_sibling == i) {
                        map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues,
                                                        queue);
                        queue++;
                } else
                        map[i] = map[first_sibling];
        }

        free_cpumask_var(cpus);
        return 0;
}

unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
{
        unsigned int *map;

        /* If cpus are offline, map them to first hctx */
        map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL,
                                set->numa_node);
        if (!map)
                return NULL;

        if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask))
                return map;

        kfree(map);
        return NULL;
}

/*
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 */
int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
{
        int i;

        for_each_possible_cpu(i) {
                if (index == mq_map[i])
                        return cpu_to_node(i);
        }

        return NUMA_NO_NODE;
}
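The core change between the two versions is how the CPU-to-queue spread is computed: linux-4.4 builds the map by hand, placing the i-th mapped CPU on queue i * nr_queues / nr_cpus and collapsing SMT siblings onto the same queue, while linux-6.11 delegates the grouping to group_cpus_evenly() and only falls back to pointing every CPU at queue_offset when that allocation fails. The following stand-alone user-space sketch illustrates the old spreading rule only; the CPU and queue counts (8 CPUs, 3 queues) are chosen for illustration and are not taken from the kernel.

/*
 * User-space sketch (not kernel code) of the linux-4.4 spreading rule
 * cpu_to_queue_index(): the i-th mapped CPU lands on queue
 * i * nr_queues / nr_cpus, distributing CPUs evenly across queues.
 */
#include <stdio.h>

static unsigned int cpu_to_queue_index(unsigned int nr_cpus,
                                       unsigned int nr_queues,
                                       unsigned int cpu)
{
        return cpu * nr_queues / nr_cpus;       /* same arithmetic as linux-4.4 */
}

int main(void)
{
        unsigned int nr_cpus = 8, nr_queues = 3, cpu;

        /* Hypothetical topology: 8 online CPUs, 3 hardware queues. */
        for (cpu = 0; cpu < nr_cpus; cpu++)
                printf("cpu %u -> hw queue %u\n",
                       cpu, cpu_to_queue_index(nr_cpus, nr_queues, cpu));
        /* Prints queues 0,0,0,1,1,1,2,2: roughly nr_cpus/nr_queues CPUs per queue. */
        return 0;
}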
