
TOMOYO Linux Cross Reference
Linux/arch/arm64/kernel/cacheinfo.c

Diff markup

Differences between /arch/arm64/kernel/cacheinfo.c (Architecture arm64) and /arch/mips/kernel/cacheinfo.c (Architecture mips)


--- /arch/arm64/kernel/cacheinfo.c
+++ /arch/mips/kernel/cacheinfo.c
@@ -1,117 +1,116 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- *  ARM64 cacheinfo support
- *
- *  Copyright (C) 2015 ARM Ltd.
- *  All Rights Reserved
+ * MIPS cacheinfo support
  */
-
-#include <linux/acpi.h>
 #include <linux/cacheinfo.h>
-#include <linux/of.h>
 
-#define MAX_CACHE_LEVEL                 7       /* Max 7 level supported */
+/* Populates leaf and increments to next leaf */
+#define populate_cache(cache, leaf, c_level, c_type)            \
+do {                                                            \
+        leaf->type = c_type;                                    \
+        leaf->level = c_level;                                  \
+        leaf->coherency_line_size = c->cache.linesz;            \
+        leaf->number_of_sets = c->cache.sets;                   \
+        leaf->ways_of_associativity = c->cache.ways;            \
+        leaf->size = c->cache.linesz * c->cache.sets *          \
+                c->cache.ways;                                  \
+        leaf++;                                                 \
+} while (0)
 
-int cache_line_size(void)
+int init_cache_level(unsigned int cpu)
 {
-        if (coherency_max_size != 0)
-                return coherency_max_size;
+        struct cpuinfo_mips *c = &current_cpu_data;
+        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+        int levels = 0, leaves = 0;
 
-        return cache_line_size_of_cpu();
-}
-EXPORT_SYMBOL_GPL(cache_line_size);
+        /*
+         * If Dcache is not set, we assume the cache structures
+         * are not properly initialized.
+         */
+        if (c->dcache.waysize)
+                levels += 1;
+        else
+                return -ENOENT;
+
+
+        leaves += (c->icache.waysize) ? 2 : 1;
+
+        if (c->vcache.waysize) {
+                levels++;
+                leaves++;
+        }
 
-static inline enum cache_type get_cache_type(int level)
-{
-        u64 clidr;
+        if (c->scache.waysize) {
+                levels++;
+                leaves++;
+        }
 
-        if (level > MAX_CACHE_LEVEL)
-                return CACHE_TYPE_NOCACHE;
-        clidr = read_sysreg(clidr_el1);
-        return CLIDR_CTYPE(clidr, level);
-}
+        if (c->tcache.waysize) {
+                levels++;
+                leaves++;
+        }
 
-static void ci_leaf_init(struct cacheinfo *this_leaf,
-                         enum cache_type type, unsigned int level)
-{
-        this_leaf->level = level;
-        this_leaf->type = type;
+        this_cpu_ci->num_levels = levels;
+        this_cpu_ci->num_leaves = leaves;
+        return 0;
 }
 
-static void detect_cache_level(unsigned int *level_p, unsigned int *leaves_p)
+static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
 {
-        unsigned int ctype, level, leaves;
-
-        for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) {
-                ctype = get_cache_type(level);
-                if (ctype == CACHE_TYPE_NOCACHE) {
-                        level--;
-                        break;
-                }
-                /* Separate instruction and data caches */
-                leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
-        }
+        int cpu1;
 
-        *level_p = level;
-        *leaves_p = leaves;
+        for_each_possible_cpu(cpu1)
+                if (cpus_are_siblings(cpu, cpu1))
+                        cpumask_set_cpu(cpu1, cpu_map);
 }
 
-int early_cache_level(unsigned int cpu)
+static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
 {
-        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+        int cpu1;
+        int cluster = cpu_cluster(&cpu_data[cpu]);
 
-        detect_cache_level(&this_cpu_ci->num_levels, &this_cpu_ci->num_leaves);
-
-        return 0;
+        for_each_possible_cpu(cpu1)
+                if (cpu_cluster(&cpu_data[cpu1]) == cluster)
+                        cpumask_set_cpu(cpu1, cpu_map);
 }
 
-int init_cache_level(unsigned int cpu)
+int populate_cache_leaves(unsigned int cpu)
 {
-        unsigned int level, leaves;
-        int fw_level, ret;
+        struct cpuinfo_mips *c = &current_cpu_data;
         struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+        struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+        int level = 1;
 
-        detect_cache_level(&level, &leaves);
-
-        if (acpi_disabled) {
-                fw_level = of_find_last_cache_level(cpu);
+        if (c->icache.waysize) {
+                /* I/D caches are per core */
+                fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
+                populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA);
+                fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
+                populate_cache(icache, this_leaf, level, CACHE_TYPE_INST);
+                level++;
         } else {
-                ret = acpi_get_cache_info(cpu, &fw_level, NULL);
-                if (ret < 0)
-                        fw_level = 0;
-        }
-
-        if (level < fw_level) {
-                /*
-                 * some external caches not specified in CLIDR_EL1
-                 * the information may be available in the device tree
-                 * only unified external caches are considered here
-                 */
-                leaves += (fw_level - level);
-                level = fw_level;
+                populate_cache(dcache, this_leaf, level, CACHE_TYPE_UNIFIED);
+                level++;
         }
 
-        this_cpu_ci->num_levels = level;
-        this_cpu_ci->num_leaves = leaves;
-        return 0;
-}
-
-int populate_cache_leaves(unsigned int cpu)
-{
-        unsigned int level, idx;
-        enum cache_type type;
-        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
-        struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+        if (c->vcache.waysize) {
+                /* Vcache is per core as well */
+                fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
+                populate_cache(vcache, this_leaf, level, CACHE_TYPE_UNIFIED);
+                level++;
+        }
 
-        for (idx = 0, level = 1; level <= this_cpu_ci->num_levels &&
-             idx < this_cpu_ci->num_leaves; idx++, level++) {
-                type = get_cache_type(level);
-                if (type == CACHE_TYPE_SEPARATE) {
-                        ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
-                        ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
-                } else {
-                        ci_leaf_init(this_leaf++, type, level);
-                }
+        if (c->scache.waysize) {
+                /* Scache is per cluster */
+                fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
+                populate_cache(scache, this_leaf, level, CACHE_TYPE_UNIFIED);
+                level++;
         }
+
+        if (c->tcache.waysize)
+                populate_cache(tcache, this_leaf, level, CACHE_TYPE_UNIFIED);
+
+        this_cpu_ci->cpu_map_populated = true;
+
         return 0;
 }
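Both files implement the same arch-side cacheinfo hooks, init_cache_level() and populate_cache_leaves(); the struct cacheinfo fields they fill in (level, type, line size, sets, ways, size, shared_cpu_map) are what the generic cacheinfo driver exports under /sys/devices/system/cpu/cpuN/cache/indexM/. As a rough illustration of where those values end up, here is a minimal userspace sketch, not part of either kernel file: it walks cpu0's cache leaves through that sysfs interface. The read_attr() helper and the bare-bones error handling are assumptions made for brevity; the sysfs paths and attribute names follow the stable cpu cache ABI.

/*
 * Hypothetical example (not part of the kernel sources above): read back
 * the cache leaves that populate_cache_leaves() exposes through sysfs.
 */
#include <stdio.h>
#include <string.h>

static int read_attr(const char *dir, const char *attr, char *buf, size_t len)
{
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), "%s/%s", dir, attr);
        f = fopen(path, "r");
        if (!f)
                return -1;
        if (!fgets(buf, len, f)) {
                fclose(f);
                return -1;
        }
        fclose(f);
        buf[strcspn(buf, "\n")] = '\0';         /* strip trailing newline */
        return 0;
}

int main(void)
{
        char dir[64], level[16], type[32], size[32];
        int idx;

        /* Stop at the first missing indexN directory. */
        for (idx = 0; ; idx++) {
                snprintf(dir, sizeof(dir),
                         "/sys/devices/system/cpu/cpu0/cache/index%d", idx);
                if (read_attr(dir, "level", level, sizeof(level)) ||
                    read_attr(dir, "type", type, sizeof(type)))
                        break;
                if (read_attr(dir, "size", size, sizeof(size)))
                        strcpy(size, "unknown");        /* size is optional */
                printf("index%d: L%s %s cache, size %s\n",
                       idx, level, type, size);
        }
        return 0;
}

On a typical arm64 machine this prints one line per leaf, e.g. L1 Data, L1 Instruction and L2 Unified entries, mirroring the leaves populated above; the size attribute may be absent when firmware or the device tree does not describe it, which is why the sketch treats it as optional.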


