// SPDX-License-Identifier: GPL-2.0
/*
 * acpi.c - Architecture-Specific Low-Level ACPI Boot Support
 *
 * Author: Jianmin Lv <lvjianmin@loongson.cn>
 *         Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/serial_core.h>
#include <asm/io.h>
#include <asm/numa.h>
#include <asm/loongson.h>

int acpi_disabled;
EXPORT_SYMBOL(acpi_disabled);
int acpi_noirq;
int acpi_pci_disabled;
EXPORT_SYMBOL(acpi_pci_disabled);
int acpi_strict = 1; /* We have no workarounds on LoongArch */
int num_processors;
int disabled_cpus;

u64 acpi_saved_sp;

#define PREFIX			"ACPI: "

struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC];

void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size)
{
	if (!phys || !size)
		return NULL;

	return early_memremap(phys, size);
}

void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
{
	if (!map || !size)
		return;

	early_memunmap(map, size);
}

void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
	if (!memblock_is_memory(phys))
		return ioremap(phys, size);
	else
		return ioremap_cache(phys, size);
}

static int cpu_enumerated = 0;

#ifdef CONFIG_SMP
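/*
 * Map a physical CPU id from the MADT to a logical CPU number.
 * The boot CPU is always logical CPU 0; any other CPU takes the first
 * free slot in cpu_present_mask.  Enabled entries are marked present,
 * disabled ones are only counted.
 */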
static int set_processor_mask(u32 id, u32 flags)
{
	int nr_cpus;
	int cpu, cpuid = id;

	if (!cpu_enumerated)
		nr_cpus = NR_CPUS;
	else
		nr_cpus = nr_cpu_ids;

	if (num_processors >= nr_cpus) {
		pr_warn(PREFIX "nr_cpus limit of %i reached."
			" processor 0x%x ignored.\n", nr_cpus, cpuid);

		return -ENODEV;
	}

	if (cpuid == loongson_sysconf.boot_cpu_id)
		cpu = 0;
	else
		cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);

	if (!cpu_enumerated)
		set_cpu_possible(cpu, true);

	if (flags & ACPI_MADT_ENABLED) {
		num_processors++;
		set_cpu_present(cpu, true);
		__cpu_number_map[cpuid] = cpu;
		__cpu_logical_map[cpu] = cpuid;
	} else
		disabled_cpus++;

	return cpu;
}
#endif

static int __init
acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_core_pic *processor = NULL;

	processor = (struct acpi_madt_core_pic *)header;
	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(&header->common);
#ifdef CONFIG_SMP
	acpi_core_pic[processor->core_id] = *processor;
	set_processor_mask(processor->core_id, processor->flags);
#endif

	return 0;
}

static int __init
acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end)
{
	static int core = 0;
	struct acpi_madt_eio_pic *eiointc = NULL;

	eiointc = (struct acpi_madt_eio_pic *)header;
	if (BAD_MADT_ENTRY(eiointc, end))
		return -EINVAL;

	core = eiointc->node * CORES_PER_EIO_NODE;
	set_bit(core, loongson_sysconf.cores_io_master);

	return 0;
}

static void __init acpi_process_madt(void)
{
#ifdef CONFIG_SMP
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		__cpu_number_map[i] = -1;
		__cpu_logical_map[i] = -1;
	}
#endif
	acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
			acpi_parse_processor, MAX_CORE_PIC);

	acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
			acpi_parse_eio_master, MAX_IO_PICS);

	cpu_enumerated = 1;
	loongson_sysconf.nr_cpus = num_processors;
}

int pptt_enabled;

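/*
 * Derive cpu_data[].core from the ACPI PPTT: CPUs that are not SMT
 * threads use their own topology node, SMT threads use the parent
 * (core) node one level up.
 */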
int __init parse_acpi_topology(void)
{
	int cpu, topology_id;

	for_each_possible_cpu(cpu) {
		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0) {
			pr_warn("Invalid BIOS PPTT\n");
			return -ENOENT;
		}

		if (acpi_pptt_cpu_is_thread(cpu) <= 0)
			cpu_data[cpu].core = topology_id;
		else {
			topology_id = find_acpi_cpu_topology(cpu, 1);
			if (topology_id < 0)
				return -ENOENT;

			cpu_data[cpu].core = topology_id;
		}
	}

	pptt_enabled = 1;

	return 0;
}

#ifndef CONFIG_SUSPEND
int (*acpi_suspend_lowlevel)(void);
#else
int (*acpi_suspend_lowlevel)(void) = loongarch_acpi_suspend;
#endif

void __init acpi_boot_table_init(void)
{
	/*
	 * If acpi_disabled, bail out
	 */
	if (acpi_disabled)
		goto fdt_earlycon;

	/*
	 * Initialize the ACPI boot-time table parser.
	 */
	if (acpi_table_init()) {
		disable_acpi();
		goto fdt_earlycon;
	}

	loongson_sysconf.boot_cpu_id = read_csr_cpuid();

	/*
	 * Process the Multiple APIC Description Table (MADT), if present
	 */
	acpi_process_madt();

	/* Do not enable ACPI SPCR console by default */
	acpi_parse_spcr(earlycon_acpi_spcr_enable, false);

	return;

fdt_earlycon:
	if (earlycon_acpi_spcr_enable)
		early_init_dt_scan_chosen_stdout();
}

#ifdef CONFIG_ACPI_NUMA

static __init int setup_node(int pxm)
{
	return acpi_map_pxm_to_node(pxm);
}

/*
 * Callback for SLIT parsing.  pxm_to_node() returns NUMA_NO_NODE for
 * I/O localities since SRAT does not list them.  I/O localities are
 * not supported at this point.
 */
unsigned int numa_distance_cnt;

static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
{
	return slit->locality_count;
}

void __init numa_set_distance(int from, int to, int distance)
{
	if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
				from, to, distance);
		return;
	}

	node_distances[from][to] = distance;
}

/* Callback for Proximity Domain -> CPUID mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
	int pxm, node;

	if (srat_disabled())
		return;
	if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
		bad_srat();
		return;
	}
	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
		return;
	pxm = pa->proximity_domain_lo;
	if (acpi_srat_revision >= 2) {
		pxm |= (pa->proximity_domain_hi[0] << 8);
		pxm |= (pa->proximity_domain_hi[1] << 16);
		pxm |= (pa->proximity_domain_hi[2] << 24);
	}
	node = setup_node(pxm);
	if (node < 0) {
		pr_err("SRAT: Too many proximity domains %x\n", pxm);
		bad_srat();
		return;
	}

	if (pa->apic_id >= CONFIG_NR_CPUS) {
		pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n",
				pxm, pa->apic_id, node);
		return;
	}

	early_numa_add_cpu(pa->apic_id, node);

	set_cpuid_to_node(pa->apic_id, node);
	node_set(node, numa_nodes_parsed);
	pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
}

#endif

void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
	memblock_reserve(addr, size);
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU

#include <acpi/processor.h>

static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
	int nid;

	nid = acpi_get_node(handle);
	if (nid != NUMA_NO_NODE) {
		set_cpuid_to_node(physid, nid);
		node_set(nid, numa_nodes_parsed);
		set_cpu_numa_node(cpu, nid);
		cpumask_set_cpu(cpu, cpumask_of_node(nid));
	}
#endif
	return 0;
}

int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu)
{
	int cpu;

	cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);
	if (cpu < 0) {
		pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
		return cpu;
	}

	acpi_map_cpu2node(handle, cpu, physid);

	*pcpu = cpu;

	return 0;
}
EXPORT_SYMBOL(acpi_map_cpu);

int acpi_unmap_cpu(int cpu)
{
#ifdef CONFIG_ACPI_NUMA
	set_cpuid_to_node(cpu_logical_map(cpu), NUMA_NO_NODE);
#endif
	set_cpu_present(cpu, false);
	num_processors--;

	pr_info("cpu%d hot remove!\n", cpu);

	return 0;
}
EXPORT_SYMBOL(acpi_unmap_cpu);

#endif /* CONFIG_ACPI_HOTPLUG_CPU */