~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/x86/kernel/amd_nb.c

Version: ~ [ linux-6.11.5 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.58 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.114 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.169 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.228 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.284 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.322 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 // SPDX-License-Identifier: GPL-2.0-only
  2 /*
  3  * Shared support code for AMD K8 northbridges and derivatives.
  4  * Copyright 2006 Andi Kleen, SUSE Labs.
  5  */
  6 
  7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  8 
  9 #include <linux/types.h>
 10 #include <linux/slab.h>
 11 #include <linux/init.h>
 12 #include <linux/errno.h>
 13 #include <linux/export.h>
 14 #include <linux/spinlock.h>
 15 #include <linux/pci_ids.h>
 16 #include <asm/amd_nb.h>
 17 
 18 #define PCI_DEVICE_ID_AMD_17H_ROOT              0x1450
 19 #define PCI_DEVICE_ID_AMD_17H_M10H_ROOT         0x15d0
 20 #define PCI_DEVICE_ID_AMD_17H_M30H_ROOT         0x1480
 21 #define PCI_DEVICE_ID_AMD_17H_M60H_ROOT         0x1630
 22 #define PCI_DEVICE_ID_AMD_17H_MA0H_ROOT         0x14b5
 23 #define PCI_DEVICE_ID_AMD_19H_M10H_ROOT         0x14a4
 24 #define PCI_DEVICE_ID_AMD_19H_M40H_ROOT         0x14b5
 25 #define PCI_DEVICE_ID_AMD_19H_M60H_ROOT         0x14d8
 26 #define PCI_DEVICE_ID_AMD_19H_M70H_ROOT         0x14e8
 27 #define PCI_DEVICE_ID_AMD_1AH_M00H_ROOT         0x153a
 28 #define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT         0x1507
 29 #define PCI_DEVICE_ID_AMD_1AH_M60H_ROOT         0x1122
 30 #define PCI_DEVICE_ID_AMD_MI200_ROOT            0x14bb
 31 #define PCI_DEVICE_ID_AMD_MI300_ROOT            0x14f8
 32 
 33 #define PCI_DEVICE_ID_AMD_17H_DF_F4             0x1464
 34 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4        0x15ec
 35 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4        0x1494
 36 #define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4        0x144c
 37 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4        0x1444
 38 #define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4        0x1728
 39 #define PCI_DEVICE_ID_AMD_19H_DF_F4             0x1654
 40 #define PCI_DEVICE_ID_AMD_19H_M10H_DF_F4        0x14b1
 41 #define PCI_DEVICE_ID_AMD_19H_M40H_DF_F4        0x167d
 42 #define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4        0x166e
 43 #define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4        0x14e4
 44 #define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4        0x14f4
 45 #define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4        0x12fc
 46 #define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4        0x12c4
 47 #define PCI_DEVICE_ID_AMD_MI200_DF_F4           0x14d4
 48 #define PCI_DEVICE_ID_AMD_MI300_DF_F4           0x152c
 49 
/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);

/* Per-node GART flush words, cached by amd_cache_gart(). */
static u32 *flush_words;
 54 
/* PCI root complex devices, one set per node; their config space carries the SMN index/data pair. */
static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_ROOT) },
	{}	/* sentinel */
};
 72 
 73 #define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704
 74 
/* Northbridge / data-fabric "misc" devices (function 3), one per node. */
static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F3) },
	{}	/* sentinel */
};
106 
/* Northbridge / data-fabric "link" devices (function 4), one per node. */
static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F4) },
	{}	/* sentinel */
};
132 
/* Hygon parts reuse the AMD family 17h device IDs under their own vendor ID. */
static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};
147 
/*
 * Bus/device ranges scanned for northbridge devices during early boot.
 * NOTE(review): fields appear to be { bus, dev_base, dev_limit } per
 * struct amd_nb_bus_dev_range — confirm against <asm/amd_nb.h>.
 */
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};
154 
/* Cached northbridge devices and feature flags; filled in by amd_cache_northbridges(). */
static struct amd_northbridge_info amd_northbridges;
156 
/* Number of cached northbridges (nodes); 0 until amd_cache_northbridges() has run. */
u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);
162 
/* True when every feature bit in @feature is set in amd_northbridges.flags. */
bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);
168 
169 struct amd_northbridge *node_to_amd_nb(int node)
170 {
171         return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
172 }
173 EXPORT_SYMBOL_GPL(node_to_amd_nb);
174 
175 static struct pci_dev *next_northbridge(struct pci_dev *dev,
176                                         const struct pci_device_id *ids)
177 {
178         do {
179                 dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
180                 if (!dev)
181                         break;
182         } while (!pci_match_id(ids, dev));
183         return dev;
184 }
185 
186 /*
187  * SMN accesses may fail in ways that are difficult to detect here in the called
188  * functions amd_smn_read() and amd_smn_write(). Therefore, callers must do
189  * their own checking based on what behavior they expect.
190  *
191  * For SMN reads, the returned value may be zero if the register is Read-as-Zero.
192  * Or it may be a "PCI Error Response", e.g. all 0xFFs. The "PCI Error Response"
193  * can be checked here, and a proper error code can be returned.
194  *
195  * But the Read-as-Zero response cannot be verified here. A value of 0 may be
 196  * correct in some cases, so callers must check that this is correct for the
197  * register/fields they need.
198  *
 199  * For SMN writes, success can be determined through a "write and read back".
200  * However, this is not robust when done here.
201  *
202  * Possible issues:
203  *
204  * 1) Bits that are "Write-1-to-Clear". In this case, the read value should
205  *    *not* match the write value.
206  *
207  * 2) Bits that are "Read-as-Zero"/"Writes-Ignored". This information cannot be
208  *    known here.
209  *
210  * 3) Bits that are "Reserved / Set to 1". Ditto above.
211  *
212  * Callers of amd_smn_write() should do the "write and read back" check
213  * themselves, if needed.
214  *
215  * For #1, they can see if their target bits got cleared.
216  *
217  * For #2 and #3, they can check if their target bits got set as intended.
218  *
219  * This matches what is done for RDMSR/WRMSR. As long as there's no #GP, then
220  * the operation is considered a success, and the caller does their own
221  * checking.
222  */
/*
 * Perform one SMN access through the PCI root of @node: program the SMN
 * address into config offset 0x60, then read or write the data register
 * at offset 0x64.  smn_mutex serializes use of the index/data pair.
 * See the comment above for the limits of error detection here.
 */
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	/* No northbridge cached for this node. */
	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	/* Select the target SMN register via the index register at 0x60. */
	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	/* The data register at 0x64 completes the access. */
	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}
252 
253 int __must_check amd_smn_read(u16 node, u32 address, u32 *value)
254 {
255         int err = __amd_smn_rw(node, address, value, false);
256 
257         if (PCI_POSSIBLE_ERROR(*value)) {
258                 err = -ENODEV;
259                 *value = 0;
260         }
261 
262         return err;
263 }
264 EXPORT_SYMBOL_GPL(amd_smn_read);
265 
/*
 * Write an SMN register.  Success only means the PCI config accesses
 * completed; callers that need certainty should read the value back
 * themselves (see the comment above __amd_smn_rw()).
 */
int __must_check amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);
271 
272 
/*
 * Enumerate and cache the root/misc/link PCI devices of every
 * northbridge (node), and derive the feature flags (GART, L3 index
 * disable, L3 partitioning) from CPU family data.  Idempotent: returns
 * 0 immediately once amd_northbridges.num is set.
 */
static int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 roots_per_misc = 0;
	u16 misc_count = 0;
	u16 root_count = 0;
	u16 i, j;

	/* Already cached. */
	if (amd_northbridges.num)
		return 0;

	/* Hygon parts use the same device IDs under their own vendor ID. */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

	/* One misc (DF function 3) device exists per node: count them. */
	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)))
		misc_count++;

	if (!misc_count)
		return -ENODEV;

	root = NULL;
	while ((root = next_northbridge(root, root_ids)))
		root_count++;

	if (root_count) {
		roots_per_misc = root_count / misc_count;

		/*
		 * There should be _exactly_ N roots for each DF/SMN
		 * interface.
		 */
		if (!roots_per_misc || (root_count % roots_per_misc)) {
			pr_info("Unsupported AMD DF/PCI configuration found\n");
			return -ENODEV;
		}
	}

	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = misc_count;

	/* Walk all three ID lists in lockstep, one entry per node. */
	link = misc = root = NULL;
	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);

		/*
		 * If there are more PCI root devices than data fabric/
		 * system management network interfaces, then the (N)
		 * PCI roots per DF/SMN interface are functionally the
		 * same (for DF/SMN access) and N-1 are redundant.  N-1
		 * PCI roots should be skipped per DF/SMN interface so
		 * the following DF/SMN interfaces get mapped to
		 * correct PCI roots.
		 */
		for (j = 1; j < roots_per_misc; j++)
			root = next_northbridge(root, root_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
375 
376 /*
377  * Ignores subdevice/subvendor but as far as I can figure out
378  * they're useless anyways
379  */
380 bool __init early_is_amd_nb(u32 device)
381 {
382         const struct pci_device_id *misc_ids = amd_nb_misc_ids;
383         const struct pci_device_id *id;
384         u32 vendor = device & 0xffff;
385 
386         if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
387             boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
388                 return false;
389 
390         if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
391                 misc_ids = hygon_nb_misc_ids;
392 
393         device >>= 16;
394         for (id = misc_ids; id->vendor; id++)
395                 if (vendor == id->vendor && device == id->device)
396                         return true;
397         return false;
398 }
399 
/*
 * Build the MMCONFIG (ECAM) address range from MSR_FAM10H_MMIO_CONF_BASE.
 * Fills in @res and returns it, or returns NULL when the CPU is not a
 * family 0x10+ AMD/Hygon part or MMCONFIG is disabled in the MSR.
 */
struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	/* Each bus consumes 1 MiB (20 bits) of config space. */
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}
431 
/*
 * Return the 4-bit subcache (L3 partition) mask currently assigned to
 * @cpu's compute unit, or 0 when L3 partitioning is unsupported.
 */
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(topology_amd_node_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	/* Link register 0x1d4 packs one 4-bit mask per compute unit. */
	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).topo.core_id)) & 0xf;
}
444 
/*
 * Assign the 4-bit subcache mask @mask to @cpu's compute unit via link
 * register 0x1d4.  BAN mode (misc register 0x1b8, bits 0x180000) is
 * disabled while any subcache is turned off and restored once the
 * partitioning returns to its reset state.
 * Returns -EINVAL when L3 partitioning is unsupported or @mask > 0xf.
 */
int amd_set_subcaches(int cpu, unsigned long mask)
{
	/* Reset-time values of the partition register and BAN bits, captured once. */
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(topology_amd_node_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).topo.core_id;
	mask <<= 4 * cuid;
	/* NOTE(review): bits at <<26 appear to shield the other compute units' fields — confirm against the BKDG. */
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
484 
485 static void amd_cache_gart(void)
486 {
487         u16 i;
488 
489         if (!amd_nb_has_feature(AMD_NB_GART))
490                 return;
491 
492         flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
493         if (!flush_words) {
494                 amd_northbridges.flags &= ~AMD_NB_GART;
495                 pr_notice("Cannot initialize GART flush words, GART support disabled\n");
496                 return;
497         }
498 
499         for (i = 0; i != amd_northbridges.num; i++)
500                 pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
501 }
502 
/*
 * Flush the GART TLB on every northbridge: set bit 0 of the cached
 * flush word in misc register 0x9c on all nodes, then spin until each
 * northbridge clears the bit, signalling completion.
 */
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyways
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	/* Kick off the flush on all northbridges first... */
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	/* ...then wait for each one to finish. */
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush*/
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);
541 
/* on_each_cpu() callback: apply the erratum 688 MSR workaround on the local CPU. */
static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	/* Set IC_CFG bits 3 and 14 as the erratum workaround prescribes. */
	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}
549 
/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	/* Only family 0x14 is affected. */
	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	/* Bit 2 set: nothing to do (presumably the BIOS already applied the fix). */
	if (val & BIT(2))
		return;

	/* Apply the MSR workaround on every online CPU. */
	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}
576 
/*
 * Initcall: enumerate and cache the northbridges, cache the GART flush
 * words, and apply the erratum 688 workaround.  Failures are not fatal;
 * later users simply observe amd_northbridges.num == 0.
 */
static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);
589 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php