// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

/* Prefix all pr_*() output from this file with the module name. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

Andi Kleena32073b2006-06-26 13:56:40 +02009#include <linux/types.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090010#include <linux/slab.h>
Andi Kleena32073b2006-06-26 13:56:40 +020011#include <linux/init.h>
12#include <linux/errno.h>
Paul Gortmaker186f4362016-07-13 20:18:56 -040013#include <linux/export.h>
Andi Kleena32073b2006-06-26 13:56:40 +020014#include <linux/spinlock.h>
Woods, Briandedf7dc2018-11-06 20:08:14 +000015#include <linux/pci_ids.h>
Andreas Herrmann23ac4ae2010-09-17 18:03:43 +020016#include <asm/amd_nb.h>
Andi Kleena32073b2006-06-26 13:56:40 +020017
/*
 * PCI device IDs for family 17h+ root complexes and Data Fabric function 4
 * devices, defined locally (presumably not yet in pci_ids.h -- verify).
 */
#define PCI_DEVICE_ID_AMD_17H_ROOT	0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT	0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT	0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT	0x1630
#define PCI_DEVICE_ID_AMD_17H_DF_F4	0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_19H_DF_F4	0x1654

/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);

/* One cached GART flush word per northbridge; populated by amd_cache_gart(). */
static u32 *flush_words;
33
/* Root complex devices used as the target for SMN accesses (see __amd_smn_rw). */
static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4	0x1704
43
/* Northbridge "misc" (function 3 / DF F3) devices, one per DF/SMN interface. */
static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{}
};
Andi Kleena32073b2006-06-26 13:56:40 +020062
/* Northbridge "link" (function 4 / DF F4) devices, matched per node. */
static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{}
};
78
/* Hygon platforms reuse the AMD family 17h device IDs under their own vendor ID. */
static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};
93
/*
 * Bus/device ranges where northbridge devices can appear (fields per
 * struct amd_nb_bus_dev_range).  Consumed outside this file -- see users
 * of the exported symbol.
 */
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

/* Global cache of discovered northbridges: count, feature flags, per-node devs. */
static struct amd_northbridge_info amd_northbridges;
102
/* Number of cached northbridges; 0 until amd_cache_northbridges() has run. */
u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);
Yazen Ghannamc7993892016-11-10 15:10:53 -0600108
109bool amd_nb_has_feature(unsigned int feature)
110{
111 return ((amd_northbridges.flags & feature) == feature);
112}
Yazen Ghannamde6bd082016-11-10 15:10:54 -0600113EXPORT_SYMBOL_GPL(amd_nb_has_feature);
Yazen Ghannamc7993892016-11-10 15:10:53 -0600114
115struct amd_northbridge *node_to_amd_nb(int node)
116{
117 return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
118}
Yazen Ghannamde6bd082016-11-10 15:10:54 -0600119EXPORT_SYMBOL_GPL(node_to_amd_nb);
Andi Kleena32073b2006-06-26 13:56:40 +0200120
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200121static struct pci_dev *next_northbridge(struct pci_dev *dev,
Jan Beulich691269f2011-02-09 08:26:53 +0000122 const struct pci_device_id *ids)
Andi Kleena32073b2006-06-26 13:56:40 +0200123{
124 do {
125 dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
126 if (!dev)
127 break;
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200128 } while (!pci_match_id(ids, dev));
Andi Kleena32073b2006-06-26 13:56:40 +0200129 return dev;
130}
131
Yazen Ghannamddfe43c2016-11-10 15:10:56 -0600132static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
133{
134 struct pci_dev *root;
135 int err = -ENODEV;
136
137 if (node >= amd_northbridges.num)
138 goto out;
139
140 root = node_to_amd_nb(node)->root;
141 if (!root)
142 goto out;
143
144 mutex_lock(&smn_mutex);
145
146 err = pci_write_config_dword(root, 0x60, address);
147 if (err) {
148 pr_warn("Error programming SMN address 0x%x.\n", address);
149 goto out_unlock;
150 }
151
152 err = (write ? pci_write_config_dword(root, 0x64, *value)
153 : pci_read_config_dword(root, 0x64, value));
154 if (err)
155 pr_warn("Error %s SMN address 0x%x.\n",
156 (write ? "writing to" : "reading from"), address);
157
158out_unlock:
159 mutex_unlock(&smn_mutex);
160
161out:
162 return err;
163}
164
/* Read a 32-bit value from the System Management Network on @node. */
int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

/* Write a 32-bit value to the System Management Network on @node. */
int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);
176
177/*
178 * Data Fabric Indirect Access uses FICAA/FICAD.
179 *
180 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
181 * on the device's Instance Id and the PCI function and register offset of
182 * the desired register.
183 *
184 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
185 * and FICAD HI registers but so far we only need the LO register.
186 */
187int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
188{
189 struct pci_dev *F4;
190 u32 ficaa;
191 int err = -ENODEV;
192
193 if (node >= amd_northbridges.num)
194 goto out;
195
196 F4 = node_to_amd_nb(node)->link;
197 if (!F4)
198 goto out;
199
200 ficaa = 1;
201 ficaa |= reg & 0x3FC;
202 ficaa |= (func & 0x7) << 11;
203 ficaa |= instance_id << 16;
204
205 mutex_lock(&smn_mutex);
206
207 err = pci_write_config_dword(F4, 0x5C, ficaa);
208 if (err) {
209 pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
210 goto out_unlock;
211 }
212
213 err = pci_read_config_dword(F4, 0x98, lo);
214 if (err)
215 pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);
216
217out_unlock:
218 mutex_unlock(&smn_mutex);
219
220out:
221 return err;
222}
223EXPORT_SYMBOL_GPL(amd_df_indirect_read);
224
/*
 * Enumerate and cache all AMD/Hygon northbridge devices (root, misc/F3 and
 * link/F4 per node) into amd_northbridges, then set feature flags (GART,
 * L3 index disable, L3 partitioning) based on the detected CPU family.
 *
 * Returns 0 on success or if the cache is already populated, -ENODEV when
 * no misc devices exist or the root-to-misc topology is inconsistent, and
 * -ENOMEM on allocation failure.
 */
int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 roots_per_misc = 0;
	u16 misc_count = 0;
	u16 root_count = 0;
	u16 i, j;

	/* Already enumerated -- nothing to do. */
	if (amd_northbridges.num)
		return 0;

	/* Hygon parts match the AMD 17h IDs under their own vendor ID. */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

	/* Count misc (F3) devices; one misc device per northbridge. */
	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)) != NULL)
		misc_count++;

	if (!misc_count)
		return -ENODEV;

	root = NULL;
	while ((root = next_northbridge(root, root_ids)) != NULL)
		root_count++;

	if (root_count) {
		roots_per_misc = root_count / misc_count;

		/*
		 * There should be _exactly_ N roots for each DF/SMN
		 * interface.
		 */
		if (!roots_per_misc || (root_count % roots_per_misc)) {
			pr_info("Unsupported AMD DF/PCI configuration found\n");
			return -ENODEV;
		}
	}

	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = misc_count;

	/* Pair up the i-th root/misc/link devices for each node. */
	link = misc = root = NULL;
	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);

		/*
		 * If there are more PCI root devices than data fabric/
		 * system management network interfaces, then the (N)
		 * PCI roots per DF/SMN interface are functionally the
		 * same (for DF/SMN access) and N-1 are redundant. N-1
		 * PCI roots should be skipped per DF/SMN interface so
		 * the following DF/SMN interfaces get mapped to
		 * correct PCI roots.
		 */
		for (j = 1; j < roots_per_misc; j++)
			root = next_northbridge(root, root_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);
Andi Kleena32073b2006-06-26 13:56:40 +0200328
Borislav Petkov84fd1d32011-03-03 12:59:32 +0100329/*
330 * Ignores subdevice/subvendor but as far as I can figure out
331 * they're useless anyways
332 */
333bool __init early_is_amd_nb(u32 device)
Andi Kleena32073b2006-06-26 13:56:40 +0200334{
Pu Wenc6babb52018-09-25 22:46:11 +0800335 const struct pci_device_id *misc_ids = amd_nb_misc_ids;
Jan Beulich691269f2011-02-09 08:26:53 +0000336 const struct pci_device_id *id;
Andi Kleena32073b2006-06-26 13:56:40 +0200337 u32 vendor = device & 0xffff;
Jan Beulich691269f2011-02-09 08:26:53 +0000338
Pu Wenb7a5cb42018-09-25 22:45:01 +0800339 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
340 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
341 return false;
342
Pu Wenc6babb52018-09-25 22:46:11 +0800343 if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
344 misc_ids = hygon_nb_misc_ids;
345
Andi Kleena32073b2006-06-26 13:56:40 +0200346 device >>= 16;
Pu Wenc6babb52018-09-25 22:46:11 +0800347 for (id = misc_ids; id->vendor; id++)
Andi Kleena32073b2006-06-26 13:56:40 +0200348 if (vendor == id->vendor && device == id->device)
Borislav Petkov84fd1d32011-03-03 12:59:32 +0100349 return true;
350 return false;
Andi Kleena32073b2006-06-26 13:56:40 +0200351}
352
Bjorn Helgaas24d25db2012-01-05 14:27:19 -0700353struct resource *amd_get_mmconfig_range(struct resource *res)
354{
355 u32 address;
356 u64 base, msr;
Yazen Ghannamde6bd082016-11-10 15:10:54 -0600357 unsigned int segn_busn_bits;
Bjorn Helgaas24d25db2012-01-05 14:27:19 -0700358
Pu Wenc6babb52018-09-25 22:46:11 +0800359 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
360 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
Bjorn Helgaas24d25db2012-01-05 14:27:19 -0700361 return NULL;
362
363 /* assume all cpus from fam10h have mmconfig */
Yazen Ghannamde6bd082016-11-10 15:10:54 -0600364 if (boot_cpu_data.x86 < 0x10)
Bjorn Helgaas24d25db2012-01-05 14:27:19 -0700365 return NULL;
366
367 address = MSR_FAM10H_MMIO_CONF_BASE;
368 rdmsrl(address, msr);
369
370 /* mmconfig is not enabled */
371 if (!(msr & FAM10H_MMIO_CONF_ENABLE))
372 return NULL;
373
374 base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
375
376 segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
377 FAM10H_MMIO_CONF_BUSRANGE_MASK;
378
379 res->flags = IORESOURCE_MEM;
380 res->start = base;
381 res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
382 return res;
383}
384
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100385int amd_get_subcaches(int cpu)
386{
387 struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
388 unsigned int mask;
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100389
390 if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
391 return 0;
392
393 pci_read_config_dword(link, 0x1d4, &mask);
394
Borislav Petkov8196dab2016-03-25 15:52:36 +0100395 return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100396}
397
/*
 * Configure which L3 subcaches are enabled for @cpu's core by rewriting
 * the partitioning register (link/F4 0x1d4).  BAN mode (misc/F3 0x1b8,
 * bits 0x180000) is temporarily cleared while subcaches are disabled and
 * restored once the partitioning register returns to its reset value.
 *
 * Returns 0 on success, -EINVAL if partitioning is unsupported or @mask
 * has bits set outside the low nibble.
 */
int amd_set_subcaches(int cpu, unsigned long mask)
{
	/* Reset-time register snapshots, latched on first call only. */
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	/* Shift the nibble into this core's slot; bits 26+ flag the other cores. */
	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
437
Borislav Petkov09c6c302016-06-16 19:13:50 +0200438static void amd_cache_gart(void)
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200439{
Borislav Petkov84fd1d32011-03-03 12:59:32 +0100440 u16 i;
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200441
Borislav Petkov09c6c302016-06-16 19:13:50 +0200442 if (!amd_nb_has_feature(AMD_NB_GART))
443 return;
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200444
Yazen Ghannamc7993892016-11-10 15:10:53 -0600445 flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
Borislav Petkov09c6c302016-06-16 19:13:50 +0200446 if (!flush_words) {
447 amd_northbridges.flags &= ~AMD_NB_GART;
448 pr_notice("Cannot initialize GART flush words, GART support disabled\n");
449 return;
450 }
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200451
Yazen Ghannamc7993892016-11-10 15:10:53 -0600452 for (i = 0; i != amd_northbridges.num; i++)
Borislav Petkov09c6c302016-06-16 19:13:50 +0200453 pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200454}
455
/*
 * Trigger a GART TLB flush on every northbridge by setting bit 0 of the
 * cached flush word in misc/F3 register 0x9c, then spin until the
 * hardware clears the bit on each node.
 */
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyways
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush*/
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);
Andi Kleena32073b2006-06-26 13:56:40 +0200494
/* Per-CPU callback: set bits 3 and 14 of IC_CFG (erratum 688 workaround). */
static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}
502
503/* Apply erratum 688 fix so machines without a BIOS fix work. */
504static __init void fix_erratum_688(void)
505{
506 struct pci_dev *F4;
507 u32 val;
508
509 if (boot_cpu_data.x86 != 0x14)
510 return;
511
512 if (!amd_northbridges.num)
513 return;
514
515 F4 = node_to_amd_nb(0)->link;
516 if (!F4)
517 return;
518
519 if (pci_read_config_dword(F4, 0x164, &val))
520 return;
521
522 if (val & BIT(2))
523 return;
524
525 on_each_cpu(__fix_erratum_688, NULL, 0);
526
527 pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
528}
529
Hans Rosenfeldeec1d4f2010-10-29 17:14:30 +0200530static __init int init_amd_nbs(void)
Borislav Petkov0e152cd2010-03-12 15:43:03 +0100531{
Borislav Petkov09c6c302016-06-16 19:13:50 +0200532 amd_cache_northbridges();
533 amd_cache_gart();
Borislav Petkov0e152cd2010-03-12 15:43:03 +0100534
Borislav Petkovbfc11682017-10-22 12:47:31 +0200535 fix_erratum_688();
536
Borislav Petkov09c6c302016-06-16 19:13:50 +0200537 return 0;
Borislav Petkov0e152cd2010-03-12 15:43:03 +0100538}
539
540/* This has to go after the PCI subsystem */
Hans Rosenfeldeec1d4f2010-10-29 17:14:30 +0200541fs_initcall(init_amd_nbs);