/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/k8.h>

int num_k8_northbridges;
EXPORT_SYMBOL(num_k8_northbridges);

static u32 *flush_words;

struct pci_device_id k8_nb_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
        {}
};
EXPORT_SYMBOL(k8_nb_ids);

struct pci_dev **k8_northbridges;
EXPORT_SYMBOL(k8_northbridges);

/* Return the next PCI device after @dev that matches k8_nb_ids, or NULL. */
static struct pci_dev *next_k8_northbridge(struct pci_dev *dev)
{
        do {
                dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
                if (!dev)
                        break;
        } while (!pci_match_id(&k8_nb_ids[0], dev));
        return dev;
}

/*
 * Count and cache all K8 northbridge devices once, together with the
 * flush word read from config register 0x9c of each northbridge.
 */
int cache_k8_northbridges(void)
{
        int i;
        struct pci_dev *dev;

        if (num_k8_northbridges)
                return 0;

        dev = NULL;
        while ((dev = next_k8_northbridge(dev)) != NULL)
                num_k8_northbridges++;

        k8_northbridges = kmalloc((num_k8_northbridges + 1) * sizeof(void *),
                                  GFP_KERNEL);
        if (!k8_northbridges)
                return -ENOMEM;

        if (!num_k8_northbridges) {
                k8_northbridges[0] = NULL;
                return 0;
        }

        flush_words = kmalloc(num_k8_northbridges * sizeof(u32), GFP_KERNEL);
        if (!flush_words) {
                kfree(k8_northbridges);
                return -ENOMEM;
        }

        dev = NULL;
        i = 0;
        while ((dev = next_k8_northbridge(dev)) != NULL) {
                k8_northbridges[i] = dev;
                pci_read_config_dword(dev, 0x9c, &flush_words[i++]);
        }
        k8_northbridges[i] = NULL;
        return 0;
}
EXPORT_SYMBOL_GPL(cache_k8_northbridges);
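
/*
 * Usage sketch (an editor's illustration, not part of the original file):
 * a caller such as a GART/IOMMU init path would typically populate the
 * cache once and then walk the NULL-terminated array. setup_one_nb() is a
 * hypothetical helper, not an exported kernel API:
 *
 *      if (cache_k8_northbridges() < 0)
 *              return -ENODEV;
 *      for (i = 0; i < num_k8_northbridges; i++)
 *              setup_one_nb(k8_northbridges[i]);
 */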

/* Ignores subdevice/subvendor but as far as I can figure out
   they're useless anyways */
int __init early_is_k8_nb(u32 device)
{
        struct pci_device_id *id;
        u32 vendor = device & 0xffff;
        device >>= 16;
        for (id = k8_nb_ids; id->vendor; id++)
                if (vendor == id->vendor && device == id->device)
                        return 1;
        return 0;
}
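
/*
 * Usage sketch (an assumption about callers, not code from this file):
 * early boot code with only direct config-space access could read config
 * dword 0 (vendor ID in the low 16 bits, device ID in the high 16 bits),
 * e.g. via read_pci_config() from <asm/pci-direct.h>, and pass it in:
 *
 *      u32 id = read_pci_config(0, slot, 3, 0x00);
 *      if (early_is_k8_nb(id))
 *              ...     // treat function 3 of this slot as a K8 northbridge
 */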

void k8_flush_garts(void)
{
        int flushed, i;
        unsigned long flags;
        static DEFINE_SPINLOCK(gart_lock);

        /* Avoid races between AGP and IOMMU. In theory it's not needed
           but I'm not sure if the hardware won't lose flush requests
           when another is pending. This whole thing is so expensive anyways
           that it doesn't matter to serialize more. -AK */
        spin_lock_irqsave(&gart_lock, flags);
        flushed = 0;
        for (i = 0; i < num_k8_northbridges; i++) {
                pci_write_config_dword(k8_northbridges[i], 0x9c,
                                       flush_words[i] | 1);
                flushed++;
        }
        for (i = 0; i < num_k8_northbridges; i++) {
                u32 w;
                /* Make sure the hardware actually executed the flush */
                for (;;) {
                        pci_read_config_dword(k8_northbridges[i],
                                              0x9c, &w);
                        if (!(w & 1))
                                break;
                        cpu_relax();
                }
        }
        spin_unlock_irqrestore(&gart_lock, flags);
        if (!flushed)
                printk("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(k8_flush_garts);
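
/*
 * Usage sketch (an assumption about callers, not part of this file): a GART
 * IOMMU mapping path would update its GART page-table entries first and only
 * then call k8_flush_garts() so the northbridges drop stale translations.
 * gart_ptes[] and make_gart_pte() are hypothetical names for illustration:
 *
 *      gart_ptes[idx] = make_gart_pte(phys_addr);
 *      k8_flush_garts();
 */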