/*
 * edac_mc kernel module
 * (C) 2005, 2006 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Thayne Harbaugh
 * Based on work by Dan Hollis <goemon at anime dot net> and others.
 *	http://www.anime.net/~goemon/linux-ecc/
 *
 * Modified by Dave Peterson and Doug Thompson
 *
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/edac.h>
#include "edac_core.h"
#include "edac_module.h"

#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH ../../include/ras
#include <ras/ras_event.h>

/* lock to memory controller's control array */
static DEFINE_MUTEX(mem_ctls_mutex);
static LIST_HEAD(mc_devices);

unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
				 unsigned len)
{
	struct mem_ctl_info *mci = dimm->mci;
	int i, n, count = 0;
	char *p = buf;

	for (i = 0; i < mci->n_layers; i++) {
		n = snprintf(p, len, "%s %d ",
			     edac_layer_name[mci->layers[i].type],
			     dimm->location[i]);
		p += n;
		len -= n;
		count += n;
		if (!len)
			break;
	}

	return count;
}
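
/*
 * Illustrative note (added comment, not in the original file): for a
 * controller described with a csrow layer and a channel layer, the helper
 * above fills @buf with one "<layer name> <position>" pair per layer, in
 * layer order, e.g. "csrow 2 channel 1 ". A hedged usage sketch:
 *
 *	char location[80];
 *
 *	edac_dimm_info_location(dimm, location, sizeof(location));
 *	edac_dbg(4, "DIMM location: %s\n", location);
 */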

#ifdef CONFIG_EDAC_DEBUG

static void edac_mc_dump_channel(struct rank_info *chan)
{
	edac_dbg(4, " channel->chan_idx = %d\n", chan->chan_idx);
	edac_dbg(4, " channel = %p\n", chan);
	edac_dbg(4, " channel->csrow = %p\n", chan->csrow);
	edac_dbg(4, " channel->dimm = %p\n", chan->dimm);
}

static void edac_mc_dump_dimm(struct dimm_info *dimm, int number)
{
	char location[80];

	edac_dimm_info_location(dimm, location, sizeof(location));

	edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
		 dimm->mci->mem_is_per_rank ? "rank" : "dimm",
		 number, location, dimm->csrow, dimm->cschannel);
	edac_dbg(4, " dimm = %p\n", dimm);
	edac_dbg(4, " dimm->label = '%s'\n", dimm->label);
	edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages);
	edac_dbg(4, " dimm->grain = %d\n", dimm->grain);
}

static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
	edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx);
	edac_dbg(4, " csrow = %p\n", csrow);
	edac_dbg(4, " csrow->first_page = 0x%lx\n", csrow->first_page);
	edac_dbg(4, " csrow->last_page = 0x%lx\n", csrow->last_page);
	edac_dbg(4, " csrow->page_mask = 0x%lx\n", csrow->page_mask);
	edac_dbg(4, " csrow->nr_channels = %d\n", csrow->nr_channels);
	edac_dbg(4, " csrow->channels = %p\n", csrow->channels);
	edac_dbg(4, " csrow->mci = %p\n", csrow->mci);
}

static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
	edac_dbg(3, "\tmci = %p\n", mci);
	edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap);
	edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
	edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap);
	edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check);
	edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n",
		 mci->nr_csrows, mci->csrows);
	edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n",
		 mci->tot_dimms, mci->dimms);
	edac_dbg(3, "\tdev = %p\n", mci->pdev);
	edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
		 mci->mod_name, mci->ctl_name);
	edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info);
}

#endif				/* CONFIG_EDAC_DEBUG */

/*
 * keep those in sync with the enum mem_type
 */
const char *edac_mem_types[] = {
	"Empty csrow",
	"Reserved csrow type",
	"Unknown csrow type",
	"Fast page mode RAM",
	"Extended data out RAM",
	"Burst Extended data out RAM",
	"Single data rate SDRAM",
	"Registered single data rate SDRAM",
	"Double data rate SDRAM",
	"Registered Double data rate SDRAM",
	"Rambus DRAM",
	"Unbuffered DDR2 RAM",
	"Fully buffered DDR2",
	"Registered DDR2 RAM",
	"Rambus XDR",
	"Unbuffered DDR3 RAM",
	"Registered DDR3 RAM",
};
EXPORT_SYMBOL_GPL(edac_mem_types);

/**
 * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
 * @p:		pointer to a pointer with the memory offset to be used. At
 *		return, this will be incremented to point to the next offset
 * @size:	Size of the data structure to be reserved
 * @n_elems:	Number of elements that should be reserved
 *
 * If 'size' is a constant, the compiler will optimize this whole function
 * down to either a no-op or the addition of a constant to the value of '*p'.
 *
 * The 'p' pointer is needed to keep advancing to the proper offsets in
 * memory when allocating the struct along with its embedded structs, as
 * edac_device_alloc_ctl_info() does, for example.
 *
 * At return, the pointer 'p' will be incremented to be used on a next call
 * to this function.
 */
void *edac_align_ptr(void **p, unsigned size, int n_elems)
{
	unsigned align, r;
	void *ptr = *p;

	*p += size * n_elems;

	/*
	 * 'p' can possibly be an unaligned item X such that sizeof(X) is
	 * 'size'.  Adjust 'p' so that its alignment is at least as
	 * stringent as what the compiler would provide for X and return
	 * the aligned result.
	 * Here we assume that the alignment of a "long long" is the most
	 * stringent alignment that the compiler will ever provide by default.
	 * As far as I know, this is a reasonable assumption.
	 */
	if (size > sizeof(long))
		align = sizeof(long long);
	else if (size > sizeof(int))
		align = sizeof(long);
	else if (size > sizeof(short))
		align = sizeof(int);
	else if (size > sizeof(char))
		align = sizeof(short);
	else
		return (char *)ptr;

	r = size % align;

	if (r == 0)
		return (char *)ptr;

	*p += align - r;

	return (void *)(((unsigned long)ptr) + align - r);
}
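
/*
 * Illustrative sketch (added comment, not in the original file): the
 * single-shot allocation pattern served by edac_align_ptr() is to do a
 * first pass with ptr == NULL to compute aligned offsets, kzalloc() the
 * total size once, and then rebase each offset onto the real allocation,
 * as edac_mc_alloc() below does:
 *
 *	void *ptr = NULL, *pvt;
 *	struct mem_ctl_info *mci;
 *	unsigned size;
 *
 *	mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
 *	pvt = edac_align_ptr(&ptr, sz_pvt, 1);
 *	size = ((unsigned long)pvt) + sz_pvt;
 *
 *	mci = kzalloc(size, GFP_KERNEL);
 *	pvt = sz_pvt ? ((char *)mci) + ((unsigned long)pvt) : NULL;
 */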

/**
 * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure
 * @mc_num:	Memory controller number
 * @n_layers:	Number of MC hierarchy layers
 * @layers:	Describes each layer as seen by the Memory Controller
 * @sz_pvt:	size of private storage needed
 *
 *
 * Everything is kmalloc'ed as one big chunk - more efficient.
 * It can only be used if all structures have the same lifetime - otherwise
 * you have to allocate and initialize your own structures.
 *
 * Use edac_mc_free() to free mc structures allocated by this function.
 *
 * NOTE: drivers handle multi-rank memories in different ways: in some
 * drivers, one multi-rank memory stick is mapped as one entry, while, in
 * others, a single multi-rank memory stick would be mapped into several
 * entries. Currently, this function will allocate multiple struct dimm_info
 * in such scenarios, as grouping the multiple ranks would require driver
 * changes.
 *
 * Returns:
 *	On failure: NULL
 *	On success: struct mem_ctl_info pointer
 */
struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
				   unsigned n_layers,
				   struct edac_mc_layer *layers,
				   unsigned sz_pvt)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer *layer;
	struct csrow_info *csr;
	struct rank_info *chan;
	struct dimm_info *dimm;
	u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
	unsigned pos[EDAC_MAX_LAYERS];
	unsigned size, tot_dimms = 1, count = 1;
	unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
	void *pvt, *p, *ptr = NULL;
	int i, j, row, chn, n, len, off;
	bool per_rank = false;

	BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
	/*
	 * Calculate the total amount of dimms and csrows/cschannels while
	 * in the old API emulation mode
	 */
	for (i = 0; i < n_layers; i++) {
		tot_dimms *= layers[i].size;
		if (layers[i].is_virt_csrow)
			tot_csrows *= layers[i].size;
		else
			tot_channels *= layers[i].size;

		if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT)
			per_rank = true;
	}

	/* Figure out the offsets of the various items from the start of an mc
	 * structure.  We want the alignment of each item to be at least as
	 * stringent as what the compiler would provide if we could simply
	 * hardcode everything into a single struct.
	 */
	mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
	layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
	for (i = 0; i < n_layers; i++) {
		count *= layers[i].size;
		edac_dbg(4, "errcount layer %d size %d\n", i, count);
		ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
		ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
		tot_errcount += 2 * count;
	}

	edac_dbg(4, "allocating %d error counters\n", tot_errcount);
	pvt = edac_align_ptr(&ptr, sz_pvt, 1);
	size = ((unsigned long)pvt) + sz_pvt;

	edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
		 size,
		 tot_dimms,
		 per_rank ? "ranks" : "dimms",
		 tot_csrows * tot_channels);

	mci = kzalloc(size, GFP_KERNEL);
	if (mci == NULL)
		return NULL;

	/* Adjust pointers so they point within the memory we just allocated
	 * rather than an imaginary chunk of memory located at address 0.
	 */
	layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
	for (i = 0; i < n_layers; i++) {
		mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
		mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
	}
	pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;

	/* setup index and various internal pointers */
	mci->mc_idx = mc_num;
	mci->tot_dimms = tot_dimms;
	mci->pvt_info = pvt;
	mci->n_layers = n_layers;
	mci->layers = layer;
	memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
	mci->nr_csrows = tot_csrows;
	mci->num_cschannel = tot_channels;
	mci->mem_is_per_rank = per_rank;

	/*
	 * Allocate and fill the csrow/channels structs
	 */
	mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
	if (!mci->csrows)
		goto error;
	for (row = 0; row < tot_csrows; row++) {
		csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
		if (!csr)
			goto error;
		mci->csrows[row] = csr;
		csr->csrow_idx = row;
		csr->mci = mci;
		csr->nr_channels = tot_channels;
		csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
					GFP_KERNEL);
		if (!csr->channels)
			goto error;

		for (chn = 0; chn < tot_channels; chn++) {
			chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
			if (!chan)
				goto error;
			csr->channels[chn] = chan;
			chan->chan_idx = chn;
			chan->csrow = csr;
		}
	}

	/*
	 * Allocate and fill the dimm structs
	 */
	mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
	if (!mci->dimms)
		goto error;

	memset(&pos, 0, sizeof(pos));
	row = 0;
	chn = 0;
	for (i = 0; i < tot_dimms; i++) {
		chan = mci->csrows[row]->channels[chn];
		off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]);
		if (off < 0 || off >= tot_dimms) {
			edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal data access\n");
			goto error;
		}

		dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
		if (!dimm)
			goto error;
		mci->dimms[off] = dimm;
		dimm->mci = mci;

		/*
		 * Copy DIMM location and initialize it.
		 */
		len = sizeof(dimm->label);
		p = dimm->label;
		n = snprintf(p, len, "mc#%u", mc_num);
		p += n;
		len -= n;
		for (j = 0; j < n_layers; j++) {
			n = snprintf(p, len, "%s#%u",
				     edac_layer_name[layers[j].type],
				     pos[j]);
			p += n;
			len -= n;
			dimm->location[j] = pos[j];

			if (len <= 0)
				break;
		}

		/* Link it to the csrows old API data */
		chan->dimm = dimm;
		dimm->csrow = row;
		dimm->cschannel = chn;

		/* Increment csrow location */
		row++;
		if (row == tot_csrows) {
			row = 0;
			chn++;
		}

		/* Increment dimm location */
		for (j = n_layers - 1; j >= 0; j--) {
			pos[j]++;
			if (pos[j] < layers[j].size)
				break;
			pos[j] = 0;
		}
	}

	mci->op_state = OP_ALLOC;

	/* at this point, the root kobj is valid, and in order to
	 * 'free' the object, then the function:
	 *	edac_mc_unregister_sysfs_main_kobj() must be called
	 * which will perform kobj unregistration and the actual free
	 * will occur during the kobject callback operation
	 */

	return mci;

error:
	if (mci->dimms) {
		for (i = 0; i < tot_dimms; i++)
			kfree(mci->dimms[i]);
		kfree(mci->dimms);
	}
	if (mci->csrows) {
		for (row = 0; row < tot_csrows; row++) {
			csr = mci->csrows[row];
			if (csr) {
				for (chn = 0; chn < tot_channels; chn++)
					kfree(csr->channels[chn]);
				kfree(csr);
			}
		}
		kfree(mci->csrows);
	}
	kfree(mci);

	return NULL;
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);

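/*
 * Illustrative sketch (added comment, not in the original file): how a
 * driver might describe a csrow x channel hierarchy and allocate its mci.
 * The private struct name and the nr_csrows/nr_channels variables are
 * hypothetical placeholders.
 *
 *	struct edac_mc_layer layers[2];
 *	struct mem_ctl_info *mci;
 *
 *	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
 *	layers[0].size = nr_csrows;
 *	layers[0].is_virt_csrow = true;
 *	layers[1].type = EDAC_MC_LAYER_CHANNEL;
 *	layers[1].size = nr_channels;
 *	layers[1].is_virt_csrow = false;
 *	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
 *			    sizeof(struct my_driver_pvt));
 *	if (!mci)
 *		return -ENOMEM;
 */
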
/**
 * edac_mc_free
 *	'Free' a previously allocated 'mci' structure
 * @mci: pointer to a struct mem_ctl_info structure
 */
void edac_mc_free(struct mem_ctl_info *mci)
{
	edac_dbg(1, "\n");

	/* the mci instance is freed here, when the sysfs object is dropped */
	edac_unregister_sysfs(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);


/**
 * find_mci_by_dev
 *
 *	scan list of controllers looking for the one that manages
 *	the 'dev' device
 * @dev: pointer to a struct device related with the MCI
 */
struct mem_ctl_info *find_mci_by_dev(struct device *dev)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	edac_dbg(3, "\n");

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->pdev == dev)
			return mci;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(find_mci_by_dev);

/*
 * handler for EDAC to check if NMI type handler has asserted interrupt
 */
static int edac_mc_assert_error_check_and_clear(void)
{
	int old_state;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		return 1;

	old_state = edac_err_assert;
	edac_err_assert = 0;

	return old_state;
}

/*
 * edac_mc_workq_function
 *	performs the operation scheduled by a workq request
 */
static void edac_mc_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = to_delayed_work(work_req);
	struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);

	mutex_lock(&mem_ctls_mutex);

	/* if this control struct has moved to offline state, we are done */
	if (mci->op_state == OP_OFFLINE) {
		mutex_unlock(&mem_ctls_mutex);
		return;
	}

	/* Only poll controllers that are running polled and have a check */
	if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
		mci->edac_check(mci);

	mutex_unlock(&mem_ctls_mutex);

	/* Reschedule */
	queue_delayed_work(edac_workqueue, &mci->work,
			   msecs_to_jiffies(edac_mc_get_poll_msec()));
}

/*
 * edac_mc_workq_setup
 *	initialize a workq item for this mci
 *	passing in the new delay period in msec
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex held
 */
static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
{
	edac_dbg(0, "\n");

	/* if this instance is not in the POLL state, then simply return */
	if (mci->op_state != OP_RUNNING_POLL)
		return;

	INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
	queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
}

/*
 * edac_mc_workq_teardown
 *	stop the workq processing on this mci
 *
 *	locking model:
 *
 *		called WITHOUT lock held
 */
static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
{
	int status;

	if (mci->op_state != OP_RUNNING_POLL)
		return;

	status = cancel_delayed_work(&mci->work);
	if (status == 0) {
		edac_dbg(0, "not canceled, flush the queue\n");

		/* workq instance might be running, wait for it */
		flush_workqueue(edac_workqueue);
	}
}

/*
 * edac_mc_reset_delay_period(unsigned long value)
 *
 *	user space has updated our poll period value, need to
 *	reset our workq delays
 */
void edac_mc_reset_delay_period(int value)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	mutex_lock(&mem_ctls_mutex);

	/* scan the list and turn off all workq timers, doing so under lock
	 */
	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->op_state == OP_RUNNING_POLL)
			cancel_delayed_work(&mci->work);
	}

	mutex_unlock(&mem_ctls_mutex);


	/* re-walk the list, and reset the poll delay */
	mutex_lock(&mem_ctls_mutex);

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		edac_mc_workq_setup(mci, (unsigned long) value);
	}

	mutex_unlock(&mem_ctls_mutex);
}


/* Return 0 on success, 1 on failure.
 * Before calling this function, caller must
 * assign a unique value to mci->mc_idx.
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex lock held
 */
static int add_mc_to_global_list(struct mem_ctl_info *mci)
{
	struct list_head *item, *insert_before;
	struct mem_ctl_info *p;

	insert_before = &mc_devices;

	p = find_mci_by_dev(mci->pdev);
	if (unlikely(p != NULL))
		goto fail0;

	list_for_each(item, &mc_devices) {
		p = list_entry(item, struct mem_ctl_info, link);

		if (p->mc_idx >= mci->mc_idx) {
			if (unlikely(p->mc_idx == mci->mc_idx))
				goto fail1;

			insert_before = item;
			break;
		}
	}

	list_add_tail_rcu(&mci->link, insert_before);
	atomic_inc(&edac_handlers);
	return 0;

fail0:
	edac_printk(KERN_WARNING, EDAC_MC,
		"%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
		edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
		"bug in low-level driver: attempt to assign\n"
		"    duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
	return 1;
}

static void del_mc_from_global_list(struct mem_ctl_info *mci)
{
	atomic_dec(&edac_handlers);
	list_del_rcu(&mci->link);

	/* these are for safe removal of devices from global list while
	 * NMI handlers may be traversing list
	 */
	synchronize_rcu();
	INIT_LIST_HEAD(&mci->link);
}

/**
 * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'.
 *
 * If found, return a pointer to the structure.
 * Else return NULL.
 *
 * Caller must hold mem_ctls_mutex.
 */
struct mem_ctl_info *edac_mc_find(int idx)
{
	struct list_head *item;
	struct mem_ctl_info *mci;

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->mc_idx >= idx) {
			if (mci->mc_idx == idx)
				return mci;

			break;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(edac_mc_find);

/**
 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
 *	create sysfs entries associated with mci structure
 * @mci: pointer to the mci structure to be added to the list
 *
 * Return:
 *	0	Success
 *	!0	Failure
 */

/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc(struct mem_ctl_info *mci)
{
	edac_dbg(0, "\n");

#ifdef CONFIG_EDAC_DEBUG
	if (edac_debug_level >= 3)
		edac_mc_dump_mci(mci);

	if (edac_debug_level >= 4) {
		int i;

		for (i = 0; i < mci->nr_csrows; i++) {
			struct csrow_info *csrow = mci->csrows[i];
			u32 nr_pages = 0;
			int j;

			for (j = 0; j < csrow->nr_channels; j++)
				nr_pages += csrow->channels[j]->dimm->nr_pages;
			if (!nr_pages)
				continue;
			edac_mc_dump_csrow(csrow);
			for (j = 0; j < csrow->nr_channels; j++)
				if (csrow->channels[j]->dimm->nr_pages)
					edac_mc_dump_channel(csrow->channels[j]);
		}
		for (i = 0; i < mci->tot_dimms; i++)
			if (mci->dimms[i]->nr_pages)
				edac_mc_dump_dimm(mci->dimms[i], i);
	}
#endif
	mutex_lock(&mem_ctls_mutex);

	if (add_mc_to_global_list(mci))
		goto fail0;

	/* set load time so that error rate can be tracked */
	mci->start_time = jiffies;

	if (edac_create_sysfs_mci_device(mci)) {
		edac_mc_printk(mci, KERN_WARNING,
			"failed to create sysfs device\n");
		goto fail1;
	}

	/* If there IS a check routine, then we are running POLLED */
	if (mci->edac_check != NULL) {
		/* This instance is NOW RUNNING */
		mci->op_state = OP_RUNNING_POLL;

		edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
	} else {
		mci->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
		" DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci));

	mutex_unlock(&mem_ctls_mutex);
	return 0;

fail1:
	del_mc_from_global_list(mci);

fail0:
	mutex_unlock(&mem_ctls_mutex);
	return 1;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc);

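/*
 * Illustrative sketch (added comment, not in the original file): the usual
 * driver probe sequence around edac_mc_add_mc(). The field values shown are
 * hypothetical examples; edac_check is only set by drivers that want the
 * polled mode configured above.
 *
 *	mci->pdev = &pdev->dev;
 *	mci->mod_name = "my_edac_driver";
 *	mci->ctl_name = "my_memory_controller";
 *	mci->edac_check = my_check_routine;	(optional: enables polling)
 *
 *	if (edac_mc_add_mc(mci)) {
 *		edac_mc_free(mci);
 *		return -ENODEV;
 *	}
 */
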
/**
 * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
 *	remove mci structure from global list
 * @dev: Pointer to 'struct device' representing mci structure to remove.
 *
 * Return pointer to removed mci structure, or NULL if device not found.
 */
struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
	struct mem_ctl_info *mci;

	edac_dbg(0, "\n");

	mutex_lock(&mem_ctls_mutex);

	/* find the requested mci struct in the global list */
	mci = find_mci_by_dev(dev);
	if (mci == NULL) {
		mutex_unlock(&mem_ctls_mutex);
		return NULL;
	}

	del_mc_from_global_list(mci);
	mutex_unlock(&mem_ctls_mutex);

	/* flush workq processes */
	edac_mc_workq_teardown(mci);

	/* marking MCI offline */
	mci->op_state = OP_OFFLINE;

	/* remove from sysfs */
	edac_remove_sysfs_mci_device(mci);

	edac_printk(KERN_INFO, EDAC_MC,
		"Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
		mci->mod_name, mci->ctl_name, edac_dev_name(mci));

	return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_del_mc);

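/*
 * Illustrative sketch (added comment, not in the original file): typical
 * teardown in a driver's remove path, using the same struct device that
 * was stored in mci->pdev at probe time:
 *
 *	struct mem_ctl_info *mci;
 *
 *	mci = edac_mc_del_mc(&pdev->dev);
 *	if (mci)
 *		edac_mc_free(mci);
 */
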
static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
				u32 size)
{
	struct page *pg;
	void *virt_addr;
	unsigned long flags = 0;

	edac_dbg(3, "\n");

	/* ECC error page was not in our memory. Ignore it. */
	if (!pfn_valid(page))
		return;

	/* Find the actual page structure then map it and fix */
	pg = pfn_to_page(page);

	if (PageHighMem(pg))
		local_irq_save(flags);

	virt_addr = kmap_atomic(pg);

	/* Perform architecture specific atomic scrub operation */
	atomic_scrub(virt_addr + offset, size);

	/* Unmap and complete */
	kunmap_atomic(virt_addr);

	if (PageHighMem(pg))
		local_irq_restore(flags);
}

/* FIXME - should return -1 */
int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
{
	struct csrow_info **csrows = mci->csrows;
	int row, i, j, n;

	edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page);
	row = -1;

	for (i = 0; i < mci->nr_csrows; i++) {
		struct csrow_info *csrow = csrows[i];
		n = 0;
		for (j = 0; j < csrow->nr_channels; j++) {
			struct dimm_info *dimm = csrow->channels[j]->dimm;
			n += dimm->nr_pages;
		}
		if (n == 0)
			continue;

		edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n",
			 mci->mc_idx,
			 csrow->first_page, page, csrow->last_page,
			 csrow->page_mask);

		if ((page >= csrow->first_page) &&
		    (page <= csrow->last_page) &&
		    ((page & csrow->page_mask) ==
		     (csrow->first_page & csrow->page_mask))) {
			row = i;
			break;
		}
	}

	if (row == -1)
		edac_mc_printk(mci, KERN_ERR,
			"could not look up page error address %lx\n",
			(unsigned long)page);

	return row;
}
EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);

const char *edac_layer_name[] = {
	[EDAC_MC_LAYER_BRANCH] = "branch",
	[EDAC_MC_LAYER_CHANNEL] = "channel",
	[EDAC_MC_LAYER_SLOT] = "slot",
	[EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
};
EXPORT_SYMBOL_GPL(edac_layer_name);

static void edac_inc_ce_error(struct mem_ctl_info *mci,
			      bool enable_per_layer_report,
			      const int pos[EDAC_MAX_LAYERS])
{
	int i, index = 0;

	mci->ce_mc++;

	if (!enable_per_layer_report) {
		mci->ce_noinfo_count++;
		return;
	}

	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			break;
		index += pos[i];
		mci->ce_per_layer[i][index]++;

		if (i < mci->n_layers - 1)
			index *= mci->layers[i + 1].size;
	}
}

static void edac_inc_ue_error(struct mem_ctl_info *mci,
			      bool enable_per_layer_report,
			      const int pos[EDAC_MAX_LAYERS])
{
	int i, index = 0;

	mci->ue_mc++;

	if (!enable_per_layer_report) {
		mci->ue_noinfo_count++;
		return;
	}

	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			break;
		index += pos[i];
		mci->ue_per_layer[i][index]++;

		if (i < mci->n_layers - 1)
			index *= mci->layers[i + 1].size;
	}
}

static void edac_ce_error(struct mem_ctl_info *mci,
			  const int pos[EDAC_MAX_LAYERS],
			  const char *msg,
			  const char *location,
			  const char *label,
			  const char *detail,
			  const char *other_detail,
			  const bool enable_per_layer_report,
			  const unsigned long page_frame_number,
			  const unsigned long offset_in_page,
			  long grain)
{
	unsigned long remapped_page;

	if (edac_mc_get_log_ce()) {
		if (other_detail && *other_detail)
			edac_mc_printk(mci, KERN_WARNING,
				       "CE %s on %s (%s %s - %s)\n",
				       msg, label, location,
				       detail, other_detail);
		else
			edac_mc_printk(mci, KERN_WARNING,
				       "CE %s on %s (%s %s)\n",
				       msg, label, location,
				       detail);
	}
	edac_inc_ce_error(mci, enable_per_layer_report, pos);

	if (mci->scrub_mode & SCRUB_SW_SRC) {
		/*
		 * Some memory controllers (called MCs below) can remap
		 * memory so that it is still available at a different
		 * address when PCI devices map into memory.
		 * MC's that can't do this, lose the memory where PCI
		 * devices are mapped. This mapping is MC-dependent
		 * and so we call back into the MC driver for it to
		 * map the MC page to a physical (CPU) page which can
		 * then be mapped to a virtual page - which can then
		 * be scrubbed.
		 */
		remapped_page = mci->ctl_page_to_phys ?
			mci->ctl_page_to_phys(mci, page_frame_number) :
			page_frame_number;

		edac_mc_scrub_block(remapped_page,
				    offset_in_page, grain);
	}
}

static void edac_ue_error(struct mem_ctl_info *mci,
			  const int pos[EDAC_MAX_LAYERS],
			  const char *msg,
			  const char *location,
			  const char *label,
			  const char *detail,
			  const char *other_detail,
			  const bool enable_per_layer_report)
{
	if (edac_mc_get_log_ue()) {
		if (other_detail && *other_detail)
			edac_mc_printk(mci, KERN_WARNING,
				       "UE %s on %s (%s %s - %s)\n",
				       msg, label, location, detail,
				       other_detail);
		else
			edac_mc_printk(mci, KERN_WARNING,
				       "UE %s on %s (%s %s)\n",
				       msg, label, location, detail);
	}

	if (edac_mc_get_panic_on_ue()) {
		if (other_detail && *other_detail)
			panic("UE %s on %s (%s%s - %s)\n",
			      msg, label, location, detail, other_detail);
		else
			panic("UE %s on %s (%s%s)\n",
			      msg, label, location, detail);
	}

	edac_inc_ue_error(mci, enable_per_layer_report, pos);
}

#define OTHER_LABEL " or "

/**
 * edac_mc_handle_error - reports a memory event to userspace
 *
 * @type:			severity of the error (CE/UE/Fatal)
 * @mci:			a struct mem_ctl_info pointer
 * @page_frame_number:		mem page where the error occurred
 * @offset_in_page:		offset of the error inside the page
 * @syndrome:			ECC syndrome
 * @top_layer:			Memory layer[0] position
 * @mid_layer:			Memory layer[1] position
 * @low_layer:			Memory layer[2] position
 * @msg:			Message meaningful to the end users that
 *				explains the event
 * @other_detail:		Technical details about the event that
 *				may help hardware manufacturers and
 *				EDAC developers to analyse the event
 * @arch_log:			Architecture-specific struct that can
 *				be used to add extended information to the
 *				tracepoint, like dumping MCE registers.
 */
void edac_mc_handle_error(const enum hw_event_mc_err_type type,
			  struct mem_ctl_info *mci,
			  const unsigned long page_frame_number,
			  const unsigned long offset_in_page,
			  const unsigned long syndrome,
			  const int top_layer,
			  const int mid_layer,
			  const int low_layer,
			  const char *msg,
			  const char *other_detail,
			  const void *arch_log)
{
	/* FIXME: too much for stack: move it to some pre-allocated area */
	char detail[80], location[80];
	char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms];
	char *p;
	int row = -1, chan = -1;
	int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
	int i;
	long grain;
	bool enable_per_layer_report = false;
	u16 error_count;	/* FIXME: make it a parameter */
	u8 grain_bits;

	edac_dbg(3, "MC%d\n", mci->mc_idx);

	/*
	 * Check if the event report is consistent and if the memory
	 * location is known. If it is known, enable_per_layer_report will be
	 * true, the DIMM(s) label info will be filled and the per-layer
	 * error counters will be incremented.
	 */
	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] >= (int)mci->layers[i].size) {
			if (type == HW_EVENT_ERR_CORRECTED)
				p = "CE";
			else
				p = "UE";

			edac_mc_printk(mci, KERN_ERR,
				       "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
				       edac_layer_name[mci->layers[i].type],
				       pos[i], mci->layers[i].size);
			/*
			 * Instead of just returning it, let's use what's
			 * known about the error. The increment routines and
			 * the DIMM filter logic will do the right thing by
			 * pointing the likely damaged DIMMs.
			 */
			pos[i] = -1;
		}
		if (pos[i] >= 0)
			enable_per_layer_report = true;
	}

	/*
	 * Get the dimm label/grain that applies to the match criteria.
	 * As the error algorithm may not be able to point to just one memory
	 * stick, the logic here will get all possible labels that could
	 * potentially be affected by the error.
	 * On FB-DIMM memory controllers, for uncorrected errors, it is common
	 * to have only the MC channel and the MC dimm (also called "branch")
	 * but the channel is not known, as the memory is arranged in pairs,
	 * where each memory belongs to a separate channel within the same
	 * branch.
	 */
	grain = 0;
	p = label;
	*p = '\0';
	for (i = 0; i < mci->tot_dimms; i++) {
		struct dimm_info *dimm = mci->dimms[i];

		if (top_layer >= 0 && top_layer != dimm->location[0])
			continue;
		if (mid_layer >= 0 && mid_layer != dimm->location[1])
			continue;
		if (low_layer >= 0 && low_layer != dimm->location[2])
			continue;

		/* get the max grain, over the error match range */
		if (dimm->grain > grain)
			grain = dimm->grain;

		/*
		 * If the error is memory-controller wide, there's no need to
		 * seek for the affected DIMMs because the whole
		 * channel/memory controller/... may be affected.
		 * Also, don't show errors for empty DIMM slots.
		 */
		if (enable_per_layer_report && dimm->nr_pages) {
			if (p != label) {
				strcpy(p, OTHER_LABEL);
				p += strlen(OTHER_LABEL);
			}
			strcpy(p, dimm->label);
			p += strlen(p);
			*p = '\0';

			/*
			 * get csrow/channel of the DIMM, in order to allow
			 * incrementing the compat API counters
			 */
			edac_dbg(4, "%s csrows map: (%d,%d)\n",
				 mci->mem_is_per_rank ? "rank" : "dimm",
				 dimm->csrow, dimm->cschannel);
			if (row == -1)
				row = dimm->csrow;
			else if (row >= 0 && row != dimm->csrow)
				row = -2;

			if (chan == -1)
				chan = dimm->cschannel;
			else if (chan >= 0 && chan != dimm->cschannel)
				chan = -2;
		}
	}

	if (!enable_per_layer_report) {
		strcpy(label, "any memory");
	} else {
		edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);
		if (p == label)
			strcpy(label, "unknown memory");
		if (type == HW_EVENT_ERR_CORRECTED) {
			if (row >= 0) {
				mci->csrows[row]->ce_count++;
				if (chan >= 0)
					mci->csrows[row]->channels[chan]->ce_count++;
			}
		} else
			if (row >= 0)
				mci->csrows[row]->ue_count++;
	}

	/* Fill the RAM location data */
	p = location;
	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			continue;

		p += sprintf(p, "%s:%d ",
			     edac_layer_name[mci->layers[i].type],
			     pos[i]);
	}
	if (p > location)
		*(p - 1) = '\0';

	/* Report the error via the trace interface */

	error_count = 1;	/* FIXME: allow changing it */
	grain_bits = fls_long(grain) + 1;
	trace_mc_event(type, msg, label, error_count,
		       mci->mc_idx, top_layer, mid_layer, low_layer,
		       PAGES_TO_MiB(page_frame_number) | offset_in_page,
		       grain_bits, syndrome, other_detail);

	/* Memory type dependent details about the error */
	if (type == HW_EVENT_ERR_CORRECTED) {
		snprintf(detail, sizeof(detail),
			"page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
			page_frame_number, offset_in_page,
			grain, syndrome);
		edac_ce_error(mci, pos, msg, location, label, detail,
			      other_detail, enable_per_layer_report,
			      page_frame_number, offset_in_page, grain);
	} else {
		snprintf(detail, sizeof(detail),
			"page:0x%lx offset:0x%lx grain:%ld",
			page_frame_number, offset_in_page, grain);

		edac_ue_error(mci, pos, msg, location, label, detail,
			      other_detail, enable_per_layer_report);
	}
}
EXPORT_SYMBOL_GPL(edac_mc_handle_error);
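
/*
 * Illustrative sketch (added comment, not in the original file): how a
 * driver that decoded a corrected error down to csrow/channel might report
 * it. The decoded values are hypothetical; unknown layer positions are
 * passed as -1, which the per-layer accounting above treats as "unknown".
 *
 *	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
 *			     page_frame_number, offset_in_page, syndrome,
 *			     csrow, channel, -1,
 *			     "ECC read error", "", NULL);
 */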