i7core_edac.c 63.4 KB
Newer Older
1 2
/* Intel i7 core/Nehalem Memory Controller kernel module
 *
3
 * This driver supports the memory controllers found on the Intel
4 5 6
 * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
 * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield
 * and Westmere-EP.
7 8 9 10
 *
 * This file may be distributed under the terms of the
 * GNU General Public License version 2 only.
 *
11
 * Copyright (c) 2009-2010 by:
12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32
 *	 Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * Red Hat Inc. http://www.redhat.com
 *
 * Forked and adapted from the i5400_edac driver
 *
 * Based on the following public Intel datasheets:
 * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
 * Datasheet, Volume 2:
 *	http://download.intel.com/design/processor/datashts/320835.pdf
 * Intel Xeon Processor 5500 Series Datasheet Volume 2
 *	http://www.intel.com/Assets/PDF/datasheet/321322.pdf
 * also available at:
 * 	http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
Randy Dunlap's avatar
Randy Dunlap committed
33
#include <linux/delay.h>
Nils Carlson's avatar
Nils Carlson committed
34
#include <linux/dmi.h>
35 36
#include <linux/edac.h>
#include <linux/mmzone.h>
37
#include <linux/smp.h>
38
#include <asm/mce.h>
39
#include <asm/processor.h>
40
#include <asm/div64.h>
41 42 43

#include "edac_core.h"

44 45 46 47 48
/* Static vars */
static LIST_HEAD(i7core_edac_list);
static DEFINE_MUTEX(i7core_edac_lock);
static int probed;

49 50 51
static int use_pci_fixup;
module_param(use_pci_fixup, int, 0444);
MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
52 53 54 55 56 57 58 59 60
/*
 * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
 * registers start at bus 255, and are not reported by BIOS.
 * We currently find devices with only 2 sockets. In order to support more QPI
 * Quick Path Interconnect, just increment this number.
 */
#define MAX_SOCKET_BUSES	2


61 62 63
/*
 * Alter this version for the module when modifications are made
 */
Michal Marek's avatar
Michal Marek committed
64
#define I7CORE_REVISION    " Ver: 1.0.0"
65 66 67 68 69 70 71 72 73 74 75 76 77 78 79
#define EDAC_MOD_STR      "i7core_edac"

/*
 * Debug macros
 */
#define i7core_printk(level, fmt, arg...)			\
	edac_printk(level, "i7core", fmt, ##arg)

#define i7core_mc_printk(mci, level, fmt, arg...)		\
	edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)

/*
 * i7core Memory Controller Registers
 */

80 81 82
	/* OFFSETS for Device 0 Function 0 */

#define MC_CFG_CONTROL	0x90
83 84
  #define MC_CFG_UNLOCK		0x02
  #define MC_CFG_LOCK		0x00
85

86 87 88 89 90 91
	/* OFFSETS for Device 3 Function 0 */

#define MC_CONTROL	0x48
#define MC_STATUS	0x4c
#define MC_MAX_DOD	0x64

92
/*
David Mackey's avatar
David Mackey committed
93
 * OFFSETS for Device 3 Function 4, as indicated on Xeon 5500 datasheet:
94 95 96 97 98 99 100 101 102 103
 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
 */

#define MC_TEST_ERR_RCV1	0x60
  #define DIMM2_COR_ERR(r)			((r) & 0x7fff)

#define MC_TEST_ERR_RCV0	0x64
  #define DIMM1_COR_ERR(r)			(((r) >> 16) & 0x7fff)
  #define DIMM0_COR_ERR(r)			((r) & 0x7fff)

David Mackey's avatar
David Mackey committed
104
/* OFFSETS for Device 3 Function 2, as indicated on Xeon 5500 datasheet */
105 106 107 108 109 110 111
#define MC_SSRCONTROL		0x48
  #define SSR_MODE_DISABLE	0x00
  #define SSR_MODE_ENABLE	0x01
  #define SSR_MODE_MASK		0x03

#define MC_SCRUB_CONTROL	0x4c
  #define STARTSCRUB		(1 << 24)
Nils Carlson's avatar
Nils Carlson committed
112
  #define SCRUBINTERVAL_MASK    0xffffff
113

114 115 116 117 118 119 120 121 122 123 124
#define MC_COR_ECC_CNT_0	0x80
#define MC_COR_ECC_CNT_1	0x84
#define MC_COR_ECC_CNT_2	0x88
#define MC_COR_ECC_CNT_3	0x8c
#define MC_COR_ECC_CNT_4	0x90
#define MC_COR_ECC_CNT_5	0x94

#define DIMM_TOP_COR_ERR(r)			(((r) >> 16) & 0x7fff)
#define DIMM_BOT_COR_ERR(r)			((r) & 0x7fff)


125 126
	/* OFFSETS for Devices 4,5 and 6 Function 0 */

127 128 129 130 131 132
#define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
  #define THREE_DIMMS_PRESENT		(1 << 24)
  #define SINGLE_QUAD_RANK_PRESENT	(1 << 23)
  #define QUAD_RANK_PRESENT		(1 << 22)
  #define REGISTERED_DIMM		(1 << 15)

133 134 135 136
#define MC_CHANNEL_MAPPER	0x60
  #define RDLCH(r, ch)		((((r) >> (3 + (ch * 6))) & 0x07) - 1)
  #define WRLCH(r, ch)		((((r) >> (ch * 6)) & 0x07) - 1)

137 138 139
#define MC_CHANNEL_RANK_PRESENT 0x7c
  #define RANK_PRESENT_MASK		0xffff

140
#define MC_CHANNEL_ADDR_MATCH	0xf0
141 142 143 144 145 146 147 148 149 150
#define MC_CHANNEL_ERROR_MASK	0xf8
#define MC_CHANNEL_ERROR_INJECT	0xfc
  #define INJECT_ADDR_PARITY	0x10
  #define INJECT_ECC		0x08
  #define MASK_CACHELINE	0x06
  #define MASK_FULL_CACHELINE	0x06
  #define MASK_MSB32_CACHELINE	0x04
  #define MASK_LSB32_CACHELINE	0x02
  #define NO_MASK_CACHELINE	0x00
  #define REPEAT_EN		0x01
151

152
	/* OFFSETS for Devices 4,5 and 6 Function 1 */
153

154 155 156 157 158 159 160
#define MC_DOD_CH_DIMM0		0x48
#define MC_DOD_CH_DIMM1		0x4c
#define MC_DOD_CH_DIMM2		0x50
  #define RANKOFFSET_MASK	((1 << 12) | (1 << 11) | (1 << 10))
  #define RANKOFFSET(x)		((x & RANKOFFSET_MASK) >> 10)
  #define DIMM_PRESENT_MASK	(1 << 9)
  #define DIMM_PRESENT(x)	(((x) & DIMM_PRESENT_MASK) >> 9)
161 162 163 164
  #define MC_DOD_NUMBANK_MASK		((1 << 8) | (1 << 7))
  #define MC_DOD_NUMBANK(x)		(((x) & MC_DOD_NUMBANK_MASK) >> 7)
  #define MC_DOD_NUMRANK_MASK		((1 << 6) | (1 << 5))
  #define MC_DOD_NUMRANK(x)		(((x) & MC_DOD_NUMRANK_MASK) >> 5)
165
  #define MC_DOD_NUMROW_MASK		((1 << 4) | (1 << 3) | (1 << 2))
166
  #define MC_DOD_NUMROW(x)		(((x) & MC_DOD_NUMROW_MASK) >> 2)
167 168
  #define MC_DOD_NUMCOL_MASK		3
  #define MC_DOD_NUMCOL(x)		((x) & MC_DOD_NUMCOL_MASK)
169

170 171
#define MC_RANK_PRESENT		0x7c

172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194
#define MC_SAG_CH_0	0x80
#define MC_SAG_CH_1	0x84
#define MC_SAG_CH_2	0x88
#define MC_SAG_CH_3	0x8c
#define MC_SAG_CH_4	0x90
#define MC_SAG_CH_5	0x94
#define MC_SAG_CH_6	0x98
#define MC_SAG_CH_7	0x9c

#define MC_RIR_LIMIT_CH_0	0x40
#define MC_RIR_LIMIT_CH_1	0x44
#define MC_RIR_LIMIT_CH_2	0x48
#define MC_RIR_LIMIT_CH_3	0x4C
#define MC_RIR_LIMIT_CH_4	0x50
#define MC_RIR_LIMIT_CH_5	0x54
#define MC_RIR_LIMIT_CH_6	0x58
#define MC_RIR_LIMIT_CH_7	0x5C
#define MC_RIR_LIMIT_MASK	((1 << 10) - 1)

#define MC_RIR_WAY_CH		0x80
  #define MC_RIR_WAY_OFFSET_MASK	(((1 << 14) - 1) & ~0x7)
  #define MC_RIR_WAY_RANK_MASK		0x7

195 196 197 198 199
/*
 * i7core structs
 */

#define NUM_CHANS 3
200 201 202
#define MAX_DIMMS 3		/* Max DIMMS per channel */
#define MAX_MCR_FUNC  4
#define MAX_CHAN_FUNC 3
203 204 205 206 207

/* Snapshot of the global memory-controller registers (device 3 function 0). */
struct i7core_info {
	u32	mc_control;	/* MC_CONTROL (0x48): channel-active / ECCx8 bits */
	u32	mc_status;	/* MC_STATUS (0x4c): ECC-enabled / channel-disabled bits */
	u32	max_dod;	/* MC_MAX_DOD (0x64): max DIMM organization limits */
	u32	ch_map;		/* MC_CHANNEL_MAPPER (0x60): logical/physical channel map */
};

/* User-requested error-injection parameters; applied when "enable" is written. */
struct i7core_inject {
	int	enable;		/* non-zero while injection is armed */

	u32	section;	/* half-cacheline selector (bits 0-1) */
	u32	type;		/* repeat / ECC / parity injection type bits */
	u32	eccmask;	/* ECC bits to flip on the injected write */

	/* Error address mask; -1 in a field means "match any" */
	int channel, dimm, rank, bank, page, col;
};

/* Per-channel topology flags read from MC_CHANNEL_DIMM_INIT_PARAMS. */
struct i7core_channel {
	bool		is_3dimms_present;
	bool		is_single_4rank;
	bool		has_4rank;
	u32		dimms;	/* number of DIMMs populated on this channel */
};

/* One PCI device the probe must locate: slot/function plus its device id. */
struct pci_id_descr {
	int			dev;
	int			func;
	int 			dev_id;
	int			optional;	/* 1 if the device may legitimately be absent */
};

/* A per-CPU-family table of the PCI devices to probe. */
struct pci_id_table {
	const struct pci_id_descr	*descr;
	int				n_devs;
};

/* All PCI devices belonging to one socket's memory controller. */
struct i7core_dev {
	struct list_head	list;	/* linked into i7core_edac_list */
	u8			socket;
	struct pci_dev		**pdev;	/* array of n_devs device pointers */
	int			n_devs;
	struct mem_ctl_info	*mci;
};

/*
 * Driver-private state for one socket's memory controller: cached PCI
 * device handles, discovered topology, error-injection settings and the
 * corrected-error bookkeeping used by the polling code.
 */
struct i7core_pvt {
	/* sysfs devices for the inject_addrmatch and channel-counter nodes */
	struct device *addrmatch_dev, *chancounts_dev;

	struct pci_dev	*pci_noncore;		/* "non-core" registers, device 0.0 */
	struct pci_dev	*pci_mcr[MAX_MCR_FUNC + 1];	/* device 3, per function */
	struct pci_dev	*pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];	/* devices 4-6 */

	struct i7core_dev *i7core_dev;		/* back pointer to the socket entry */

	struct i7core_info	info;		/* cached MC register snapshot */
	struct i7core_inject	inject;		/* current injection parameters */
	struct i7core_channel	channel[NUM_CHANS];

	int		ce_count_available;	/* non-zero once CE counters hold valid data */

			/* ECC corrected errors counts per udimm */
	unsigned long	udimm_ce_count[MAX_DIMMS];
	int		udimm_last_ce_count[MAX_DIMMS];
			/* ECC corrected errors counts per rdimm */
	unsigned long	rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
	int		rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];

	bool		is_registered, enable_scrub;

	/* Fifo double buffers */
	struct mce		mce_entry[MCE_LOG_LEN];
	struct mce		mce_outentry[MCE_LOG_LEN];

	/* Fifo in/out counters */
	unsigned		mce_in, mce_out;

	/* Count indicator to show errors not got */
	unsigned		mce_overrun;

	/* DCLK Frequency used for computing scrub rate */
	int			dclk_freq;

	/* Struct to control EDAC polling */
	struct edac_pci_ctl_info *i7core_pci;
};

291 292 293 294 295
#define PCI_DESCR(device, function, device_id)	\
	.dev = (device),			\
	.func = (function),			\
	.dev_id = (device_id)

296
static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
297 298 299
		/* Memory controller */
	{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR)     },
	{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD)  },
300
			/* Exists only for RDIMM */
301
	{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1  },
302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320
	{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },

		/* Channel 0 */
	{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
	{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
	{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
	{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC)   },

		/* Channel 1 */
	{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
	{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
	{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
	{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC)   },

		/* Channel 2 */
	{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
	{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
	{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
	{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC)   },
321 322 323 324 325 326 327 328 329 330

		/* Generic Non-core registers */
	/*
	 * This is the PCI device on i7core and on Xeon 35xx (8086:2c41)
	 * On Xeon 55xx, however, it has a different id (8086:2c40). So,
	 * the probing code needs to test for the other address in case of
	 * failure of this one
	 */
	{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE)  },

331
};
332

333
static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
334 335 336 337 338 339 340 341 342
	{ PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR)         },
	{ PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD)      },
	{ PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST)     },

	{ PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
	{ PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
	{ PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
	{ PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC)   },

343 344 345 346
	{ PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
	{ PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
	{ PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
	{ PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC)   },
347 348 349 350 351 352

	/*
	 * This is the PCI device has an alternate address on some
	 * processors like Core i7 860
	 */
	{ PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE)     },
353 354
};

355
static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379
		/* Memory controller */
	{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2)     },
	{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2)  },
			/* Exists only for RDIMM */
	{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1  },
	{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) },

		/* Channel 0 */
	{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) },
	{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) },
	{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) },
	{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2)   },

		/* Channel 1 */
	{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) },
	{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) },
	{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) },
	{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2)   },

		/* Channel 2 */
	{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) },
	{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
	{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
	{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2)   },
380 381 382 383

		/* Generic Non-core registers */
	{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2)  },

384 385
};

386 387
#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
static const struct pci_id_table pci_dev_table[] = {
388 389 390
	PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
	PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
	PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
391
	{0,}			/* 0 terminated list. */
392 393
};

394 395 396
/*
 *	pci_device_id	table for which devices we are looking for
 */
397
static DEFINE_PCI_DEVICE_TABLE(i7core_pci_tbl) = {
398
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
399
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
400 401 402
	{0,}			/* 0 terminated list. */
};

403
/****************************************************************************
David Mackey's avatar
David Mackey committed
404
			Ancillary status routines
405 406 407
 ****************************************************************************/

	/* MC_CONTROL bits */
408 409
#define CH_ACTIVE(pvt, ch)	((pvt)->info.mc_control & (1 << (8 + ch)))
#define ECCx8(pvt)		((pvt)->info.mc_control & (1 << 1))
410 411

	/* MC_STATUS bits */
412
#define ECC_ENABLED(pvt)	((pvt)->info.mc_status & (1 << 4))
413
#define CH_DISABLED(pvt, ch)	((pvt)->info.mc_status & (1 << ch))
414 415

	/* MC_MAX_DOD read functions */
416
static inline int numdimms(u32 dimms)
417
{
418
	return (dimms & 0x3) + 1;
419 420
}

421
static inline int numrank(u32 rank)
422
{
423
	static const int ranks[] = { 1, 2, 4, -EINVAL };
424

425
	return ranks[rank & 0x3];
426 427
}

428
static inline int numbank(u32 bank)
429
{
430
	static const int banks[] = { 4, 8, 16, -EINVAL };
431

432
	return banks[bank & 0x3];
433 434
}

435
static inline int numrow(u32 row)
436
{
437
	static const int rows[] = {
438 439 440 441
		1 << 12, 1 << 13, 1 << 14, 1 << 15,
		1 << 16, -EINVAL, -EINVAL, -EINVAL,
	};

442
	return rows[row & 0x7];
443 444
}

445
static inline int numcol(u32 col)
446
{
447
	static const int cols[] = {
448 449
		1 << 10, 1 << 11, 1 << 12, -EINVAL,
	};
450
	return cols[col & 0x3];
451 452
}

/*
 * Find the already-registered i7core_dev for a given socket number.
 * Returns NULL if that socket has not been probed yet.
 * NOTE(review): the list walk is unlocked here; callers presumably hold
 * i7core_edac_lock - confirm at the call sites outside this chunk.
 */
static struct i7core_dev *get_i7core_dev(u8 socket)
{
	struct i7core_dev *i7core_dev;

	list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
		if (i7core_dev->socket == socket)
			return i7core_dev;
	}

	return NULL;
}

465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487
static struct i7core_dev *alloc_i7core_dev(u8 socket,
					   const struct pci_id_table *table)
{
	struct i7core_dev *i7core_dev;

	i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
	if (!i7core_dev)
		return NULL;

	i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs,
				   GFP_KERNEL);
	if (!i7core_dev->pdev) {
		kfree(i7core_dev);
		return NULL;
	}

	i7core_dev->socket = socket;
	i7core_dev->n_devs = table->n_devs;
	list_add_tail(&i7core_dev->list, &i7core_edac_list);

	return i7core_dev;
}

/* Unlink a socket's device set from the global list and release its memory.
 * Note: does not put the pci_dev references themselves - only the array. */
static void free_i7core_dev(struct i7core_dev *i7core_dev)
{
	list_del(&i7core_dev->list);
	kfree(i7core_dev->pdev);
	kfree(i7core_dev);
}

495 496 497
/****************************************************************************
			Memory check routines
 ****************************************************************************/
498

499
static int get_dimm_config(struct mem_ctl_info *mci)
500 501
{
	struct i7core_pvt *pvt = mci->pvt_info;
502
	struct pci_dev *pdev;
503
	int i, j;
504
	enum edac_type mode;
505
	enum mem_type mtype;
506
	struct dimm_info *dimm;
507

508
	/* Get data from the MC register, function 0 */
509
	pdev = pvt->pci_mcr[0];
510
	if (!pdev)
511 512
		return -ENODEV;

513
	/* Device 3 function 0 reads */
514 515 516 517
	pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
	pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
	pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
	pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
518

519 520 521
	edac_dbg(0, "QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
		 pvt->i7core_dev->socket, pvt->info.mc_control,
		 pvt->info.mc_status, pvt->info.max_dod, pvt->info.ch_map);
522

523
	if (ECC_ENABLED(pvt)) {
524
		edac_dbg(0, "ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
525 526 527 528 529
		if (ECCx8(pvt))
			mode = EDAC_S8ECD8ED;
		else
			mode = EDAC_S4ECD4ED;
	} else {
530
		edac_dbg(0, "ECC disabled\n");
531 532
		mode = EDAC_NONE;
	}
533 534

	/* FIXME: need to handle the error codes */
535 536 537 538 539 540
	edac_dbg(0, "DOD Max limits: DIMMS: %d, %d-ranked, %d-banked x%x x 0x%x\n",
		 numdimms(pvt->info.max_dod),
		 numrank(pvt->info.max_dod >> 2),
		 numbank(pvt->info.max_dod >> 4),
		 numrow(pvt->info.max_dod >> 6),
		 numcol(pvt->info.max_dod >> 9));
541

542
	for (i = 0; i < NUM_CHANS; i++) {
543
		u32 data, dimm_dod[3], value[8];
544

545 546 547
		if (!pvt->pci_ch[i][0])
			continue;

548
		if (!CH_ACTIVE(pvt, i)) {
549
			edac_dbg(0, "Channel %i is not active\n", i);
550 551 552
			continue;
		}
		if (CH_DISABLED(pvt, i)) {
553
			edac_dbg(0, "Channel %i is disabled\n", i);
554 555 556
			continue;
		}

557
		/* Devices 4-6 function 0 */
558
		pci_read_config_dword(pvt->pci_ch[i][0],
559 560
				MC_CHANNEL_DIMM_INIT_PARAMS, &data);

561 562 563 564 565 566 567 568 569

		if (data & THREE_DIMMS_PRESENT)
			pvt->channel[i].is_3dimms_present = true;

		if (data & SINGLE_QUAD_RANK_PRESENT)
			pvt->channel[i].is_single_4rank = true;

		if (data & QUAD_RANK_PRESENT)
			pvt->channel[i].has_4rank = true;
570

571 572
		if (data & REGISTERED_DIMM)
			mtype = MEM_RDDR3;
573
		else
574 575 576
			mtype = MEM_DDR3;

		/* Devices 4-6 function 1 */
577
		pci_read_config_dword(pvt->pci_ch[i][1],
578
				MC_DOD_CH_DIMM0, &dimm_dod[0]);
579
		pci_read_config_dword(pvt->pci_ch[i][1],
580
				MC_DOD_CH_DIMM1, &dimm_dod[1]);
581
		pci_read_config_dword(pvt->pci_ch[i][1],
582
				MC_DOD_CH_DIMM2, &dimm_dod[2]);
583

584 585 586 587 588 589 590 591
		edac_dbg(0, "Ch%d phy rd%d, wr%d (0x%08x): %s%s%s%cDIMMs\n",
			 i,
			 RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
			 data,
			 pvt->channel[i].is_3dimms_present ? "3DIMMS " : "",
			 pvt->channel[i].is_3dimms_present ? "SINGLE_4R " : "",
			 pvt->channel[i].has_4rank ? "HAS_4R " : "",
			 (data & REGISTERED_DIMM) ? 'R' : 'U');
592 593 594

		for (j = 0; j < 3; j++) {
			u32 banks, ranks, rows, cols;
595
			u32 size, npages;
596 597 598 599

			if (!DIMM_PRESENT(dimm_dod[j]))
				continue;

600 601
			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
				       i, j, 0);
602 603 604 605 606
			banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
			ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
			rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
			cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));

607 608 609
			/* DDR3 has 8 I/O banks */
			size = (rows * cols * banks * ranks) >> (20 - 3);

610 611 612 613
			edac_dbg(0, "\tdimm %d %d Mb offset: %x, bank: %d, rank: %d, row: %#x, col: %#x\n",
				 j, size,
				 RANKOFFSET(dimm_dod[j]),
				 banks, ranks, rows, cols);
614

615
			npages = MiB_TO_PAGES(size);
616

617
			dimm->nr_pages = npages;
618

619 620
			switch (banks) {
			case 4:
621
				dimm->dtype = DEV_X4;
622 623
				break;
			case 8:
624
				dimm->dtype = DEV_X8;
625 626
				break;
			case 16:
627
				dimm->dtype = DEV_X16;
628 629
				break;
			default:
630
				dimm->dtype = DEV_UNKNOWN;
631 632
			}

633 634 635 636 637 638
			snprintf(dimm->label, sizeof(dimm->label),
				 "CPU#%uChannel#%u_DIMM#%u",
				 pvt->i7core_dev->socket, i, j);
			dimm->grain = 8;
			dimm->edac_mode = mode;
			dimm->mtype = mtype;
639
		}
640

641 642 643 644 645 646 647 648
		pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
		pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
		pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
		pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
		pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
		pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
		pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
		pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
649
		edac_dbg(1, "\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
650
		for (j = 0; j < 8; j++)
651 652 653 654
			edac_dbg(1, "\t\t%#x\t%#x\t%#x\n",
				 (value[j] >> 27) & 0x1,
				 (value[j] >> 24) & 0x7,
				 (value[j] & ((1 << 24) - 1)));
655 656
	}

657 658 659
	return 0;
}

660 661 662 663
/****************************************************************************
			Error insertion routines
 ****************************************************************************/

664 665
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

666 667 668 669 670 671 672
/* The i7core has independent error injection features per channel.
   However, to have a simpler code, we don't allow enabling error injection
   on more than one channel.
   Also, since a change at an inject parameter will be applied only at enable,
   we're disabling error injection on all write calls to the sysfs nodes that
   controls the error code injection.
 */
673
static int disable_inject(const struct mem_ctl_info *mci)
674 675 676 677 678
{
	struct i7core_pvt *pvt = mci->pvt_info;

	pvt->inject.enable = 0;

679
	if (!pvt->pci_ch[pvt->inject.channel][0])
680 681
		return -ENODEV;

682
	pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
683
				MC_CHANNEL_ERROR_INJECT, 0);
684 685

	return 0;
686 687 688 689 690 691 692 693 694
}

/*
 * i7core inject inject.section
 *
 *	accept and store error injection inject.section value
 *	bit 0 - refers to the lower 32-byte half cacheline
 *	bit 1 - refers to the upper 32-byte half cacheline
 */
695 696
static ssize_t i7core_inject_section_store(struct device *dev,
					   struct device_attribute *mattr,
697 698
					   const char *data, size_t count)
{
699
	struct mem_ctl_info *mci = to_mci(dev);
700 701 702 703 704
	struct i7core_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int rc;

	if (pvt->inject.enable)
705
		disable_inject(mci);
706

707
	rc = kstrtoul(data, 10, &value);
708
	if ((rc < 0) || (value > 3))
709
		return -EIO;
710 711 712 713 714

	pvt->inject.section = (u32) value;
	return count;
}

715 716 717
static ssize_t i7core_inject_section_show(struct device *dev,
					  struct device_attribute *mattr,
					  char *data)
718
{
719
	struct mem_ctl_info *mci = to_mci(dev);
720 721 722 723 724 725 726 727 728 729 730 731
	struct i7core_pvt *pvt = mci->pvt_info;
	return sprintf(data, "0x%08x\n", pvt->inject.section);
}

/*
 * i7core inject.type
 *
 *	accept and store error injection inject.section value
 *	bit 0 - repeat enable - Enable error repetition
 *	bit 1 - inject ECC error
 *	bit 2 - inject parity error
 */
732 733
static ssize_t i7core_inject_type_store(struct device *dev,
					struct device_attribute *mattr,
734 735
					const char *data, size_t count)
{
736 737
	struct mem_ctl_info *mci = to_mci(dev);
struct i7core_pvt *pvt = mci->pvt_info;
738 739 740 741
	unsigned long value;
	int rc;

	if (pvt->inject.enable)
742
		disable_inject(mci);
743

744
	rc = kstrtoul(data, 10, &value);
745
	if ((rc < 0) || (value > 7))
746
		return -EIO;
747 748 749 750 751

	pvt->inject.type = (u32) value;
	return count;
}

752 753 754
static ssize_t i7core_inject_type_show(struct device *dev,
				       struct device_attribute *mattr,
				       char *data)
755
{
756
	struct mem_ctl_info *mci = to_mci(dev);
757
	struct i7core_pvt *pvt = mci->pvt_info;
758

759 760 761 762 763 764 765 766 767 768 769 770 771
	return sprintf(data, "0x%08x\n", pvt->inject.type);
}

/*
 * i7core_inject_inject.eccmask_store
 *
 * The type of error (UE/CE) will depend on the inject.eccmask value:
 *   Any bits set to a 1 will flip the corresponding ECC bit
 *   Correctable errors can be injected by flipping 1 bit or the bits within
 *   a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
 *   23:16 and 31:24). Flipping bits in two symbol pairs will cause an
 *   uncorrectable error to be injected.
 */
772 773 774
static ssize_t i7core_inject_eccmask_store(struct device *dev,
					   struct device_attribute *mattr,
					   const char *data, size_t count)
775
{
776
	struct mem_ctl_info *mci = to_mci(dev);
777 778 779 780 781
	struct i7core_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int rc;

	if (pvt->inject.enable)
782
		disable_inject(mci);
783

784
	rc = kstrtoul(data, 10, &value);
785
	if (rc < 0)
786
		return -EIO;
787 788 789 790 791

	pvt->inject.eccmask = (u32) value;
	return count;
}

792 793 794
static ssize_t i7core_inject_eccmask_show(struct device *dev,
					  struct device_attribute *mattr,
					  char *data)
795
{
796
	struct mem_ctl_info *mci = to_mci(dev);
797
	struct i7core_pvt *pvt = mci->pvt_info;
798

799 800 801 802 803 804 805 806 807 808 809 810 811 812
	return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
}

/*
 * i7core_addrmatch
 *
 * The type of error (UE/CE) will depend on the inject.eccmask value:
 *   Any bits set to a 1 will flip the corresponding ECC bit
 *   Correctable errors can be injected by flipping 1 bit or the bits within
 *   a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
 *   23:16 and 31:24). Flipping bits in two symbol pairs will cause an
 *   uncorrectable error to be injected.
 */

/*
 * DECLARE_ADDR_MATCH(param, limit) generates the sysfs store/show pair for
 * one injection address-match field (channel/dimm/rank/bank/page/col).
 * "store" accepts either the literal string "any" (recorded as -1, meaning
 * "match anything") or a decimal value in [0, limit); a pending injection
 * is disarmed first, since parameters only take effect on the next enable.
 * "show" prints either "any" or the decimal value.
 * NOTE(review): "value" is declared long but passed to kstrtoul(), which
 * takes unsigned long * - kstrtol() looks like the intended call; confirm
 * against the kstrtox prototypes before changing.
 */
#define DECLARE_ADDR_MATCH(param, limit)			\
static ssize_t i7core_inject_store_##param(			\
	struct device *dev,					\
	struct device_attribute *mattr,				\
	const char *data, size_t count)				\
{								\
	struct mem_ctl_info *mci = dev_get_drvdata(dev);	\
	struct i7core_pvt *pvt;					\
	long value;						\
	int rc;							\
								\
	edac_dbg(1, "\n");					\
	pvt = mci->pvt_info;					\
								\
	if (pvt->inject.enable)					\
		disable_inject(mci);				\
								\
	if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
		value = -1;					\
	else {							\
		rc = kstrtoul(data, 10, &value);		\
		if ((rc < 0) || (value >= limit))		\
			return -EIO;				\
	}							\
								\
	pvt->inject.param = value;				\
								\
	return count;						\
}								\
								\
static ssize_t i7core_inject_show_##param(			\
	struct device *dev,					\
	struct device_attribute *mattr,				\
	char *data)						\
{								\
	struct mem_ctl_info *mci = dev_get_drvdata(dev);	\
	struct i7core_pvt *pvt;					\
								\
	pvt = mci->pvt_info;					\
	edac_dbg(1, "pvt=%p\n", pvt);				\
	if (pvt->inject.param < 0)				\
		return sprintf(data, "any\n");			\
	else							\
		return sprintf(data, "%d\n", pvt->inject.param);\
}

/*
 * ATTR_ADDR_MATCH(param) wires the generated store/show pair above into a
 * DEVICE_ATTR named after the parameter (mode S_IRUGO | S_IWUSR).
 */
#define ATTR_ADDR_MATCH(param)					\
	static DEVICE_ATTR(param, S_IRUGO | S_IWUSR,		\
		    i7core_inject_show_##param,			\
		    i7core_inject_store_##param)

/* One sysfs node per matchable address component; the limit is exclusive. */
DECLARE_ADDR_MATCH(channel, 3);
DECLARE_ADDR_MATCH(dimm, 3);
DECLARE_ADDR_MATCH(rank, 4);
DECLARE_ADDR_MATCH(bank, 32);
DECLARE_ADDR_MATCH(page, 0x10000);
DECLARE_ADDR_MATCH(col, 0x4000);

ATTR_ADDR_MATCH(channel);
ATTR_ADDR_MATCH(dimm);
ATTR_ADDR_MATCH(rank);
ATTR_ADDR_MATCH(bank);
ATTR_ADDR_MATCH(page);
ATTR_ADDR_MATCH(col);

878
static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
879 880 881 882
{
	u32 read;
	int count;

883 884 885
	edac_dbg(0, "setting pci %02x:%02x.%x reg=%02x value=%08x\n",
		 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
		 where, val);
886

887 888
	for (count = 0; count < 10; count++) {
		if (count)
889
			msleep(100);
890 891 892 893 894 895 896
		pci_write_config_dword(dev, where, val);
		pci_read_config_dword(dev, where, &read);

		if (read == val)
			return 0;
	}

897 898 899 900
	i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
		"write=%08x. Read=%08x\n",
		dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
		where, val, read);
901 902 903 904

	return -EINVAL;
}

/*
 * This routine prepares the Memory Controller for error injection.
 * The error will be injected when some process tries to write to the
 * memory that matches the given criteria.
 * The criteria can be set in terms of a mask where dimm, rank, bank, page
 * and col can be specified.
 * A -1 value for any of the mask items will make the MCU to ignore
 * that matching criteria for error injection.
 *
 * It should be noticed that the error will only happen after a write operation
 * on a memory that matches the condition. if REPEAT_EN is not enabled at
 * inject mask, then it will produce just one error. Otherwise, it will repeat
 * until the injectmask would be cleaned.
 *
 * FIXME: This routine assumes that MAXNUMDIMMS value of MC_MAX_DOD
 *    is reliable enough to check if the MC is using the
 *    three channels. However, this is not clear at the datasheet.
 */
static ssize_t i7core_inject_enable_store(struct device *dev,
					  struct device_attribute *mattr,
					  const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 injectmask;
	u64 mask = 0;
	int  rc;
	long enable;

	/* Without the channel's function-0 device there is nothing to program */
	if (!pvt->pci_ch[pvt->inject.channel][0])
		return 0;

	/* NOTE(review): enable is long but kstrtoul() takes unsigned long * -
	 * kstrtol() looks intended; confirm before changing. */
	rc = kstrtoul(data, 10, &enable);
	if ((rc < 0))
		return 0;

	if (enable) {
		pvt->inject.enable = 1;
	} else {
		disable_inject(mci);
		return count;
	}

	/* Sets pvt->inject.dimm mask */
	if (pvt->inject.dimm < 0)
		mask |= 1LL << 41;	/* -1: set the "ignore DIMM" bit */
	else {
		/* Field position depends on how many DIMMs the channel has */
		if (pvt->channel[pvt->inject.channel].dimms > 2)
			mask |= (pvt->inject.dimm & 0x3LL) << 35;
		else
			mask |= (pvt->inject.dimm & 0x1LL) << 36;
	}

	/* Sets pvt->inject.rank mask */
	if (pvt->inject.rank < 0)
		mask |= 1LL << 40;	/* -1: set the "ignore rank" bit */
	else {
		if (pvt->channel[pvt->inject.channel].dimms > 2)
			mask |= (pvt->inject.rank & 0x1LL) << 34;
		else
			mask |= (pvt->inject.rank & 0x3LL) << 34;
	}

	/* Sets pvt->inject.bank mask */
	if (pvt->inject.bank < 0)
		mask |= 1LL << 39;	/* -1: set the "ignore bank" bit */
	else
		/* NOTE(review): 0x15LL keeps only bits 0, 2 and 4; a 5-bit
		 * field (bank limit is 32) would normally use 0x1fLL -
		 * confirm against the datasheet before changing. */
		mask |= (pvt->inject.bank & 0x15LL) << 30;

	/* Sets pvt->inject.page mask */
	if (pvt->inject.page < 0)
		mask |= 1LL << 38;	/* -1: set the "ignore page" bit */
	else
		mask |= (pvt->inject.page & 0xffff) << 14;

	/* Sets pvt->inject.column mask */
	if (pvt->inject.col < 0)
		mask |= 1LL << 37;	/* -1: set the "ignore column" bit */
	else
		mask |= (pvt->inject.col & 0x3fff);

	/*
	 * bit    0: REPEAT_EN
	 * bits 1-2: MASK_HALF_CACHELINE
	 * bit    3: INJECT_ECC
	 * bit    4: INJECT_ADDR_PARITY
	 */

	injectmask = (pvt->inject.type & 1) |
		     (pvt->inject.section & 0x3) << 1 |
		     (pvt->inject.type & 0x6) << (3 - 1);

	/* Unlock writes to registers - this register is write only */
	pci_write_config_dword(pvt->pci_noncore,
			       MC_CFG_CONTROL, 0x2);

	/* Program the 64-bit address-match mask as two dwords */
	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ADDR_MATCH, mask);
	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_INJECT, injectmask);

	/*
	 * This is something undocumented, based on my tests
	 * Without writing 8 to this register, errors aren't injected. Not sure
	 * why.
	 */
	pci_write_config_dword(pvt->pci_noncore,
			       MC_CFG_CONTROL, 8);

	edac_dbg(0, "Error inject addr match 0x%016llx, ecc 0x%08x, inject 0x%08x\n",
		 mask, pvt->inject.eccmask, injectmask);


	return count;
}

/*
 * Report whether error injection is currently armed.  Reads the channel's
 * MC_CHANNEL_ERROR_INJECT register and re-syncs pvt->inject.enable from
 * the ECC/parity inject bits (0x0c) before printing the flag.
 */
static ssize_t i7core_inject_enable_show(struct device *dev,
					 struct device_attribute *mattr,
					 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 injectmask;

	if (!pvt->pci_ch[pvt->inject.channel][0])
		return 0;

	pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_INJECT, &injectmask);

	/* NOTE(review): "0x%018x" looks like a typo for "0x%08x" */
	edac_dbg(0, "Inject error read: 0x%018x\n", injectmask);

	if (injectmask & 0x0c)
		pvt->inject.enable = 1;

	return sprintf(data, "%d\n", pvt->inject.enable);
}

/*
 * DECLARE_COUNTER - generate a sysfs "show" routine for one udimm CE counter.
 *
 * Expands to i7core_show_counter_<param>(), which prints the corrected-error
 * count for UDIMM slot <param>.  The count is only meaningful when CE
 * counters are available and the memory controller is not registered for
 * hardware scrubbing, otherwise "data unavailable" is reported.
 */
#define DECLARE_COUNTER(param)					\
static ssize_t i7core_show_counter_##param(			\
	struct device *dev,					\
	struct device_attribute *mattr,				\
	char *data)						\
{								\
	struct mem_ctl_info *mci = dev_get_drvdata(dev);	\
	struct i7core_pvt *pvt = mci->pvt_info;			\
								\
	edac_dbg(1, "\n");					\
	if (!pvt->ce_count_available || (pvt->is_registered))	\
		return sprintf(data, "data unavailable\n");	\
	return sprintf(data, "%lu\n",				\
			pvt->udimm_ce_count[param]);		\
}

/*
 * ATTR_COUNTER - declare the device attribute for one udimm CE counter.
 *
 * The attribute has no store method (NULL), so it must not carry a write
 * permission bit: the driver core WARNs on writable sysfs attributes
 * without a store callback.  Hence S_IRUGO only (was S_IRUGO | S_IWUSR).
 */
#define ATTR_COUNTER(param)					\
	static DEVICE_ATTR(udimm##param, S_IRUGO,		\
		    i7core_show_counter_##param,		\
		    NULL)

/* Show routines for the three per-UDIMM corrected-error counters */
DECLARE_COUNTER(0);
DECLARE_COUNTER(1);
DECLARE_COUNTER(2);

/* Device attributes udimm0..udimm2 backed by the counters above */
ATTR_COUNTER(0);
ATTR_COUNTER(1);
ATTR_COUNTER(2);

/*
 * inject_addrmatch device sysfs struct
 */

1082 1083 1084 1085 1086 1087 1088 1089
static struct attribute *i7core_addrmatch_attrs[] = {
	&dev_attr_channel.attr,
	&dev_attr_dimm.attr,
	&dev_attr_rank.attr,
	&dev_attr_bank.attr,
	&dev_attr_page.attr,
	&dev_attr_col.attr,
	NULL
1090 1091
};

1092 1093
static struct attribute_group addrmatch_grp = {
	.attrs	= i7core_addrmatch_attrs,
1094 1095
};

1096 1097 1098
static const struct attribute_group *addrmatch_groups[] = {
	&addrmatch_grp,
	NULL
1099 1100
};

/*
 * addrmatch_release - release callback for the inject_addrmatch device.
 *
 * Frees the dynamically allocated struct device once its last reference
 * is dropped by the driver core.
 */
static void addrmatch_release(struct device *device)
{
	edac_dbg(1, "Releasing device %s\n", dev_name(device));
	kfree(device);
}

/* Device type tying the address-match attribute groups to their release */
static struct device_type addrmatch_type = {
	.groups		= addrmatch_groups,
	.release	= addrmatch_release,
};

1112 1113 1114 1115 1116 1117 1118 1119 1120
/*
 * all_channel_counts sysfs struct
 */

static struct attribute *i7core_udimm_counters_attrs[] = {
	&dev_attr_udimm0.attr,
	&dev_attr_udimm1.attr,
	&dev_attr_udimm2.attr,
	NULL
1121 1122
};

1123 1124
static struct attribute_group all_channel_counts_grp = {
	.attrs	= i7core_udimm_counters_attrs,
1125 1126
};

1127 1128 1129
static const struct attribute_group *all_channel_counts_groups[] = {
	&all_channel_counts_grp,
	NULL
1130 1131
};

1132 1133
static void all_channel_counts_release(struct device *device)
{
1134
	edac_dbg(1, "Releasing device %s\n", dev_name(device));
1135
	kfree(device);