i7core_edac.c 61.3 KB
Newer Older
1 2
/* Intel i7 core/Nehalem Memory Controller kernel module
 *
3
 * This driver supports the memory controllers found on the Intel
4 5 6
 * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
 * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield
 * and Westmere-EP.
7 8 9 10
 *
 * This file may be distributed under the terms of the
 * GNU General Public License version 2 only.
 *
11
 * Copyright (c) 2009-2010 by:
12
 *	 Mauro Carvalho Chehab
13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32
 *
 * Red Hat Inc. http://www.redhat.com
 *
 * Forked and adapted from the i5400_edac driver
 *
 * Based on the following public Intel datasheets:
 * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
 * Datasheet, Volume 2:
 *	http://download.intel.com/design/processor/datashts/320835.pdf
 * Intel Xeon Processor 5500 Series Datasheet Volume 2
 *	http://www.intel.com/Assets/PDF/datasheet/321322.pdf
 * also available at:
 * 	http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
Randy Dunlap's avatar
Randy Dunlap committed
33
#include <linux/delay.h>
Nils Carlson's avatar
Nils Carlson committed
34
#include <linux/dmi.h>
35 36
#include <linux/edac.h>
#include <linux/mmzone.h>
37
#include <linux/smp.h>
38
#include <asm/mce.h>
39
#include <asm/processor.h>
40
#include <asm/div64.h>
41 42 43

#include "edac_core.h"

44 45 46 47 48
/* Static vars */
static LIST_HEAD(i7core_edac_list);
static DEFINE_MUTEX(i7core_edac_lock);
static int probed;

49 50 51
static int use_pci_fixup;
module_param(use_pci_fixup, int, 0444);
MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
52 53 54 55 56 57 58 59 60
/*
 * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
 * registers start at bus 255, and are not reported by BIOS.
 * We currently find devices with only 2 sockets. In order to support more QPI
 * Quick Path Interconnect, just increment this number.
 */
#define MAX_SOCKET_BUSES	2


61 62 63
/*
 * Alter this version for the module when modifications are made
 */
Michal Marek's avatar
Michal Marek committed
64
#define I7CORE_REVISION    " Ver: 1.0.0"
65 66 67 68 69 70 71 72 73 74 75 76 77 78 79
#define EDAC_MOD_STR      "i7core_edac"

/*
 * Debug macros
 */
#define i7core_printk(level, fmt, arg...)			\
	edac_printk(level, "i7core", fmt, ##arg)

#define i7core_mc_printk(mci, level, fmt, arg...)		\
	edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)

/*
 * i7core Memory Controller Registers
 */

80 81 82
	/* OFFSETS for Device 0 Function 0 */

#define MC_CFG_CONTROL	0x90
83 84
  #define MC_CFG_UNLOCK		0x02
  #define MC_CFG_LOCK		0x00
85

86 87 88 89 90 91
	/* OFFSETS for Device 3 Function 0 */

#define MC_CONTROL	0x48
#define MC_STATUS	0x4c
#define MC_MAX_DOD	0x64

92
/*
David Mackey's avatar
David Mackey committed
93
 * OFFSETS for Device 3 Function 4, as indicated on Xeon 5500 datasheet:
94 95 96 97 98 99 100 101 102 103
 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
 */

#define MC_TEST_ERR_RCV1	0x60
  #define DIMM2_COR_ERR(r)			((r) & 0x7fff)

#define MC_TEST_ERR_RCV0	0x64
  #define DIMM1_COR_ERR(r)			(((r) >> 16) & 0x7fff)
  #define DIMM0_COR_ERR(r)			((r) & 0x7fff)

David Mackey's avatar
David Mackey committed
104
/* OFFSETS for Device 3 Function 2, as indicated on Xeon 5500 datasheet */
105 106 107 108 109 110 111
#define MC_SSRCONTROL		0x48
  #define SSR_MODE_DISABLE	0x00
  #define SSR_MODE_ENABLE	0x01
  #define SSR_MODE_MASK		0x03

#define MC_SCRUB_CONTROL	0x4c
  #define STARTSCRUB		(1 << 24)
Nils Carlson's avatar
Nils Carlson committed
112
  #define SCRUBINTERVAL_MASK    0xffffff
113

114 115 116 117 118 119 120 121 122 123 124
#define MC_COR_ECC_CNT_0	0x80
#define MC_COR_ECC_CNT_1	0x84
#define MC_COR_ECC_CNT_2	0x88
#define MC_COR_ECC_CNT_3	0x8c
#define MC_COR_ECC_CNT_4	0x90
#define MC_COR_ECC_CNT_5	0x94

#define DIMM_TOP_COR_ERR(r)			(((r) >> 16) & 0x7fff)
#define DIMM_BOT_COR_ERR(r)			((r) & 0x7fff)


125 126
	/* OFFSETS for Devices 4,5 and 6 Function 0 */

127 128 129 130 131 132
#define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
  #define THREE_DIMMS_PRESENT		(1 << 24)
  #define SINGLE_QUAD_RANK_PRESENT	(1 << 23)
  #define QUAD_RANK_PRESENT		(1 << 22)
  #define REGISTERED_DIMM		(1 << 15)

133 134 135 136
#define MC_CHANNEL_MAPPER	0x60
  #define RDLCH(r, ch)		((((r) >> (3 + (ch * 6))) & 0x07) - 1)
  #define WRLCH(r, ch)		((((r) >> (ch * 6)) & 0x07) - 1)

137 138 139
#define MC_CHANNEL_RANK_PRESENT 0x7c
  #define RANK_PRESENT_MASK		0xffff

140
#define MC_CHANNEL_ADDR_MATCH	0xf0
141 142 143 144 145 146 147 148 149 150
#define MC_CHANNEL_ERROR_MASK	0xf8
#define MC_CHANNEL_ERROR_INJECT	0xfc
  #define INJECT_ADDR_PARITY	0x10
  #define INJECT_ECC		0x08
  #define MASK_CACHELINE	0x06
  #define MASK_FULL_CACHELINE	0x06
  #define MASK_MSB32_CACHELINE	0x04
  #define MASK_LSB32_CACHELINE	0x02
  #define NO_MASK_CACHELINE	0x00
  #define REPEAT_EN		0x01
151

152
	/* OFFSETS for Devices 4,5 and 6 Function 1 */
153

154 155 156 157 158 159 160
#define MC_DOD_CH_DIMM0		0x48
#define MC_DOD_CH_DIMM1		0x4c
#define MC_DOD_CH_DIMM2		0x50
  #define RANKOFFSET_MASK	((1 << 12) | (1 << 11) | (1 << 10))
  #define RANKOFFSET(x)		((x & RANKOFFSET_MASK) >> 10)
  #define DIMM_PRESENT_MASK	(1 << 9)
  #define DIMM_PRESENT(x)	(((x) & DIMM_PRESENT_MASK) >> 9)
161 162 163 164
  #define MC_DOD_NUMBANK_MASK		((1 << 8) | (1 << 7))
  #define MC_DOD_NUMBANK(x)		(((x) & MC_DOD_NUMBANK_MASK) >> 7)
  #define MC_DOD_NUMRANK_MASK		((1 << 6) | (1 << 5))
  #define MC_DOD_NUMRANK(x)		(((x) & MC_DOD_NUMRANK_MASK) >> 5)
165
  #define MC_DOD_NUMROW_MASK		((1 << 4) | (1 << 3) | (1 << 2))
166
  #define MC_DOD_NUMROW(x)		(((x) & MC_DOD_NUMROW_MASK) >> 2)
167 168
  #define MC_DOD_NUMCOL_MASK		3
  #define MC_DOD_NUMCOL(x)		((x) & MC_DOD_NUMCOL_MASK)
169

170 171
#define MC_RANK_PRESENT		0x7c

172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194
#define MC_SAG_CH_0	0x80
#define MC_SAG_CH_1	0x84
#define MC_SAG_CH_2	0x88
#define MC_SAG_CH_3	0x8c
#define MC_SAG_CH_4	0x90
#define MC_SAG_CH_5	0x94
#define MC_SAG_CH_6	0x98
#define MC_SAG_CH_7	0x9c

#define MC_RIR_LIMIT_CH_0	0x40
#define MC_RIR_LIMIT_CH_1	0x44
#define MC_RIR_LIMIT_CH_2	0x48
#define MC_RIR_LIMIT_CH_3	0x4C
#define MC_RIR_LIMIT_CH_4	0x50
#define MC_RIR_LIMIT_CH_5	0x54
#define MC_RIR_LIMIT_CH_6	0x58
#define MC_RIR_LIMIT_CH_7	0x5C
#define MC_RIR_LIMIT_MASK	((1 << 10) - 1)

#define MC_RIR_WAY_CH		0x80
  #define MC_RIR_WAY_OFFSET_MASK	(((1 << 14) - 1) & ~0x7)
  #define MC_RIR_WAY_RANK_MASK		0x7

195 196 197 198 199
/*
 * i7core structs
 */

#define NUM_CHANS 3
200 201 202
#define MAX_DIMMS 3		/* Max DIMMS per channel */
#define MAX_MCR_FUNC  4
#define MAX_CHAN_FUNC 3
203 204 205 206 207

/* Cached copies of the global MC registers (device 3, function 0) */
struct i7core_info {
	u32	mc_control;	/* MC_CONTROL (0x48): channel-active / ECCx8 bits */
	u32	mc_status;	/* MC_STATUS (0x4c): ECC-enabled / channel-disabled bits */
	u32	max_dod;	/* MC_MAX_DOD (0x64): max DIMM geometry limits */
	u32	ch_map;		/* MC_CHANNEL_MAPPER (0x60): logical<->physical channel map */
};

211 212 213 214 215 216 217 218 219 220 221 222

/* User-supplied error-injection parameters, configured via sysfs */
struct i7core_inject {
	int	enable;		/* non-zero while injection is armed */

	u32	section;	/* half-cacheline selector (bits 0-1) */
	u32	type;		/* repeat / ECC / parity selector (bits 0-2) */
	u32	eccmask;	/* ECC bits to flip on the injected write */

	/* Error address mask; -1 in any field means "match any" */
	int channel, dimm, rank, bank, page, col;
};

223
/* Per-channel population info, filled from MC_CHANNEL_DIMM_INIT_PARAMS */
struct i7core_channel {
	bool		is_3dimms_present;	/* THREE_DIMMS_PRESENT flag seen */
	bool		is_single_4rank;	/* SINGLE_QUAD_RANK_PRESENT flag seen */
	bool		has_4rank;		/* QUAD_RANK_PRESENT flag seen */
	u32		dimms;			/* populated DIMM count — set outside this view */
};

230
/* One PCI device (slot/function/id) the driver must locate per socket */
struct pci_id_descr {
	int			dev;		/* PCI device (slot) number */
	int			func;		/* PCI function number */
	int 			dev_id;		/* PCI device ID */
	int			optional;	/* 1 if probing may succeed without it */
};

237
/* Device list for one CPU family (Nehalem, Lynnfield, Westmere) */
struct pci_id_table {
	const struct pci_id_descr	*descr;	/* array of required devices */
	int				n_devs;	/* number of entries in descr[] */
};

242 243 244 245
/* Per-socket bookkeeping: the PCI devices of one memory controller */
struct i7core_dev {
	struct list_head	list;	/* node in the global i7core_edac_list */
	u8			socket;	/* socket/QPI number */
	struct pci_dev		**pdev;	/* array of n_devs device pointers */
	int			n_devs;
	struct mem_ctl_info	*mci;	/* EDAC MC instance, once registered */
};

250
/* Driver-private per-memory-controller state, hung off mem_ctl_info */
struct i7core_pvt {
	/* sysfs devices for the addrmatch and channel-counter attribute groups */
	struct device *addrmatch_dev, *chancounts_dev;

	struct pci_dev	*pci_noncore;				/* non-core registers device (dev 0 fn 0) */
	struct pci_dev	*pci_mcr[MAX_MCR_FUNC + 1];		/* device 3, indexed by function */
	struct pci_dev	*pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];	/* devices 4-6, per channel/function */

	struct i7core_dev *i7core_dev;	/* back-pointer to the per-socket device set */

	struct i7core_info	info;	/* cached MC register values */
	struct i7core_inject	inject;	/* error-injection parameters */
	struct i7core_channel	channel[NUM_CHANS];

	int		ce_count_available;	/* non-zero once CE counters hold valid data */

			/* ECC corrected errors counts per udimm */
	unsigned long	udimm_ce_count[MAX_DIMMS];
	int		udimm_last_ce_count[MAX_DIMMS];
			/* ECC corrected errors counts per rdimm */
	unsigned long	rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
	int		rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];

	/* is_registered: presumably RDIMMs in use (udimm counters then unused)
	 * — set outside this view; enable_scrub: scrub control usable */
	bool		is_registered, enable_scrub;

	/* DCLK Frequency used for computing scrub rate */
	int			dclk_freq;

	/* Struct to control EDAC polling */
	struct edac_pci_ctl_info *i7core_pci;
};

281 282 283 284 285
#define PCI_DESCR(device, function, device_id)	\
	.dev = (device),			\
	.func = (function),			\
	.dev_id = (device_id)

286
/* PCI devices required on i7core/Xeon 35xx/55xx (Nehalem) sockets */
static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
		/* Memory controller */
	{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR)     },
	{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD)  },
			/* Exists only for RDIMM */
	{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1  },
	{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },

		/* Channel 0 */
	{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
	{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
	{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
	{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC)   },

		/* Channel 1 */
	{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
	{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
	{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
	{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC)   },

		/* Channel 2 */
	{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
	{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
	{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
	{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC)   },

		/* Generic Non-core registers */
	/*
	 * This is the PCI device on i7core and on Xeon 35xx (8086:2c41)
	 * On Xeon 55xx, however, it has a different id (8086:2c40). So,
	 * the probing code needs to test for the other address in case of
	 * failure of this one
	 */
	{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE)  },

};
322

323
/* PCI devices required on Lynnfield sockets (only two channels) */
static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
	{ PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR)         },
	{ PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD)      },
	{ PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST)     },

	{ PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
	{ PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
	{ PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
	{ PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC)   },

	{ PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
	{ PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
	{ PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
	{ PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC)   },

	/*
	 * This is the PCI device has an alternate address on some
	 * processors like Core i7 860
	 */
	{ PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE)     },
};

345
/* PCI devices required on Westmere-EP (Xeon 56xx) sockets — REV2 IDs */
static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
		/* Memory controller */
	{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2)     },
	{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2)  },
			/* Exists only for RDIMM */
	{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1  },
	{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) },

		/* Channel 0 */
	{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) },
	{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) },
	{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) },
	{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2)   },

		/* Channel 1 */
	{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) },
	{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) },
	{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) },
	{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2)   },

		/* Channel 2 */
	{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) },
	{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
	{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
	{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2)   },

		/* Generic Non-core registers */
	{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2)  },

};

376 377
#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
/* One entry per supported CPU family; probing tries each in order */
static const struct pci_id_table pci_dev_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
	PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
	PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
	{0,}			/* 0 terminated list. */
};

384 385 386
/*
 *	pci_device_id	table for which devices we are looking for
 */
387
/*
 *	pci_device_id	table for which devices we are looking for
 */
static const struct pci_device_id i7core_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
	{0,}			/* 0 terminated list. */
};

393
/****************************************************************************
David Mackey's avatar
David Mackey committed
394
			Ancillary status routines
395 396 397
 ****************************************************************************/

	/* MC_CONTROL bits */
398 399
#define CH_ACTIVE(pvt, ch)	((pvt)->info.mc_control & (1 << (8 + ch)))
#define ECCx8(pvt)		((pvt)->info.mc_control & (1 << 1))
400 401

	/* MC_STATUS bits */
402
#define ECC_ENABLED(pvt)	((pvt)->info.mc_status & (1 << 4))
403
#define CH_DISABLED(pvt, ch)	((pvt)->info.mc_status & (1 << ch))
404 405

	/* MC_MAX_DOD read functions */
406
/* MC_MAX_DOD: DIMM count is encoded 0-based in the two low bits */
static inline int numdimms(u32 dimms)
{
	u32 encoded = dimms & 0x3;

	return encoded + 1;
}

411
/* Decode a 2-bit rank field: 1, 2 or 4 ranks; 3 is reserved */
static inline int numrank(u32 rank)
{
	switch (rank & 0x3) {
	case 0:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	default:
		return -EINVAL;
	}
}

418
/* Decode a 2-bit bank field: 4, 8 or 16 banks; 3 is reserved */
static inline int numbank(u32 bank)
{
	switch (bank & 0x3) {
	case 0:
		return 4;
	case 1:
		return 8;
	case 2:
		return 16;
	default:
		return -EINVAL;
	}
}

425
/* Decode a 3-bit row field: 2^12 .. 2^16 rows; encodings above 4 are reserved */
static inline int numrow(u32 row)
{
	u32 sel = row & 0x7;

	if (sel > 4)
		return -EINVAL;

	return 1 << (12 + sel);
}

435
/* Decode a 2-bit column field: 2^10 .. 2^12 columns; 3 is reserved */
static inline int numcol(u32 col)
{
	u32 sel = col & 0x3;

	if (sel == 3)
		return -EINVAL;

	return 1 << (10 + sel);
}

443
/* Look up the per-socket device set on the global list; NULL if absent */
static struct i7core_dev *get_i7core_dev(u8 socket)
{
	struct i7core_dev *d;

	list_for_each_entry(d, &i7core_edac_list, list)
		if (d->socket == socket)
			return d;

	return NULL;
}

455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477
/*
 * alloc_i7core_dev - allocate the per-socket device set and link it in
 * @socket: socket/QPI number this set belongs to
 * @table:  device table describing how many PCI devices to track
 *
 * Returns the new set (already on i7core_edac_list) or NULL on OOM.
 * The caller owns the object; release it with free_i7core_dev().
 */
static struct i7core_dev *alloc_i7core_dev(u8 socket,
					   const struct pci_id_table *table)
{
	struct i7core_dev *i7core_dev;

	i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
	if (!i7core_dev)
		return NULL;

	/* kcalloc() checks n * size for overflow and zero-initializes */
	i7core_dev->pdev = kcalloc(table->n_devs, sizeof(*i7core_dev->pdev),
				   GFP_KERNEL);
	if (!i7core_dev->pdev) {
		kfree(i7core_dev);
		return NULL;
	}

	i7core_dev->socket = socket;
	i7core_dev->n_devs = table->n_devs;
	list_add_tail(&i7core_dev->list, &i7core_edac_list);

	return i7core_dev;
}

478 479 480 481 482 483 484
/* Unlink a per-socket device set from the global list and free it */
static void free_i7core_dev(struct i7core_dev *i7core_dev)
{
	list_del(&i7core_dev->list);
	kfree(i7core_dev->pdev);
	kfree(i7core_dev);
}

485 486 487
/****************************************************************************
			Memory check routines
 ****************************************************************************/
488

489
static int get_dimm_config(struct mem_ctl_info *mci)
490 491
{
	struct i7core_pvt *pvt = mci->pvt_info;
492
	struct pci_dev *pdev;
493
	int i, j;
494
	enum edac_type mode;
495
	enum mem_type mtype;
496
	struct dimm_info *dimm;
497

498
	/* Get data from the MC register, function 0 */
499
	pdev = pvt->pci_mcr[0];
500
	if (!pdev)
501 502
		return -ENODEV;

503
	/* Device 3 function 0 reads */
504 505 506 507
	pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
	pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
	pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
	pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
508

509 510 511
	edac_dbg(0, "QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
		 pvt->i7core_dev->socket, pvt->info.mc_control,
		 pvt->info.mc_status, pvt->info.max_dod, pvt->info.ch_map);
512

513
	if (ECC_ENABLED(pvt)) {
514
		edac_dbg(0, "ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
515 516 517 518 519
		if (ECCx8(pvt))
			mode = EDAC_S8ECD8ED;
		else
			mode = EDAC_S4ECD4ED;
	} else {
520
		edac_dbg(0, "ECC disabled\n");
521 522
		mode = EDAC_NONE;
	}
523 524

	/* FIXME: need to handle the error codes */
525 526 527 528 529 530
	edac_dbg(0, "DOD Max limits: DIMMS: %d, %d-ranked, %d-banked x%x x 0x%x\n",
		 numdimms(pvt->info.max_dod),
		 numrank(pvt->info.max_dod >> 2),
		 numbank(pvt->info.max_dod >> 4),
		 numrow(pvt->info.max_dod >> 6),
		 numcol(pvt->info.max_dod >> 9));
531

532
	for (i = 0; i < NUM_CHANS; i++) {
533
		u32 data, dimm_dod[3], value[8];
534

535 536 537
		if (!pvt->pci_ch[i][0])
			continue;

538
		if (!CH_ACTIVE(pvt, i)) {
539
			edac_dbg(0, "Channel %i is not active\n", i);
540 541 542
			continue;
		}
		if (CH_DISABLED(pvt, i)) {
543
			edac_dbg(0, "Channel %i is disabled\n", i);
544 545 546
			continue;
		}

547
		/* Devices 4-6 function 0 */
548
		pci_read_config_dword(pvt->pci_ch[i][0],
549 550
				MC_CHANNEL_DIMM_INIT_PARAMS, &data);

551 552 553 554 555 556 557 558 559

		if (data & THREE_DIMMS_PRESENT)
			pvt->channel[i].is_3dimms_present = true;

		if (data & SINGLE_QUAD_RANK_PRESENT)
			pvt->channel[i].is_single_4rank = true;

		if (data & QUAD_RANK_PRESENT)
			pvt->channel[i].has_4rank = true;
560

561 562
		if (data & REGISTERED_DIMM)
			mtype = MEM_RDDR3;
563
		else
564 565 566
			mtype = MEM_DDR3;

		/* Devices 4-6 function 1 */
567
		pci_read_config_dword(pvt->pci_ch[i][1],
568
				MC_DOD_CH_DIMM0, &dimm_dod[0]);
569
		pci_read_config_dword(pvt->pci_ch[i][1],
570
				MC_DOD_CH_DIMM1, &dimm_dod[1]);
571
		pci_read_config_dword(pvt->pci_ch[i][1],
572
				MC_DOD_CH_DIMM2, &dimm_dod[2]);
573

574 575 576 577 578 579 580 581
		edac_dbg(0, "Ch%d phy rd%d, wr%d (0x%08x): %s%s%s%cDIMMs\n",
			 i,
			 RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
			 data,
			 pvt->channel[i].is_3dimms_present ? "3DIMMS " : "",
			 pvt->channel[i].is_3dimms_present ? "SINGLE_4R " : "",
			 pvt->channel[i].has_4rank ? "HAS_4R " : "",
			 (data & REGISTERED_DIMM) ? 'R' : 'U');
582 583 584

		for (j = 0; j < 3; j++) {
			u32 banks, ranks, rows, cols;
585
			u32 size, npages;
586 587 588 589

			if (!DIMM_PRESENT(dimm_dod[j]))
				continue;

590 591
			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
				       i, j, 0);
592 593 594 595 596
			banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
			ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
			rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
			cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));

597 598 599
			/* DDR3 has 8 I/O banks */
			size = (rows * cols * banks * ranks) >> (20 - 3);

600 601 602 603
			edac_dbg(0, "\tdimm %d %d Mb offset: %x, bank: %d, rank: %d, row: %#x, col: %#x\n",
				 j, size,
				 RANKOFFSET(dimm_dod[j]),
				 banks, ranks, rows, cols);
604

605
			npages = MiB_TO_PAGES(size);
606

607
			dimm->nr_pages = npages;
608

609 610
			switch (banks) {
			case 4:
611
				dimm->dtype = DEV_X4;
612 613
				break;
			case 8:
614
				dimm->dtype = DEV_X8;
615 616
				break;
			case 16:
617
				dimm->dtype = DEV_X16;
618 619
				break;
			default:
620
				dimm->dtype = DEV_UNKNOWN;
621 622
			}

623 624 625 626 627 628
			snprintf(dimm->label, sizeof(dimm->label),
				 "CPU#%uChannel#%u_DIMM#%u",
				 pvt->i7core_dev->socket, i, j);
			dimm->grain = 8;
			dimm->edac_mode = mode;
			dimm->mtype = mtype;
629
		}
630

631 632 633 634 635 636 637 638
		pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
		pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
		pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
		pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
		pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
		pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
		pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
		pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
639
		edac_dbg(1, "\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
640
		for (j = 0; j < 8; j++)
641 642 643 644
			edac_dbg(1, "\t\t%#x\t%#x\t%#x\n",
				 (value[j] >> 27) & 0x1,
				 (value[j] >> 24) & 0x7,
				 (value[j] & ((1 << 24) - 1)));
645 646
	}

647 648 649
	return 0;
}

650 651 652 653
/****************************************************************************
			Error insertion routines
 ****************************************************************************/

654 655
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

656 657 658 659 660 661 662
/* The i7core has independent error injection features per channel.
   However, to have a simpler code, we don't allow enabling error injection
   on more than one channel.
   Also, since a change at an inject parameter will be applied only at enable,
   we're disabling error injection on all write calls to the sysfs nodes that
   controls the error code injection.
 */
663
static int disable_inject(const struct mem_ctl_info *mci)
664 665 666 667 668
{
	struct i7core_pvt *pvt = mci->pvt_info;

	pvt->inject.enable = 0;

669
	if (!pvt->pci_ch[pvt->inject.channel][0])
670 671
		return -ENODEV;

672
	pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
673
				MC_CHANNEL_ERROR_INJECT, 0);
674 675

	return 0;
676 677 678 679 680 681 682 683 684
}

/*
 * i7core inject inject.section
 *
 *	accept and store error injection inject.section value
 *	bit 0 - refers to the lower 32-byte half cacheline
 *	bit 1 - refers to the upper 32-byte half cacheline
 */
685 686
/* sysfs store: select the half-cacheline section mask (0-3) */
static ssize_t i7core_inject_section_store(struct device *dev,
					   struct device_attribute *mattr,
					   const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct i7core_pvt *pvt = mci->pvt_info;
	unsigned long val;
	int ret;

	/* Parameter changes only take effect at enable time; disarm first */
	if (pvt->inject.enable)
		disable_inject(mci);

	ret = kstrtoul(data, 10, &val);
	if (ret < 0 || val > 3)
		return -EIO;

	pvt->inject.section = (u32) val;
	return count;
}

705 706 707
static ssize_t i7core_inject_section_show(struct device *dev,
					  struct device_attribute *mattr,
					  char *data)
708
{
709
	struct mem_ctl_info *mci = to_mci(dev);
710 711 712 713 714 715 716 717 718 719 720 721
	struct i7core_pvt *pvt = mci->pvt_info;
	return sprintf(data, "0x%08x\n", pvt->inject.section);
}

/*
 * i7core inject.type
 *
 *	accept and store error injection inject.section value
 *	bit 0 - repeat enable - Enable error repetition
 *	bit 1 - inject ECC error
 *	bit 2 - inject parity error
 */
722 723
/* sysfs store: select injection type bits (repeat/ECC/parity, 0-7) */
static ssize_t i7core_inject_type_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct i7core_pvt *pvt = mci->pvt_info;
	unsigned long val;
	int ret;

	/* Parameter changes only take effect at enable time; disarm first */
	if (pvt->inject.enable)
		disable_inject(mci);

	ret = kstrtoul(data, 10, &val);
	if (ret < 0 || val > 7)
		return -EIO;

	pvt->inject.type = (u32) val;
	return count;
}

742 743 744
static ssize_t i7core_inject_type_show(struct device *dev,
				       struct device_attribute *mattr,
				       char *data)
745
{
746
	struct mem_ctl_info *mci = to_mci(dev);
747
	struct i7core_pvt *pvt = mci->pvt_info;
748

749 750 751 752 753 754 755 756 757 758 759 760 761
	return sprintf(data, "0x%08x\n", pvt->inject.type);
}

/*
 * i7core_inject_inject.eccmask_store
 *
 * The type of error (UE/CE) will depend on the inject.eccmask value:
 *   Any bits set to a 1 will flip the corresponding ECC bit
 *   Correctable errors can be injected by flipping 1 bit or the bits within
 *   a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
 *   23:16 and 31:24). Flipping bits in two symbol pairs will cause an
 *   uncorrectable error to be injected.
 */
762 763 764
/* sysfs store: ECC bit-flip mask applied on the injected write */
static ssize_t i7core_inject_eccmask_store(struct device *dev,
					   struct device_attribute *mattr,
					   const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct i7core_pvt *pvt = mci->pvt_info;
	unsigned long val;
	int ret;

	/* Parameter changes only take effect at enable time; disarm first */
	if (pvt->inject.enable)
		disable_inject(mci);

	ret = kstrtoul(data, 10, &val);
	if (ret < 0)
		return -EIO;

	pvt->inject.eccmask = (u32) val;
	return count;
}

782 783 784
static ssize_t i7core_inject_eccmask_show(struct device *dev,
					  struct device_attribute *mattr,
					  char *data)
785
{
786
	struct mem_ctl_info *mci = to_mci(dev);
787
	struct i7core_pvt *pvt = mci->pvt_info;
788

789 790 791 792 793 794 795 796 797 798 799 800 801 802
	return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
}

/*
 * i7core_addrmatch
 *
 * Sysfs attributes that restrict which memory address an injected error
 * must match (channel, dimm, rank, bank, page, col).  Each field accepts
 * a decimal value below its limit, or the string "any" (stored as -1) to
 * ignore that criterion when matching.
 */

803 804
/*
 * Generate the sysfs store/show pair for one address-match field.
 * "store" accepts a decimal value in [0, limit) or "any" (-1 = match
 * anything); any write also disarms a currently enabled injection, as
 * parameter changes only take effect at enable time.
 */
#define DECLARE_ADDR_MATCH(param, limit)			\
static ssize_t i7core_inject_store_##param(			\
	struct device *dev,					\
	struct device_attribute *mattr,				\
	const char *data, size_t count)				\
{								\
	struct mem_ctl_info *mci = dev_get_drvdata(dev);	\
	struct i7core_pvt *pvt;					\
	long value;						\
	int rc;							\
								\
	edac_dbg(1, "\n");					\
	pvt = mci->pvt_info;					\
								\
	if (pvt->inject.enable)					\
		disable_inject(mci);				\
								\
	if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
		value = -1;					\
	else {							\
		/* value is signed: kstrtol, not kstrtoul(long *) */\
		rc = kstrtol(data, 10, &value);			\
		if ((rc < 0) || (value < 0) || (value >= limit))\
			return -EIO;				\
	}							\
								\
	pvt->inject.param = value;				\
								\
	return count;						\
}								\
								\
static ssize_t i7core_inject_show_##param(			\
	struct device *dev,					\
	struct device_attribute *mattr,				\
	char *data)						\
{								\
	struct mem_ctl_info *mci = dev_get_drvdata(dev);	\
	struct i7core_pvt *pvt;					\
								\
	pvt = mci->pvt_info;					\
	edac_dbg(1, "pvt=%p\n", pvt);				\
	if (pvt->inject.param < 0)				\
		return sprintf(data, "any\n");			\
	else							\
		return sprintf(data, "%d\n", pvt->inject.param);\
}

849
/* Bind an address-match store/show pair to a rw sysfs device attribute */
#define ATTR_ADDR_MATCH(param)					\
	static DEVICE_ATTR(param, S_IRUGO | S_IWUSR,		\
		    i7core_inject_show_##param,			\
		    i7core_inject_store_##param)

/* Instantiate the handlers; the limit is the exclusive upper bound */
DECLARE_ADDR_MATCH(channel, 3);
DECLARE_ADDR_MATCH(dimm, 3);
DECLARE_ADDR_MATCH(rank, 4);
DECLARE_ADDR_MATCH(bank, 32);
DECLARE_ADDR_MATCH(page, 0x10000);
DECLARE_ADDR_MATCH(col, 0x4000);

ATTR_ADDR_MATCH(channel);
ATTR_ADDR_MATCH(dimm);
ATTR_ADDR_MATCH(rank);
ATTR_ADDR_MATCH(bank);
ATTR_ADDR_MATCH(page);
ATTR_ADDR_MATCH(col);

868
static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
869 870 871 872
{
	u32 read;
	int count;

873 874 875
	edac_dbg(0, "setting pci %02x:%02x.%x reg=%02x value=%08x\n",
		 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
		 where, val);
876

877 878
	for (count = 0; count < 10; count++) {
		if (count)
879
			msleep(100);
880 881 882 883 884 885 886
		pci_write_config_dword(dev, where, val);
		pci_read_config_dword(dev, where, &read);

		if (read == val)
			return 0;
	}

887 888 889 890
	i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
		"write=%08x. Read=%08x\n",
		dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
		where, val, read);
891 892 893 894

	return -EINVAL;
}

895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912
/*
 * This routine prepares the Memory Controller for error injection.
 * The error will be injected when some process tries to write to the
 * memory that matches the given criteria.
 * The criteria can be set in terms of a mask where dimm, rank, bank, page
 * and col can be specified.
 * A -1 value for any of the mask items will make the MCU to ignore
 * that matching criteria for error injection.
 *
 * It should be noticed that the error will only happen after a write operation
 * on a memory that matches the condition. if REPEAT_EN is not enabled at
 * inject mask, then it will produce just one error. Otherwise, it will repeat
 * until the injectmask would be cleaned.
 *
 * FIXME: This routine assumes that MAXNUMDIMMS value of MC_MAX_DOD
 *    is reliable enough to check if the MC is using the
 *    three channels. However, this is not clear at the datasheet.
 */
913 914 915
/*
 * sysfs store: arm (non-zero) or disarm (zero) error injection.
 * On enable, builds the 42-bit address-match mask from the previously
 * configured channel/dimm/rank/bank/page/col fields and programs the
 * channel's match, ECC-mask and inject registers.
 */
static ssize_t i7core_inject_enable_store(struct device *dev,
					  struct device_attribute *mattr,
					  const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 injectmask;
	u64 mask = 0;
	int  rc;
	long enable;

	/* Silently ignore the write if the channel device is missing */
	if (!pvt->pci_ch[pvt->inject.channel][0])
		return 0;

	/* NOTE(review): kstrtoul() expects unsigned long *, but "enable"
	 * is long — confirm; kstrtol() would match the declared type. */
	rc = kstrtoul(data, 10, &enable);
	if ((rc < 0))
		return 0;

	if (enable) {
		pvt->inject.enable = 1;
	} else {
		disable_inject(mci);
		return count;
	}

	/* Sets pvt->inject.dimm mask; bit 41 = ignore DIMM */
	if (pvt->inject.dimm < 0)
		mask |= 1LL << 41;
	else {
		if (pvt->channel[pvt->inject.channel].dimms > 2)
			mask |= (pvt->inject.dimm & 0x3LL) << 35;
		else
			mask |= (pvt->inject.dimm & 0x1LL) << 36;
	}

	/* Sets pvt->inject.rank mask; bit 40 = ignore rank */
	if (pvt->inject.rank < 0)
		mask |= 1LL << 40;
	else {
		if (pvt->channel[pvt->inject.channel].dimms > 2)
			mask |= (pvt->inject.rank & 0x1LL) << 34;
		else
			mask |= (pvt->inject.rank & 0x3LL) << 34;
	}

	/* Sets pvt->inject.bank mask; bit 39 = ignore bank */
	if (pvt->inject.bank < 0)
		mask |= 1LL << 39;
	else
		/* NOTE(review): 0x15 masks bits 0, 2 and 4 only — looks like
		 * 0x1f was intended for a 5-bit bank field (limit is 32);
		 * confirm against the Xeon 5500 datasheet. */
		mask |= (pvt->inject.bank & 0x15LL) << 30;

	/* Sets pvt->inject.page mask; bit 38 = ignore page */
	if (pvt->inject.page < 0)
		mask |= 1LL << 38;
	else
		mask |= (pvt->inject.page & 0xffff) << 14;

	/* Sets pvt->inject.column mask; bit 37 = ignore column */
	if (pvt->inject.col < 0)
		mask |= 1LL << 37;
	else
		mask |= (pvt->inject.col & 0x3fff);

	/*
	 * bit    0: REPEAT_EN
	 * bits 1-2: MASK_HALF_CACHELINE
	 * bit    3: INJECT_ECC
	 * bit    4: INJECT_ADDR_PARITY
	 */

	injectmask = (pvt->inject.type & 1) |
		     (pvt->inject.section & 0x3) << 1 |
		     (pvt->inject.type & 0x6) << (3 - 1);

	/* Unlock writes to registers - this register is write only */
	pci_write_config_dword(pvt->pci_noncore,
			       MC_CFG_CONTROL, 0x2);

	/* Program match mask (64 bits as two dwords), ECC mask, inject ctl */
	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ADDR_MATCH, mask);
	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_INJECT, injectmask);

	/*
	 * This is something undocumented, based on my tests
	 * Without writing 8 to this register, errors aren't injected. Not sure
	 * why.
	 */
	pci_write_config_dword(pvt->pci_noncore,
			       MC_CFG_CONTROL, 8);

	edac_dbg(0, "Error inject addr match 0x%016llx, ecc 0x%08x, inject 0x%08x\n",
		 mask, pvt->inject.eccmask, injectmask);


	return count;
}

1017 1018 1019
static ssize_t i7core_inject_enable_show(struct device *dev,
					 struct device_attribute *mattr,
					 char *data)
1020
{
1021
	struct mem_ctl_info *mci = to_mci(dev);
1022
	struct i7core_pvt *pvt = mci->pvt_info;
1023 1024
	u32 injectmask;

1025 1026 1027
	if (!pvt->pci_ch[pvt->inject.channel][0])
		return 0;

1028
	pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
1029
			       MC_CHANNEL_ERROR_INJECT, &injectmask);
1030

1031
	edac_dbg(0, "Inject error read: 0x%018x\n", injectmask);
1032 1033 1034 1035

	if (injectmask & 0x0c)
		pvt->inject.enable = 1;

1036 1037 1038
	return sprintf(data, "%d\n", pvt->inject.enable);
}

1039 1040
/*
 * Generates a sysfs "show" handler, i7core_show_counter_<param>,
 * printing udimm_ce_count[param] (the per-uDIMM corrected-error count
 * accumulated by this driver).  Prints "data unavailable" when no CE
 * data has been collected yet or when the controller uses registered
 * DIMMs (pvt->is_registered), in which case these counters do not
 * apply.  Comments cannot go inside the macro body because of the
 * line-continuation backslashes.
 */
#define DECLARE_COUNTER(param)					\
static ssize_t i7core_show_counter_##param(			\
	struct device *dev,					\
	struct device_attribute *mattr,				\
	char *data)						\
{								\
	struct mem_ctl_info *mci = dev_get_drvdata(dev);	\
	struct i7core_pvt *pvt = mci->pvt_info;			\
								\
	edac_dbg(1, "\n");					\
	if (!pvt->ce_count_available || (pvt->is_registered))	\
		return sprintf(data, "data unavailable\n");	\
	return sprintf(data, "%lu\n",				\
			pvt->udimm_ce_count[param]);		\
}
1054

1055
/*
 * Instantiates the read-only sysfs attribute "udimm<param>", backed by
 * i7core_show_counter_<param>.
 *
 * Fix: the mode was S_IRUGO | S_IWUSR, but the store callback is NULL,
 * so the S_IWUSR bit only advertised a write capability that could
 * never succeed (sysfs returns -EIO for writes without a ->store).
 * The attribute is therefore declared read-only.
 */
#define ATTR_COUNTER(param)					\
	static DEVICE_ATTR(udimm##param, S_IRUGO,		\
		    i7core_show_counter_##param,		\
		    NULL)
1059

1060 1061 1062
/* Instantiate the show handlers and sysfs attributes for the three
 * uDIMM corrected-error counters (udimm0..udimm2). */
DECLARE_COUNTER(0);
DECLARE_COUNTER(1);
DECLARE_COUNTER(2);

ATTR_COUNTER(0);
ATTR_COUNTER(1);
ATTR_COUNTER(2);

1068
/*
 * inject_addrmatch device sysfs struct
 */

/* Attribute files of the "inject_addrmatch" pseudo-device; each one
 * exposes a single field (channel/dimm/rank/bank/page/col) of the
 * address-match mask used by the error-injection logic.  The
 * dev_attr_* objects are declared elsewhere in this file
 * (via DEVICE_ATTR). */
static struct attribute *i7core_addrmatch_attrs[] = {
	&dev_attr_channel.attr,
	&dev_attr_dimm.attr,
	&dev_attr_rank.attr,
	&dev_attr_bank.attr,
	&dev_attr_page.attr,
	&dev_attr_col.attr,
	NULL	/* sysfs requires a NULL-terminated list */
};

static struct attribute_group addrmatch_grp = {
	.attrs	= i7core_addrmatch_attrs,
};

/* NULL-terminated group list, as consumed by struct device_type */
static const struct attribute_group *addrmatch_groups[] = {
	&addrmatch_grp,
	NULL
};

1091 1092
/*
 * Release callback for the inject_addrmatch device, invoked by the
 * driver core when the device's last reference is dropped.  Frees the
 * struct device itself — assumes it was k*alloc'ed at registration
 * time elsewhere in this file (TODO confirm against the allocation
 * site).
 */
static void addrmatch_release(struct device *device)
{
	edac_dbg(1, "Releasing device %s\n", dev_name(device));
	kfree(device);
}

/* Ties the addrmatch attribute groups to their release handler */
static struct device_type addrmatch_type = {
	.groups		= addrmatch_groups,
	.release	= addrmatch_release,
};

1102 1103 1104 1105 1106 1107 1108 1109 1110
/*
 * all_channel_counts sysfs struct
 */

/* Per-uDIMM corrected-error counter files (udimm0..udimm2),
 * instantiated by ATTR_COUNTER() earlier in this file. */
static struct attribute *i7core_udimm_counters_attrs[] = {
	&dev_attr_udimm0.attr,
	&dev_attr_udimm1.attr,
	&dev_attr_udimm2.attr,
	NULL	/* sysfs requires a NULL-terminated list */
};

static struct attribute_group all_channel_counts_grp = {
	.attrs	= i7core_udimm_counters_attrs,
};

/* NULL-terminated group list, as consumed by struct device_type */
static const struct attribute_group *all_channel_counts_groups[] = {
	&all_channel_counts_grp,
	NULL
};

1122 1123
/*
 * Release callback for the all_channel_counts device, invoked by the
 * driver core when the device's last reference is dropped.  Frees the
 * struct device itself — assumes it was k*alloc'ed at registration
 * time elsewhere in this file (TODO confirm against the allocation
 * site).
 */
static void all_channel_counts_release(struct device *device)
{
	edac_dbg(1, "Releasing device %s\n", dev_name(device));
	kfree(device);
}

/* Ties the udimm counter attribute groups to their release handler */
static struct device_type all_channel_counts_type = {
	.groups		= all_channel_counts_groups,
	.release	= all_channel_counts_release,
};

/*
 * inject sysfs attributes
 */

/*
 * Error-injection control files.  Mode S_IRUGO | S_IWUSR = 0644:
 * world-readable, root-writable.  The *_show/*_store handlers are
 * defined elsewhere in this file.
 */
static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR,
		   i7core_inject_section_show, i7core_inject_section_store);

static DEVICE_ATTR(inject_type, S_IRUGO | S_IWUSR,
		   i7core_inject_type_show, i7core_inject_type_store);


static DEVICE_ATTR(inject_eccmask, S_IRUGO | S_IWUSR,
		   i7core_inject_eccmask_show, i7core_inject_eccmask_store);

static DEVICE_ATTR(inject_enable, S_IRUGO | S_IWUSR,
		   i7core_inject_enable_show, i7core_inject_enable_store);