[PATCH] EDAC: probe1 cleanup 1-of-2

- Add lower-level functions that handle various parts of the initialization
  done by the xxx_probe1() functions.  Some of the xxx_probe1() functions are
  much too long and complicated (see "Chapter 5: Functions" in
  Documentation/CodingStyle).

- Cleanup of probe1() functions in EDAC
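
The refactoring pattern the bullets above describe is the same in every driver touched here: one self-contained piece of work (csrow setup, device lookup, error-register init) moves out of the long probe routine into a small static helper that probe1() then calls. A minimal, compilable userspace sketch of that shape follows; the fake_* names and field layout are invented for illustration and are not the real EDAC structures or API.

```c
/* Toy illustration of the probe1() split-up pattern. The fake_* names
 * are made up for this sketch and do not exist in the EDAC code. */
#include <stdio.h>
#include <stdlib.h>

#define FAKE_NR_CSROWS 4

struct fake_csrow_info {
	unsigned long first_page;
	unsigned long nr_pages;
};

struct fake_ctl_info {
	int nr_csrows;
	struct fake_csrow_info csrows[FAKE_NR_CSROWS];
};

/* Lower-level helper: does one well-defined part of the setup, in the
 * spirit of amd76x_init_csrows()/e752x_init_csrows() in this patch. */
static void fake_init_csrows(struct fake_ctl_info *mci)
{
	unsigned long cumul = 0;
	int index;

	for (index = 0; index < mci->nr_csrows; index++) {
		struct fake_csrow_info *csrow = &mci->csrows[index];

		csrow->first_page = cumul;
		csrow->nr_pages = 1024;	/* pretend each row is 4 MiB of 4 KiB pages */
		cumul += csrow->nr_pages;
	}
}

/* The probe routine now only allocates, fills in top-level fields and
 * delegates to the helper, instead of doing everything inline. */
static int fake_probe1(struct fake_ctl_info **out)
{
	struct fake_ctl_info *mci = calloc(1, sizeof(*mci));

	if (mci == NULL)
		return -1;	/* would be -ENOMEM in the kernel */

	mci->nr_csrows = FAKE_NR_CSROWS;
	fake_init_csrows(mci);
	*out = mci;
	return 0;
}

int main(void)
{
	struct fake_ctl_info *mci;

	if (fake_probe1(&mci))
		return 1;
	printf("row 2 starts at page %lu\n", mci->csrows[2].first_page);
	free(mci);
	return 0;
}
```

The real helpers added by this patch (amd76x_init_csrows(), e752x_init_mem_map_table(), e752x_get_devs(), and so on) follow the same allocate-then-delegate shape, just against struct mem_ctl_info and PCI config space.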

Signed-off-by: Doug Thompson <norsk5@xmission.com>
Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Doug Thompson 2006-06-30 01:56:08 -07:00, committed by Linus Torvalds
Parent 2d7bbb91c8
Commit 1318952514
6 changed files with 543 additions and 469 deletions


@@ -182,62 +182,20 @@ static void amd76x_check(struct mem_ctl_info *mci)
amd76x_process_error_info(mci, &info, 1);
}
/**
* amd76x_probe1 - Perform set up for detected device
* @pdev: PCI device detected
* @dev_idx: Device type index
*
* We have found an AMD76x and now need to set up the memory
* controller status reporting. We configure and set up the
* memory controller reporting and claim the device.
*/
static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
enum edac_type edac_mode)
{
int rc = -ENODEV;
struct csrow_info *csrow;
u32 mba, mba_base, mba_mask, dms;
int index;
struct mem_ctl_info *mci = NULL;
enum edac_type ems_modes[] = {
EDAC_NONE,
EDAC_EC,
EDAC_SECDED,
EDAC_SECDED
};
u32 ems;
u32 ems_mode;
struct amd76x_error_info discard;
debugf0("%s()\n", __func__);
pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
ems_mode = (ems >> 10) & 0x3;
mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS);
if (mci == NULL) {
rc = -ENOMEM;
goto fail;
}
debugf0("%s(): mci = %p\n", __func__, mci);
mci->dev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_RDDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
mci->edac_cap = ems_mode ?
(EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = AMD76X_REVISION;
mci->ctl_name = amd76x_devs[dev_idx].ctl_name;
mci->edac_check = amd76x_check;
mci->ctl_page_to_phys = NULL;
for (index = 0; index < mci->nr_csrows; index++) {
struct csrow_info *csrow = &mci->csrows[index];
u32 mba;
u32 mba_base;
u32 mba_mask;
u32 dms;
csrow = &mci->csrows[index];
/* find the DRAM Chip Select Base address and mask */
pci_read_config_dword(pdev,
AMD76X_MEM_BASE_ADDR + (index * 4), &mba);
AMD76X_MEM_BASE_ADDR + (index * 4),
&mba);
if (!(mba & BIT(0)))
continue;
@@ -252,9 +210,54 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
csrow->grain = csrow->nr_pages << PAGE_SHIFT;
csrow->mtype = MEM_RDDR;
csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
csrow->edac_mode = ems_modes[ems_mode];
csrow->edac_mode = edac_mode;
}
}
/**
* amd76x_probe1 - Perform set up for detected device
* @pdev: PCI device detected
* @dev_idx: Device type index
*
* We have found an AMD76x and now need to set up the memory
* controller status reporting. We configure and set up the
* memory controller reporting and claim the device.
*/
static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
{
static const enum edac_type ems_modes[] = {
EDAC_NONE,
EDAC_EC,
EDAC_SECDED,
EDAC_SECDED
};
struct mem_ctl_info *mci = NULL;
u32 ems;
u32 ems_mode;
struct amd76x_error_info discard;
debugf0("%s()\n", __func__);
pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
ems_mode = (ems >> 10) & 0x3;
mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS);
if (mci == NULL) {
return -ENOMEM;
}
debugf0("%s(): mci = %p\n", __func__, mci);
mci->dev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_RDDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
mci->edac_cap = ems_mode ?
(EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = AMD76X_REVISION;
mci->ctl_name = amd76x_devs[dev_idx].ctl_name;
mci->edac_check = amd76x_check;
mci->ctl_page_to_phys = NULL;
amd76x_init_csrows(mci, pdev, ems_modes[ems_mode]);
amd76x_get_error_info(mci, &discard); /* clear counters */
/* Here we assume that we will never see multiple instances of this
@@ -270,9 +273,8 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
return 0;
fail:
if (mci != NULL)
edac_mc_free(mci);
return rc;
return -ENODEV;
}
/* returns count (>= 0), or negative on error */


@@ -765,105 +765,38 @@ static void e752x_check(struct mem_ctl_info *mci)
e752x_process_error_info(mci, &info, 1);
}
static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
/* Return 1 if dual channel mode is active. Else return 0. */
static inline int dual_channel_active(u16 ddrcsr)
{
int rc = -ENODEV;
int index;
u16 pci_data;
u8 stat8;
struct mem_ctl_info *mci = NULL;
struct e752x_pvt *pvt = NULL;
u16 ddrcsr;
u32 drc;
int drc_chan; /* Number of channels 0=1chan,1=2chan */
return (((ddrcsr >> 12) & 3) == 3);
}
static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
u16 ddrcsr)
{
struct csrow_info *csrow;
unsigned long last_cumul_size;
int index, mem_dev, drc_chan;
int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */
int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
u32 dra;
unsigned long last_cumul_size;
struct pci_dev *dev = NULL;
struct e752x_error_info discard;
u8 value;
u32 dra, drc, cumul_size;
debugf0("%s(): mci\n", __func__);
debugf0("Starting Probe1\n");
/* check to see if device 0 function 1 is enabled; if it isn't, we
* assume the BIOS has reserved it for a reason and is expecting
* exclusive access, we take care not to violate that assumption and
* fail the probe. */
pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
if (!force_function_unhide && !(stat8 & (1 << 5))) {
printk(KERN_INFO "Contact your BIOS vendor to see if the "
"E752x error registers can be safely un-hidden\n");
goto fail;
}
stat8 |= (1 << 5);
pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);
/* need to find out the number of channels */
pci_read_config_dword(pdev, E752X_DRA, &dra);
pci_read_config_dword(pdev, E752X_DRC, &drc);
pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
/* FIXME: should check >>12 or 0xf, true for all? */
/* Dual channel = 1, Single channel = 0 */
drc_chan = (((ddrcsr >> 12) & 3) == 3);
drc_chan = dual_channel_active(ddrcsr);
drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
drc_ddim = (drc >> 20) & 0x3;
mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1);
if (mci == NULL) {
rc = -ENOMEM;
goto fail;
}
debugf3("%s(): init mci\n", __func__);
mci->mtype_cap = MEM_FLAG_RDDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
EDAC_FLAG_S4ECD4ED;
/* FIXME - what if different memory types are in different csrows? */
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = E752X_REVISION;
mci->dev = &pdev->dev;
debugf3("%s(): init pvt\n", __func__);
pvt = (struct e752x_pvt *) mci->pvt_info;
pvt->dev_info = &e752x_devs[dev_idx];
pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
pvt->dev_info->err_dev,
pvt->bridge_ck);
if (pvt->bridge_ck == NULL)
pvt->bridge_ck = pci_scan_single_device(pdev->bus,
PCI_DEVFN(0, 1));
if (pvt->bridge_ck == NULL) {
e752x_printk(KERN_ERR, "error reporting device not found:"
"vendor %x device 0x%x (broken BIOS?)\n",
PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
goto fail;
}
pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
debugf3("%s(): more mci init\n", __func__);
mci->ctl_name = pvt->dev_info->ctl_name;
mci->edac_check = e752x_check;
mci->ctl_page_to_phys = ctl_page_to_phys;
/* find out the device types */
pci_read_config_dword(pdev, E752X_DRA, &dra);
/*
* The dram row boundary (DRB) reg values are boundary address for
/* The dram row boundary (DRB) reg values are boundary address for
* each DRAM row with a granularity of 64 or 128MB (single/dual
* channel operation). DRB regs are cumulative; therefore DRB7 will
* contain the total memory contained in all eight rows.
*/
for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
u8 value;
u32 cumul_size;
/* mem_dev 0=x8, 1=x4 */
int mem_dev = (dra >> (index * 4 + 2)) & 0x3;
struct csrow_info *csrow = &mci->csrows[index];
mem_dev = (dra >> (index * 4 + 2)) & 0x3;
csrow = &mci->csrows[index];
mem_dev = (mem_dev == 2);
pci_read_config_byte(pdev, E752X_DRB + index, &value);
@@ -871,7 +804,6 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
cumul_size);
if (cumul_size == last_cumul_size)
continue; /* not populated */
@@ -898,16 +830,19 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
} else
csrow->edac_mode = EDAC_NONE;
}
}
/* Fill in the memory map table */
static void e752x_init_mem_map_table(struct pci_dev *pdev,
struct e752x_pvt *pvt)
{
u8 value;
u8 last = 0;
u8 row = 0;
int index;
u8 value, last, row, stat8;
last = 0;
row = 0;
for (index = 0; index < 8; index += 2) {
pci_read_config_byte(pdev, E752X_DRB + index, &value);
/* test if there is a dimm in this slot */
if (value == last) {
/* no dimm in the slot, so flag it as empty */
@@ -917,24 +852,143 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
pvt->map[index] = row;
row++;
last = value;
/* test the next value to see if the dimm is
double sided */
pci_read_config_byte(pdev,
E752X_DRB + index + 1,
/* test the next value to see if the dimm is double
* sided
*/
pci_read_config_byte(pdev, E752X_DRB + index + 1,
&value);
pvt->map[index + 1] = (value == last) ?
0xff : /* the dimm is single sided,
* so flag as empty
*/
so flag as empty */
row; /* this is a double sided dimm
* to save the next row #
*/
to save the next row # */
row++;
last = value;
}
}
/* set the map type. 1 = normal, 0 = reversed */
pci_read_config_byte(pdev, E752X_DRM, &stat8);
pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
}
/* Return 0 on success or 1 on failure. */
static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
struct e752x_pvt *pvt)
{
struct pci_dev *dev;
pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
pvt->dev_info->err_dev,
pvt->bridge_ck);
if (pvt->bridge_ck == NULL)
pvt->bridge_ck = pci_scan_single_device(pdev->bus,
PCI_DEVFN(0, 1));
if (pvt->bridge_ck == NULL) {
e752x_printk(KERN_ERR, "error reporting device not found:"
"vendor %x device 0x%x (broken BIOS?)\n",
PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
return 1;
}
dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev,
NULL);
if (dev == NULL)
goto fail;
pvt->dev_d0f0 = dev;
pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
return 0;
fail:
pci_dev_put(pvt->bridge_ck);
return 1;
}
static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt)
{
struct pci_dev *dev;
dev = pvt->dev_d0f1;
/* Turn off error disable & SMI in case the BIOS turned it on */
pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x00);
pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
}
static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
{
u16 pci_data;
u8 stat8;
struct mem_ctl_info *mci;
struct e752x_pvt *pvt;
u16 ddrcsr;
int drc_chan; /* Number of channels 0=1chan,1=2chan */
struct e752x_error_info discard;
debugf0("%s(): mci\n", __func__);
debugf0("Starting Probe1\n");
/* check to see if device 0 function 1 is enabled; if it isn't, we
* assume the BIOS has reserved it for a reason and is expecting
* exclusive access, we take care not to violate that assumption and
* fail the probe. */
pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
if (!force_function_unhide && !(stat8 & (1 << 5))) {
printk(KERN_INFO "Contact your BIOS vendor to see if the "
"E752x error registers can be safely un-hidden\n");
return -ENOMEM;
}
stat8 |= (1 << 5);
pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);
pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
/* FIXME: should check >>12 or 0xf, true for all? */
/* Dual channel = 1, Single channel = 0 */
drc_chan = dual_channel_active(ddrcsr);
mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1);
if (mci == NULL) {
return -ENOMEM;
}
debugf3("%s(): init mci\n", __func__);
mci->mtype_cap = MEM_FLAG_RDDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
EDAC_FLAG_S4ECD4ED;
/* FIXME - what if different memory types are in different csrows? */
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = E752X_REVISION;
mci->dev = &pdev->dev;
debugf3("%s(): init pvt\n", __func__);
pvt = (struct e752x_pvt *) mci->pvt_info;
pvt->dev_info = &e752x_devs[dev_idx];
pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
if (e752x_get_devs(pdev, dev_idx, pvt)) {
edac_mc_free(mci);
return -ENODEV;
}
debugf3("%s(): more mci init\n", __func__);
mci->ctl_name = pvt->dev_info->ctl_name;
mci->edac_check = e752x_check;
mci->ctl_page_to_phys = ctl_page_to_phys;
e752x_init_csrows(mci, pdev, ddrcsr);
e752x_init_mem_map_table(pdev, pvt);
/* set the map type. 1 = normal, 0 = reversed */
pci_read_config_byte(pdev, E752X_DRM, &stat8);
pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
@@ -961,21 +1015,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
goto fail;
}
dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev,
NULL);
pvt->dev_d0f0 = dev;
/* find the error reporting device and clear errors */
dev = pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
/* Turn off error disable & SMI in case the BIOS turned it on */
pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x00);
pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
e752x_init_error_reporting_regs(pvt);
e752x_get_error_info(mci, &discard); /* clear other MCH errors */
/* get this far and it's successful */
@@ -983,20 +1023,12 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
return 0;
fail:
if (mci) {
if (pvt->dev_d0f0)
pci_dev_put(pvt->dev_d0f0);
if (pvt->dev_d0f1)
pci_dev_put(pvt->dev_d0f1);
if (pvt->bridge_ck)
pci_dev_put(pvt->bridge_ck);
edac_mc_free(mci);
}
return rc;
return -ENODEV;
}
/* returns count (>= 0), or negative on error */


@@ -335,90 +335,52 @@ static void e7xxx_check(struct mem_ctl_info *mci)
e7xxx_process_error_info(mci, &info, 1);
}
static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
/* Return 1 if dual channel mode is active. Else return 0. */
static inline int dual_channel_active(u32 drc, int dev_idx)
{
int rc = -ENODEV;
int index;
u16 pci_data;
struct mem_ctl_info *mci = NULL;
struct e7xxx_pvt *pvt = NULL;
u32 drc;
int drc_chan = 1; /* Number of channels 0=1chan,1=2chan */
int drc_drbg = 1; /* DRB granularity 0=32mb,1=64mb */
int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
u32 dra;
unsigned long last_cumul_size;
struct e7xxx_error_info discard;
return (dev_idx == E7501) ? ((drc >> 22) & 0x1) : 1;
}
debugf0("%s(): mci\n", __func__);
/* need to find out the number of channels */
pci_read_config_dword(pdev, E7XXX_DRC, &drc);
/* Return DRB granularity (0=32mb, 1=64mb). */
static inline int drb_granularity(u32 drc, int dev_idx)
{
/* only e7501 can be single channel */
if (dev_idx == E7501) {
drc_chan = ((drc >> 22) & 0x1);
drc_drbg = (drc >> 18) & 0x3;
return (dev_idx == E7501) ? ((drc >> 18) & 0x3) : 1;
}
drc_ddim = (drc >> 20) & 0x3;
mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1);
if (mci == NULL) {
rc = -ENOMEM;
goto fail;
}
static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
int dev_idx, u32 drc)
{
unsigned long last_cumul_size;
int index;
u8 value;
u32 dra, cumul_size;
int drc_chan, drc_drbg, drc_ddim, mem_dev;
struct csrow_info *csrow;
debugf3("%s(): init mci\n", __func__);
mci->mtype_cap = MEM_FLAG_RDDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
EDAC_FLAG_S4ECD4ED;
/* FIXME - what if different memory types are in different csrows? */
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = E7XXX_REVISION;
mci->dev = &pdev->dev;
debugf3("%s(): init pvt\n", __func__);
pvt = (struct e7xxx_pvt *) mci->pvt_info;
pvt->dev_info = &e7xxx_devs[dev_idx];
pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
pvt->dev_info->err_dev,
pvt->bridge_ck);
if (!pvt->bridge_ck) {
e7xxx_printk(KERN_ERR, "error reporting device not found:"
"vendor %x device 0x%x (broken BIOS?)\n",
PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
goto fail;
}
debugf3("%s(): more mci init\n", __func__);
mci->ctl_name = pvt->dev_info->ctl_name;
mci->edac_check = e7xxx_check;
mci->ctl_page_to_phys = ctl_page_to_phys;
/* find out the device types */
pci_read_config_dword(pdev, E7XXX_DRA, &dra);
drc_chan = dual_channel_active(drc, dev_idx);
drc_drbg = drb_granularity(drc, dev_idx);
drc_ddim = (drc >> 20) & 0x3;
last_cumul_size = 0;
/*
* The dram row boundary (DRB) reg values are boundary address
/* The dram row boundary (DRB) reg values are boundary address
* for each DRAM row with a granularity of 32 or 64MB (single/dual
* channel operation). DRB regs are cumulative; therefore DRB7 will
* contain the total memory contained in all eight rows.
*/
for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
u8 value;
u32 cumul_size;
for (index = 0; index < mci->nr_csrows; index++) {
/* mem_dev 0=x8, 1=x4 */
int mem_dev = (dra >> (index * 4 + 3)) & 0x1;
struct csrow_info *csrow = &mci->csrows[index];
mem_dev = (dra >> (index * 4 + 3)) & 0x1;
csrow = &mci->csrows[index];
pci_read_config_byte(pdev, E7XXX_DRB + index, &value);
/* convert a 64 or 32 MiB DRB to a page size. */
cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
cumul_size);
if (cumul_size == last_cumul_size)
continue; /* not populated */
@@ -445,9 +407,54 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
} else
csrow->edac_mode = EDAC_NONE;
}
}
static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
{
u16 pci_data;
struct mem_ctl_info *mci = NULL;
struct e7xxx_pvt *pvt = NULL;
u32 drc;
int drc_chan;
struct e7xxx_error_info discard;
debugf0("%s(): mci\n", __func__);
pci_read_config_dword(pdev, E7XXX_DRC, &drc);
drc_chan = dual_channel_active(drc, dev_idx);
mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1);
if (mci == NULL)
return -ENOMEM;
debugf3("%s(): init mci\n", __func__);
mci->mtype_cap = MEM_FLAG_RDDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
EDAC_FLAG_S4ECD4ED;
/* FIXME - what if different memory types are in different csrows? */
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = E7XXX_REVISION;
mci->dev = &pdev->dev;
debugf3("%s(): init pvt\n", __func__);
pvt = (struct e7xxx_pvt *) mci->pvt_info;
pvt->dev_info = &e7xxx_devs[dev_idx];
pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
pvt->dev_info->err_dev,
pvt->bridge_ck);
if (!pvt->bridge_ck) {
e7xxx_printk(KERN_ERR, "error reporting device not found:"
"vendor %x device 0x%x (broken BIOS?)\n",
PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
goto fail0;
}
debugf3("%s(): more mci init\n", __func__);
mci->ctl_name = pvt->dev_info->ctl_name;
mci->edac_check = e7xxx_check;
mci->ctl_page_to_phys = ctl_page_to_phys;
e7xxx_init_csrows(mci, pdev, dev_idx, drc);
mci->edac_cap |= EDAC_FLAG_NONE;
debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
/* load the top of low memory, remap base, and remap limit vars */
pci_read_config_word(pdev, E7XXX_TOLM, &pci_data);
@@ -468,21 +475,20 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
*/
if (edac_mc_add_mc(mci,0)) {
debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
goto fail;
goto fail1;
}
/* get this far and it's successful */
debugf3("%s(): success\n", __func__);
return 0;
fail:
if (mci != NULL) {
if(pvt != NULL && pvt->bridge_ck)
fail1:
pci_dev_put(pvt->bridge_ck);
edac_mc_free(mci);
}
return rc;
fail0:
edac_mc_free(mci);
return -ENODEV;
}
/* returns count (>= 0), or negative on error */


@@ -133,61 +133,27 @@ static void i82860_check(struct mem_ctl_info *mci)
i82860_process_error_info(mci, &info, 1);
}
static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
{
int rc = -ENODEV;
int index;
struct mem_ctl_info *mci = NULL;
unsigned long last_cumul_size;
struct i82860_error_info discard;
u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
/* RDRAM has channels but these don't map onto the abstractions that
edac uses.
The device groups from the GRA registers seem to map reasonably
well onto the notion of a chip select row.
There are 16 GRA registers and since the name is associated with
the channel and the GRA registers map to physical devices so we are
going to make 1 channel for group.
*/
mci = edac_mc_alloc(0, 16, 1);
if (!mci)
return -ENOMEM;
debugf3("%s(): init mci\n", __func__);
mci->dev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
/* I"m not sure about this but I think that all RDRAM is SECDED */
mci->edac_cap = EDAC_FLAG_SECDED;
/* adjust FLAGS */
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = I82860_REVISION;
mci->ctl_name = i82860_devs[dev_idx].ctl_name;
mci->edac_check = i82860_check;
mci->ctl_page_to_phys = NULL;
u16 value;
u32 cumul_size;
struct csrow_info *csrow;
int index;
pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim);
mchcfg_ddim = mchcfg_ddim & 0x180;
last_cumul_size = 0;
/*
* The group row boundary (GRA) reg values are boundary address
/* The group row boundary (GRA) reg values are boundary address
* for each DRAM row with a granularity of 16MB. GRA regs are
* cumulative; therefore GRA15 will contain the total memory contained
* in all eight rows.
*/
for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
u16 value;
u32 cumul_size;
struct csrow_info *csrow = &mci->csrows[index];
pci_read_config_word(pdev, I82860_GBA + index * 2,
&value);
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
cumul_size = (value & I82860_GBA_MASK) <<
(I82860_GBA_SHIFT - PAGE_SHIFT);
debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
@@ -205,7 +171,38 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
csrow->dtype = DEV_UNKNOWN;
csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
}
}
static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
struct i82860_error_info discard;
/* RDRAM has channels but these don't map onto the abstractions that
edac uses.
The device groups from the GRA registers seem to map reasonably
well onto the notion of a chip select row.
There are 16 GRA registers and since the name is associated with
the channel and the GRA registers map to physical devices so we are
going to make 1 channel for group.
*/
mci = edac_mc_alloc(0, 16, 1);
if (!mci)
return -ENOMEM;
debugf3("%s(): init mci\n", __func__);
mci->dev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
/* I"m not sure about this but I think that all RDRAM is SECDED */
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = I82860_REVISION;
mci->ctl_name = i82860_devs[dev_idx].ctl_name;
mci->edac_check = i82860_check;
mci->ctl_page_to_phys = NULL;
i82860_init_csrows(mci, pdev);
i82860_get_error_info(mci, &discard); /* clear counters */
/* Here we assume that we will never see multiple instances of this
@@ -213,14 +210,17 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
*/
if (edac_mc_add_mc(mci,0)) {
debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
edac_mc_free(mci);
} else {
/* get this far and it's successful */
debugf3("%s(): success\n", __func__);
rc = 0;
goto fail;
}
return rc;
/* get this far and it's successful */
debugf3("%s(): success\n", __func__);
return 0;
fail:
edac_mc_free(mci);
return -ENODEV;
}
/* returns count (>= 0), or negative on error */


@@ -265,116 +265,109 @@ static void i82875p_check(struct mem_ctl_info *mci)
extern int pci_proc_attach_device(struct pci_dev *);
#endif
static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
/* Return 0 on success or 1 on failure. */
static int i82875p_setup_overfl_dev(struct pci_dev *pdev,
struct pci_dev **ovrfl_pdev, void __iomem **ovrfl_window)
{
int rc = -ENODEV;
int index;
struct mem_ctl_info *mci = NULL;
struct i82875p_pvt *pvt = NULL;
unsigned long last_cumul_size;
struct pci_dev *ovrfl_pdev;
void __iomem *ovrfl_window = NULL;
u32 drc;
u32 drc_chan; /* Number of channels 0=1chan,1=2chan */
u32 nr_chans;
u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
struct i82875p_error_info discard;
struct pci_dev *dev;
void __iomem *window;
debugf0("%s()\n", __func__);
ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
*ovrfl_pdev = NULL;
*ovrfl_window = NULL;
dev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
if (!ovrfl_pdev) {
/*
* Intel tells BIOS developers to hide device 6 which
if (dev == NULL) {
/* Intel tells BIOS developers to hide device 6 which
* configures the overflow device access containing
* the DRBs - this is where we expose device 6.
* http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
*/
pci_write_bits8(pdev, 0xf4, 0x2, 0x2);
ovrfl_pdev =
pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
dev = pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
if (!ovrfl_pdev)
return -ENODEV;
if (dev == NULL)
return 1;
}
*ovrfl_pdev = dev;
#ifdef CONFIG_PROC_FS
if (!ovrfl_pdev->procent && pci_proc_attach_device(ovrfl_pdev)) {
i82875p_printk(KERN_ERR,
"%s(): Failed to attach overflow device\n", __func__);
return -ENODEV;
if ((dev->procent == NULL) && pci_proc_attach_device(dev)) {
i82875p_printk(KERN_ERR, "%s(): Failed to attach overflow "
"device\n", __func__);
return 1;
}
#endif
/* CONFIG_PROC_FS */
if (pci_enable_device(ovrfl_pdev)) {
i82875p_printk(KERN_ERR,
"%s(): Failed to enable overflow device\n", __func__);
return -ENODEV;
#endif /* CONFIG_PROC_FS */
if (pci_enable_device(dev)) {
i82875p_printk(KERN_ERR, "%s(): Failed to enable overflow "
"device\n", __func__);
return 1;
}
if (pci_request_regions(ovrfl_pdev, pci_name(ovrfl_pdev))) {
if (pci_request_regions(dev, pci_name(dev))) {
#ifdef CORRECT_BIOS
goto fail0;
#endif
}
/* cache is irrelevant for PCI bus reads/writes */
ovrfl_window = ioremap_nocache(pci_resource_start(ovrfl_pdev, 0),
pci_resource_len(ovrfl_pdev, 0));
window = ioremap_nocache(pci_resource_start(dev, 0),
pci_resource_len(dev, 0));
if (!ovrfl_window) {
if (window == NULL) {
i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n",
__func__);
goto fail1;
}
/* need to find out the number of channels */
drc = readl(ovrfl_window + I82875P_DRC);
drc_chan = ((drc >> 21) & 0x1);
nr_chans = drc_chan + 1;
*ovrfl_window = window;
return 0;
drc_ddim = (drc >> 18) & 0x1;
mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
nr_chans);
fail1:
pci_release_regions(dev);
if (!mci) {
rc = -ENOMEM;
goto fail2;
#ifdef CORRECT_BIOS
fail0:
pci_disable_device(dev);
#endif
/* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
return 1;
}
debugf3("%s(): init mci\n", __func__);
mci->dev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_UNKNOWN;
/* adjust FLAGS */
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = I82875P_REVISION;
mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
mci->edac_check = i82875p_check;
mci->ctl_page_to_phys = NULL;
debugf3("%s(): init pvt\n", __func__);
pvt = (struct i82875p_pvt *) mci->pvt_info;
pvt->ovrfl_pdev = ovrfl_pdev;
pvt->ovrfl_window = ovrfl_window;
/* Return 1 if dual channel mode is active. Else return 0. */
static inline int dual_channel_active(u32 drc)
{
return (drc >> 21) & 0x1;
}
/*
* The dram row boundary (DRB) reg values are boundary address
static void i82875p_init_csrows(struct mem_ctl_info *mci,
struct pci_dev *pdev, void __iomem *ovrfl_window, u32 drc)
{
struct csrow_info *csrow;
unsigned long last_cumul_size;
u8 value;
u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
u32 cumul_size;
int index;
drc_ddim = (drc >> 18) & 0x1;
last_cumul_size = 0;
/* The dram row boundary (DRB) reg values are boundary address
* for each DRAM row with a granularity of 32 or 64MB (single/dual
* channel operation). DRB regs are cumulative; therefore DRB7 will
* contain the total memory contained in all eight rows.
*/
for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
u8 value;
u32 cumul_size;
struct csrow_info *csrow = &mci->csrows[index];
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
value = readb(ovrfl_window + I82875P_DRB + index);
cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
cumul_size);
if (cumul_size == last_cumul_size)
continue; /* not populated */
@@ -387,7 +380,49 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
csrow->dtype = DEV_UNKNOWN;
csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
}
}
static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc = -ENODEV;
struct mem_ctl_info *mci;
struct i82875p_pvt *pvt;
struct pci_dev *ovrfl_pdev;
void __iomem *ovrfl_window;
u32 drc;
u32 nr_chans;
struct i82875p_error_info discard;
debugf0("%s()\n", __func__);
ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window))
return -ENODEV;
drc = readl(ovrfl_window + I82875P_DRC);
nr_chans = dual_channel_active(drc) + 1;
mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
nr_chans);
if (!mci) {
rc = -ENOMEM;
goto fail0;
}
debugf3("%s(): init mci\n", __func__);
mci->dev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_UNKNOWN;
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = I82875P_REVISION;
mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
mci->edac_check = i82875p_check;
mci->ctl_page_to_phys = NULL;
debugf3("%s(): init pvt\n", __func__);
pvt = (struct i82875p_pvt *) mci->pvt_info;
pvt->ovrfl_pdev = ovrfl_pdev;
pvt->ovrfl_window = ovrfl_window;
i82875p_init_csrows(mci, pdev, ovrfl_window, drc);
i82875p_get_error_info(mci, &discard); /* clear counters */
/* Here we assume that we will never see multiple instances of this
@@ -395,25 +430,20 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
*/
if (edac_mc_add_mc(mci,0)) {
debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
goto fail3;
goto fail1;
}
/* get this far and it's successful */
debugf3("%s(): success\n", __func__);
return 0;
fail3:
fail1:
edac_mc_free(mci);
fail2:
fail0:
iounmap(ovrfl_window);
fail1:
pci_release_regions(ovrfl_pdev);
#ifdef CORRECT_BIOS
fail0:
#endif
pci_disable_device(ovrfl_pdev);
/* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
return rc;


@@ -205,25 +205,72 @@ static void r82600_check(struct mem_ctl_info *mci)
r82600_process_error_info(mci, &info, 1);
}
static inline int ecc_enabled(u8 dramcr)
{
return dramcr & BIT(5);
}
static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
u8 dramcr)
{
struct csrow_info *csrow;
int index;
u8 drbar; /* SDRAM Row Boundary Address Register */
u32 row_high_limit, row_high_limit_last;
u32 reg_sdram, ecc_on, row_base;
ecc_on = ecc_enabled(dramcr);
reg_sdram = dramcr & BIT(4);
row_high_limit_last = 0;
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
/* find the DRAM Chip Select Base address and mask */
pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
debugf1("%s() Row=%d DRBA = %#0x\n", __func__, index, drbar);
row_high_limit = ((u32) drbar << 24);
/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
debugf1("%s() Row=%d, Boundry Address=%#0x, Last = %#0x\n",
__func__, index, row_high_limit, row_high_limit_last);
/* Empty row [p.57] */
if (row_high_limit == row_high_limit_last)
continue;
row_base = row_high_limit_last;
csrow->first_page = row_base >> PAGE_SHIFT;
csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
/* Error address is top 19 bits - so granularity is *
* 14 bits */
csrow->grain = 1 << 14;
csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
/* FIXME - check that this is unknowable with this chipset */
csrow->dtype = DEV_UNKNOWN;
/* Mode is global on 82600 */
csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
row_high_limit_last = row_high_limit;
}
}
static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc = -ENODEV;
int index;
struct mem_ctl_info *mci = NULL;
struct mem_ctl_info *mci;
u8 dramcr;
u32 ecc_on;
u32 reg_sdram;
u32 eapr;
u32 scrub_disabled;
u32 sdram_refresh_rate;
u32 row_high_limit_last = 0;
struct r82600_error_info discard;
debugf0("%s()\n", __func__);
pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
pci_read_config_dword(pdev, R82600_EAP, &eapr);
ecc_on = dramcr & BIT(5);
reg_sdram = dramcr & BIT(4);
scrub_disabled = eapr & BIT(31);
sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
@@ -231,10 +278,8 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS);
if (mci == NULL) {
rc = -ENOMEM;
goto fail;
}
if (mci == NULL)
return -ENOMEM;
debugf0("%s(): mci = %p\n", __func__, mci);
mci->dev = &pdev->dev;
@@ -250,7 +295,7 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
* is possible. */
mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
if (ecc_on) {
if (ecc_enabled(dramcr)) {
if (scrub_disabled)
debugf3("%s(): mci = %p - Scrubbing disabled! EAP: "
"%#0x\n", __func__, mci, eapr);
@@ -262,46 +307,7 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
mci->ctl_name = "R82600";
mci->edac_check = r82600_check;
mci->ctl_page_to_phys = NULL;
for (index = 0; index < mci->nr_csrows; index++) {
struct csrow_info *csrow = &mci->csrows[index];
u8 drbar; /* sDram Row Boundry Address Register */
u32 row_high_limit;
u32 row_base;
/* find the DRAM Chip Select Base address and mask */
pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
debugf1("MC%d: %s() Row=%d DRBA = %#0x\n", mci->mc_idx,
__func__, index, drbar);
row_high_limit = ((u32) drbar << 24);
/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
debugf1("MC%d: %s() Row=%d, Boundry Address=%#0x, Last = "
"%#0x \n", mci->mc_idx, __func__, index,
row_high_limit, row_high_limit_last);
/* Empty row [p.57] */
if (row_high_limit == row_high_limit_last)
continue;
row_base = row_high_limit_last;
csrow->first_page = row_base >> PAGE_SHIFT;
csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
/* Error address is top 19 bits - so granularity is *
* 14 bits */
csrow->grain = 1 << 14;
csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
/* FIXME - check that this is unknowable with this chipset */
csrow->dtype = DEV_UNKNOWN;
/* Mode is global on 82600 */
csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
row_high_limit_last = row_high_limit;
}
r82600_init_csrows(mci, pdev, dramcr);
r82600_get_error_info(mci, &discard); /* clear counters */
/* Here we assume that we will never see multiple instances of this
@@ -324,10 +330,8 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
return 0;
fail:
if (mci)
edac_mc_free(mci);
return rc;
return -ENODEV;
}
/* returns count (>= 0), or negative on error */