Merge branch 'master' into upstream-fixes
Commit e47b207a5b

CREDITS | 2
@@ -3279,7 +3279,7 @@ S: Sevilla 41005
S: Spain

N: Linus Torvalds
E: torvalds@osdl.org
E: torvalds@linux-foundation.org
D: Original kernel hacker
S: 12725 SW Millikan Way, Suite 400
S: Beaverton, Oregon 97005

@@ -72,3 +72,7 @@ kernel patches.

If the new code is substantial, addition of subsystem-specific fault
injection might be appropriate.

22: Newly-added code has been compiled with `gcc -W'. This will generate
lots of noise, but is good for finding bugs like "warning: comparison
between signed and unsigned".

@@ -134,9 +134,9 @@ Do not send more than 15 patches at once to the vger mailing lists!!!

Linus Torvalds is the final arbiter of all changes accepted into the
Linux kernel. His e-mail address is <torvalds@osdl.org>. He gets
a lot of e-mail, so typically you should do your best to -avoid- sending
him e-mail.
Linux kernel. His e-mail address is <torvalds@linux-foundation.org>.
He gets a lot of e-mail, so typically you should do your best to -avoid-
sending him e-mail.

Patches which are bug fixes, are "obvious" changes, or similarly
require little discussion should be sent or CC'd to Linus. Patches

@@ -318,3 +318,10 @@ Why: /proc/acpi/button has been replaced by events to the input layer
Who: Len Brown <len.brown@intel.com>

---------------------------

What: JFFS (version 1)
When: 2.6.21
Why: Unmaintained for years, superseded by JFFS2 for years.
Who: Jeff Garzik <jeff@garzik.org>

---------------------------

@@ -17,7 +17,7 @@ You can use common Linux commands, such as cp and scp, to copy the
memory image to a dump file on the local disk, or across the network to
a remote system.

Kdump and kexec are currently supported on the x86, x86_64, ppc64 and IA64
Kdump and kexec are currently supported on the x86, x86_64, ppc64 and ia64
architectures.

When the system kernel boots, it reserves a small section of memory for

@@ -61,7 +61,12 @@ Install kexec-tools

2) Download the kexec-tools user-space package from the following URL:

http://www.kernel.org/pub/linux/kernel/people/horms/kexec-tools/kexec-tools-testing-20061214.tar.gz
http://www.kernel.org/pub/linux/kernel/people/horms/kexec-tools/kexec-tools-testing.tar.gz

This is a symlink to the latest version, which at the time of writing is
20061214, the only release of kexec-tools-testing so far. As other versions
are released, the older ones will remain available at
http://www.kernel.org/pub/linux/kernel/people/horms/kexec-tools/

Note: Latest kexec-tools-testing git tree is available at

@@ -71,11 +76,11 @@ http://www.kernel.org/git/?p=linux/kernel/git/horms/kexec-tools-testing.git;a=su

3) Unpack the tarball with the tar command, as follows:

tar xvpzf kexec-tools-testing-20061214.tar.gz
tar xvpzf kexec-tools-testing.tar.gz

4) Change to the kexec-tools-1.101 directory, as follows:
4) Change to the kexec-tools directory, as follows:

cd kexec-tools-testing-20061214
cd kexec-tools-testing-VERSION

5) Configure the package, as follows:

@@ -224,7 +229,23 @@ Dump-capture kernel config options (Arch Dependent, ppc64)

Dump-capture kernel config options (Arch Dependent, ia64)
----------------------------------------------------------
(To be filled)

- No specific options are required to create a dump-capture kernel
for ia64, other than those specified in the arch independent section
above. This means that it is possible to use the system kernel
as a dump-capture kernel if desired.

The crashkernel region can be automatically placed by the system
kernel at run time. This is done by specifying the base address as 0,
or omitting it altogether.

crashkernel=256M@0
or
crashkernel=256M

If the start address is specified, note that the start address of the
kernel will be aligned to 64Mb, so if the start address is not 64Mb
aligned then any space below the alignment point will be wasted.


Boot into System Kernel

@@ -243,6 +264,10 @@ Boot into System Kernel

On ppc64, use "crashkernel=128M@32M".

On ia64, 256M@256M is a generous value that typically works.
The region may be automatically placed on ia64, see the
dump-capture kernel config option notes above.

Load the Dump-capture Kernel
============================

@@ -261,7 +286,8 @@ For x86_64:
For ppc64:
- Use vmlinux
For ia64:
(To be filled)
- Use vmlinux or vmlinuz.gz


If you are using an uncompressed vmlinux image then use the following command
to load the dump-capture kernel.

@@ -277,18 +303,19 @@ to load dump-capture kernel.
--initrd=<initrd-for-dump-capture-kernel> \
--append="root=<root-dev> <arch-specific-options>"

Please note that --args-linux does not need to be specified for ia64.
It is planned to make this a no-op on that architecture, but for now
it should be omitted.

Following are the arch specific command line options to be used while
loading the dump-capture kernel.

For i386 and x86_64:
For i386, x86_64 and ia64:
"init 1 irqpoll maxcpus=1"

For ppc64:
"init 1 maxcpus=1 noirqdistrib"

For IA64
(To be filled)


Notes on loading the dump-capture kernel:

@@ -1,142 +1,231 @@
How To Write Linux PCI Drivers

by Martin Mares <mj@ucw.cz> on 07-Feb-2000
How To Write Linux PCI Drivers

by Martin Mares <mj@ucw.cz> on 07-Feb-2000
updated by Grant Grundler <grundler@parisc-linux.org> on 23-Dec-2006

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The world of PCI is vast and it's full of (mostly unpleasant) surprises.
Different PCI devices have different requirements and different bugs --
because of this, the PCI support layer in Linux kernel is not as trivial
as one would wish. This short pamphlet tries to help all potential driver
authors find their way through the deep forests of PCI handling.
The world of PCI is vast and full of (mostly unpleasant) surprises.
Since each CPU architecture implements different chip-sets and PCI devices
have different requirements (erm, "features"), the result is the PCI support
in the Linux kernel is not as trivial as one would wish. This short paper
tries to introduce all potential driver authors to Linux APIs for
PCI device drivers.

A more complete resource is the third edition of "Linux Device Drivers"
by Jonathan Corbet, Alessandro Rubini, and Greg Kroah-Hartman.
LDD3 is available for free (under Creative Commons License) from:

http://lwn.net/Kernel/LDD3/

However, keep in mind that all documents are subject to "bit rot".
Refer to the source code if things are not working as described here.

Please send questions/comments/patches about Linux PCI API to the
"Linux PCI" <linux-pci@atrey.karlin.mff.cuni.cz> mailing list.

0. Structure of PCI drivers
~~~~~~~~~~~~~~~~~~~~~~~~~~~
There exist two kinds of PCI drivers: new-style ones (which leave most of
probing for devices to the PCI layer and support online insertion and removal
of devices [thus supporting PCI, hot-pluggable PCI and CardBus in a single
driver]) and old-style ones which just do all the probing themselves. Unless
you have a very good reason to do so, please don't use the old way of probing
in any new code. After the driver finds the devices it wishes to operate
on (either the old or the new way), it needs to perform the following steps:
PCI drivers "discover" PCI devices in a system via pci_register_driver().
Actually, it's the other way around. When the PCI generic code discovers
a new device, the driver with a matching "description" will be notified.
Details on this below.

pci_register_driver() leaves most of the probing for devices to
the PCI layer and supports online insertion/removal of devices [thus
supporting hot-pluggable PCI, CardBus, and Express-Card in a single driver].
The pci_register_driver() call requires passing in a table of function
pointers and thus dictates the high level structure of a driver.

Once the driver knows about a PCI device and takes ownership, the
driver generally needs to perform the following initialization:

Enable the device
Access device configuration space
Discover resources (addresses and IRQ numbers) provided by the device
Allocate these resources
Communicate with the device
Request MMIO/IOP resources
Set the DMA mask size (for both coherent and streaming DMA)
Allocate and initialize shared control data (pci_allocate_coherent())
Access device configuration space (if needed)
Register IRQ handler (request_irq())
Initialize non-PCI (i.e. LAN/SCSI/etc parts of the chip)
Enable DMA/processing engines

When done using the device, and perhaps the module needs to be unloaded,
the driver needs to take the following steps:
Disable the device from generating IRQs
Release the IRQ (free_irq())
Stop all DMA activity
Release DMA buffers (both streaming and coherent)
Unregister from other subsystems (e.g. scsi or netdev)
Release MMIO/IOP resources
Disable the device

Most of these topics are covered by the following sections, for the rest
look at <linux/pci.h>, it's hopefully well commented.
Most of these topics are covered in the following sections.
For the rest look at LDD3 or <linux/pci.h>.

If the PCI subsystem is not configured (CONFIG_PCI is not set), most of
the functions described below are defined as inline functions either completely
empty or just returning an appropriate error code to avoid lots of ifdefs
in the drivers.
the PCI functions described below are defined as inline functions either
completely empty or just returning an appropriate error code to avoid
lots of ifdefs in the drivers.

1. New-style drivers
~~~~~~~~~~~~~~~~~~~~
The new-style drivers just call pci_register_driver during their initialization
with a pointer to a structure describing the driver (struct pci_driver) which
contains:

name		Name of the driver
1. pci_register_driver() call
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

PCI device drivers call pci_register_driver() during their
initialization with a pointer to a structure describing the driver
(struct pci_driver):

field name	Description
----------	------------------------------------------------------
id_table	Pointer to table of device IDs the driver is
		interested in. Most drivers should export this
		table using MODULE_DEVICE_TABLE(pci,...).
probe		Pointer to a probing function which gets called (during
		execution of pci_register_driver for already existing
		devices or later if a new device gets inserted) for all
		PCI devices which match the ID table and are not handled
		by the other drivers yet. This function gets passed a
		pointer to the pci_dev structure representing the device
		and also which entry in the ID table did the device
		match. It returns zero when the driver has accepted the
		device or an error code (negative number) otherwise.
		This function always gets called from process context,
		so it can sleep.
remove		Pointer to a function which gets called whenever a
		device being handled by this driver is removed (either
		during deregistration of the driver or when it's
		manually pulled out of a hot-pluggable slot). This
		function always gets called from process context, so it
		can sleep.
save_state	Save a device's state before it's suspended.

probe		This probing function gets called (during execution
		of pci_register_driver() for already existing
		devices or later if a new device gets inserted) for
		all PCI devices which match the ID table and are not
		"owned" by the other drivers yet. This function gets
		passed a "struct pci_dev *" for each device whose
		entry in the ID table matches the device. The probe
		function returns zero when the driver chooses to
		take "ownership" of the device or an error code
		(negative number) otherwise.
		The probe function always gets called from process
		context, so it can sleep.

remove		The remove() function gets called whenever a device
		being handled by this driver is removed (either during
		deregistration of the driver or when it's manually
		pulled out of a hot-pluggable slot).
		The remove function always gets called from process
		context, so it can sleep.

suspend		Put device into low power state.
suspend_late	Put device into low power state.

resume_early	Wake device from low power state.
resume		Wake device from low power state.

		(Please see Documentation/power/pci.txt for descriptions
		of PCI Power Management and the related functions.)

enable_wake	Enable device to generate wake events from a low power
		state.

		(Please see Documentation/power/pci.txt for descriptions
		of PCI Power Management and the related functions.)
shutdown	Hook into reboot_notifier_list (kernel/sys.c).
		Intended to stop any idling DMA operations.
		Useful for enabling wake-on-lan (NIC) or changing
		the power state of a device before reboot.
		e.g. drivers/net/e100.c.

The ID table is an array of struct pci_device_id ending with an all-zero entry.
Each entry consists of:
err_handler	See Documentation/pci-error-recovery.txt

multithread_probe	Enable multi-threaded probe/scan. Driver must
			provide its own locking/synchronization for init
			operations if this is enabled.


The ID table is an array of struct pci_device_id entries ending with an
all-zero entry. Each entry consists of:

vendor,device	Vendor and device ID to match (or PCI_ANY_ID)

vendor, device	Vendor and device ID to match (or PCI_ANY_ID)
subvendor,	Subsystem vendor and device ID to match (or PCI_ANY_ID)
subdevice
class,		Device class to match. The class_mask tells which bits
class_mask	of the class are honored during the comparison.
subdevice,

class		Device class, subclass, and "interface" to match.
		See Appendix D of the PCI Local Bus Spec or
		include/linux/pci_ids.h for a full list of classes.
		Most drivers do not need to specify class/class_mask
		as vendor/device is normally sufficient.

class_mask	limits which sub-fields of the class field are compared.
		See drivers/scsi/sym53c8xx_2/ for an example of usage.

driver_data	Data private to the driver.
		Most drivers don't need to use the driver_data field.
		Best practice is to use driver_data as an index
		into a static list of equivalent device types,
		instead of using it as a pointer.

Most drivers don't need to use the driver_data field. Best practice
for use of driver_data is to use it as an index into a static list of
equivalent device types, not to use it as a pointer.

Have a table entry {PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID}
to have probe() called for every PCI device known to the system.
Most drivers only need PCI_DEVICE() or PCI_DEVICE_CLASS() to set up
a pci_device_id table.

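As a rough illustration of the above (this sketch is not from the original
text; the mydrv name and the vendor/device IDs are hypothetical), a minimal
ID table plus registration skeleton might look like:

    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/pci.h>

    /* Hypothetical IDs: one explicit device plus a whole device class. */
    static struct pci_device_id mydrv_id_table[] __devinitdata = {
            { PCI_DEVICE(0x1234, 0x5678) },
            { PCI_DEVICE_CLASS(PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00) },
            { 0, }                          /* all-zero terminator */
    };
    MODULE_DEVICE_TABLE(pci, mydrv_id_table);

    static int mydrv_probe(struct pci_dev *pdev,
                           const struct pci_device_id *id)
    {
            /* real drivers do much more here; see section 3 */
            return pci_enable_device(pdev);
    }

    static void mydrv_remove(struct pci_dev *pdev)
    {
            pci_disable_device(pdev);
    }

    static struct pci_driver mydrv_driver = {
            .name     = "mydrv",
            .id_table = mydrv_id_table,
            .probe    = mydrv_probe,
            .remove   = mydrv_remove,
    };

    static int __init mydrv_init(void)
    {
            return pci_register_driver(&mydrv_driver);
    }

    static void __exit mydrv_exit(void)
    {
            pci_unregister_driver(&mydrv_driver);
    }

    module_init(mydrv_init);
    module_exit(mydrv_exit);
    MODULE_LICENSE("GPL");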
New PCI IDs may be added to a device driver at runtime by writing
to the file /sys/bus/pci/drivers/{driver}/new_id. When added, the
driver will probe for all devices it can support.
New PCI IDs may be added to a device driver pci_ids table at runtime
as shown below:

echo "vendor device subvendor subdevice class class_mask driver_data" > \
/sys/bus/pci/drivers/{driver}/new_id
where all fields are passed in as hexadecimal values (no leading 0x).
Users need pass only as many fields as necessary; vendor, device,
subvendor, and subdevice fields default to PCI_ANY_ID (FFFFFFFF),
class and classmask fields default to 0, and driver_data defaults to
0UL. Device drivers must initialize use_driver_data in the dynids struct
in their pci_driver struct prior to calling pci_register_driver in order
for the driver_data field to get passed to the driver. Otherwise, only a
0 is passed in that field.
/sys/bus/pci/drivers/{driver}/new_id

All fields are passed in as hexadecimal values (no leading 0x).
Users need pass only as many fields as necessary:
  o vendor, device, subvendor, and subdevice fields default
    to PCI_ANY_ID (FFFFFFFF),
  o class and classmask fields default to 0
  o driver_data defaults to 0UL.

Once added, the driver probe routine will be invoked for any unclaimed
PCI devices listed in its (newly updated) pci_ids list.

When the driver exits, it just calls pci_unregister_driver() and the PCI layer
automatically calls the remove hook for all devices handled by the driver.

1.1 "Attributes" for driver functions/data

Please mark the initialization and cleanup functions where appropriate
(the corresponding macros are defined in <linux/init.h>):

__init		Initialization code. Thrown away after the driver
		initializes.
__exit		Exit code. Ignored for non-modular drivers.
__devinit	Device initialization code. Identical to __init if
		the kernel is not compiled with CONFIG_HOTPLUG, normal
		function otherwise.


__devinit	Device initialization code.
		Identical to __init if the kernel is not compiled
		with CONFIG_HOTPLUG, normal function otherwise.
__devexit	The same for __exit.

Tips:
The module_init()/module_exit() functions (and all initialization
functions called only from these) should be marked __init/exit.
The struct pci_driver shouldn't be marked with any of these tags.
The ID table array should be marked __devinitdata.
The probe() and remove() functions (and all initialization
functions called only from these) should be marked __devinit/exit.
If you are sure the driver is not a hotplug driver then use only
__init/exit __initdata/exitdata.
Tips on when/where to use the above attributes:
  o The module_init()/module_exit() functions (and all
    initialization functions called _only_ from these)
    should be marked __init/__exit.

Pointers to functions marked as __devexit must be created using
__devexit_p(function_name). That will generate the function
name or NULL if the __devexit function will be discarded.
  o Do not mark the struct pci_driver.

  o The ID table array should be marked __devinitdata.

  o The probe() and remove() functions should be marked __devinit
    and __devexit respectively. All initialization functions
    exclusively called by the probe() routine can be marked __devinit.
    Ditto for remove() and __devexit.

  o If mydriver_remove() is marked with __devexit(), then all address
    references to mydriver_remove must use __devexit_p(mydriver_remove)
    (in the struct pci_driver declaration for example).
    __devexit_p() will generate the function name _or_ NULL if the
    function will be discarded. For an example, see drivers/net/tg3.c.

  o Do NOT mark a function if you are not sure which mark to use.
    Better to not mark the function than mark the function wrong.

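To make the attribute rules concrete, here is a hedged sketch (it reuses the
hypothetical mydrv example from section 1; nothing here comes from a real
driver) of where the markings usually land:

    static int __devinit mydrv_probe(struct pci_dev *pdev,
                                     const struct pci_device_id *id)
    {
            return pci_enable_device(pdev);
    }

    static void __devexit mydrv_remove(struct pci_dev *pdev)
    {
            pci_disable_device(pdev);
    }

    /* The struct itself carries no section attribute. The __devexit
     * function is referenced through __devexit_p(), which yields the
     * function name or NULL if the function is discarded. */
    static struct pci_driver mydrv_driver = {
            .name     = "mydrv",
            .id_table = mydrv_id_table,     /* marked __devinitdata */
            .probe    = mydrv_probe,
            .remove   = __devexit_p(mydrv_remove),
    };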
2. How to find PCI devices manually (the old style)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PCI drivers not using the pci_register_driver() interface search
for PCI devices manually using the following constructs:

2. How to find PCI devices manually
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

PCI drivers should have a really good reason for not using the
pci_register_driver() interface to search for PCI devices.
The main reason PCI devices are controlled by multiple drivers
is because one PCI device implements several different HW services.
E.g. combined serial/parallel port/floppy controller.

A manual search may be performed using the following constructs:

Searching by vendor and device ID:

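(The construct itself is not reproduced in this excerpt. As a hedged
illustration only, a vendor/device search loop typically looks something
like the following; PCI_VENDOR_ID_INTEL is just a placeholder choice.)

    struct pci_dev *dev = NULL;

    while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, dev))) {
            /* pci_get_device() returns a counted reference and releases
             * the reference held on the "from" argument, so looping like
             * this is hotplug-safe.  If you break out early, call
             * pci_dev_put(dev) yourself. */
            if (pci_enable_device(dev))
                    continue;
            /* ... set up the device ... */
    }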
@@ -150,87 +239,311 @@ Searching by class ID (iterate in a similar way):

Searching by both vendor/device and subsystem vendor/device ID:

pci_get_subsys(VENDOR_ID, DEVICE_ID, SUBSYS_VENDOR_ID, SUBSYS_DEVICE_ID, dev).
pci_get_subsys(VENDOR_ID, DEVICE_ID, SUBSYS_VENDOR_ID, SUBSYS_DEVICE_ID, dev).

You can use the constant PCI_ANY_ID as a wildcard replacement for
VENDOR_ID or DEVICE_ID. This allows searching for any device from a
specific vendor, for example.

These functions are hotplug-safe. They increment the reference count on
the pci_dev that they return. You must eventually (possibly at module unload)
decrement the reference count on these devices by calling pci_dev_put().

3. Enabling and disabling devices
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Before you do anything with the device you've found, you need to enable
it by calling pci_enable_device() which enables I/O and memory regions of
the device, allocates an IRQ if necessary, assigns missing resources if
needed and wakes up the device if it was in suspended state. Please note
that this function can fail.

If you want to use the device in bus mastering mode, call pci_set_master()
which enables the bus master bit in PCI_COMMAND register and also fixes
the latency timer value if it's set to something bogus by the BIOS.
3. Device Initialization Steps
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

If you want to use the PCI Memory-Write-Invalidate transaction,
As noted in the introduction, most PCI drivers need the following steps
for device initialization:

Enable the device
Request MMIO/IOP resources
Set the DMA mask size (for both coherent and streaming DMA)
Allocate and initialize shared control data (pci_allocate_coherent())
Access device configuration space (if needed)
Register IRQ handler (request_irq())
Initialize non-PCI (i.e. LAN/SCSI/etc parts of the chip)
Enable DMA/processing engines.

The driver can access PCI config space registers at any time.
(Well, almost. When running BIST, config space can go away...but
that will just result in a PCI Bus Master Abort and config reads
will return garbage).

3.1 Enable the PCI device
~~~~~~~~~~~~~~~~~~~~~~~~~
Before touching any device registers, the driver needs to enable
the PCI device by calling pci_enable_device(). This will:
  o wake up the device if it was in suspended state,
  o allocate I/O and memory regions of the device (if BIOS did not),
  o allocate an IRQ (if BIOS did not).

NOTE: pci_enable_device() can fail! Check the return value.
NOTE2: Also see pci_enable_device_bars() below. Drivers can
attempt to enable only a subset of BARs they need.

[ OS BUG: we don't check resource allocations before enabling those
resources. The sequence would make more sense if we called
pci_request_resources() before calling pci_enable_device().
Currently, the device drivers can't detect the bug when two
devices have been allocated the same range. This is not a common
problem and unlikely to get fixed soon.

This has been discussed before but not changed as of 2.6.19:
http://lkml.org/lkml/2006/3/2/194
]

pci_set_master() will enable DMA by setting the bus master bit
in the PCI_COMMAND register. It also fixes the latency timer value if
it's set to something bogus by the BIOS.

If the PCI device can use the PCI Memory-Write-Invalidate transaction,
call pci_set_mwi(). This enables the PCI_COMMAND bit for Mem-Wr-Inval
and also ensures that the cache line size register is set correctly.
Make sure to check the return value of pci_set_mwi(), not all architectures
may support Memory-Write-Invalidate.
Check the return value of pci_set_mwi() as not all architectures
or chip-sets may support Memory-Write-Invalidate.

If your driver decides to stop using the device (e.g., there was an
error while setting it up or the driver module is being unloaded), it
should call pci_disable_device() to deallocate any IRQ resources, disable
PCI bus-mastering, etc. You should not do anything with the device after

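A hedged sketch of this enable step inside a hypothetical probe() routine
(the mydrv name is illustrative, not from the original text):

    static int mydrv_probe(struct pci_dev *pdev,
                           const struct pci_device_id *id)
    {
            int err;

            err = pci_enable_device(pdev);  /* can fail: check it */
            if (err)
                    return err;

            pci_set_master(pdev);           /* enable bus mastering DMA */

            if (pci_set_mwi(pdev))          /* optional; may be unsupported */
                    dev_info(&pdev->dev,
                             "Memory-Write-Invalidate not enabled\n");

            return 0;
    }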
3.2 Request MMIO/IOP resources
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Memory (MMIO), and I/O port addresses should NOT be read directly
from the PCI device config space. Use the values in the pci_dev structure
as the PCI "bus address" might have been remapped to a "host physical"
address by the arch/chip-set specific kernel support.

See Documentation/IO-mapping.txt for how to access device registers
or device memory.

The device driver needs to call pci_request_region() to verify
no other device is already using the same address resource.
Conversely, drivers should call pci_release_region() AFTER
calling pci_disable_device().
The idea is to prevent two devices colliding on the same address range.

4. How to access PCI config space
[ See OS BUG comment above. Currently (2.6.19), the driver can only
determine MMIO and IO Port resource availability _after_ calling
pci_enable_device(). ]

Generic flavors of pci_request_region() are request_mem_region()
(for MMIO ranges) and request_region() (for IO Port ranges).
Use these for address resources that are not described by "normal" PCI
BARs.

Also see pci_request_selected_regions() below.

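Continuing the hypothetical probe() sketch above (illustrative only; the
error-unwind labels are assumed to exist later in the function), claiming
and mapping BAR 0 could look like:

    void __iomem *mmio;

    err = pci_request_regions(pdev, "mydrv");   /* claim all BARs */
    if (err)
            goto err_disable;

    /* Addresses come from the pci_dev resources, never from raw
     * config space reads. */
    mmio = ioremap(pci_resource_start(pdev, 0),
                   pci_resource_len(pdev, 0));
    if (!mmio) {
            err = -ENOMEM;
            goto err_release;
    }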
3.3 Set the DMA mask size
~~~~~~~~~~~~~~~~~~~~~~~~~
[ If anything below doesn't make sense, please refer to
Documentation/DMA-API.txt. This section is just a reminder that
drivers need to indicate DMA capabilities of the device and is not
an authoritative source for DMA interfaces. ]

While all drivers should explicitly indicate the DMA capability
(e.g. 32 or 64 bit) of the PCI bus master, devices with more than
32-bit bus master capability for streaming data need the driver
to "register" this capability by calling pci_set_dma_mask() with
appropriate parameters. In general this allows more efficient DMA
on systems where System RAM exists above 4G _physical_ address.

Drivers for all PCI-X and PCIe compliant devices must call
pci_set_dma_mask() as they are 64-bit DMA devices.

Similarly, drivers must also "register" this capability if the device
can directly address "consistent memory" in System RAM above 4G physical
address by calling pci_set_consistent_dma_mask().
Again, this includes drivers for all PCI-X and PCIe compliant devices.
Many 64-bit "PCI" devices (before PCI-X) and some PCI-X devices are
64-bit DMA capable for payload ("streaming") data but not control
("consistent") data.

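A hedged example of the mask negotiation described above (a sketch, not an
authoritative recipe; DMA_64BIT_MASK/DMA_32BIT_MASK come from
<linux/dma-mapping.h> of this era):

    if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) ||
        pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
            /* fall back to a 32-bit mask */
            err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
            if (!err)
                    err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
            if (err) {
                    dev_err(&pdev->dev, "no usable DMA configuration\n");
                    goto err_unmap;
            }
    }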
3.4 Setup shared control data
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Once the DMA masks are set, the driver can allocate "consistent" (a.k.a. shared)
memory. See Documentation/DMA-API.txt for a full description of
the DMA APIs. This section is just a reminder that it needs to be done
before enabling DMA on the device.

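For example (a rough sketch; struct mydrv_desc and RING_BYTES are made-up
names standing in for a driver's own descriptor layout and size):

    struct mydrv_desc *ring;
    dma_addr_t ring_dma;

    ring = pci_alloc_consistent(pdev, RING_BYTES, &ring_dma);
    if (!ring)
            goto err_unmap;
    memset(ring, 0, RING_BYTES);
    /* at teardown: pci_free_consistent(pdev, RING_BYTES, ring, ring_dma); */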
3.5 Initialize device registers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Some drivers will need specific "capability" fields programmed
or other "vendor specific" registers initialized or reset.
E.g. clearing pending interrupts.

3.6 Register IRQ handler
~~~~~~~~~~~~~~~~~~~~~~~~
While calling request_irq() is the last step described here,
this is often just another intermediate step to initialize a device.
This step can often be deferred until the device is opened for use.

All interrupt handlers for IRQ lines should be registered with IRQF_SHARED
and use the devid to map IRQs to devices (remember that all PCI IRQ lines
can be shared).

request_irq() will associate an interrupt handler and device handle
with an interrupt number. Historically interrupt numbers represent
IRQ lines which run from the PCI device to the Interrupt controller.
With MSI and MSI-X (more below) the interrupt number is a CPU "vector".

request_irq() also enables the interrupt. Make sure the device is
quiesced and does not have any interrupts pending before registering
the interrupt handler.

MSI and MSI-X are PCI capabilities. Both are "Message Signaled Interrupts"
which deliver interrupts to the CPU via a DMA write to a Local APIC.
The fundamental difference between MSI and MSI-X is how multiple
"vectors" get allocated. MSI requires contiguous blocks of vectors
while MSI-X can allocate several individual ones.

MSI capability can be enabled by calling pci_enable_msi() or
pci_enable_msix() before calling request_irq(). This causes
the PCI support to program CPU vector data into the PCI device
capability registers.

If your PCI device supports both, try to enable MSI-X first.
Only one can be enabled at a time. Many architectures, chip-sets,
or BIOSes do NOT support MSI or MSI-X and the call to pci_enable_msi/msix
will fail. This is important to note since many drivers have
two (or more) interrupt handlers: one for MSI/MSI-X and another for IRQs.
They choose which handler to register with request_irq() based on the
return value from pci_enable_msi/msix().

There are (at least) two really good reasons for using MSI:
1) MSI is an exclusive interrupt vector by definition.
   This means the interrupt handler doesn't have to verify
   its device caused the interrupt.

2) MSI avoids DMA/IRQ race conditions. DMA to host memory is guaranteed
   to be visible to the host CPU(s) when the MSI is delivered. This
   is important for both data coherency and avoiding stale control data.
   This guarantee allows the driver to omit MMIO reads to flush
   the DMA stream.

See drivers/infiniband/hw/mthca/ or drivers/net/tg3.c for examples
of MSI/MSI-X usage.

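As a hedged illustration of the MSI-then-INTx fallback pattern mentioned
above (mydrv_msi_handler and mydrv_intx_handler are hypothetical handler
functions the driver would define):

    /* Prefer MSI when available, fall back to a shared INTx handler. */
    if (pci_enable_msi(pdev) == 0)
            err = request_irq(pdev->irq, mydrv_msi_handler, 0,
                              "mydrv", pdev);
    else
            err = request_irq(pdev->irq, mydrv_intx_handler, IRQF_SHARED,
                              "mydrv", pdev);
    if (err)
            goto err_free_ring;

Note that pdev->irq is updated by pci_enable_msi(), so it must be read
after that call.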
4. PCI device shutdown
~~~~~~~~~~~~~~~~~~~~~~

When a PCI device driver is being unloaded, most of the following
steps need to be performed:

Disable the device from generating IRQs
Release the IRQ (free_irq())
Stop all DMA activity
Release DMA buffers (both streaming and consistent)
Unregister from other subsystems (e.g. scsi or netdev)
Disable device from responding to MMIO/IO Port addresses
Release MMIO/IO Port resource(s)


4.1 Stop IRQs on the device
~~~~~~~~~~~~~~~~~~~~~~~~~~~
How to do this is chip/device specific. If it's not done, it opens
the possibility of a "screaming interrupt" if (and only if)
the IRQ is shared with another device.

When the shared IRQ handler is "unhooked", the remaining devices
using the same IRQ line will still need the IRQ enabled. Thus if the
"unhooked" device asserts the IRQ line, the system will respond assuming
it was one of the remaining devices that asserted the IRQ line. Since none
of the other devices will handle the IRQ, the system will "hang" until
it decides the IRQ isn't going to get handled and masks the IRQ (100,000
iterations later). Once the shared IRQ is masked, the remaining devices
will stop functioning properly. Not a nice situation.

This is another reason to use MSI or MSI-X if it's available.
MSI and MSI-X are defined to be exclusive interrupts and thus
are not susceptible to the "screaming interrupt" problem.


4.2 Release the IRQ
~~~~~~~~~~~~~~~~~~~
Once the device is quiesced (no more IRQs), one can call free_irq().
This function will return control once any pending IRQs are handled,
"unhook" the driver's IRQ handler from that IRQ, and finally release
the IRQ if no one else is using it.


4.3 Stop all DMA activity
~~~~~~~~~~~~~~~~~~~~~~~~~
It's extremely important to stop all DMA operations BEFORE attempting
to deallocate DMA control data. Failure to do so can result in memory
corruption, hangs, and on some chip-sets a hard crash.

Stopping DMA after stopping the IRQs can avoid races where the
IRQ handler might restart DMA engines.

While this step sounds obvious and trivial, several "mature" drivers
didn't get this step right in the past.


4.4 Release DMA buffers
~~~~~~~~~~~~~~~~~~~~~~~
Once DMA is stopped, clean up streaming DMA first.
I.e. unmap data buffers and return buffers to "upstream"
owners if there is one.

Then clean up "consistent" buffers which contain the control data.

See Documentation/DMA-API.txt for details on unmapping interfaces.


4.5 Unregister from other subsystems
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Most low level PCI device drivers support some other subsystem
like USB, ALSA, SCSI, NetDev, Infiniband, etc. Make sure your
driver isn't losing resources from that other subsystem.
If this happens, typically the symptom is an Oops (panic) when
the subsystem attempts to call into a driver that has been unloaded.


4.6 Disable Device from responding to MMIO/IO Port addresses
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
iounmap() MMIO or IO Port resources and then call pci_disable_device().
This is the symmetric opposite of pci_enable_device().
Do not access device registers after calling pci_disable_device().


4.7 Release MMIO/IO Port Resource(s)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Call pci_release_region() to mark the MMIO or IO Port range as available.
Failure to do so usually results in the inability to reload the driver.

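Pulling the shutdown steps together, a hedged remove() sketch might look
like the following (mydrv_priv, mydrv_stop_hw and RING_BYTES are assumed
helpers/constants of the hypothetical driver, with the private struct
stored via pci_set_drvdata() at probe time):

    static void mydrv_remove(struct pci_dev *pdev)
    {
            struct mydrv_priv *priv = pci_get_drvdata(pdev);

            mydrv_stop_hw(priv);            /* 4.1/4.3: no more IRQs or DMA */
            free_irq(pdev->irq, pdev);      /* 4.2 */
            pci_disable_msi(pdev);          /* only if MSI was enabled */
            pci_free_consistent(pdev, RING_BYTES,
                                priv->ring, priv->ring_dma);    /* 4.4 */
            iounmap(priv->mmio);            /* 4.6 */
            pci_disable_device(pdev);
            pci_release_regions(pdev);      /* 4.7 */
    }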
5. How to access PCI config space
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You can use pci_(read|write)_config_(byte|word|dword) to access the config
space of a device represented by struct pci_dev *. All these functions return 0
when successful or an error code (PCIBIOS_...) which can be translated to a text
string by pcibios_strerror. Most drivers expect that accesses to valid PCI
devices don't fail.

If you don't have a struct pci_dev available, you can call
pci_bus_(read|write)_config_(byte|word|dword) to access a given device
and function on that bus.

If you access fields in the standard portion of the config header, please
use symbolic names of locations and bits declared in <linux/pci.h>.

If you need to access Extended PCI Capability registers, just call
pci_find_capability() for the particular capability and it will find the
corresponding register block for you.

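For example (a small illustrative fragment, assuming a struct pci_dev *pdev
is in scope; the registers chosen here are arbitrary):

    int pos;
    u16 vendor, cmd, pmcsr;

    /* Symbolic offsets and bits come from <linux/pci.h>. */
    pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
    pci_read_config_word(pdev, PCI_COMMAND, &cmd);
    pci_write_config_word(pdev, PCI_COMMAND, cmd | PCI_COMMAND_MEMORY);

    /* Capability lookup, e.g. Power Management: */
    pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
    if (pos)
            pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);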
5. Addresses and interrupts
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Memory and port addresses and interrupt numbers should NOT be read from the
config space. You should use the values in the pci_dev structure as they might
have been remapped by the kernel.

See Documentation/IO-mapping.txt for how to access device memory.

The device driver needs to call pci_request_region() to make sure
no other device is already using the same resource. The driver is expected
to determine MMIO and IO Port resource availability _before_ calling
pci_enable_device(). Conversely, drivers should call pci_release_region()
_after_ calling pci_disable_device(). The idea is to prevent two devices
colliding on the same address range.

Generic flavors of pci_request_region() are request_mem_region()
(for MMIO ranges) and request_region() (for IO Port ranges).
Use these for address resources that are not described by "normal" PCI
interfaces (e.g. BAR).

All interrupt handlers should be registered with IRQF_SHARED and use the devid
to map IRQs to devices (remember that all PCI interrupts are shared).


6. Other interesting functions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

pci_find_slot()			Find pci_dev corresponding to given bus and
				slot numbers.
pci_set_power_state()		Set PCI Power Management state (0=D0 ... 3=D3)

@@ -247,11 +560,12 @@ pci_set_mwi() Enable Memory-Write-Invalidate transactions.
pci_clear_mwi()			Disable Memory-Write-Invalidate transactions.


7. Miscellaneous hints
~~~~~~~~~~~~~~~~~~~~~~
When displaying PCI slot names to the user (for example when a driver wants
to tell the user what card it has found), please use pci_name(pci_dev)
for this purpose.

When displaying PCI device names to the user (for example when a driver wants
to tell the user what card it has found), please use pci_name(pci_dev).

Always refer to the PCI devices by a pointer to the pci_dev structure.
All PCI layer functions use this identification and it's the only

@@ -259,31 +573,113 @@ reasonable one. Don't use bus/slot/function numbers except for very
special purposes -- on systems with multiple primary buses their semantics
can be pretty complex.

If you're going to use PCI bus mastering DMA, take a look at
Documentation/DMA-mapping.txt.

Don't try to turn on Fast Back to Back writes in your driver. All devices
on the bus need to be capable of doing it, so this is something which needs
to be handled by platform and generic code, not individual drivers.


8. Vendor and device identifications
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For the future, let's avoid adding device ids to include/linux/pci_ids.h.

PCI_VENDOR_ID_xxx for vendors, and a hex constant for device ids.
One is not required to add new device ids to include/linux/pci_ids.h.
Please add PCI_VENDOR_ID_xxx for vendors and a hex constant for device ids.

PCI_VENDOR_ID_xxx constants are re-used. The device ids are arbitrary
hex numbers (vendor controlled) and normally used only in a single
location, the pci_device_id table.

Please DO submit new vendor/device ids to the pciids.sourceforge.net project.

Rationale: PCI_VENDOR_ID_xxx constants are re-used, but device ids are not.
Further, device ids are arbitrary hex numbers, normally used only in a
single location, the pci_device_id table.

9. Obsolete functions
~~~~~~~~~~~~~~~~~~~~~

There are several functions which you might come across when trying to
port an old driver to the new PCI interface. They are no longer present
in the kernel as they aren't compatible with hotplug or PCI domains or
having sane locking.

pci_find_device()	Superseded by pci_get_device()
pci_find_subsys()	Superseded by pci_get_subsys()
pci_find_slot()		Superseded by pci_get_slot()

The alternative is the traditional PCI device driver that walks PCI
device lists. This is still possible but discouraged.

10. pci_enable_device_bars() and Legacy I/O Port space
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Large servers may not be able to provide I/O port resources to all PCI
devices. I/O Port space is only 64KB on Intel Architecture[1] and is
likely also fragmented since the I/O base register of a PCI-to-PCI
bridge will usually be aligned to a 4KB boundary[2]. On such systems,
pci_enable_device() and pci_request_region() will fail when
attempting to enable I/O Port regions that don't have I/O Port
resources assigned.

Fortunately, many PCI devices which request I/O Port resources also
provide access to the same registers via MMIO BARs. These devices can
be handled without using I/O port space and the drivers typically
offer a CONFIG_ option to only use MMIO regions
(e.g. CONFIG_TULIP_MMIO). PCI devices typically provide I/O port
interface for legacy OSes and will work when I/O port resources are not
assigned. The "PCI Local Bus Specification Revision 3.0" discusses
this on p.44, "IMPLEMENTATION NOTE".

If your PCI device driver doesn't need I/O port resources assigned to
I/O Port BARs, you should use pci_enable_device_bars() instead of
pci_enable_device() in order not to enable I/O port regions for the
corresponding devices. In addition, you should use
pci_request_selected_regions() and pci_release_selected_regions()
instead of pci_request_regions()/pci_release_regions() in order not to
request/release I/O port regions for the corresponding devices.

[1] Some systems support 64KB I/O port space per PCI segment.
[2] Some PCI-to-PCI bridges support optional 1KB aligned I/O base.


11. MMIO Space and "Write Posting"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Converting a driver from using I/O Port space to using MMIO space
often requires some additional changes. Specifically, "write posting"
needs to be handled. Many drivers (e.g. tg3, acenic, sym53c8xx_2)
already do this. I/O Port space guarantees write transactions reach the PCI
device before the CPU can continue. Writes to MMIO space allow the CPU
to continue before the transaction reaches the PCI device. HW weenies
call this "Write Posting" because the write completion is "posted" to
the CPU before the transaction has reached its destination.

Thus, timing sensitive code should add readl() where the CPU is
expected to wait before doing other work. The classic "bit banging"
sequence works fine for I/O Port space:

    for (i = 8; --i; val >>= 1) {
            outb(val & 1, ioport_reg);      /* write bit */
            udelay(10);
    }

The same sequence for MMIO space should be:

    for (i = 8; --i; val >>= 1) {
            writeb(val & 1, mmio_reg);      /* write bit */
            readb(safe_mmio_reg);           /* flush posted write */
            udelay(10);
    }

It is important that "safe_mmio_reg" not have any side effects that
interfere with the correct operation of the device.

Another case to watch out for is when resetting a PCI device. Use PCI
Configuration space reads to flush the writel(). This will gracefully
handle the PCI master abort on all platforms if the PCI device is
expected to not respond to a readl(). Most x86 platforms will allow
MMIO reads to master abort (a.k.a. "Soft Fail") and return garbage
(e.g. ~0). But many RISC platforms will crash (a.k.a. "Hard Fail").

@@ -21,7 +21,7 @@ difficult to maintain, add yourself with a patch if desired.
Bill Ryder <bryder@sgi.com>
Thomas Sailer <sailer@ife.ee.ethz.ch>
Gregory P. Smith <greg@electricrain.com>
Linus Torvalds <torvalds@osdl.org>
Linus Torvalds <torvalds@linux-foundation.org>
Roman Weissgaerber <weissg@vienna.at>
<Kazuki.Yasumatsu@fujixerox.co.jp>

@@ -1254,7 +1254,7 @@ S: Maintained

ETHERNET BRIDGE
P: Stephen Hemminger
M: shemminger@osdl.org
M: shemminger@linux-foundation.org
L: bridge@osdl.org
W: http://bridge.sourceforge.net/
S: Maintained

@@ -2277,7 +2277,7 @@ S: Maintained

NETEM NETWORK EMULATOR
P: Stephen Hemminger
M: shemminger@osdl.org
M: shemminger@linux-foundation.org
L: netem@osdl.org
S: Maintained

@@ -3081,7 +3081,7 @@ S: Maintained

SKGE, SKY2 10/100/1000 GIGABIT ETHERNET DRIVERS
P: Stephen Hemminger
M: shemminger@osdl.org
M: shemminger@linux-foundation.org
L: netdev@vger.kernel.org
S: Maintained

README | 4
@@ -278,8 +278,8 @@ IF SOMETHING GOES WRONG:
the file MAINTAINERS to see if there is a particular person associated
with the part of the kernel that you are having trouble with. If there
isn't anyone listed there, then the second best thing is to mail
them to me (torvalds@osdl.org), and possibly to any other relevant
mailing-list or to the newsgroup.
them to me (torvalds@linux-foundation.org), and possibly to any other
relevant mailing-list or to the newsgroup.

- In all bug-reports, *please* tell what kernel you are talking about,
how to duplicate the problem, and what your setup is (use your common

@@ -710,11 +710,8 @@ __cpuinit int init_gdt(int cpu, struct task_struct *idle)
    return 1;
}

/* Common CPU init for both boot and secondary CPUs */
static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
void __cpuinit cpu_set_gdt(int cpu)
{
    struct tss_struct * t = &per_cpu(init_tss, cpu);
    struct thread_struct *thread = &curr->thread;
    struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);

    /* Reinit these anyway, even if they've already been done (on

@@ -722,6 +719,13 @@ static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
       the real ones). */
    load_gdt(cpu_gdt_descr);
    set_kernel_gs();
}

/* Common CPU init for both boot and secondary CPUs */
static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
{
    struct tss_struct * t = &per_cpu(init_tss, cpu);
    struct thread_struct *thread = &curr->thread;

    if (cpu_test_and_set(cpu, cpu_initialized)) {
        printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);

@@ -807,6 +811,7 @@ void __cpuinit cpu_init(void)
        local_irq_enable();
    }

    cpu_set_gdt(cpu);
    _cpu_init(cpu, curr);
}

@@ -310,13 +310,7 @@ static int __init setup_nmi_watchdog(char *str)

    if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
        return 0;
    /*
     * If any other x86 CPU has a local APIC, then
     * please test the NMI stuff there and send me the
     * missing bits. Right now Intel P6/P4 and AMD K7 only.
     */
    if ((nmi == NMI_LOCAL_APIC) && (nmi_known_cpu() == 0))
        return 0;  /* no lapic support */

    nmi_watchdog = nmi;
    return 1;
}

@@ -566,4 +566,11 @@ struct paravirt_ops paravirt_ops = {
    .irq_enable_sysexit = native_irq_enable_sysexit,
    .iret = native_iret,
};
EXPORT_SYMBOL(paravirt_ops);

/*
 * NOTE: CONFIG_PARAVIRT is experimental and the paravirt_ops
 * semantics are subject to change. Hence we only do this
 * internal-only export of this, until it gets sorted out and
 * all lowlevel CPU ops used by modules are separately exported.
 */
EXPORT_SYMBOL_GPL(paravirt_ops);

@@ -595,6 +595,12 @@ static void __cpuinit start_secondary(void *unused)
 */
void __devinit initialize_secondary(void)
{
    /*
     * switch to the per CPU GDT we already set up
     * in do_boot_cpu()
     */
    cpu_set_gdt(current_thread_info()->cpu);

    /*
     * We don't actually need to load the full TSS,
     * basically just the stack pointer and the eip.

@@ -972,9 +978,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
    /* Stack for startup_32 can be just as for start_secondary onwards */
    stack_start.esp = (void *) idle->thread.esp;

    start_pda = cpu_pda(cpu);
    cpu_gdt_descr = per_cpu(cpu_gdt_descr, cpu);

    irq_ctx_init(cpu);

    x86_cpu_to_apicid[cpu] = apicid;

@@ -772,6 +772,12 @@ initialize_secondary(void)
    set_current(hard_get_current());
#endif

    /*
     * switch to the per CPU GDT we already set up
     * in do_boot_cpu()
     */
    cpu_set_gdt(current_thread_info()->cpu);

    /*
     * We don't actually need to load the full TSS,
     * basically just the stack pointer and the eip.

@@ -1568,6 +1568,20 @@ config MIPS_MT_FPAFF
    depends on MIPS_MT
    default y

config MIPS_MT_SMTC_INSTANT_REPLAY
    bool "Low-latency Dispatch of Deferred SMTC IPIs"
    depends on MIPS_MT_SMTC
    default y
    help
      SMTC pseudo-interrupts between TCs are deferred and queued
      if the target TC is interrupt-inhibited (IXMT). In the first
      SMTC prototypes, these queued IPIs were serviced on return
      to user mode, or on entry into the kernel idle loop. The
      INSTANT_REPLAY option dispatches them as part of local_irq_restore()
      processing, which adds runtime overhead (hence the option to turn
      it off), but ensures that IPIs are handled promptly even under
      heavy I/O interrupt load.

config MIPS_VPE_LOADER_TOM
    bool "Load VPE program into memory hidden from linux"
    depends on MIPS_VPE_LOADER

@@ -1017,6 +1017,33 @@ void setup_cross_vpe_interrupts(void)
 * SMTC-specific hacks invoked from elsewhere in the kernel.
 */

void smtc_ipi_replay(void)
{
    /*
     * To the extent that we've ever turned interrupts off,
     * we may have accumulated deferred IPIs. This is subtle.
     * If we use the smtc_ipi_qdepth() macro, we'll get an
     * exact number - but we'll also disable interrupts
     * and create a window of failure where a new IPI gets
     * queued after we test the depth but before we re-enable
     * interrupts. So long as IXMT never gets set, however,
     * we should be OK: If we pick up something and dispatch
     * it here, that's great. If we see nothing, but concurrent
     * with this operation, another TC sends us an IPI, IXMT
     * is clear, and we'll handle it as a real pseudo-interrupt
     * and not a pseudo-pseudo interrupt.
     */
    if (IPIQ[smp_processor_id()].depth > 0) {
        struct smtc_ipi *pipi;
        extern void self_ipi(struct smtc_ipi *);

        while ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()]))) {
            self_ipi(pipi);
            smtc_cpu_stats[smp_processor_id()].selfipis++;
        }
    }
}

void smtc_idle_loop_hook(void)
{
#ifdef SMTC_IDLE_HOOK_DEBUG

@@ -1113,29 +1140,14 @@ void smtc_idle_loop_hook(void)
    if (pdb_msg != &id_ho_db_msg[0])
        printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
#endif /* SMTC_IDLE_HOOK_DEBUG */
    /*
     * To the extent that we've ever turned interrupts off,
     * we may have accumulated deferred IPIs. This is subtle.
     * If we use the smtc_ipi_qdepth() macro, we'll get an
     * exact number - but we'll also disable interrupts
     * and create a window of failure where a new IPI gets
     * queued after we test the depth but before we re-enable
     * interrupts. So long as IXMT never gets set, however,
     * we should be OK: If we pick up something and dispatch
     * it here, that's great. If we see nothing, but concurrent
     * with this operation, another TC sends us an IPI, IXMT
     * is clear, and we'll handle it as a real pseudo-interrupt
     * and not a pseudo-pseudo interrupt.
     */
    if (IPIQ[smp_processor_id()].depth > 0) {
        struct smtc_ipi *pipi;
        extern void self_ipi(struct smtc_ipi *);

        if ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()])) != NULL) {
            self_ipi(pipi);
            smtc_cpu_stats[smp_processor_id()].selfipis++;
        }
    }
    /*
     * Replay any accumulated deferred IPIs. If "Instant Replay"
     * is in use, there should never be any.
     */
#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
    smtc_ipi_replay();
#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
}

void smtc_soft_dump(void)

@@ -1,7 +1,7 @@
/*
 * Interrupt handling routines for NEC VR4100 series.
 *
 * Copyright (C) 2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
 * Copyright (C) 2005-2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by

@@ -73,13 +73,19 @@ static void irq_dispatch(unsigned int irq)
    if (cascade->get_irq != NULL) {
        unsigned int source_irq = irq;
        desc = irq_desc + source_irq;
        desc->chip->ack(source_irq);
        if (desc->chip->mask_ack)
            desc->chip->mask_ack(source_irq);
        else {
            desc->chip->mask(source_irq);
            desc->chip->ack(source_irq);
        }
        irq = cascade->get_irq(irq);
        if (irq < 0)
            atomic_inc(&irq_err_count);
        else
            irq_dispatch(irq);
        desc->chip->end(source_irq);
        if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
            desc->chip->unmask(source_irq);
    } else
        do_IRQ(irq);
}

@@ -358,13 +358,12 @@ ev64360_setup_mtd(void)

    ptbl_entries = 3;

    if ((ptbl = kmalloc(ptbl_entries * sizeof(struct mtd_partition),
    if ((ptbl = kzalloc(ptbl_entries * sizeof(struct mtd_partition),
            GFP_KERNEL)) == NULL) {

        printk(KERN_WARNING "Can't alloc MTD partition table\n");
        return -ENOMEM;
    }
    memset(ptbl, 0, ptbl_entries * sizeof(struct mtd_partition));

    ptbl[0].name = "reserved";
    ptbl[0].offset = 0;

@ -302,8 +302,6 @@ int __init setup_nmi_watchdog(char *str)
|
|||
if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
|
||||
return 0;
|
||||
|
||||
if ((nmi == NMI_LOCAL_APIC) && (nmi_known_cpu() == 0))
|
||||
return 0; /* no lapic support */
|
||||
nmi_watchdog = nmi;
|
||||
return 1;
|
||||
}
|
||||
|
|
|
@ -590,6 +590,12 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
|
|||
*/
|
||||
rq->cmd_flags |= REQ_SOFTBARRIER;
|
||||
|
||||
/*
|
||||
* Most requeues happen because of a busy condition,
|
||||
* don't force unplug of the queue for that case.
|
||||
*/
|
||||
unplug_it = 0;
|
||||
|
||||
if (q->ordseq == 0) {
|
||||
list_add(&rq->queuelist, &q->queue_head);
|
||||
break;
|
||||
|
@ -604,11 +610,6 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
|
|||
}
|
||||
|
||||
list_add_tail(&rq->queuelist, pos);
|
||||
/*
|
||||
* most requeues happen because of a busy condition, don't
|
||||
* force unplug of the queue for that case.
|
||||
*/
|
||||
unplug_it = 0;
|
||||
break;
|
||||
|
||||
default:
|
||||
|
|
|
@ -1677,8 +1677,6 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
|
|||
struct acpi_video_device *video_device = data;
|
||||
struct acpi_device *device = NULL;
|
||||
|
||||
|
||||
printk("video device notify\n");
|
||||
if (!video_device)
|
||||
return;
|
||||
|
||||
|
|
|
@ -1845,7 +1845,7 @@ static u16 __devinit read_bia (const hrz_dev * dev, u16 addr)
|
|||
|
||||
/********** initialise a card **********/
|
||||
|
||||
static int __init hrz_init (hrz_dev * dev) {
|
||||
static int __devinit hrz_init (hrz_dev * dev) {
|
||||
int onefivefive;
|
||||
|
||||
u16 chan;
|
||||
|
|
|
@ -186,6 +186,7 @@ static int got_event; /* if events processing have been done */
|
|||
static void switchover_timeout(unsigned long data);
|
||||
static struct timer_list switchover_timer =
|
||||
TIMER_INITIALIZER(switchover_timeout , 0, 0);
|
||||
static unsigned long tlclk_timer_data;
|
||||
|
||||
static struct tlclk_alarms *alarm_events;
|
||||
|
||||
|
@@ -197,10 +198,19 @@ static irqreturn_t tlclk_interrupt(int irq, void *dev_id);

static DECLARE_WAIT_QUEUE_HEAD(wq);

static unsigned long useflags;
static DEFINE_MUTEX(tlclk_mutex);

static int tlclk_open(struct inode *inode, struct file *filp)
{
	int result;

	if (test_and_set_bit(0, &useflags))
		return -EBUSY;
		/* this legacy device is always one per system and it doesn't
		 * know how to handle multiple concurrent clients.
		 */

	/* Make sure there is no interrupt pending while
	 * initialising interrupt handler */
	inb(TLCLK_REG6);
@@ -221,6 +231,7 @@ static int tlclk_open(struct inode *inode, struct file *filp)
static int tlclk_release(struct inode *inode, struct file *filp)
{
	free_irq(telclk_interrupt, tlclk_interrupt);
	clear_bit(0, &useflags);

	return 0;
}
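The tlclk_open()/tlclk_release() pair above enforces single-open semantics: an atomic test_and_set_bit() on a flag word turns away a second opener with -EBUSY, and release clears the bit again. A minimal sketch of that pattern with generic names follows; the flag word and return codes mirror the driver, while the device specifics are omitted.

#include <linux/fs.h>
#include <linux/bitops.h>
#include <linux/errno.h>

static unsigned long demo_in_use;	/* bit 0 set while the device is open */

static int demo_open(struct inode *inode, struct file *filp)
{
	/* Atomically claim the device; a concurrent open loses the race
	 * and sees the bit already set. */
	if (test_and_set_bit(0, &demo_in_use))
		return -EBUSY;
	return 0;
}

static int demo_release(struct inode *inode, struct file *filp)
{
	clear_bit(0, &demo_in_use);	/* allow the next opener in */
	return 0;
}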
@@ -230,26 +241,25 @@ static ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
{
	if (count < sizeof(struct tlclk_alarms))
		return -EIO;
	if (mutex_lock_interruptible(&tlclk_mutex))
		return -EINTR;

	wait_event_interruptible(wq, got_event);
	if (copy_to_user(buf, alarm_events, sizeof(struct tlclk_alarms)))
	if (copy_to_user(buf, alarm_events, sizeof(struct tlclk_alarms))) {
		mutex_unlock(&tlclk_mutex);
		return -EFAULT;
	}

	memset(alarm_events, 0, sizeof(struct tlclk_alarms));
	got_event = 0;

	mutex_unlock(&tlclk_mutex);
	return sizeof(struct tlclk_alarms);
}
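The reworked tlclk_read() above serializes readers with an interruptibly taken mutex, sleeps for the next event, and, importantly, drops the mutex on the copy_to_user() failure path as well as on success. A hedged sketch of the same shape with placeholder data; the mutex and wait-queue usage follow the driver, the event buffer is a stand-in for struct tlclk_alarms.

#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static DEFINE_MUTEX(demo_mutex);
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_event;
static char demo_data[64];		/* stand-in for struct tlclk_alarms */

static ssize_t demo_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *f_pos)
{
	if (count < sizeof(demo_data))
		return -EIO;
	if (mutex_lock_interruptible(&demo_mutex))
		return -EINTR;

	wait_event_interruptible(demo_wq, demo_event);

	/* Every exit path after the lock is taken must release it,
	 * including the copy failure, which the patch above fixes. */
	if (copy_to_user(buf, demo_data, sizeof(demo_data))) {
		mutex_unlock(&demo_mutex);
		return -EFAULT;
	}

	memset(demo_data, 0, sizeof(demo_data));
	demo_event = 0;

	mutex_unlock(&demo_mutex);
	return sizeof(demo_data);
}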
static ssize_t tlclk_write(struct file *filp, const char __user *buf, size_t count,
		loff_t *f_pos)
{
	return 0;
}

static const struct file_operations tlclk_fops = {
	.read = tlclk_read,
	.write = tlclk_write,
	.open = tlclk_open,
	.release = tlclk_release,
@ -540,7 +550,7 @@ static ssize_t store_select_amcb1_transmit_clock(struct device *d,
|
|||
SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x7);
|
||||
switch (val) {
|
||||
case CLK_8_592MHz:
|
||||
SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
|
||||
SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
|
||||
break;
|
||||
case CLK_11_184MHz:
|
||||
SET_PORT_BITS(TLCLK_REG0, 0xfc, 0);
|
||||
|
@ -549,7 +559,7 @@ static ssize_t store_select_amcb1_transmit_clock(struct device *d,
|
|||
SET_PORT_BITS(TLCLK_REG0, 0xfc, 3);
|
||||
break;
|
||||
case CLK_44_736MHz:
|
||||
SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
|
||||
SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
|
||||
break;
|
||||
}
|
||||
} else
|
||||
|
@ -839,11 +849,13 @@ static void __exit tlclk_cleanup(void)
|
|||
|
||||
static void switchover_timeout(unsigned long data)
|
||||
{
|
||||
if ((data & 1)) {
|
||||
if ((inb(TLCLK_REG1) & 0x08) != (data & 0x08))
|
||||
unsigned long flags = *(unsigned long *) data;
|
||||
|
||||
if ((flags & 1)) {
|
||||
if ((inb(TLCLK_REG1) & 0x08) != (flags & 0x08))
|
||||
alarm_events->switchover_primary++;
|
||||
} else {
|
||||
if ((inb(TLCLK_REG1) & 0x08) != (data & 0x08))
|
||||
if ((inb(TLCLK_REG1) & 0x08) != (flags & 0x08))
|
||||
alarm_events->switchover_secondary++;
|
||||
}
|
||||
|
||||
|
@ -901,8 +913,9 @@ static irqreturn_t tlclk_interrupt(int irq, void *dev_id)
|
|||
|
||||
/* TIMEOUT in ~10ms */
|
||||
switchover_timer.expires = jiffies + msecs_to_jiffies(10);
|
||||
switchover_timer.data = inb(TLCLK_REG1);
|
||||
add_timer(&switchover_timer);
|
||||
tlclk_timer_data = inb(TLCLK_REG1);
|
||||
switchover_timer.data = (unsigned long) &tlclk_timer_data;
|
||||
mod_timer(&switchover_timer, switchover_timer.expires);
|
||||
} else {
|
||||
got_event = 1;
|
||||
wake_up(&wq);
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* Copyright (C) 2002 MontaVista Software Inc.
|
||||
* Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com>
|
||||
* Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
|
||||
* Copyright (C) 2003-2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
|
@ -125,30 +125,17 @@ static inline uint16_t giu_clear(uint16_t offset, uint16_t clear)
|
|||
return data;
|
||||
}
|
||||
|
||||
static unsigned int startup_giuint_low_irq(unsigned int irq)
|
||||
static void ack_giuint_low(unsigned int irq)
|
||||
{
|
||||
unsigned int pin;
|
||||
|
||||
pin = GPIO_PIN_OF_IRQ(irq);
|
||||
giu_write(GIUINTSTATL, 1 << pin);
|
||||
giu_set(GIUINTENL, 1 << pin);
|
||||
|
||||
return 0;
|
||||
giu_write(GIUINTSTATL, 1 << GPIO_PIN_OF_IRQ(irq));
|
||||
}
|
||||
|
||||
static void shutdown_giuint_low_irq(unsigned int irq)
|
||||
static void mask_giuint_low(unsigned int irq)
|
||||
{
|
||||
giu_clear(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
|
||||
}
|
||||
|
||||
static void enable_giuint_low_irq(unsigned int irq)
|
||||
{
|
||||
giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
|
||||
}
|
||||
|
||||
#define disable_giuint_low_irq shutdown_giuint_low_irq
|
||||
|
||||
static void ack_giuint_low_irq(unsigned int irq)
|
||||
static void mask_ack_giuint_low(unsigned int irq)
|
||||
{
|
||||
unsigned int pin;
|
||||
|
||||
|
@ -157,46 +144,30 @@ static void ack_giuint_low_irq(unsigned int irq)
|
|||
giu_write(GIUINTSTATL, 1 << pin);
|
||||
}
|
||||
|
||||
static void end_giuint_low_irq(unsigned int irq)
|
||||
static void unmask_giuint_low(unsigned int irq)
|
||||
{
|
||||
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
|
||||
giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
|
||||
giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
|
||||
}
|
||||
|
||||
static struct hw_interrupt_type giuint_low_irq_type = {
|
||||
.typename = "GIUINTL",
|
||||
.startup = startup_giuint_low_irq,
|
||||
.shutdown = shutdown_giuint_low_irq,
|
||||
.enable = enable_giuint_low_irq,
|
||||
.disable = disable_giuint_low_irq,
|
||||
.ack = ack_giuint_low_irq,
|
||||
.end = end_giuint_low_irq,
|
||||
static struct irq_chip giuint_low_irq_chip = {
|
||||
.name = "GIUINTL",
|
||||
.ack = ack_giuint_low,
|
||||
.mask = mask_giuint_low,
|
||||
.mask_ack = mask_ack_giuint_low,
|
||||
.unmask = unmask_giuint_low,
|
||||
};
|
||||
|
||||
static unsigned int startup_giuint_high_irq(unsigned int irq)
|
||||
static void ack_giuint_high(unsigned int irq)
|
||||
{
|
||||
unsigned int pin;
|
||||
|
||||
pin = GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET;
|
||||
giu_write(GIUINTSTATH, 1 << pin);
|
||||
giu_set(GIUINTENH, 1 << pin);
|
||||
|
||||
return 0;
|
||||
giu_write(GIUINTSTATH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
|
||||
}
|
||||
|
||||
static void shutdown_giuint_high_irq(unsigned int irq)
|
||||
static void mask_giuint_high(unsigned int irq)
|
||||
{
|
||||
giu_clear(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
|
||||
}
|
||||
|
||||
static void enable_giuint_high_irq(unsigned int irq)
|
||||
{
|
||||
giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
|
||||
}
|
||||
|
||||
#define disable_giuint_high_irq shutdown_giuint_high_irq
|
||||
|
||||
static void ack_giuint_high_irq(unsigned int irq)
|
||||
static void mask_ack_giuint_high(unsigned int irq)
|
||||
{
|
||||
unsigned int pin;
|
||||
|
||||
|
@ -205,20 +176,17 @@ static void ack_giuint_high_irq(unsigned int irq)
|
|||
giu_write(GIUINTSTATH, 1 << pin);
|
||||
}
|
||||
|
||||
static void end_giuint_high_irq(unsigned int irq)
|
||||
static void unmask_giuint_high(unsigned int irq)
|
||||
{
|
||||
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
|
||||
giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
|
||||
giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
|
||||
}
|
||||
|
||||
static struct hw_interrupt_type giuint_high_irq_type = {
|
||||
.typename = "GIUINTH",
|
||||
.startup = startup_giuint_high_irq,
|
||||
.shutdown = shutdown_giuint_high_irq,
|
||||
.enable = enable_giuint_high_irq,
|
||||
.disable = disable_giuint_high_irq,
|
||||
.ack = ack_giuint_high_irq,
|
||||
.end = end_giuint_high_irq,
|
||||
static struct irq_chip giuint_high_irq_chip = {
|
||||
.name = "GIUINTH",
|
||||
.ack = ack_giuint_high,
|
||||
.mask = mask_giuint_high,
|
||||
.mask_ack = mask_ack_giuint_high,
|
||||
.unmask = unmask_giuint_high,
|
||||
};
|
||||
|
||||
static int giu_get_irq(unsigned int irq)
|
||||
|
@ -282,9 +250,15 @@ void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger, irq_signal_
|
|||
break;
|
||||
}
|
||||
}
|
||||
set_irq_chip_and_handler(GIU_IRQ(pin),
|
||||
&giuint_low_irq_chip,
|
||||
handle_edge_irq);
|
||||
} else {
|
||||
giu_clear(GIUINTTYPL, mask);
|
||||
giu_clear(GIUINTHTSELL, mask);
|
||||
set_irq_chip_and_handler(GIU_IRQ(pin),
|
||||
&giuint_low_irq_chip,
|
||||
handle_level_irq);
|
||||
}
|
||||
giu_write(GIUINTSTATL, mask);
|
||||
} else if (pin < GIUINT_HIGH_MAX) {
|
||||
|
@ -311,9 +285,15 @@ void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger, irq_signal_
|
|||
break;
|
||||
}
|
||||
}
|
||||
set_irq_chip_and_handler(GIU_IRQ(pin),
|
||||
&giuint_high_irq_chip,
|
||||
handle_edge_irq);
|
||||
} else {
|
||||
giu_clear(GIUINTTYPH, mask);
|
||||
giu_clear(GIUINTHTSELH, mask);
|
||||
set_irq_chip_and_handler(GIU_IRQ(pin),
|
||||
&giuint_high_irq_chip,
|
||||
handle_level_irq);
|
||||
}
|
||||
giu_write(GIUINTSTATH, mask);
|
||||
}
|
||||
|
@@ -617,10 +597,11 @@ static const struct file_operations gpio_fops = {
static int __devinit giu_probe(struct platform_device *dev)
{
	unsigned long start, size, flags = 0;
	unsigned int nr_pins = 0;
	unsigned int nr_pins = 0, trigger, i, pin;
	struct resource *res1, *res2 = NULL;
	void *base;
	int retval, i;
	struct irq_chip *chip;
	int retval;

	switch (current_cpu_data.cputype) {
	case CPU_VR4111:
@@ -688,11 +669,20 @@ static int __devinit giu_probe(struct platform_device *dev)
	giu_write(GIUINTENL, 0);
	giu_write(GIUINTENH, 0);

	trigger = giu_read(GIUINTTYPH) << 16;
	trigger |= giu_read(GIUINTTYPL);
	for (i = GIU_IRQ_BASE; i <= GIU_IRQ_LAST; i++) {
		if (i < GIU_IRQ(GIUINT_HIGH_OFFSET))
			irq_desc[i].chip = &giuint_low_irq_type;
		pin = GPIO_PIN_OF_IRQ(i);
		if (pin < GIUINT_HIGH_OFFSET)
			chip = &giuint_low_irq_chip;
		else
			irq_desc[i].chip = &giuint_high_irq_type;
			chip = &giuint_high_irq_chip;

		if (trigger & (1 << pin))
			set_irq_chip_and_handler(i, chip, handle_edge_irq);
		else
			set_irq_chip_and_handler(i, chip, handle_level_irq);

	}

	return cascade_irq(GIUINT_IRQ, giu_get_irq);
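This file's conversion replaces the old hw_interrupt_type (startup/shutdown/enable/disable/ack/end) with a struct irq_chip that only supplies ack/mask/mask_ack/unmask and lets the generic flow handlers, handle_edge_irq() and handle_level_irq(), drive those callbacks. A minimal sketch of the same registration pattern for a hypothetical controller follows; the callback bodies, the "DEMO" chip, and the IRQ range are made up, only the framework calls are real.

#include <linux/irq.h>
#include <linux/interrupt.h>

/* Hypothetical controller callbacks; the register accesses are placeholders,
 * not the VR41xx GIU registers. */
static void demo_ack(unsigned int irq)      { /* clear the status bit for irq */ }
static void demo_mask(unsigned int irq)     { /* clear the enable bit for irq */ }
static void demo_mask_ack(unsigned int irq) { demo_mask(irq); demo_ack(irq); }
static void demo_unmask(unsigned int irq)   { /* set the enable bit for irq */ }

static struct irq_chip demo_chip = {
	.name		= "DEMO",
	.ack		= demo_ack,
	.mask		= demo_mask,
	.mask_ack	= demo_mask_ack,
	.unmask		= demo_unmask,
};

static void demo_init_irqs(unsigned int first_irq, unsigned int nr_irqs,
			   unsigned long edge_mask)
{
	unsigned int i;

	for (i = 0; i < nr_irqs; i++) {
		/* Edge-triggered sources get handle_edge_irq, the rest
		 * handle_level_irq, mirroring the giu_probe() loop above. */
		if (edge_mask & (1UL << i))
			set_irq_chip_and_handler(first_irq + i, &demo_chip,
						 handle_edge_irq);
		else
			set_irq_chip_and_handler(first_irq + i, &demo_chip,
						 handle_level_irq);
	}
}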
@@ -344,8 +344,11 @@ int ehca_destroy_cq(struct ib_cq *cq)
	unsigned long flags;

	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
	while (my_cq->nr_callbacks)
	while (my_cq->nr_callbacks) {
		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
		yield();
		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
	}

	idr_remove(&ehca_cq_idr, my_cq->token);
	spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
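The hunk above replaces a spin under a held lock with a loop that drops the lock, yields the CPU, and re-takes the lock before re-testing the condition, so the callback side can actually make progress. A hedged sketch of the same idea with generic names; the lock, the counter, and the function name are placeholders, not the ehca structures.

#include <linux/spinlock.h>
#include <linux/sched.h>

static DEFINE_SPINLOCK(obj_lock);

/* Wait until no callbacks reference the object, without spinning with the
 * lock held (which would prevent the callbacks from ever finishing). */
static void wait_for_callbacks(int *nr_callbacks)
{
	unsigned long flags;

	spin_lock_irqsave(&obj_lock, flags);
	while (*nr_callbacks) {
		spin_unlock_irqrestore(&obj_lock, flags);
		yield();		/* let the callback side run */
		spin_lock_irqsave(&obj_lock, flags);
	}
	/* lock still held here: safe to tear the object down */
	spin_unlock_irqrestore(&obj_lock, flags);
}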
@ -440,7 +440,8 @@ void ehca_tasklet_eq(unsigned long data)
|
|||
cq = idr_find(&ehca_cq_idr, token);
|
||||
|
||||
if (cq == NULL) {
|
||||
spin_unlock(&ehca_cq_idr_lock);
|
||||
spin_unlock_irqrestore(&ehca_cq_idr_lock,
|
||||
flags);
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
|
@ -1621,18 +1621,30 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
|
|||
switch (token) {
|
||||
case SRP_OPT_ID_EXT:
|
||||
p = match_strdup(args);
|
||||
if (!p) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
|
||||
kfree(p);
|
||||
break;
|
||||
|
||||
case SRP_OPT_IOC_GUID:
|
||||
p = match_strdup(args);
|
||||
if (!p) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
|
||||
kfree(p);
|
||||
break;
|
||||
|
||||
case SRP_OPT_DGID:
|
||||
p = match_strdup(args);
|
||||
if (!p) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
if (strlen(p) != 32) {
|
||||
printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
|
||||
kfree(p);
|
||||
|
@@ -1656,6 +1668,10 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

@@ -1693,6 +1709,10 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;
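Each option case in these srp_parse_options() hunks now checks match_strdup() for allocation failure before converting the value: duplicate the matched substring, bail out with -ENOMEM if that fails, convert, free. A small self-contained sketch of that pattern follows; parse_hex64_opt() and the output parameter are illustrative names, while the library calls (match_strdup, simple_strtoull, kfree) are the real ones used above.

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/parser.h>
#include <linux/slab.h>
#include <asm/byteorder.h>

/* Parse one 64-bit hex option value, as each case above now does:
 * duplicate the matched substring, check for allocation failure,
 * convert, free.  'args' comes from a prior match_token() call. */
static int parse_hex64_opt(substring_t *args, __be64 *out)
{
	char *p = match_strdup(args);	/* kmalloc'd copy of the value */

	if (!p)
		return -ENOMEM;		/* allocation failed: caller bails out */

	*out = cpu_to_be64(simple_strtoull(p, NULL, 16));
	kfree(p);			/* the copy was only needed for parsing */
	return 0;
}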
@ -272,7 +272,9 @@ static void kvm_free_physmem(struct kvm *kvm)
|
|||
|
||||
static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
vcpu_load(vcpu->kvm, vcpu_slot(vcpu));
|
||||
kvm_mmu_destroy(vcpu);
|
||||
vcpu_put(vcpu);
|
||||
kvm_arch_ops->vcpu_free(vcpu);
|
||||
}
|
||||
|
||||
|
|
|
@ -274,7 +274,7 @@ static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
|
|||
struct kvm_mmu_page *page;
|
||||
|
||||
if (is_writeble_pte(*shadow_ent))
|
||||
return 0;
|
||||
return !user || (*shadow_ent & PT_USER_MASK);
|
||||
|
||||
writable_shadow = *shadow_ent & PT_SHADOW_WRITABLE_MASK;
|
||||
if (user) {
|
||||
|
|
|
@ -1407,7 +1407,8 @@ static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
|||
int r;
|
||||
|
||||
again:
|
||||
do_interrupt_requests(vcpu, kvm_run);
|
||||
if (!vcpu->mmio_read_completed)
|
||||
do_interrupt_requests(vcpu, kvm_run);
|
||||
|
||||
clgi();
|
||||
|
||||
|
|
|
@ -1717,7 +1717,8 @@ again:
|
|||
vmcs_writel(HOST_GS_BASE, segment_base(gs_sel));
|
||||
#endif
|
||||
|
||||
do_interrupt_requests(vcpu, kvm_run);
|
||||
if (!vcpu->mmio_read_completed)
|
||||
do_interrupt_requests(vcpu, kvm_run);
|
||||
|
||||
if (vcpu->guest_debug.enabled)
|
||||
kvm_guest_debug_pre(vcpu);
|
||||
|
@ -1824,7 +1825,7 @@ again:
|
|||
#endif
|
||||
"setbe %0 \n\t"
|
||||
"popf \n\t"
|
||||
: "=g" (fail)
|
||||
: "=q" (fail)
|
||||
: "r"(vcpu->launched), "d"((unsigned long)HOST_RSP),
|
||||
"c"(vcpu),
|
||||
[rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
|
||||
|
|
|
@ -61,6 +61,7 @@
|
|||
#define ModRM (1<<6)
|
||||
/* Destination is only written; never read. */
|
||||
#define Mov (1<<7)
|
||||
#define BitOp (1<<8)
|
||||
|
||||
static u8 opcode_table[256] = {
|
||||
/* 0x00 - 0x07 */
|
||||
|
@ -148,7 +149,7 @@ static u8 opcode_table[256] = {
|
|||
0, 0, ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM
|
||||
};
|
||||
|
||||
static u8 twobyte_table[256] = {
|
||||
static u16 twobyte_table[256] = {
|
||||
/* 0x00 - 0x0F */
|
||||
0, SrcMem | ModRM | DstReg, 0, 0, 0, 0, ImplicitOps, 0,
|
||||
0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
|
||||
|
@ -180,16 +181,16 @@ static u8 twobyte_table[256] = {
|
|||
/* 0x90 - 0x9F */
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
/* 0xA0 - 0xA7 */
|
||||
0, 0, 0, DstMem | SrcReg | ModRM, 0, 0, 0, 0,
|
||||
0, 0, 0, DstMem | SrcReg | ModRM | BitOp, 0, 0, 0, 0,
|
||||
/* 0xA8 - 0xAF */
|
||||
0, 0, 0, DstMem | SrcReg | ModRM, 0, 0, 0, 0,
|
||||
0, 0, 0, DstMem | SrcReg | ModRM | BitOp, 0, 0, 0, 0,
|
||||
/* 0xB0 - 0xB7 */
|
||||
ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 0,
|
||||
DstMem | SrcReg | ModRM,
|
||||
DstMem | SrcReg | ModRM | BitOp,
|
||||
0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
|
||||
DstReg | SrcMem16 | ModRM | Mov,
|
||||
/* 0xB8 - 0xBF */
|
||||
0, 0, DstMem | SrcImmByte | ModRM, DstMem | SrcReg | ModRM,
|
||||
0, 0, DstMem | SrcImmByte | ModRM, DstMem | SrcReg | ModRM | BitOp,
|
||||
0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
|
||||
DstReg | SrcMem16 | ModRM | Mov,
|
||||
/* 0xC0 - 0xCF */
|
||||
|
@ -469,7 +470,8 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt,
|
|||
int
|
||||
x86_emulate_memop(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
|
||||
{
|
||||
u8 b, d, sib, twobyte = 0, rex_prefix = 0;
|
||||
unsigned d;
|
||||
u8 b, sib, twobyte = 0, rex_prefix = 0;
|
||||
u8 modrm, modrm_mod = 0, modrm_reg = 0, modrm_rm = 0;
|
||||
unsigned long *override_base = NULL;
|
||||
unsigned int op_bytes, ad_bytes, lock_prefix = 0, rep_prefix = 0, i;
|
||||
|
@ -726,46 +728,6 @@ done_prefixes:
|
|||
;
|
||||
}
|
||||
|
||||
/* Decode and fetch the destination operand: register or memory. */
|
||||
switch (d & DstMask) {
|
||||
case ImplicitOps:
|
||||
/* Special instructions do their own operand decoding. */
|
||||
goto special_insn;
|
||||
case DstReg:
|
||||
dst.type = OP_REG;
|
||||
if ((d & ByteOp)
|
||||
&& !(twobyte_table && (b == 0xb6 || b == 0xb7))) {
|
||||
dst.ptr = decode_register(modrm_reg, _regs,
|
||||
(rex_prefix == 0));
|
||||
dst.val = *(u8 *) dst.ptr;
|
||||
dst.bytes = 1;
|
||||
} else {
|
||||
dst.ptr = decode_register(modrm_reg, _regs, 0);
|
||||
switch ((dst.bytes = op_bytes)) {
|
||||
case 2:
|
||||
dst.val = *(u16 *)dst.ptr;
|
||||
break;
|
||||
case 4:
|
||||
dst.val = *(u32 *)dst.ptr;
|
||||
break;
|
||||
case 8:
|
||||
dst.val = *(u64 *)dst.ptr;
|
||||
break;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case DstMem:
|
||||
dst.type = OP_MEM;
|
||||
dst.ptr = (unsigned long *)cr2;
|
||||
dst.bytes = (d & ByteOp) ? 1 : op_bytes;
|
||||
if (!(d & Mov) && /* optimisation - avoid slow emulated read */
|
||||
((rc = ops->read_emulated((unsigned long)dst.ptr,
|
||||
&dst.val, dst.bytes, ctxt)) != 0))
|
||||
goto done;
|
||||
break;
|
||||
}
|
||||
dst.orig_val = dst.val;
|
||||
|
||||
/*
|
||||
* Decode and fetch the source operand: register, memory
|
||||
* or immediate.
|
||||
|
@ -838,6 +800,50 @@ done_prefixes:
|
|||
break;
|
||||
}
|
||||
|
||||
/* Decode and fetch the destination operand: register or memory. */
|
||||
switch (d & DstMask) {
|
||||
case ImplicitOps:
|
||||
/* Special instructions do their own operand decoding. */
|
||||
goto special_insn;
|
||||
case DstReg:
|
||||
dst.type = OP_REG;
|
||||
if ((d & ByteOp)
|
||||
&& !(twobyte_table && (b == 0xb6 || b == 0xb7))) {
|
||||
dst.ptr = decode_register(modrm_reg, _regs,
|
||||
(rex_prefix == 0));
|
||||
dst.val = *(u8 *) dst.ptr;
|
||||
dst.bytes = 1;
|
||||
} else {
|
||||
dst.ptr = decode_register(modrm_reg, _regs, 0);
|
||||
switch ((dst.bytes = op_bytes)) {
|
||||
case 2:
|
||||
dst.val = *(u16 *)dst.ptr;
|
||||
break;
|
||||
case 4:
|
||||
dst.val = *(u32 *)dst.ptr;
|
||||
break;
|
||||
case 8:
|
||||
dst.val = *(u64 *)dst.ptr;
|
||||
break;
|
||||
}
|
||||
}
|
||||
break;
|
||||
	case DstMem:
		dst.type = OP_MEM;
		dst.ptr = (unsigned long *)cr2;
		dst.bytes = (d & ByteOp) ? 1 : op_bytes;
		if (d & BitOp) {
			dst.ptr += src.val / BITS_PER_LONG;
			dst.bytes = sizeof(long);
		}
		if (!(d & Mov) && /* optimisation - avoid slow emulated read */
		    ((rc = ops->read_emulated((unsigned long)dst.ptr,
					      &dst.val, dst.bytes, ctxt)) != 0))
			goto done;
		break;
	}
	dst.orig_val = dst.val;

	if (twobyte)
		goto twobyte_insn;
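Moving the destination decode after the source decode lets the BitOp case above use src.val, the bit offset, to pick the memory word actually touched: the pointer is advanced by src.val / BITS_PER_LONG longs and the access width becomes sizeof(long). A small worked sketch of that address arithmetic, written as plain user-space C rather than the emulator's types; bit_op_target() and the example bit offset 70 are purely illustrative.

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

/* For a bit-test style instruction, the effective address is the base
 * operand plus the word that contains bit 'bit_offset', and the access is
 * one long wide, which is the adjustment applied to dst.ptr and dst.bytes
 * when the BitOp flag is set. */
static unsigned long *bit_op_target(unsigned long *base, unsigned long bit_offset,
				    unsigned int *bit_in_word)
{
	*bit_in_word = bit_offset % BITS_PER_LONG;
	return base + bit_offset / BITS_PER_LONG;
}

int main(void)
{
	unsigned long mem[4] = { 0 };
	unsigned int bit;
	unsigned long *word = bit_op_target(mem, 70, &bit);

	*word |= 1UL << bit;	/* e.g. a BTS with bit offset 70 */
	printf("word index %td, bit %u\n", word - mem, bit);
	return 0;
}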
@ -700,6 +700,7 @@ videobuf_qbuf(struct videobuf_queue *q,
|
|||
goto done;
|
||||
}
|
||||
if (buf->state == STATE_QUEUED ||
|
||||
buf->state == STATE_PREPARED ||
|
||||
buf->state == STATE_ACTIVE) {
|
||||
dprintk(1,"qbuf: buffer is already queued or active.\n");
|
||||
goto done;
|
||||
|
|
|
@ -164,9 +164,15 @@ config MTD_CHAR
|
|||
memory chips, and also use ioctl() to obtain information about
|
||||
the device, or to erase parts of it.
|
||||
|
||||
config MTD_BLKDEVS
|
||||
tristate "Common interface to block layer for MTD 'translation layers'"
|
||||
depends on MTD && BLOCK
|
||||
default n
|
||||
|
||||
config MTD_BLOCK
|
||||
tristate "Caching block device access to MTD devices"
|
||||
depends on MTD && BLOCK
|
||||
select MTD_BLKDEVS
|
||||
---help---
|
||||
Although most flash chips have an erase size too large to be useful
|
||||
as block devices, it is possible to use MTD devices which are based
|
||||
|
@ -189,6 +195,7 @@ config MTD_BLOCK
|
|||
config MTD_BLOCK_RO
|
||||
tristate "Readonly block device access to MTD devices"
|
||||
depends on MTD_BLOCK!=y && MTD && BLOCK
|
||||
select MTD_BLKDEVS
|
||||
help
|
||||
This allows you to mount read-only file systems (such as cramfs)
|
||||
from an MTD device, without the overhead (and danger) of the caching
|
||||
|
@ -200,6 +207,7 @@ config MTD_BLOCK_RO
|
|||
config FTL
|
||||
tristate "FTL (Flash Translation Layer) support"
|
||||
depends on MTD && BLOCK
|
||||
select MTD_BLKDEVS
|
||||
---help---
|
||||
This provides support for the original Flash Translation Layer which
|
||||
is part of the PCMCIA specification. It uses a kind of pseudo-
|
||||
|
@ -216,6 +224,7 @@ config FTL
|
|||
config NFTL
|
||||
tristate "NFTL (NAND Flash Translation Layer) support"
|
||||
depends on MTD && BLOCK
|
||||
select MTD_BLKDEVS
|
||||
---help---
|
||||
This provides support for the NAND Flash Translation Layer which is
|
||||
used on M-Systems' DiskOnChip devices. It uses a kind of pseudo-
|
||||
|
@ -239,6 +248,7 @@ config NFTL_RW
|
|||
config INFTL
|
||||
tristate "INFTL (Inverse NAND Flash Translation Layer) support"
|
||||
depends on MTD && BLOCK
|
||||
select MTD_BLKDEVS
|
||||
---help---
|
||||
This provides support for the Inverse NAND Flash Translation
|
||||
Layer which is used on M-Systems' newer DiskOnChip devices. It
|
||||
|
@ -256,6 +266,7 @@ config INFTL
|
|||
config RFD_FTL
|
||||
tristate "Resident Flash Disk (Flash Translation Layer) support"
|
||||
depends on MTD && BLOCK
|
||||
select MTD_BLKDEVS
|
||||
---help---
|
||||
This provides support for the flash translation layer known
|
||||
as the Resident Flash Disk (RFD), as used by the Embedded BIOS
|
||||
|
@ -265,8 +276,8 @@ config RFD_FTL
|
|||
|
||||
config SSFDC
|
||||
tristate "NAND SSFDC (SmartMedia) read only translation layer"
|
||||
depends on MTD
|
||||
default n
|
||||
depends on MTD && BLOCK
|
||||
select MTD_BLKDEVS
|
||||
help
|
||||
This enables read only access to SmartMedia formatted NAND
|
||||
flash. You can mount it with FAT file system.
|
||||
|
|
|
@ -15,13 +15,14 @@ obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
|
|||
|
||||
# 'Users' - code which presents functionality to userspace.
|
||||
obj-$(CONFIG_MTD_CHAR) += mtdchar.o
|
||||
obj-$(CONFIG_MTD_BLOCK) += mtdblock.o mtd_blkdevs.o
|
||||
obj-$(CONFIG_MTD_BLOCK_RO) += mtdblock_ro.o mtd_blkdevs.o
|
||||
obj-$(CONFIG_FTL) += ftl.o mtd_blkdevs.o
|
||||
obj-$(CONFIG_NFTL) += nftl.o mtd_blkdevs.o
|
||||
obj-$(CONFIG_INFTL) += inftl.o mtd_blkdevs.o
|
||||
obj-$(CONFIG_RFD_FTL) += rfd_ftl.o mtd_blkdevs.o
|
||||
obj-$(CONFIG_SSFDC) += ssfdc.o mtd_blkdevs.o
|
||||
obj-$(CONFIG_MTD_BLKDEVS) += mtd_blkdevs.o
|
||||
obj-$(CONFIG_MTD_BLOCK) += mtdblock.o
|
||||
obj-$(CONFIG_MTD_BLOCK_RO) += mtdblock_ro.o
|
||||
obj-$(CONFIG_FTL) += ftl.o
|
||||
obj-$(CONFIG_NFTL) += nftl.o
|
||||
obj-$(CONFIG_INFTL) += inftl.o
|
||||
obj-$(CONFIG_RFD_FTL) += rfd_ftl.o
|
||||
obj-$(CONFIG_SSFDC) += ssfdc.o
|
||||
|
||||
nftl-objs := nftlcore.o nftlmount.o
|
||||
inftl-objs := inftlcore.o inftlmount.o
|
||||
|
|
|
@ -207,11 +207,10 @@ static int parse_afs_partitions(struct mtd_info *mtd,
|
|||
if (!sz)
|
||||
return ret;
|
||||
|
||||
parts = kmalloc(sz, GFP_KERNEL);
|
||||
parts = kzalloc(sz, GFP_KERNEL);
|
||||
if (!parts)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(parts, 0, sz);
|
||||
str = (char *)(parts + idx);
|
||||
|
||||
/*
|
||||
|
|
|
@@ -643,13 +643,12 @@ static struct mtd_info *amd_flash_probe(struct map_info *map)
	int reg_idx;
	int offset;

	mtd = (struct mtd_info*)kmalloc(sizeof(*mtd), GFP_KERNEL);
	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING
			"%s: kmalloc failed for info structure\n", map->name);
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;

	memset(&temp, 0, sizeof(temp));
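This hunk is one instance of a conversion repeated throughout the series: kmalloc() followed by memset(..., 0, ...) collapses into a single kzalloc() call, and the cast of kmalloc's void pointer return is dropped as unnecessary. A hedged sketch of the before/after shape with a generic structure; demo_info and demo_alloc are stand-ins, not kernel types.

#include <linux/slab.h>

struct demo_info {		/* stand-in for struct mtd_info and friends */
	void *priv;
	int flags;
};

static struct demo_info *demo_alloc(void)
{
	struct demo_info *info;

	/* Old style:
	 *	info = kmalloc(sizeof(*info), GFP_KERNEL);
	 *	if (!info)
	 *		return NULL;
	 *	memset(info, 0, sizeof(*info));
	 */
	info = kzalloc(sizeof(*info), GFP_KERNEL);	/* allocate + zero in one step */
	if (!info)
		return NULL;

	return info;
}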
@ -337,12 +337,11 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
|
|||
struct mtd_info *mtd;
|
||||
int i;
|
||||
|
||||
mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
|
||||
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
|
||||
if (!mtd) {
|
||||
printk(KERN_ERR "Failed to allocate memory for MTD device\n");
|
||||
return NULL;
|
||||
}
|
||||
memset(mtd, 0, sizeof(*mtd));
|
||||
mtd->priv = map;
|
||||
mtd->type = MTD_NORFLASH;
|
||||
|
||||
|
@ -2224,6 +2223,8 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
|
|||
case FL_CFI_QUERY:
|
||||
case FL_JEDEC_QUERY:
|
||||
if (chip->oldstate == FL_READY) {
|
||||
/* place the chip in a known state before suspend */
|
||||
map_write(map, CMD(0xFF), cfi->chips[i].start);
|
||||
chip->oldstate = chip->state;
|
||||
chip->state = FL_PM_SUSPENDED;
|
||||
/* No need to wake_up() on this state change -
|
||||
|
|
|
@ -48,6 +48,7 @@
|
|||
#define MANUFACTURER_ATMEL 0x001F
|
||||
#define MANUFACTURER_SST 0x00BF
|
||||
#define SST49LF004B 0x0060
|
||||
#define SST49LF040B 0x0050
|
||||
#define SST49LF008A 0x005a
|
||||
#define AT49BV6416 0x00d6
|
||||
|
||||
|
@ -233,6 +234,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
|
|||
};
|
||||
static struct cfi_fixup jedec_fixup_table[] = {
|
||||
{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
|
||||
{ MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
|
||||
{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
|
||||
{ 0, 0, NULL, NULL }
|
||||
};
|
||||
|
@ -255,12 +257,11 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
|
|||
struct mtd_info *mtd;
|
||||
int i;
|
||||
|
||||
mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
|
||||
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
|
||||
if (!mtd) {
|
||||
printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
|
||||
return NULL;
|
||||
}
|
||||
memset(mtd, 0, sizeof(*mtd));
|
||||
mtd->priv = map;
|
||||
mtd->type = MTD_NORFLASH;
|
||||
|
||||
|
@ -519,10 +520,12 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
|
|||
if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
|
||||
goto sleep;
|
||||
|
||||
if (!(mode == FL_READY || mode == FL_POINT
|
||||
if (!( mode == FL_READY
|
||||
|| mode == FL_POINT
|
||||
|| !cfip
|
||||
|| (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
|
||||
|| (mode == FL_WRITING && (cfip->EraseSuspend & 0x1))))
|
||||
|| (mode == FL_WRITING && (cfip->EraseSuspend & 0x1)
|
||||
)))
|
||||
goto sleep;
|
||||
|
||||
/* We could check to see if we're trying to access the sector
|
||||
|
|
|
@ -172,7 +172,7 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
|
|||
int i,j;
|
||||
unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
|
||||
|
||||
mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
|
||||
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
|
||||
//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
|
||||
|
||||
if (!mtd) {
|
||||
|
@ -181,7 +181,6 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
memset(mtd, 0, sizeof(*mtd));
|
||||
mtd->priv = map;
|
||||
mtd->type = MTD_NORFLASH;
|
||||
mtd->size = devsize * cfi->numchips;
|
||||
|
|
|
@ -40,7 +40,7 @@ struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp)
|
|||
if (mtd) {
|
||||
if (mtd->size > map->size) {
|
||||
printk(KERN_WARNING "Reducing visibility of %ldKiB chip to %ldKiB\n",
|
||||
(unsigned long)mtd->size >> 10,
|
||||
(unsigned long)mtd->size >> 10,
|
||||
(unsigned long)map->size >> 10);
|
||||
mtd->size = map->size;
|
||||
}
|
||||
|
@ -113,13 +113,12 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
|
|||
}
|
||||
|
||||
mapsize = (max_chips + BITS_PER_LONG-1) / BITS_PER_LONG;
|
||||
chip_map = kmalloc(mapsize, GFP_KERNEL);
|
||||
chip_map = kzalloc(mapsize, GFP_KERNEL);
|
||||
if (!chip_map) {
|
||||
printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name);
|
||||
kfree(cfi.cfiq);
|
||||
return NULL;
|
||||
}
|
||||
memset (chip_map, 0, mapsize);
|
||||
|
||||
set_bit(0, chip_map); /* Mark first chip valid */
|
||||
|
||||
|
|
|
@ -116,11 +116,10 @@ static struct mtd_info *jedec_probe(struct map_info *map)
|
|||
char Part[200];
|
||||
memset(&priv,0,sizeof(priv));
|
||||
|
||||
MTD = kmalloc(sizeof(struct mtd_info) + sizeof(struct jedec_private), GFP_KERNEL);
|
||||
MTD = kzalloc(sizeof(struct mtd_info) + sizeof(struct jedec_private), GFP_KERNEL);
|
||||
if (!MTD)
|
||||
return NULL;
|
||||
|
||||
memset(MTD, 0, sizeof(struct mtd_info) + sizeof(struct jedec_private));
|
||||
priv = (struct jedec_private *)&MTD[1];
|
||||
|
||||
my_bank_size = map->size;
|
||||
|
|
|
@ -154,6 +154,7 @@
|
|||
#define SST39SF010A 0x00B5
|
||||
#define SST39SF020A 0x00B6
|
||||
#define SST49LF004B 0x0060
|
||||
#define SST49LF040B 0x0050
|
||||
#define SST49LF008A 0x005a
|
||||
#define SST49LF030A 0x001C
|
||||
#define SST49LF040A 0x0051
|
||||
|
@ -1400,6 +1401,20 @@ static const struct amd_flash_info jedec_table[] = {
|
|||
ERASEINFO(0x01000,64),
|
||||
}
|
||||
}, {
|
||||
.mfr_id = MANUFACTURER_SST,
|
||||
.dev_id = SST49LF040B,
|
||||
.name = "SST 49LF040B",
|
||||
.uaddr = {
|
||||
[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
|
||||
},
|
||||
.DevSize = SIZE_512KiB,
|
||||
.CmdSet = P_ID_AMD_STD,
|
||||
.NumEraseRegions= 1,
|
||||
.regions = {
|
||||
ERASEINFO(0x01000,128),
|
||||
}
|
||||
}, {
|
||||
|
||||
.mfr_id = MANUFACTURER_SST,
|
||||
.dev_id = SST49LF004B,
|
||||
.name = "SST 49LF004B",
|
||||
|
@ -1874,7 +1889,7 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
|
|||
|
||||
|
||||
/*
|
||||
* There is a BIG problem properly ID'ing the JEDEC devic and guaranteeing
|
||||
* There is a BIG problem properly ID'ing the JEDEC device and guaranteeing
|
||||
* the mapped address, unlock addresses, and proper chip ID. This function
|
||||
* attempts to minimize errors. It is doubtfull that this probe will ever
|
||||
* be perfect - consequently there should be some module parameters that
|
||||
|
|
|
@ -47,13 +47,11 @@ static struct mtd_info *map_absent_probe(struct map_info *map)
|
|||
{
|
||||
struct mtd_info *mtd;
|
||||
|
||||
mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
|
||||
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
|
||||
if (!mtd) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memset(mtd, 0, sizeof(*mtd));
|
||||
|
||||
map->fldrv = &map_absent_chipdrv;
|
||||
mtd->priv = map;
|
||||
mtd->name = map->name;
|
||||
|
|
|
@ -55,12 +55,10 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
|
|||
#endif
|
||||
/* OK. It seems to be RAM. */
|
||||
|
||||
mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
|
||||
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
|
||||
if (!mtd)
|
||||
return NULL;
|
||||
|
||||
memset(mtd, 0, sizeof(*mtd));
|
||||
|
||||
map->fldrv = &mapram_chipdrv;
|
||||
mtd->priv = map;
|
||||
mtd->name = map->name;
|
||||
|
|
|
@ -31,12 +31,10 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
|
|||
{
|
||||
struct mtd_info *mtd;
|
||||
|
||||
mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
|
||||
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
|
||||
if (!mtd)
|
||||
return NULL;
|
||||
|
||||
memset(mtd, 0, sizeof(*mtd));
|
||||
|
||||
map->fldrv = &maprom_chipdrv;
|
||||
mtd->priv = map;
|
||||
mtd->name = map->name;
|
||||
|
|
|
@ -112,18 +112,16 @@ static struct mtd_info *sharp_probe(struct map_info *map)
|
|||
struct sharp_info *sharp = NULL;
|
||||
int width;
|
||||
|
||||
mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
|
||||
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
|
||||
if(!mtd)
|
||||
return NULL;
|
||||
|
||||
sharp = kmalloc(sizeof(*sharp), GFP_KERNEL);
|
||||
sharp = kzalloc(sizeof(*sharp), GFP_KERNEL);
|
||||
if(!sharp) {
|
||||
kfree(mtd);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memset(mtd, 0, sizeof(*mtd));
|
||||
|
||||
width = sharp_probe_map(map,mtd);
|
||||
if(!width){
|
||||
kfree(mtd);
|
||||
|
@ -143,7 +141,6 @@ static struct mtd_info *sharp_probe(struct map_info *map)
|
|||
mtd->writesize = 1;
|
||||
mtd->name = map->name;
|
||||
|
||||
memset(sharp, 0, sizeof(*sharp));
|
||||
sharp->chipshift = 23;
|
||||
sharp->numchips = 1;
|
||||
sharp->chips[0].start = 0;
|
||||
|
|
|
@ -163,13 +163,12 @@ static struct mtd_partition * newpart(char *s,
|
|||
*num_parts = this_part + 1;
|
||||
alloc_size = *num_parts * sizeof(struct mtd_partition) +
|
||||
extra_mem_size;
|
||||
parts = kmalloc(alloc_size, GFP_KERNEL);
|
||||
parts = kzalloc(alloc_size, GFP_KERNEL);
|
||||
if (!parts)
|
||||
{
|
||||
printk(KERN_ERR ERRP "out of memory\n");
|
||||
return NULL;
|
||||
}
|
||||
memset(parts, 0, alloc_size);
|
||||
extra_mem = (unsigned char *)(parts + *num_parts);
|
||||
}
|
||||
/* enter this partition (offset will be calculated later if it is zero at this point) */
|
||||
|
@ -346,7 +345,7 @@ static int parse_cmdline_partitions(struct mtd_info *master,
|
|||
*
|
||||
* This function needs to be visible for bootloaders.
|
||||
*/
|
||||
int mtdpart_setup(char *s)
|
||||
static int mtdpart_setup(char *s)
|
||||
{
|
||||
cmdline = s;
|
||||
return 1;
|
||||
|
|
|
@ -295,10 +295,9 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
|
|||
if (!devname)
|
||||
return NULL;
|
||||
|
||||
dev = kmalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
|
||||
dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
|
||||
if (!dev)
|
||||
return NULL;
|
||||
memset(dev, 0, sizeof(*dev));
|
||||
|
||||
/* Get a handle on the device */
|
||||
bdev = open_bdev_excl(devname, O_RDWR, NULL);
|
||||
|
|
|
@ -131,11 +131,10 @@ static int __init ms02nv_init_one(ulong addr)
|
|||
int ret = -ENODEV;
|
||||
|
||||
/* The module decodes 8MiB of address space. */
|
||||
mod_res = kmalloc(sizeof(*mod_res), GFP_KERNEL);
|
||||
mod_res = kzalloc(sizeof(*mod_res), GFP_KERNEL);
|
||||
if (!mod_res)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(mod_res, 0, sizeof(*mod_res));
|
||||
mod_res->name = ms02nv_name;
|
||||
mod_res->start = addr;
|
||||
mod_res->end = addr + MS02NV_SLOT_SIZE - 1;
|
||||
|
@ -153,24 +152,21 @@ static int __init ms02nv_init_one(ulong addr)
|
|||
}
|
||||
|
||||
ret = -ENOMEM;
|
||||
mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
|
||||
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
|
||||
if (!mtd)
|
||||
goto err_out_mod_res_rel;
|
||||
memset(mtd, 0, sizeof(*mtd));
|
||||
mp = kmalloc(sizeof(*mp), GFP_KERNEL);
|
||||
mp = kzalloc(sizeof(*mp), GFP_KERNEL);
|
||||
if (!mp)
|
||||
goto err_out_mtd;
|
||||
memset(mp, 0, sizeof(*mp));
|
||||
|
||||
mtd->priv = mp;
|
||||
mp->resource.module = mod_res;
|
||||
|
||||
/* Firmware's diagnostic NVRAM area. */
|
||||
diag_res = kmalloc(sizeof(*diag_res), GFP_KERNEL);
|
||||
diag_res = kzalloc(sizeof(*diag_res), GFP_KERNEL);
|
||||
if (!diag_res)
|
||||
goto err_out_mp;
|
||||
|
||||
memset(diag_res, 0, sizeof(*diag_res));
|
||||
diag_res->name = ms02nv_res_diag_ram;
|
||||
diag_res->start = addr;
|
||||
diag_res->end = addr + MS02NV_RAM - 1;
|
||||
|
@ -180,11 +176,10 @@ static int __init ms02nv_init_one(ulong addr)
|
|||
mp->resource.diag_ram = diag_res;
|
||||
|
||||
/* User-available general-purpose NVRAM area. */
|
||||
user_res = kmalloc(sizeof(*user_res), GFP_KERNEL);
|
||||
user_res = kzalloc(sizeof(*user_res), GFP_KERNEL);
|
||||
if (!user_res)
|
||||
goto err_out_diag_res;
|
||||
|
||||
memset(user_res, 0, sizeof(*user_res));
|
||||
user_res->name = ms02nv_res_user_ram;
|
||||
user_res->start = addr + MS02NV_RAM;
|
||||
user_res->end = addr + size - 1;
|
||||
|
@ -194,11 +189,10 @@ static int __init ms02nv_init_one(ulong addr)
|
|||
mp->resource.user_ram = user_res;
|
||||
|
||||
/* Control and status register. */
|
||||
csr_res = kmalloc(sizeof(*csr_res), GFP_KERNEL);
|
||||
csr_res = kzalloc(sizeof(*csr_res), GFP_KERNEL);
|
||||
if (!csr_res)
|
||||
goto err_out_user_res;
|
||||
|
||||
memset(csr_res, 0, sizeof(*csr_res));
|
||||
csr_res->name = ms02nv_res_csr;
|
||||
csr_res->start = addr + MS02NV_CSR;
|
||||
csr_res->end = addr + MS02NV_CSR + 3;
|
||||
|
|
|
@ -480,7 +480,7 @@ add_dataflash(struct spi_device *spi, char *name,
|
|||
device->writesize = pagesize;
|
||||
device->owner = THIS_MODULE;
|
||||
device->type = MTD_DATAFLASH;
|
||||
device->flags = MTD_CAP_NORFLASH;
|
||||
device->flags = MTD_WRITEABLE;
|
||||
device->erase = dataflash_erase;
|
||||
device->read = dataflash_read;
|
||||
device->write = dataflash_write;
|
||||
|
|
|
@ -126,12 +126,10 @@ static int register_device(char *name, unsigned long start, unsigned long len)
|
|||
struct phram_mtd_list *new;
|
||||
int ret = -ENOMEM;
|
||||
|
||||
new = kmalloc(sizeof(*new), GFP_KERNEL);
|
||||
new = kzalloc(sizeof(*new), GFP_KERNEL);
|
||||
if (!new)
|
||||
goto out0;
|
||||
|
||||
memset(new, 0, sizeof(*new));
|
||||
|
||||
ret = -EIO;
|
||||
new->mtd.priv = ioremap(start, len);
|
||||
if (!new->mtd.priv) {
|
||||
|
|
|
@ -168,19 +168,16 @@ static int register_device(char *name, unsigned long start, unsigned long length
|
|||
E("slram: Cannot allocate new MTD device.\n");
|
||||
return(-ENOMEM);
|
||||
}
|
||||
(*curmtd)->mtdinfo = kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
|
||||
(*curmtd)->mtdinfo = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
|
||||
(*curmtd)->next = NULL;
|
||||
|
||||
if ((*curmtd)->mtdinfo) {
|
||||
memset((char *)(*curmtd)->mtdinfo, 0, sizeof(struct mtd_info));
|
||||
(*curmtd)->mtdinfo->priv =
|
||||
kmalloc(sizeof(slram_priv_t), GFP_KERNEL);
|
||||
kzalloc(sizeof(slram_priv_t), GFP_KERNEL);
|
||||
|
||||
if (!(*curmtd)->mtdinfo->priv) {
|
||||
kfree((*curmtd)->mtdinfo);
|
||||
(*curmtd)->mtdinfo = NULL;
|
||||
} else {
|
||||
memset((*curmtd)->mtdinfo->priv,0,sizeof(slram_priv_t));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1033,7 +1033,7 @@ static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
|
|||
{
|
||||
partition_t *partition;
|
||||
|
||||
partition = kmalloc(sizeof(partition_t), GFP_KERNEL);
|
||||
partition = kzalloc(sizeof(partition_t), GFP_KERNEL);
|
||||
|
||||
if (!partition) {
|
||||
printk(KERN_WARNING "No memory to scan for FTL on %s\n",
|
||||
|
@ -1041,8 +1041,6 @@ static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
|
|||
return;
|
||||
}
|
||||
|
||||
memset(partition, 0, sizeof(partition_t));
|
||||
|
||||
partition->mbd.mtd = mtd;
|
||||
|
||||
if ((scan_header(partition) == 0) &&
|
||||
|
@ -1054,7 +1052,7 @@ static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
|
|||
le32_to_cpu(partition->header.FormattedSize) >> 10);
|
||||
#endif
|
||||
partition->mbd.size = le32_to_cpu(partition->header.FormattedSize) >> 9;
|
||||
partition->mbd.blksize = SECTOR_SIZE;
|
||||
|
||||
partition->mbd.tr = tr;
|
||||
partition->mbd.devnum = -1;
|
||||
if (!add_mtd_blktrans_dev((void *)partition))
|
||||
|
@ -1076,6 +1074,7 @@ struct mtd_blktrans_ops ftl_tr = {
|
|||
.name = "ftl",
|
||||
.major = FTL_MAJOR,
|
||||
.part_bits = PART_BITS,
|
||||
.blksize = SECTOR_SIZE,
|
||||
.readsect = ftl_readsect,
|
||||
.writesect = ftl_writesect,
|
||||
.getgeo = ftl_getgeo,
|
||||
|
|
|
@ -67,17 +67,16 @@ static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
|
|||
|
||||
DEBUG(MTD_DEBUG_LEVEL3, "INFTL: add_mtd for %s\n", mtd->name);
|
||||
|
||||
inftl = kmalloc(sizeof(*inftl), GFP_KERNEL);
|
||||
inftl = kzalloc(sizeof(*inftl), GFP_KERNEL);
|
||||
|
||||
if (!inftl) {
|
||||
printk(KERN_WARNING "INFTL: Out of memory for data structures\n");
|
||||
return;
|
||||
}
|
||||
memset(inftl, 0, sizeof(*inftl));
|
||||
|
||||
inftl->mbd.mtd = mtd;
|
||||
inftl->mbd.devnum = -1;
|
||||
inftl->mbd.blksize = 512;
|
||||
|
||||
inftl->mbd.tr = tr;
|
||||
|
||||
if (INFTL_mount(inftl) < 0) {
|
||||
|
@ -163,10 +162,9 @@ int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
|
|||
ops.ooblen = len;
|
||||
ops.oobbuf = buf;
|
||||
ops.datbuf = NULL;
|
||||
ops.len = len;
|
||||
|
||||
res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
|
||||
*retlen = ops.retlen;
|
||||
*retlen = ops.oobretlen;
|
||||
return res;
|
||||
}
|
||||
|
||||
|
@ -184,10 +182,9 @@ int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
|
|||
ops.ooblen = len;
|
||||
ops.oobbuf = buf;
|
||||
ops.datbuf = NULL;
|
||||
ops.len = len;
|
||||
|
||||
res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
|
||||
*retlen = ops.retlen;
|
||||
*retlen = ops.oobretlen;
|
||||
return res;
|
||||
}
|
||||
|
||||
|
@ -945,6 +942,7 @@ static struct mtd_blktrans_ops inftl_tr = {
|
|||
.name = "inftl",
|
||||
.major = INFTL_MAJOR,
|
||||
.part_bits = INFTL_PARTN_BITS,
|
||||
.blksize = 512,
|
||||
.getgeo = inftl_getgeo,
|
||||
.readsect = inftl_readblock,
|
||||
.writesect = inftl_writeblock,
|
||||
|
|
|
@ -60,6 +60,15 @@ config MTD_PHYSMAP_BANKWIDTH
|
|||
Ignore this option if you use run-time physmap configuration
|
||||
(i.e., run-time calling physmap_configure()).
|
||||
|
||||
config MTD_PHYSMAP_OF
|
||||
tristate "Flash device in physical memory map based on OF descirption"
|
||||
depends on PPC_OF && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM)
|
||||
help
|
||||
This provides a 'mapping' driver which allows the NOR Flash and
|
||||
ROM driver code to communicate with chips which are mapped
|
||||
physically into the CPU's memory. The mapping description here is
|
||||
taken from OF device tree.
|
||||
|
||||
config MTD_SUN_UFLASH
|
||||
tristate "Sun Microsystems userflash support"
|
||||
depends on SPARC && MTD_CFI
|
||||
|
@ -184,6 +193,24 @@ config MTD_ICHXROM
|
|||
|
||||
BE VERY CAREFUL.
|
||||
|
||||
config MTD_ESB2ROM
|
||||
tristate "BIOS flash chip on Intel ESB Controller Hub 2"
|
||||
depends on X86 && MTD_JEDECPROBE && PCI
|
||||
help
|
||||
Support for treating the BIOS flash chip on ESB2 motherboards
|
||||
as an MTD device - with this you can reprogram your BIOS.
|
||||
|
||||
BE VERY CAREFUL.
|
||||
|
||||
config MTD_CK804XROM
|
||||
tristate "BIOS flash chip on Nvidia CK804"
|
||||
depends on X86 && MTD_JEDECPROBE
|
||||
help
|
||||
Support for treating the BIOS flash chip on nvidia motherboards
|
||||
as an MTD device - with this you can reprogram your BIOS.
|
||||
|
||||
BE VERY CAREFUL.
|
||||
|
||||
config MTD_SCB2_FLASH
|
||||
tristate "BIOS flash chip on Intel SCB2 boards"
|
||||
depends on X86 && MTD_JEDECPROBE
|
||||
|
@ -355,50 +382,6 @@ config MTD_TQM834x
|
|||
TQ Components TQM834x boards. If you have one of these boards
|
||||
and would like to use the flash chips on it, say 'Y'.
|
||||
|
||||
config MTD_CSTM_MIPS_IXX
|
||||
tristate "Flash chip mapping on ITE QED-4N-S01B, Globespan IVR or custom board"
|
||||
depends on MIPS && MTD_CFI && MTD_JEDECPROBE && MTD_PARTITIONS
|
||||
help
|
||||
This provides a mapping driver for the Integrated Technology
|
||||
Express, Inc (ITE) QED-4N-S01B eval board and the Globespan IVR
|
||||
Reference Board. It provides the necessary addressing, length,
|
||||
buswidth, vpp code and addition setup of the flash device for
|
||||
these boards. In addition, this mapping driver can be used for
|
||||
other boards via setting of the CONFIG_MTD_CSTM_MIPS_IXX_START/
|
||||
LEN/BUSWIDTH parameters. This mapping will provide one mtd device
|
||||
using one partition. The start address can be offset from the
|
||||
beginning of flash and the len can be less than the total flash
|
||||
device size to allow a window into the flash. Both CFI and JEDEC
|
||||
probes are called.
|
||||
|
||||
config MTD_CSTM_MIPS_IXX_START
|
||||
hex "Physical start address of flash mapping"
|
||||
depends on MTD_CSTM_MIPS_IXX
|
||||
default "0x8000000"
|
||||
help
|
||||
This is the physical memory location that the MTD driver will
|
||||
use for the flash chips on your particular target board.
|
||||
Refer to the memory map which should hopefully be in the
|
||||
documentation for your board.
|
||||
|
||||
config MTD_CSTM_MIPS_IXX_LEN
|
||||
hex "Physical length of flash mapping"
|
||||
depends on MTD_CSTM_MIPS_IXX
|
||||
default "0x4000000"
|
||||
help
|
||||
This is the total length that the MTD driver will use for the
|
||||
flash chips on your particular board. Refer to the memory
|
||||
map which should hopefully be in the documentation for your
|
||||
board.
|
||||
|
||||
config MTD_CSTM_MIPS_IXX_BUSWIDTH
|
||||
int "Bus width in octets"
|
||||
depends on MTD_CSTM_MIPS_IXX
|
||||
default "2"
|
||||
help
|
||||
This is the total bus width of the mapping of the flash chips
|
||||
on your particular board.
|
||||
|
||||
config MTD_OCELOT
|
||||
tristate "Momenco Ocelot boot flash device"
|
||||
depends on MIPS && MOMENCO_OCELOT
|
||||
|
|
|
@ -12,12 +12,13 @@ obj-$(CONFIG_MTD_CDB89712) += cdb89712.o
|
|||
obj-$(CONFIG_MTD_ARM_INTEGRATOR)+= integrator-flash.o
|
||||
obj-$(CONFIG_MTD_BAST) += bast-flash.o
|
||||
obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o
|
||||
obj-$(CONFIG_MTD_CSTM_MIPS_IXX) += cstm_mips_ixx.o
|
||||
obj-$(CONFIG_MTD_DC21285) += dc21285.o
|
||||
obj-$(CONFIG_MTD_DILNETPC) += dilnetpc.o
|
||||
obj-$(CONFIG_MTD_L440GX) += l440gx.o
|
||||
obj-$(CONFIG_MTD_AMD76XROM) += amd76xrom.o
|
||||
obj-$(CONFIG_MTD_ESB2ROM) += esb2rom.o
|
||||
obj-$(CONFIG_MTD_ICHXROM) += ichxrom.o
|
||||
obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o
|
||||
obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o
|
||||
obj-$(CONFIG_MTD_LUBBOCK) += lubbock-flash.o
|
||||
obj-$(CONFIG_MTD_MAINSTONE) += mainstone-flash.o
|
||||
|
@ -25,6 +26,7 @@ obj-$(CONFIG_MTD_MBX860) += mbx860.o
|
|||
obj-$(CONFIG_MTD_CEIVA) += ceiva.o
|
||||
obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
|
||||
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
|
||||
obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
|
||||
obj-$(CONFIG_MTD_PNC2000) += pnc2000.o
|
||||
obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
|
||||
obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
|
||||
|
|
|
@@ -7,6 +7,7 @@

#include <linux/module.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/io.h>
@@ -44,6 +45,23 @@ struct amd76xrom_map_info {
	char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
};

/* The 2 bits controlling the window size are often set to allow reading
 * the BIOS, but too small to allow writing, since the lock registers are
 * 4MiB lower in the address space than the data.
 *
 * This is intended to prevent flashing the bios, perhaps accidentally.
 *
 * This parameter allows the normal driver to over-ride the BIOS settings.
 *
 * The bits are 6 and 7.  If both bits are set, it is a 5MiB window.
 * If only the 7 Bit is set, it is a 4MiB window.  Otherwise, a
 * 64KiB window.
 *
 */
static uint win_size_bits;
module_param(win_size_bits, uint, 0);
MODULE_PARM_DESC(win_size_bits, "ROM window size bits override for 0x43 byte, normally set by BIOS.");

static struct amd76xrom_window amd76xrom_window = {
	.maps = LIST_HEAD_INIT(amd76xrom_window.maps),
};
@@ -95,6 +113,16 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
	/* Remember the pci dev I find the window in - already have a ref */
	window->pdev = pdev;

	/* Enable the selected rom window.  This is often incorrectly
	 * set up by the BIOS, and the 4MiB offset for the lock registers
	 * requires the full 5MiB of window space.
	 *
	 * This 'write, then read' approach leaves the bits for
	 * other uses of the hardware info.
	 */
	pci_read_config_byte(pdev, 0x43, &byte);
	pci_write_config_byte(pdev, 0x43, byte | win_size_bits );

	/* Assume the rom window is properly setup, and find it's size */
	pci_read_config_byte(pdev, 0x43, &byte);
	if ((byte & ((1<<7)|(1<<6))) == ((1<<7)|(1<<6))) {
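The comments above explain why the driver widens the ROM decode window itself: the BIOS often programs bits 6-7 of config register 0x43 too small for writes, so the driver ORs in a module-supplied override and then re-reads the register, leaving the unrelated bits untouched. A sketch of that "write, then read" read-modify-write on a PCI config byte; the register offset and bit meanings follow the amd76xrom code, the helper name is illustrative, and error returns of the config accessors are ignored for brevity.

#include <linux/pci.h>

/* Widen a BIOS-programmed decode window by OR-ing override bits into a
 * config byte, then re-read it to learn what the hardware accepted.
 * 'reg' would be 0x43 for the AMD76x case; 'override' comes from the
 * win_size_bits module parameter. */
static u8 widen_rom_window(struct pci_dev *pdev, int reg, u8 override)
{
	u8 byte;

	pci_read_config_byte(pdev, reg, &byte);
	pci_write_config_byte(pdev, reg, byte | override);

	/* Read back rather than trusting our own value, so bits the device
	 * ignores (or that are used for something else) are reported as the
	 * hardware actually has them. */
	pci_read_config_byte(pdev, reg, &byte);
	return byte;
}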
@ -129,12 +157,6 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
|
|||
(unsigned long long)window->rsrc.end);
|
||||
}
|
||||
|
||||
#if 0
|
||||
|
||||
/* Enable the selected rom window */
|
||||
pci_read_config_byte(pdev, 0x43, &byte);
|
||||
pci_write_config_byte(pdev, 0x43, byte | rwindow->segen_bits);
|
||||
#endif
|
||||
|
||||
/* Enable writes through the rom window */
|
||||
pci_read_config_byte(pdev, 0x40, &byte);
|
||||
|
|
|
@ -131,7 +131,7 @@ static int bast_flash_probe(struct platform_device *pdev)
|
|||
|
||||
info->map.phys = res->start;
|
||||
info->map.size = res->end - res->start + 1;
|
||||
info->map.name = pdev->dev.bus_id;
|
||||
info->map.name = pdev->dev.bus_id;
|
||||
info->map.bankwidth = 2;
|
||||
|
||||
if (info->map.size > AREA_MAXSIZE)
|
||||
|
|
|
@ -122,10 +122,9 @@ static int __init clps_setup_mtd(struct clps_info *clps, int nr, struct mtd_info
|
|||
/*
|
||||
* Allocate the map_info structs in one go.
|
||||
*/
|
||||
maps = kmalloc(sizeof(struct map_info) * nr, GFP_KERNEL);
|
||||
maps = kzalloc(sizeof(struct map_info) * nr, GFP_KERNEL);
|
||||
if (!maps)
|
||||
return -ENOMEM;
|
||||
memset(maps, 0, sizeof(struct map_info) * nr);
|
||||
/*
|
||||
* Claim and then map the memory regions.
|
||||
*/
|
||||
|
|
|
@ -0,0 +1,356 @@
|
|||
/*
|
||||
* ck804xrom.c
|
||||
*
|
||||
* Normal mappings of chips in physical memory
|
||||
*
|
||||
* Dave Olsen <dolsen@lnxi.com>
|
||||
* Ryan Jackson <rjackson@lnxi.com>
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/version.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <asm/io.h>
|
||||
#include <linux/mtd/mtd.h>
|
||||
#include <linux/mtd/map.h>
|
||||
#include <linux/mtd/cfi.h>
|
||||
#include <linux/mtd/flashchip.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/pci_ids.h>
|
||||
#include <linux/list.h>
|
||||
|
||||
|
||||
#define MOD_NAME KBUILD_BASENAME
|
||||
|
||||
#define ADDRESS_NAME_LEN 18
|
||||
|
||||
#define ROM_PROBE_STEP_SIZE (64*1024)
|
||||
|
||||
struct ck804xrom_window {
|
||||
void __iomem *virt;
|
||||
unsigned long phys;
|
||||
unsigned long size;
|
||||
struct list_head maps;
|
||||
struct resource rsrc;
|
||||
struct pci_dev *pdev;
|
||||
};
|
||||
|
||||
struct ck804xrom_map_info {
|
||||
struct list_head list;
|
||||
struct map_info map;
|
||||
struct mtd_info *mtd;
|
||||
struct resource rsrc;
|
||||
char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
|
||||
};
|
||||
|
||||
|
||||
/* The 2 bits controlling the window size are often set to allow reading
|
||||
* the BIOS, but too small to allow writing, since the lock registers are
|
||||
* 4MiB lower in the address space than the data.
|
||||
*
|
||||
* This is intended to prevent flashing the bios, perhaps accidentally.
|
||||
*
|
||||
* This parameter allows the normal driver to override the BIOS settings.
|
||||
*
|
||||
* The bits are 6 and 7. If both bits are set, it is a 5MiB window.
|
||||
* If only the 7 Bit is set, it is a 4MiB window. Otherwise, a
|
||||
* 64KiB window.
|
||||
*
|
||||
*/
|
||||
static uint win_size_bits = 0;
|
||||
module_param(win_size_bits, uint, 0);
|
||||
MODULE_PARM_DESC(win_size_bits, "ROM window size bits override for 0x88 byte, normally set by BIOS.");
|
||||
|
||||
static struct ck804xrom_window ck804xrom_window = {
|
||||
.maps = LIST_HEAD_INIT(ck804xrom_window.maps),
|
||||
};
|
||||
|
||||
static void ck804xrom_cleanup(struct ck804xrom_window *window)
|
||||
{
|
||||
struct ck804xrom_map_info *map, *scratch;
|
||||
u8 byte;
|
||||
|
||||
if (window->pdev) {
|
||||
/* Disable writes through the rom window */
|
||||
pci_read_config_byte(window->pdev, 0x6d, &byte);
|
||||
pci_write_config_byte(window->pdev, 0x6d, byte & ~1);
|
||||
}
|
||||
|
||||
/* Free all of the mtd devices */
|
||||
list_for_each_entry_safe(map, scratch, &window->maps, list) {
|
||||
if (map->rsrc.parent)
|
||||
release_resource(&map->rsrc);
|
||||
|
||||
del_mtd_device(map->mtd);
|
||||
map_destroy(map->mtd);
|
||||
list_del(&map->list);
|
||||
kfree(map);
|
||||
}
|
||||
if (window->rsrc.parent)
|
||||
release_resource(&window->rsrc);
|
||||
|
||||
if (window->virt) {
|
||||
iounmap(window->virt);
|
||||
window->virt = NULL;
|
||||
window->phys = 0;
|
||||
window->size = 0;
|
||||
}
|
||||
pci_dev_put(window->pdev);
|
||||
}
|
||||
|
||||
|
||||
static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
|
||||
const struct pci_device_id *ent)
|
||||
{
|
||||
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
|
||||
u8 byte;
|
||||
struct ck804xrom_window *window = &ck804xrom_window;
|
||||
struct ck804xrom_map_info *map = NULL;
|
||||
unsigned long map_top;
|
||||
|
||||
/* Remember the pci dev I find the window in */
|
||||
window->pdev = pci_dev_get(pdev);
|
||||
|
||||
/* Enable the selected rom window. This is often incorrectly
|
||||
* set up by the BIOS, and the 4MiB offset for the lock registers
|
||||
* requires the full 5MiB of window space.
|
||||
*
|
||||
* This 'write, then read' approach leaves the bits for
|
||||
* other uses of the hardware info.
|
||||
*/
|
||||
pci_read_config_byte(pdev, 0x88, &byte);
|
||||
pci_write_config_byte(pdev, 0x88, byte | win_size_bits );
|
||||
|
||||
|
||||
/* Assume the rom window is properly setup, and find it's size */
|
||||
pci_read_config_byte(pdev, 0x88, &byte);
|
||||
|
||||
if ((byte & ((1<<7)|(1<<6))) == ((1<<7)|(1<<6)))
|
||||
window->phys = 0xffb00000; /* 5MiB */
|
||||
else if ((byte & (1<<7)) == (1<<7))
|
||||
window->phys = 0xffc00000; /* 4MiB */
|
||||
else
|
||||
window->phys = 0xffff0000; /* 64KiB */
|
||||
|
||||
window->size = 0xffffffffUL - window->phys + 1UL;
|
||||
|
||||
/*
|
||||
* Try to reserve the window mem region. If this fails then
|
||||
* it is likely due to a fragment of the window being
|
||||
* "reserved" by the BIOS. In the case that the
|
||||
* request_mem_region() fails then once the rom size is
|
||||
* discovered we will try to reserve the unreserved fragment.
|
||||
*/
|
||||
window->rsrc.name = MOD_NAME;
|
||||
window->rsrc.start = window->phys;
|
||||
window->rsrc.end = window->phys + window->size - 1;
|
||||
window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
|
||||
if (request_resource(&iomem_resource, &window->rsrc)) {
|
||||
window->rsrc.parent = NULL;
|
||||
printk(KERN_ERR MOD_NAME
|
||||
" %s(): Unable to register resource"
|
||||
" 0x%.016llx-0x%.016llx - kernel bug?\n",
|
||||
__func__,
|
||||
(unsigned long long)window->rsrc.start,
|
||||
(unsigned long long)window->rsrc.end);
|
||||
}
|
||||
|
||||
|
||||
/* Enable writes through the rom window */
|
||||
pci_read_config_byte(pdev, 0x6d, &byte);
|
||||
pci_write_config_byte(pdev, 0x6d, byte | 1);
|
||||
|
||||
/* FIXME handle registers 0x80 - 0x8C the bios region locks */
|
||||
|
||||
/* For write accesses caches are useless */
|
||||
window->virt = ioremap_nocache(window->phys, window->size);
|
||||
if (!window->virt) {
|
||||
printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
|
||||
window->phys, window->size);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Get the first address to look for a rom chip at */
|
||||
map_top = window->phys;
|
||||
#if 1
|
||||
/* The probe sequence run over the firmware hub lock
|
||||
* registers sets them to 0x7 (no access).
|
||||
* Probe at most the last 4MiB of the address space.
|
||||
*/
|
||||
if (map_top < 0xffc00000)
|
||||
map_top = 0xffc00000;
|
||||
#endif
|
||||
/* Loop through and look for rom chips. Since we don't know the
|
||||
* starting address for each chip, probe every ROM_PROBE_STEP_SIZE
|
||||
* bytes from the starting address of the window.
|
||||
*/
|
||||
while((map_top - 1) < 0xffffffffUL) {
|
||||
struct cfi_private *cfi;
|
||||
unsigned long offset;
|
||||
int i;
|
||||
|
||||
if (!map)
|
||||
map = kmalloc(sizeof(*map), GFP_KERNEL);
|
||||
|
||||
if (!map) {
|
||||
printk(KERN_ERR MOD_NAME ": kmalloc failed");
|
||||
goto out;
|
||||
}
|
||||
memset(map, 0, sizeof(*map));
|
||||
INIT_LIST_HEAD(&map->list);
|
||||
map->map.name = map->map_name;
|
||||
map->map.phys = map_top;
|
||||
offset = map_top - window->phys;
|
||||
map->map.virt = (void __iomem *)
|
||||
(((unsigned long)(window->virt)) + offset);
|
||||
map->map.size = 0xffffffffUL - map_top + 1UL;
|
||||
/* Set the name of the map to the address I am trying */
|
||||
sprintf(map->map_name, "%s @%08lx",
|
||||
MOD_NAME, map->map.phys);
|
||||
|
||||
/* There is no generic VPP support */
|
||||
for(map->map.bankwidth = 32; map->map.bankwidth;
|
||||
map->map.bankwidth >>= 1)
|
||||
{
|
||||
char **probe_type;
|
||||
/* Skip bankwidths that are not supported */
|
||||
if (!map_bankwidth_supported(map->map.bankwidth))
|
||||
continue;
|
||||
|
||||
/* Setup the map methods */
|
||||
simple_map_init(&map->map);
|
||||
|
||||
/* Try all of the probe methods */
|
||||
probe_type = rom_probe_types;
|
||||
for(; *probe_type; probe_type++) {
|
||||
map->mtd = do_map_probe(*probe_type, &map->map);
|
||||
if (map->mtd)
|
||||
goto found;
|
||||
}
|
||||
}
|
||||
map_top += ROM_PROBE_STEP_SIZE;
|
||||
continue;
|
||||
found:
|
||||
/* Trim the size if we are larger than the map */
|
||||
if (map->mtd->size > map->map.size) {
|
||||
printk(KERN_WARNING MOD_NAME
|
||||
" rom(%u) larger than window(%lu). fixing...\n",
|
||||
map->mtd->size, map->map.size);
|
||||
map->mtd->size = map->map.size;
|
||||
}
|
||||
if (window->rsrc.parent) {
|
||||
/*
|
||||
* Registering the MTD device in iomem may not be possible
|
||||
* if there is a BIOS "reserved" and BUSY range. If this
|
||||
* fails then continue anyway.
|
||||
*/
|
||||
map->rsrc.name = map->map_name;
|
||||
map->rsrc.start = map->map.phys;
|
||||
map->rsrc.end = map->map.phys + map->mtd->size - 1;
|
||||
map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
|
||||
if (request_resource(&window->rsrc, &map->rsrc)) {
|
||||
printk(KERN_ERR MOD_NAME
|
||||
": cannot reserve MTD resource\n");
|
||||
map->rsrc.parent = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/* Make the whole region visible in the map */
|
||||
map->map.virt = window->virt;
|
||||
map->map.phys = window->phys;
|
||||
cfi = map->map.fldrv_priv;
|
||||
for(i = 0; i < cfi->numchips; i++)
|
||||
cfi->chips[i].start += offset;
|
||||
|
||||
/* Now that the mtd device is complete, claim and export it */
|
||||
map->mtd->owner = THIS_MODULE;
|
||||
if (add_mtd_device(map->mtd)) {
|
||||
map_destroy(map->mtd);
|
||||
map->mtd = NULL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
||||
/* Calculate the new value of map_top */
|
||||
map_top += map->mtd->size;
|
||||
|
||||
/* File away the map structure */
|
||||
list_add(&map->list, &window->maps);
|
||||
map = NULL;
|
||||
}
|
||||
|
||||
out:
|
||||
/* Free any left over map structures */
|
||||
if (map)
|
||||
kfree(map);
|
||||
|
||||
/* See if I have any map structures */
|
||||
if (list_empty(&window->maps)) {
|
||||
ck804xrom_cleanup(window);
|
||||
return -ENODEV;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static void __devexit ck804xrom_remove_one (struct pci_dev *pdev)
|
||||
{
|
||||
struct ck804xrom_window *window = &ck804xrom_window;
|
||||
|
||||
ck804xrom_cleanup(window);
|
||||
}
|
||||
|
||||
static struct pci_device_id ck804xrom_pci_tbl[] = {
|
||||
{ PCI_VENDOR_ID_NVIDIA, 0x0051,
|
||||
PCI_ANY_ID, PCI_ANY_ID, }, /* nvidia ck804 */
|
||||
{ 0, }
|
||||
};
|
||||
|
||||
MODULE_DEVICE_TABLE(pci, ck804xrom_pci_tbl);
|
||||
|
||||
#if 0
|
||||
static struct pci_driver ck804xrom_driver = {
|
||||
.name = MOD_NAME,
|
||||
.id_table = ck804xrom_pci_tbl,
|
||||
.probe = ck804xrom_init_one,
|
||||
.remove = ck804xrom_remove_one,
|
||||
};
|
||||
#endif
|
||||
|
||||
static int __init init_ck804xrom(void)
|
||||
{
|
||||
struct pci_dev *pdev;
|
||||
struct pci_device_id *id;
|
||||
int retVal;
|
||||
pdev = NULL;
|
||||
|
||||
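/* The pci_driver above is compiled out (#if 0), so walk the ID table by
 * hand, take the first matching southbridge and probe it directly. */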
for(id = ck804xrom_pci_tbl; id->vendor; id++) {
|
||||
pdev = pci_find_device(id->vendor, id->device, NULL);
|
||||
if (pdev)
|
||||
break;
|
||||
}
|
||||
if (pdev) {
|
||||
retVal = ck804xrom_init_one(pdev, &ck804xrom_pci_tbl[0]);
|
||||
pci_dev_put(pdev);
|
||||
return retVal;
|
||||
}
|
||||
return -ENXIO;
|
||||
#if 0
|
||||
return pci_module_init(&ck804xrom_driver);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void __exit cleanup_ck804xrom(void)
|
||||
{
|
||||
ck804xrom_remove_one(ck804xrom_window.pdev);
|
||||
}
|
||||
|
||||
module_init(init_ck804xrom);
|
||||
module_exit(cleanup_ck804xrom);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>, Dave Olsen <dolsen@lnxi.com>");
|
||||
MODULE_DESCRIPTION("MTD map driver for BIOS chips on the Nvidia ck804 southbridge");
|
||||
|
|
@ -1,283 +0,0 @@
|
|||
/*
|
||||
* $Id: cstm_mips_ixx.c,v 1.14 2005/11/07 11:14:26 gleixner Exp $
|
||||
*
|
||||
* Mapping of a custom board with both AMD CFI and JEDEC flash in partitions.
|
||||
* Config with both CFI and JEDEC device support.
|
||||
*
|
||||
* Basically physmap.c with the addition of partitions and
|
||||
* an array of mapping info to accommodate more than one flash type per board.
|
||||
*
|
||||
* Copyright 2000 MontaVista Software Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
|
||||
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
|
||||
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
|
||||
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along
|
||||
* with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
* 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <asm/io.h>
|
||||
#include <linux/mtd/mtd.h>
|
||||
#include <linux/mtd/map.h>
|
||||
#include <linux/mtd/partitions.h>
|
||||
#include <linux/delay.h>
|
||||
|
||||
#if defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
|
||||
#define CC_GCR 0xB4013818
|
||||
#define CC_GPBCR 0xB401380A
|
||||
#define CC_GPBDR 0xB4013808
|
||||
#define CC_M68K_DEVICE 1
|
||||
#define CC_M68K_FUNCTION 6
|
||||
#define CC_CONFADDR 0xB8004000
|
||||
#define CC_CONFDATA 0xB8004004
|
||||
#define CC_FC_FCR 0xB8002004
|
||||
#define CC_FC_DCR 0xB8002008
|
||||
#define CC_GPACR 0xB4013802
|
||||
#define CC_GPAICR 0xB4013804
|
||||
#endif /* defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR) */
|
||||
|
||||
#if defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
|
||||
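/* VPP is switched via GPIO port B pin 3; vpp_count lets nested
 * enable/disable requests from the chip drivers balance out. */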
void cstm_mips_ixx_set_vpp(struct map_info *map,int vpp)
|
||||
{
|
||||
static DEFINE_SPINLOCK(vpp_lock);
|
||||
static int vpp_count = 0;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&vpp_lock, flags);
|
||||
|
||||
if (vpp) {
|
||||
if (!vpp_count++) {
|
||||
__u16 data;
|
||||
__u8 data1;
|
||||
static u8 first = 1;
|
||||
|
||||
// Set GPIO port B pin3 to high
|
||||
data = *(__u16 *)(CC_GPBCR);
|
||||
data = (data & 0xff0f) | 0x0040;
|
||||
*(__u16 *)CC_GPBCR = data;
|
||||
*(__u8 *)CC_GPBDR = (*(__u8*)CC_GPBDR) | 0x08;
|
||||
if (first) {
|
||||
first = 0;
|
||||
/* need to have this delay for first
|
||||
enabling vpp after powerup */
|
||||
udelay(40);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (!--vpp_count) {
|
||||
__u16 data;
|
||||
|
||||
// Set GPIO port B pin3 to low
|
||||
data = *(__u16 *)(CC_GPBCR);
|
||||
data = (data & 0xff3f) | 0x0040;
|
||||
*(__u16 *)CC_GPBCR = data;
|
||||
*(__u8 *)CC_GPBDR = (*(__u8*)CC_GPBDR) & 0xf7;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&vpp_lock, flags);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* board and partition description */
|
||||
|
||||
#define MAX_PHYSMAP_PARTITIONS 8
|
||||
struct cstm_mips_ixx_info {
|
||||
char *name;
|
||||
unsigned long window_addr;
|
||||
unsigned long window_size;
|
||||
int bankwidth;
|
||||
int num_partitions;
|
||||
};
|
||||
|
||||
#if defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
|
||||
#define PHYSMAP_NUMBER 1 // number of board desc structs needed, one per contiguous flash type
|
||||
const struct cstm_mips_ixx_info cstm_mips_ixx_board_desc[PHYSMAP_NUMBER] =
|
||||
{
|
||||
{ // 28F128J3A in 2x16 configuration
|
||||
"big flash", // name
|
||||
0x08000000, // window_addr
|
||||
0x02000000, // window_size
|
||||
4, // bankwidth
|
||||
1, // num_partitions
|
||||
}
|
||||
|
||||
};
|
||||
static struct mtd_partition cstm_mips_ixx_partitions[PHYSMAP_NUMBER][MAX_PHYSMAP_PARTITIONS] = {
|
||||
{ // 28F128J3A in 2x16 configuration
|
||||
{
|
||||
.name = "main partition ",
|
||||
.size = 0x02000000, // 128 x 2 x 128k byte sectors
|
||||
.offset = 0,
|
||||
},
|
||||
},
|
||||
};
|
||||
#else /* defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR) */
|
||||
#define PHYSMAP_NUMBER 1 // number of board desc structs needed, one per contiguous flash type
|
||||
const struct cstm_mips_ixx_info cstm_mips_ixx_board_desc[PHYSMAP_NUMBER] =
|
||||
{
|
||||
{
|
||||
"MTD flash", // name
|
||||
CONFIG_MTD_CSTM_MIPS_IXX_START, // window_addr
|
||||
CONFIG_MTD_CSTM_MIPS_IXX_LEN, // window_size
|
||||
CONFIG_MTD_CSTM_MIPS_IXX_BUSWIDTH, // bankwidth
|
||||
1, // num_partitions
|
||||
},
|
||||
|
||||
};
|
||||
static struct mtd_partition cstm_mips_ixx_partitions[PHYSMAP_NUMBER][MAX_PHYSMAP_PARTITIONS] = {
|
||||
{
|
||||
{
|
||||
.name = "main partition",
|
||||
.size = CONFIG_MTD_CSTM_MIPS_IXX_LEN,
|
||||
.offset = 0,
|
||||
},
|
||||
},
|
||||
};
|
||||
#endif /* defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR) */
|
||||
|
||||
struct map_info cstm_mips_ixx_map[PHYSMAP_NUMBER];
|
||||
|
||||
int __init init_cstm_mips_ixx(void)
|
||||
{
|
||||
int i;
|
||||
int jedec;
|
||||
struct mtd_info *mymtd;
|
||||
struct mtd_partition *parts;
|
||||
|
||||
/* Initialize mapping */
|
||||
for (i=0;i<PHYSMAP_NUMBER;i++) {
|
||||
printk(KERN_NOTICE "cstm_mips_ixx flash device: 0x%lx at 0x%lx\n",
|
||||
cstm_mips_ixx_board_desc[i].window_size, cstm_mips_ixx_board_desc[i].window_addr);
|
||||
|
||||
|
||||
cstm_mips_ixx_map[i].phys = cstm_mips_ixx_board_desc[i].window_addr;
|
||||
cstm_mips_ixx_map[i].virt = ioremap(cstm_mips_ixx_board_desc[i].window_addr, cstm_mips_ixx_board_desc[i].window_size);
|
||||
if (!cstm_mips_ixx_map[i].virt) {
|
||||
int j = 0;
|
||||
printk(KERN_WARNING "Failed to ioremap\n");
|
||||
for (j = 0; j < i; j++) {
|
||||
if (cstm_mips_ixx_map[j].virt) {
|
||||
iounmap(cstm_mips_ixx_map[j].virt);
|
||||
cstm_mips_ixx_map[j].virt = NULL;
|
||||
}
|
||||
}
|
||||
return -EIO;
|
||||
}
|
||||
cstm_mips_ixx_map[i].name = cstm_mips_ixx_board_desc[i].name;
|
||||
cstm_mips_ixx_map[i].size = cstm_mips_ixx_board_desc[i].window_size;
|
||||
cstm_mips_ixx_map[i].bankwidth = cstm_mips_ixx_board_desc[i].bankwidth;
|
||||
#if defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
|
||||
cstm_mips_ixx_map[i].set_vpp = cstm_mips_ixx_set_vpp;
|
||||
#endif
|
||||
simple_map_init(&cstm_mips_ixx_map[i]);
|
||||
//printk(KERN_NOTICE "cstm_mips_ixx: ioremap is %x\n",(unsigned int)(cstm_mips_ixx_map[i].virt));
|
||||
}
|
||||
|
||||
#if defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
|
||||
setup_ITE_IVR_flash();
|
||||
#endif /* defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR) */
|
||||
|
||||
for (i=0;i<PHYSMAP_NUMBER;i++) {
|
||||
parts = &cstm_mips_ixx_partitions[i][0];
|
||||
jedec = 0;
|
||||
mymtd = (struct mtd_info *)do_map_probe("cfi_probe", &cstm_mips_ixx_map[i]);
|
||||
//printk(KERN_NOTICE "phymap %d cfi_probe: mymtd is %x\n",i,(unsigned int)mymtd);
|
||||
if (!mymtd) {
|
||||
jedec = 1;
|
||||
mymtd = (struct mtd_info *)do_map_probe("jedec", &cstm_mips_ixx_map[i]);
|
||||
printk(KERN_NOTICE "cstm_mips_ixx %d jedec: mymtd is %x\n",i,(unsigned int)mymtd);
|
||||
}
|
||||
if (mymtd) {
|
||||
mymtd->owner = THIS_MODULE;
|
||||
|
||||
cstm_mips_ixx_map[i].map_priv_2 = (unsigned long)mymtd;
|
||||
add_mtd_partitions(mymtd, parts, cstm_mips_ixx_board_desc[i].num_partitions);
|
||||
}
|
||||
else {
|
||||
for (i = 0; i < PHYSMAP_NUMBER; i++) {
|
||||
if (cstm_mips_ixx_map[i].virt) {
|
||||
iounmap(cstm_mips_ixx_map[i].virt);
|
||||
cstm_mips_ixx_map[i].virt = NULL;
|
||||
}
|
||||
}
|
||||
return -ENXIO;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __exit cleanup_cstm_mips_ixx(void)
|
||||
{
|
||||
int i;
|
||||
struct mtd_info *mymtd;
|
||||
|
||||
for (i=0;i<PHYSMAP_NUMBER;i++) {
|
||||
mymtd = (struct mtd_info *)cstm_mips_ixx_map[i].map_priv_2;
|
||||
if (mymtd) {
|
||||
del_mtd_partitions(mymtd);
|
||||
map_destroy(mymtd);
|
||||
}
|
||||
if (cstm_mips_ixx_map[i].virt) {
|
||||
iounmap((void *)cstm_mips_ixx_map[i].virt);
|
||||
cstm_mips_ixx_map[i].virt = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
#if defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
|
||||
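/* Write a 32-bit value into the companion chip's PCI configuration space
 * through the CC_CONFADDR/CC_CONFDATA index/data register pair. */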
void PCISetULongByOffset(__u32 DevNumber, __u32 FuncNumber, __u32 Offset, __u32 data)
|
||||
{
|
||||
__u32 offset;
|
||||
|
||||
offset = ( unsigned long )( 0x80000000 | ( DevNumber << 11 ) + ( FuncNumber << 8 ) + Offset) ;
|
||||
|
||||
*(__u32 *)CC_CONFADDR = offset;
|
||||
*(__u32 *)CC_CONFDATA = data;
|
||||
}
|
||||
void setup_ITE_IVR_flash()
|
||||
{
|
||||
__u32 size, base;
|
||||
|
||||
size = 0x0e000000; // 32MiB
|
||||
base = (0x08000000) >> 8 >>1; // Bug: we must shift one more bit
|
||||
|
||||
/* need to set ITE flash to 32 bits instead of default 8 */
|
||||
#ifdef CONFIG_MIPS_IVR
|
||||
*(__u32 *)CC_FC_FCR = 0x55;
|
||||
*(__u32 *)CC_GPACR = 0xfffc;
|
||||
#else
|
||||
*(__u32 *)CC_FC_FCR = 0x77;
|
||||
#endif
|
||||
/* turn bursting off */
|
||||
*(__u32 *)CC_FC_DCR = 0x0;
|
||||
|
||||
/* setup for one chip 4 byte PCI access */
|
||||
PCISetULongByOffset(CC_M68K_DEVICE, CC_M68K_FUNCTION, 0x60, size | base);
|
||||
PCISetULongByOffset(CC_M68K_DEVICE, CC_M68K_FUNCTION, 0x64, 0x02);
|
||||
}
|
||||
#endif /* defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR) */
|
||||
|
||||
module_init(init_cstm_mips_ixx);
|
||||
module_exit(cleanup_cstm_mips_ixx);
|
||||
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Alice Hennessy <ahennessy@mvista.com>");
|
||||
MODULE_DESCRIPTION("MTD map driver for ITE 8172G and Globespan IVR boards");
|
|
@ -0,0 +1,450 @@
|
|||
/*
|
||||
* esb2rom.c
|
||||
*
|
||||
* Normal mappings of flash chips in physical memory
|
||||
* through the Intel ESB2 Southbridge.
|
||||
*
|
||||
* This was derived from ichxrom.c in May 2006 by
|
||||
* Lew Glendenning <lglendenning@lnxi.com>
|
||||
*
|
||||
* Eric Biederman, of course, was a major help in this effort.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/version.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <asm/io.h>
|
||||
#include <linux/mtd/mtd.h>
|
||||
#include <linux/mtd/map.h>
|
||||
#include <linux/mtd/cfi.h>
|
||||
#include <linux/mtd/flashchip.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/pci_ids.h>
|
||||
#include <linux/list.h>
|
||||
|
||||
#define MOD_NAME KBUILD_BASENAME
|
||||
|
||||
#define ADDRESS_NAME_LEN 18
|
||||
|
||||
#define ROM_PROBE_STEP_SIZE (64*1024) /* 64KiB */
|
||||
|
||||
#define BIOS_CNTL 0xDC
|
||||
#define BIOS_LOCK_ENABLE 0x02
|
||||
#define BIOS_WRITE_ENABLE 0x01
|
||||
|
||||
/* This became a 16-bit register, and EN2 has disappeared */
|
||||
#define FWH_DEC_EN1 0xD8
|
||||
#define FWH_F8_EN 0x8000
|
||||
#define FWH_F0_EN 0x4000
|
||||
#define FWH_E8_EN 0x2000
|
||||
#define FWH_E0_EN 0x1000
|
||||
#define FWH_D8_EN 0x0800
|
||||
#define FWH_D0_EN 0x0400
|
||||
#define FWH_C8_EN 0x0200
|
||||
#define FWH_C0_EN 0x0100
|
||||
#define FWH_LEGACY_F_EN 0x0080
|
||||
#define FWH_LEGACY_E_EN 0x0040
|
||||
/* reserved 0x0020 and 0x0010 */
|
||||
#define FWH_70_EN 0x0008
|
||||
#define FWH_60_EN 0x0004
|
||||
#define FWH_50_EN 0x0002
|
||||
#define FWH_40_EN 0x0001
|
||||
|
||||
/* these are 32-bit values */
|
||||
#define FWH_SEL1 0xD0
|
||||
#define FWH_SEL2 0xD4
|
||||
|
||||
#define FWH_8MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
|
||||
FWH_D8_EN | FWH_D0_EN | FWH_C8_EN | FWH_C0_EN | \
|
||||
FWH_70_EN | FWH_60_EN | FWH_50_EN | FWH_40_EN)
|
||||
|
||||
#define FWH_7MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
|
||||
FWH_D8_EN | FWH_D0_EN | FWH_C8_EN | FWH_C0_EN | \
|
||||
FWH_70_EN | FWH_60_EN | FWH_50_EN)
|
||||
|
||||
#define FWH_6MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
|
||||
FWH_D8_EN | FWH_D0_EN | FWH_C8_EN | FWH_C0_EN | \
|
||||
FWH_70_EN | FWH_60_EN)
|
||||
|
||||
#define FWH_5MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
|
||||
FWH_D8_EN | FWH_D0_EN | FWH_C8_EN | FWH_C0_EN | \
|
||||
FWH_70_EN)
|
||||
|
||||
#define FWH_4MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
|
||||
FWH_D8_EN | FWH_D0_EN | FWH_C8_EN | FWH_C0_EN)
|
||||
|
||||
#define FWH_3_5MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
|
||||
FWH_D8_EN | FWH_D0_EN | FWH_C8_EN)
|
||||
|
||||
#define FWH_3MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
|
||||
FWH_D8_EN | FWH_D0_EN)
|
||||
|
||||
#define FWH_2_5MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
|
||||
FWH_D8_EN)
|
||||
|
||||
#define FWH_2MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN)
|
||||
|
||||
#define FWH_1_5MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN)
|
||||
|
||||
#define FWH_1MiB (FWH_F8_EN | FWH_F0_EN)
|
||||
|
||||
#define FWH_0_5MiB (FWH_F8_EN)
|
||||
|
||||
|
||||
struct esb2rom_window {
|
||||
void __iomem* virt;
|
||||
unsigned long phys;
|
||||
unsigned long size;
|
||||
struct list_head maps;
|
||||
struct resource rsrc;
|
||||
struct pci_dev *pdev;
|
||||
};
|
||||
|
||||
struct esb2rom_map_info {
|
||||
struct list_head list;
|
||||
struct map_info map;
|
||||
struct mtd_info *mtd;
|
||||
struct resource rsrc;
|
||||
char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
|
||||
};
|
||||
|
||||
static struct esb2rom_window esb2rom_window = {
|
||||
.maps = LIST_HEAD_INIT(esb2rom_window.maps),
|
||||
};
|
||||
|
||||
static void esb2rom_cleanup(struct esb2rom_window *window)
|
||||
{
|
||||
struct esb2rom_map_info *map, *scratch;
|
||||
u8 byte;
|
||||
|
||||
/* Disable writes through the rom window */
|
||||
pci_read_config_byte(window->pdev, BIOS_CNTL, &byte);
|
||||
pci_write_config_byte(window->pdev, BIOS_CNTL,
|
||||
byte & ~BIOS_WRITE_ENABLE);
|
||||
|
||||
/* Free all of the mtd devices */
|
||||
list_for_each_entry_safe(map, scratch, &window->maps, list) {
|
||||
if (map->rsrc.parent)
|
||||
release_resource(&map->rsrc);
|
||||
del_mtd_device(map->mtd);
|
||||
map_destroy(map->mtd);
|
||||
list_del(&map->list);
|
||||
kfree(map);
|
||||
}
|
||||
if (window->rsrc.parent)
|
||||
release_resource(&window->rsrc);
|
||||
if (window->virt) {
|
||||
iounmap(window->virt);
|
||||
window->virt = NULL;
|
||||
window->phys = 0;
|
||||
window->size = 0;
|
||||
}
|
||||
pci_dev_put(window->pdev);
|
||||
}
|
||||
|
||||
static int __devinit esb2rom_init_one(struct pci_dev *pdev,
|
||||
const struct pci_device_id *ent)
|
||||
{
|
||||
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
|
||||
struct esb2rom_window *window = &esb2rom_window;
|
||||
struct esb2rom_map_info *map = NULL;
|
||||
unsigned long map_top;
|
||||
u8 byte;
|
||||
u16 word;
|
||||
|
||||
/* For now I just handle the esb2 and I assume there
|
||||
* are not a lot of resources up at the top of the address
|
||||
* space. It is possible to handle other devices in the
|
||||
* top 16MiB but it is very painful. Also since
|
||||
* you can only really attach a FWH to an ICHX, there are
|
||||
* a number of simplifications you can make.
|
||||
*
|
||||
* Also you can page firmware hubs if an 8MiB window isn't enough
|
||||
* but we don't currently handle that case either.
|
||||
*/
|
||||
window->pdev = pci_dev_get(pdev);
|
||||
|
||||
/* RLG: experiment 2. Force the window registers to the widest values */
|
||||
|
||||
/*
|
||||
pci_read_config_word(pdev, FWH_DEC_EN1, &word);
|
||||
printk(KERN_DEBUG "Original FWH_DEC_EN1 : %x\n", word);
|
||||
pci_write_config_byte(pdev, FWH_DEC_EN1, 0xff);
|
||||
pci_read_config_byte(pdev, FWH_DEC_EN1, &byte);
|
||||
printk(KERN_DEBUG "New FWH_DEC_EN1 : %x\n", byte);
|
||||
|
||||
pci_read_config_byte(pdev, FWH_DEC_EN2, &byte);
|
||||
printk(KERN_DEBUG "Original FWH_DEC_EN2 : %x\n", byte);
|
||||
pci_write_config_byte(pdev, FWH_DEC_EN2, 0x0f);
|
||||
pci_read_config_byte(pdev, FWH_DEC_EN2, &byte);
|
||||
printk(KERN_DEBUG "New FWH_DEC_EN2 : %x\n", byte);
|
||||
*/
|
||||
|
||||
/* Find a region continuous to the end of the ROM window */
|
||||
window->phys = 0;
|
||||
pci_read_config_word(pdev, FWH_DEC_EN1, &word);
|
||||
printk(KERN_DEBUG "pci_read_config_byte : %x\n", word);
|
||||
|
||||
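/* Decode FWH_DEC_EN1: the more decode ranges the BIOS left enabled,
 * the lower the ROM window starts below the 4GiB boundary. */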
if ((word & FWH_8MiB) == FWH_8MiB)
|
||||
window->phys = 0xff400000;
|
||||
else if ((word & FWH_7MiB) == FWH_7MiB)
|
||||
window->phys = 0xff500000;
|
||||
else if ((word & FWH_6MiB) == FWH_6MiB)
|
||||
window->phys = 0xff600000;
|
||||
else if ((word & FWH_5MiB) == FWH_5MiB)
|
||||
window->phys = 0xFF700000;
|
||||
else if ((word & FWH_4MiB) == FWH_4MiB)
|
||||
window->phys = 0xffc00000;
|
||||
else if ((word & FWH_3_5MiB) == FWH_3_5MiB)
|
||||
window->phys = 0xffc80000;
|
||||
else if ((word & FWH_3MiB) == FWH_3MiB)
|
||||
window->phys = 0xffd00000;
|
||||
else if ((word & FWH_2_5MiB) == FWH_2_5MiB)
|
||||
window->phys = 0xffd80000;
|
||||
else if ((word & FWH_2MiB) == FWH_2MiB)
|
||||
window->phys = 0xffe00000;
|
||||
else if ((word & FWH_1_5MiB) == FWH_1_5MiB)
|
||||
window->phys = 0xffe80000;
|
||||
else if ((word & FWH_1MiB) == FWH_1MiB)
|
||||
window->phys = 0xfff00000;
|
||||
else if ((word & FWH_0_5MiB) == FWH_0_5MiB)
|
||||
window->phys = 0xfff80000;
|
||||
|
||||
/* reserved 0x0020 and 0x0010 */
|
||||
window->phys -= 0x400000UL;
|
||||
window->size = (0xffffffffUL - window->phys) + 1UL;
|
||||
|
||||
/* Enable writes through the rom window */
|
||||
pci_read_config_byte(pdev, BIOS_CNTL, &byte);
|
||||
if (!(byte & BIOS_WRITE_ENABLE) && (byte & (BIOS_LOCK_ENABLE))) {
|
||||
/* The BIOS will generate an error if I enable
|
||||
* this device, so don't even try.
|
||||
*/
|
||||
printk(KERN_ERR MOD_NAME ": firmware access control, I can't enable writes\n");
|
||||
goto out;
|
||||
}
|
||||
pci_write_config_byte(pdev, BIOS_CNTL, byte | BIOS_WRITE_ENABLE);
|
||||
|
||||
/*
|
||||
* Try to reserve the window mem region. If this fails then
|
||||
* it is likely due to the window being "reserved" by the BIOS.
|
||||
*/
|
||||
window->rsrc.name = MOD_NAME;
|
||||
window->rsrc.start = window->phys;
|
||||
window->rsrc.end = window->phys + window->size - 1;
|
||||
window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
|
||||
if (request_resource(&iomem_resource, &window->rsrc)) {
|
||||
window->rsrc.parent = NULL;
|
||||
printk(KERN_DEBUG MOD_NAME
|
||||
": %s(): Unable to register resource"
|
||||
" 0x%.08llx-0x%.08llx - kernel bug?\n",
|
||||
__func__,
|
||||
(unsigned long long)window->rsrc.start,
|
||||
(unsigned long long)window->rsrc.end);
|
||||
}
|
||||
|
||||
/* Map the firmware hub into my address space. */
|
||||
window->virt = ioremap_nocache(window->phys, window->size);
|
||||
if (!window->virt) {
|
||||
printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
|
||||
window->phys, window->size);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Get the first address to look for a rom chip at */
|
||||
map_top = window->phys;
|
||||
if ((window->phys & 0x3fffff) != 0) {
|
||||
/* if not aligned on 4MiB, look 4MiB lower in address space */
|
||||
map_top = window->phys + 0x400000;
|
||||
}
|
||||
#if 1
|
||||
/* The probe sequence run over the firmware hub lock
|
||||
* registers sets them to 0x7 (no access).
|
||||
* (Insane hardware design, but most copied Intel's.)
|
||||
* ==> Probe at most the last 4M of the address space.
|
||||
*/
|
||||
if (map_top < 0xffc00000)
|
||||
map_top = 0xffc00000;
|
||||
#endif
|
||||
/* Loop through and look for rom chips */
|
||||
while ((map_top - 1) < 0xffffffffUL) {
|
||||
struct cfi_private *cfi;
|
||||
unsigned long offset;
|
||||
int i;
|
||||
|
||||
if (!map)
|
||||
map = kmalloc(sizeof(*map), GFP_KERNEL);
|
||||
if (!map) {
|
||||
printk(KERN_ERR MOD_NAME ": kmalloc failed");
|
||||
goto out;
|
||||
}
|
||||
memset(map, 0, sizeof(*map));
|
||||
INIT_LIST_HEAD(&map->list);
|
||||
map->map.name = map->map_name;
|
||||
map->map.phys = map_top;
|
||||
offset = map_top - window->phys;
|
||||
map->map.virt = (void __iomem *)
|
||||
(((unsigned long)(window->virt)) + offset);
|
||||
map->map.size = 0xffffffffUL - map_top + 1UL;
|
||||
/* Set the name of the map to the address I am trying */
|
||||
sprintf(map->map_name, "%s @%08lx",
|
||||
MOD_NAME, map->map.phys);
|
||||
|
||||
/* Firmware hubs only use vpp when being programmed
|
||||
* in a factory setting. So in-place programming
|
||||
* needs to use a different method.
|
||||
*/
|
||||
for(map->map.bankwidth = 32; map->map.bankwidth;
|
||||
map->map.bankwidth >>= 1) {
|
||||
char **probe_type;
|
||||
/* Skip bankwidths that are not supported */
|
||||
if (!map_bankwidth_supported(map->map.bankwidth))
|
||||
continue;
|
||||
|
||||
/* Setup the map methods */
|
||||
simple_map_init(&map->map);
|
||||
|
||||
/* Try all of the probe methods */
|
||||
probe_type = rom_probe_types;
|
||||
for(; *probe_type; probe_type++) {
|
||||
map->mtd = do_map_probe(*probe_type, &map->map);
|
||||
if (map->mtd)
|
||||
goto found;
|
||||
}
|
||||
}
|
||||
map_top += ROM_PROBE_STEP_SIZE;
|
||||
continue;
|
||||
found:
|
||||
/* Trim the size if we are larger than the map */
|
||||
if (map->mtd->size > map->map.size) {
|
||||
printk(KERN_WARNING MOD_NAME
|
||||
" rom(%u) larger than window(%lu). fixing...\n",
|
||||
map->mtd->size, map->map.size);
|
||||
map->mtd->size = map->map.size;
|
||||
}
|
||||
if (window->rsrc.parent) {
|
||||
/*
|
||||
* Registering the MTD device in iomem may not be possible
|
||||
* if there is a BIOS "reserved" and BUSY range. If this
|
||||
* fails then continue anyway.
|
||||
*/
|
||||
map->rsrc.name = map->map_name;
|
||||
map->rsrc.start = map->map.phys;
|
||||
map->rsrc.end = map->map.phys + map->mtd->size - 1;
|
||||
map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
|
||||
if (request_resource(&window->rsrc, &map->rsrc)) {
|
||||
printk(KERN_ERR MOD_NAME
|
||||
": cannot reserve MTD resource\n");
|
||||
map->rsrc.parent = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/* Make the whole region visible in the map */
|
||||
map->map.virt = window->virt;
|
||||
map->map.phys = window->phys;
|
||||
cfi = map->map.fldrv_priv;
|
||||
for(i = 0; i < cfi->numchips; i++)
|
||||
cfi->chips[i].start += offset;
|
||||
|
||||
/* Now that the mtd device is complete, claim and export it */
|
||||
map->mtd->owner = THIS_MODULE;
|
||||
if (add_mtd_device(map->mtd)) {
|
||||
map_destroy(map->mtd);
|
||||
map->mtd = NULL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Calculate the new value of map_top */
|
||||
map_top += map->mtd->size;
|
||||
|
||||
/* File away the map structure */
|
||||
list_add(&map->list, &window->maps);
|
||||
map = NULL;
|
||||
}
|
||||
|
||||
out:
|
||||
/* Free any left over map structures */
|
||||
kfree(map);
|
||||
|
||||
/* See if I have any map structures */
|
||||
if (list_empty(&window->maps)) {
|
||||
esb2rom_cleanup(window);
|
||||
return -ENODEV;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __devexit esb2rom_remove_one (struct pci_dev *pdev)
|
||||
{
|
||||
struct esb2rom_window *window = &esb2rom_window;
|
||||
esb2rom_cleanup(window);
|
||||
}
|
||||
|
||||
static struct pci_device_id esb2rom_pci_tbl[] __devinitdata = {
|
||||
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
|
||||
PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
|
||||
PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
|
||||
PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
|
||||
PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
|
||||
PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
|
||||
PCI_ANY_ID, PCI_ANY_ID, },
|
||||
{ 0, },
|
||||
};
|
||||
|
||||
#if 0
|
||||
MODULE_DEVICE_TABLE(pci, esb2rom_pci_tbl);
|
||||
|
||||
static struct pci_driver esb2rom_driver = {
|
||||
.name = MOD_NAME,
|
||||
.id_table = esb2rom_pci_tbl,
|
||||
.probe = esb2rom_init_one,
|
||||
.remove = esb2rom_remove_one,
|
||||
};
|
||||
#endif
|
||||
|
||||
static int __init init_esb2rom(void)
|
||||
{
|
||||
struct pci_dev *pdev;
|
||||
struct pci_device_id *id;
|
||||
int retVal;
|
||||
|
||||
pdev = NULL;
|
||||
for (id = esb2rom_pci_tbl; id->vendor; id++) {
|
||||
printk(KERN_DEBUG "device id = %x\n", id->device);
|
||||
pdev = pci_get_device(id->vendor, id->device, NULL);
|
||||
if (pdev) {
|
||||
printk(KERN_DEBUG "matched device = %x\n", id->device);
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (pdev) {
|
||||
printk(KERN_DEBUG "matched device id %x\n", id->device);
|
||||
retVal = esb2rom_init_one(pdev, &esb2rom_pci_tbl[0]);
|
||||
pci_dev_put(pdev);
|
||||
printk(KERN_DEBUG "retVal = %d\n", retVal);
|
||||
return retVal;
|
||||
}
|
||||
return -ENXIO;
|
||||
#if 0
|
||||
return pci_register_driver(&esb2rom_driver);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void __exit cleanup_esb2rom(void)
|
||||
{
|
||||
esb2rom_remove_one(esb2rom_window.pdev);
|
||||
}
|
||||
|
||||
module_init(init_esb2rom);
|
||||
module_exit(cleanup_esb2rom);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Lew Glendenning <lglendenning@lnxi.com>");
|
||||
MODULE_DESCRIPTION("MTD map driver for BIOS chips on the ESB2 southbridge");
|
|
@ -75,14 +75,12 @@ static int armflash_probe(struct platform_device *dev)
|
|||
int err;
|
||||
void __iomem *base;
|
||||
|
||||
info = kmalloc(sizeof(struct armflash_info), GFP_KERNEL);
|
||||
info = kzalloc(sizeof(struct armflash_info), GFP_KERNEL);
|
||||
if (!info) {
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
memset(info, 0, sizeof(struct armflash_info));
|
||||
|
||||
info->plat = plat;
|
||||
if (plat && plat->init) {
|
||||
err = plat->init();
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
#include <linux/mtd/partitions.h>
|
||||
#include <linux/mtd/cfi.h>
|
||||
#include <linux/reboot.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/kdev_t.h>
|
||||
#include <linux/root_dev.h>
|
||||
#include <asm/io.h>
|
||||
|
@ -178,7 +179,7 @@ int nettel_eraseconfig(void)
|
|||
|
||||
init_waitqueue_head(&wait_q);
|
||||
mtd = get_mtd_device(NULL, 2);
|
||||
if (mtd) {
|
||||
if (!IS_ERR(mtd)) {
|
||||
nettel_erase.mtd = mtd;
|
||||
nettel_erase.callback = nettel_erasecallback;
|
||||
nettel_erase.callback = NULL;
|
||||
|
@ -471,7 +472,7 @@ out_unmap2:
|
|||
iounmap(nettel_amd_map.virt);
|
||||
|
||||
return(rc);
|
||||
|
||||
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
|
|
|
@ -78,12 +78,10 @@ static int __devinit omapflash_probe(struct platform_device *pdev)
|
|||
struct resource *res = pdev->resource;
|
||||
unsigned long size = res->end - res->start + 1;
|
||||
|
||||
info = kmalloc(sizeof(struct omapflash_info), GFP_KERNEL);
|
||||
info = kzalloc(sizeof(struct omapflash_info), GFP_KERNEL);
|
||||
if (!info)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(info, 0, sizeof(struct omapflash_info));
|
||||
|
||||
if (!request_mem_region(res->start, size, "flash")) {
|
||||
err = -EBUSY;
|
||||
goto out_free_info;
|
||||
|
|
|
@ -735,11 +735,10 @@ static int pcmciamtd_probe(struct pcmcia_device *link)
|
|||
struct pcmciamtd_dev *dev;
|
||||
|
||||
/* Create new memory card device */
|
||||
dev = kmalloc(sizeof(*dev), GFP_KERNEL);
|
||||
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
|
||||
if (!dev) return -ENOMEM;
|
||||
DEBUG(1, "dev=0x%p", dev);
|
||||
|
||||
memset(dev, 0, sizeof(*dev));
|
||||
dev->p_dev = link;
|
||||
link->priv = dev;
|
||||
|
||||
|
|
|
@ -89,15 +89,14 @@ static int physmap_flash_probe(struct platform_device *dev)
|
|||
return -ENODEV;
|
||||
|
||||
printk(KERN_NOTICE "physmap platform flash device: %.8llx at %.8llx\n",
|
||||
(unsigned long long)dev->resource->end - dev->resource->start + 1,
|
||||
(unsigned long long)(dev->resource->end - dev->resource->start + 1),
|
||||
(unsigned long long)dev->resource->start);
|
||||
|
||||
info = kmalloc(sizeof(struct physmap_flash_info), GFP_KERNEL);
|
||||
info = kzalloc(sizeof(struct physmap_flash_info), GFP_KERNEL);
|
||||
if (info == NULL) {
|
||||
err = -ENOMEM;
|
||||
goto err_out;
|
||||
}
|
||||
memset(info, 0, sizeof(*info));
|
||||
|
||||
platform_set_drvdata(dev, info);
|
||||
|
||||
|
|
|
@ -0,0 +1,255 @@
|
|||
/*
|
||||
* Normal mappings of chips in physical memory for OF devices
|
||||
*
|
||||
* Copyright (C) 2006 MontaVista Software Inc.
|
||||
* Author: Vitaly Wool <vwool@ru.mvista.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/mtd/mtd.h>
|
||||
#include <linux/mtd/map.h>
|
||||
#include <linux/mtd/partitions.h>
|
||||
#include <linux/mtd/physmap.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/prom.h>
|
||||
#include <asm/of_device.h>
|
||||
#include <asm/of_platform.h>
|
||||
|
||||
struct physmap_flash_info {
|
||||
struct mtd_info *mtd;
|
||||
struct map_info map;
|
||||
struct resource *res;
|
||||
#ifdef CONFIG_MTD_PARTITIONS
|
||||
int nr_parts;
|
||||
struct mtd_partition *parts;
|
||||
#endif
|
||||
};
|
||||
|
||||
static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL };
|
||||
#ifdef CONFIG_MTD_PARTITIONS
|
||||
static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_MTD_PARTITIONS
|
||||
static int parse_flash_partitions(struct device_node *node,
|
||||
struct mtd_partition **parts)
|
||||
{
|
||||
int i, plen, retval = -ENOMEM;
|
||||
const u32 *part;
|
||||
const char *name;
|
||||
|
||||
part = get_property(node, "partitions", &plen);
|
||||
if (part == NULL)
|
||||
goto err;
|
||||
|
||||
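/* Each partition is a pair of u32 cells, <offset size>; bit 0 of the
 * size cell marks the partition read-only. */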
retval = plen / (2 * sizeof(u32));
|
||||
*parts = kzalloc(retval * sizeof(struct mtd_partition), GFP_KERNEL);
|
||||
if (*parts == NULL) {
|
||||
printk(KERN_ERR "Can't allocate the flash partition data!\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
name = get_property(node, "partition-names", &plen);
|
||||
|
||||
for (i = 0; i < retval; i++) {
|
||||
(*parts)[i].offset = *part++;
|
||||
(*parts)[i].size = *part & ~1;
|
||||
if (*part++ & 1) /* bit 0 set signifies read only partition */
|
||||
(*parts)[i].mask_flags = MTD_WRITEABLE;
|
||||
|
||||
if (name != NULL && plen > 0) {
|
||||
int len = strlen(name) + 1;
|
||||
|
||||
(*parts)[i].name = (char *)name;
|
||||
plen -= len;
|
||||
name += len;
|
||||
} else
|
||||
(*parts)[i].name = "unnamed";
|
||||
}
|
||||
err:
|
||||
return retval;
|
||||
}
|
||||
#endif
|
||||
|
||||
static int of_physmap_remove(struct of_device *dev)
|
||||
{
|
||||
struct physmap_flash_info *info;
|
||||
|
||||
info = dev_get_drvdata(&dev->dev);
|
||||
if (info == NULL)
|
||||
return 0;
|
||||
dev_set_drvdata(&dev->dev, NULL);
|
||||
|
||||
if (info->mtd != NULL) {
|
||||
#ifdef CONFIG_MTD_PARTITIONS
|
||||
if (info->nr_parts) {
|
||||
del_mtd_partitions(info->mtd);
|
||||
kfree(info->parts);
|
||||
} else {
|
||||
del_mtd_device(info->mtd);
|
||||
}
|
||||
#else
|
||||
del_mtd_device(info->mtd);
|
||||
#endif
|
||||
map_destroy(info->mtd);
|
||||
}
|
||||
|
||||
if (info->map.virt != NULL)
|
||||
iounmap(info->map.virt);
|
||||
|
||||
if (info->res != NULL) {
|
||||
release_resource(info->res);
|
||||
kfree(info->res);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __devinit of_physmap_probe(struct of_device *dev, const struct of_device_id *match)
|
||||
{
|
||||
struct device_node *dp = dev->node;
|
||||
struct resource res;
|
||||
struct physmap_flash_info *info;
|
||||
const char **probe_type;
|
||||
const char *of_probe;
|
||||
const u32 *width;
|
||||
int err;
|
||||
|
||||
|
||||
if (of_address_to_resource(dp, 0, &res)) {
|
||||
dev_err(&dev->dev, "Can't get the flash mapping!\n");
|
||||
err = -EINVAL;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
dev_dbg(&dev->dev, "physmap flash device: %.8llx at %.8llx\n",
|
||||
(unsigned long long)res.end - res.start + 1,
|
||||
(unsigned long long)res.start);
|
||||
|
||||
info = kzalloc(sizeof(struct physmap_flash_info), GFP_KERNEL);
|
||||
if (info == NULL) {
|
||||
err = -ENOMEM;
|
||||
goto err_out;
|
||||
}
|
||||
memset(info, 0, sizeof(*info));
|
||||
|
||||
dev_set_drvdata(&dev->dev, info);
|
||||
|
||||
info->res = request_mem_region(res.start, res.end - res.start + 1,
|
||||
dev->dev.bus_id);
|
||||
if (info->res == NULL) {
|
||||
dev_err(&dev->dev, "Could not reserve memory region\n");
|
||||
err = -ENOMEM;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
width = get_property(dp, "bank-width", NULL);
|
||||
if (width == NULL) {
|
||||
dev_err(&dev->dev, "Can't get the flash bank width!\n");
|
||||
err = -EINVAL;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
info->map.name = dev->dev.bus_id;
|
||||
info->map.phys = res.start;
|
||||
info->map.size = res.end - res.start + 1;
|
||||
info->map.bankwidth = *width;
|
||||
|
||||
info->map.virt = ioremap(info->map.phys, info->map.size);
|
||||
if (info->map.virt == NULL) {
|
||||
dev_err(&dev->dev, "Failed to ioremap flash region\n");
|
||||
err = -EIO;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
simple_map_init(&info->map);
|
||||
|
||||
of_probe = get_property(dp, "probe-type", NULL);
|
||||
if (of_probe == NULL) {
|
||||
probe_type = rom_probe_types;
|
||||
for (; info->mtd == NULL && *probe_type != NULL; probe_type++)
|
||||
info->mtd = do_map_probe(*probe_type, &info->map);
|
||||
} else if (!strcmp(of_probe, "CFI"))
|
||||
info->mtd = do_map_probe("cfi_probe", &info->map);
|
||||
else if (!strcmp(of_probe, "JEDEC"))
|
||||
info->mtd = do_map_probe("jedec_probe", &info->map);
|
||||
else {
|
||||
if (strcmp(of_probe, "ROM"))
|
||||
dev_dbg(&dev->dev, "map_probe: don't know probe type "
|
||||
"'%s', mapping as rom\n");
|
||||
info->mtd = do_map_probe("mtd_rom", &info->map);
|
||||
}
|
||||
if (info->mtd == NULL) {
|
||||
dev_err(&dev->dev, "map_probe failed\n");
|
||||
err = -ENXIO;
|
||||
goto err_out;
|
||||
}
|
||||
info->mtd->owner = THIS_MODULE;
|
||||
|
||||
#ifdef CONFIG_MTD_PARTITIONS
|
||||
err = parse_mtd_partitions(info->mtd, part_probe_types, &info->parts, 0);
|
||||
if (err > 0) {
|
||||
add_mtd_partitions(info->mtd, info->parts, err);
|
||||
} else if ((err = parse_flash_partitions(dp, &info->parts)) > 0) {
|
||||
dev_info(&dev->dev, "Using OF partition information\n");
|
||||
add_mtd_partitions(info->mtd, info->parts, err);
|
||||
info->nr_parts = err;
|
||||
} else
|
||||
#endif
|
||||
|
||||
add_mtd_device(info->mtd);
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
of_physmap_remove(dev);
|
||||
return err;
|
||||
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
||||
static struct of_device_id of_physmap_match[] = {
|
||||
{
|
||||
.type = "rom",
|
||||
.compatible = "direct-mapped"
|
||||
},
|
||||
{ },
|
||||
};
|
||||
|
||||
MODULE_DEVICE_TABLE(of, of_physmap_match);
|
||||
|
||||
|
||||
static struct of_platform_driver of_physmap_flash_driver = {
|
||||
.name = "physmap-flash",
|
||||
.match_table = of_physmap_match,
|
||||
.probe = of_physmap_probe,
|
||||
.remove = of_physmap_remove,
|
||||
};
|
||||
|
||||
static int __init of_physmap_init(void)
|
||||
{
|
||||
return of_register_platform_driver(&of_physmap_flash_driver);
|
||||
}
|
||||
|
||||
static void __exit of_physmap_exit(void)
|
||||
{
|
||||
of_unregister_platform_driver(&of_physmap_flash_driver);
|
||||
}
|
||||
|
||||
module_init(of_physmap_init);
|
||||
module_exit(of_physmap_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Vitaly Wool <vwool@ru.mvista.com>");
|
||||
MODULE_DESCRIPTION("Configurable MTD map driver for OF");
|
|
@ -147,14 +147,13 @@ static int platram_probe(struct platform_device *pdev)
|
|||
|
||||
pdata = pdev->dev.platform_data;
|
||||
|
||||
info = kmalloc(sizeof(*info), GFP_KERNEL);
|
||||
info = kzalloc(sizeof(*info), GFP_KERNEL);
|
||||
if (info == NULL) {
|
||||
dev_err(&pdev->dev, "no memory for flash info\n");
|
||||
err = -ENOMEM;
|
||||
goto exit_error;
|
||||
}
|
||||
|
||||
memset(info, 0, sizeof(*info));
|
||||
platform_set_drvdata(pdev, info);
|
||||
|
||||
info->dev = &pdev->dev;
|
||||
|
|
|
@ -273,14 +273,12 @@ sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat)
|
|||
/*
|
||||
* Allocate the map_info structs in one go.
|
||||
*/
|
||||
info = kmalloc(size, GFP_KERNEL);
|
||||
info = kzalloc(size, GFP_KERNEL);
|
||||
if (!info) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
memset(info, 0, size);
|
||||
|
||||
if (plat->init) {
|
||||
ret = plat->init();
|
||||
if (ret)
|
||||
|
|
|
@ -132,20 +132,16 @@ static int __init init_tqm834x_mtd(void)
|
|||
|
||||
pr_debug("%s: chip probing count %d\n", __FUNCTION__, idx);
|
||||
|
||||
map_banks[idx] =
|
||||
(struct map_info *)kmalloc(sizeof(struct map_info),
|
||||
GFP_KERNEL);
|
||||
map_banks[idx] = kzalloc(sizeof(struct map_info), GFP_KERNEL);
|
||||
if (map_banks[idx] == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto error_mem;
|
||||
}
|
||||
memset((void *)map_banks[idx], 0, sizeof(struct map_info));
|
||||
map_banks[idx]->name = (char *)kmalloc(16, GFP_KERNEL);
|
||||
map_banks[idx]->name = kzalloc(16, GFP_KERNEL);
|
||||
if (map_banks[idx]->name == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto error_mem;
|
||||
}
|
||||
memset((void *)map_banks[idx]->name, 0, 16);
|
||||
|
||||
sprintf(map_banks[idx]->name, "TQM834x-%d", idx);
|
||||
map_banks[idx]->size = flash_size;
|
||||
|
|
|
@ -134,14 +134,13 @@ int __init init_tqm_mtd(void)
|
|||
|
||||
printk(KERN_INFO "%s: chip probing count %d\n", __FUNCTION__, idx);
|
||||
|
||||
map_banks[idx] = (struct map_info *)kmalloc(sizeof(struct map_info), GFP_KERNEL);
|
||||
map_banks[idx] = kzalloc(sizeof(struct map_info), GFP_KERNEL);
|
||||
if(map_banks[idx] == NULL) {
|
||||
ret = -ENOMEM;
|
||||
/* FIXME: What if some MTD devices were probed already? */
|
||||
goto error_mem;
|
||||
}
|
||||
|
||||
memset((void *)map_banks[idx], 0, sizeof(struct map_info));
|
||||
map_banks[idx]->name = (char *)kmalloc(16, GFP_KERNEL);
|
||||
|
||||
if (!map_banks[idx]->name) {
|
||||
|
|
|
@ -42,19 +42,20 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
|
|||
unsigned long block, nsect;
|
||||
char *buf;
|
||||
|
||||
block = req->sector;
|
||||
nsect = req->current_nr_sectors;
|
||||
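/* The block layer counts in 512-byte sectors; convert to the translation
 * layer's block size (blkshift is log2 of tr->blksize). */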
block = req->sector << 9 >> tr->blkshift;
|
||||
nsect = req->current_nr_sectors << 9 >> tr->blkshift;
|
||||
|
||||
buf = req->buffer;
|
||||
|
||||
if (!blk_fs_request(req))
|
||||
return 0;
|
||||
|
||||
if (block + nsect > get_capacity(req->rq_disk))
|
||||
if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
|
||||
return 0;
|
||||
|
||||
switch(rq_data_dir(req)) {
|
||||
case READ:
|
||||
for (; nsect > 0; nsect--, block++, buf += 512)
|
||||
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
|
||||
if (tr->readsect(dev, block, buf))
|
||||
return 0;
|
||||
return 1;
|
||||
|
@ -63,7 +64,7 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
|
|||
if (!tr->writesect)
|
||||
return 0;
|
||||
|
||||
for (; nsect > 0; nsect--, block++, buf += 512)
|
||||
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
|
||||
if (tr->writesect(dev, block, buf))
|
||||
return 0;
|
||||
return 1;
|
||||
|
@ -297,7 +298,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
|
|||
|
||||
/* 2.5 has capacity in units of 512 bytes while still
|
||||
having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
|
||||
set_capacity(gd, (new->size * new->blksize) >> 9);
|
||||
set_capacity(gd, (new->size * tr->blksize) >> 9);
|
||||
|
||||
gd->private_data = new;
|
||||
new->blkcore_priv = gd;
|
||||
|
@ -372,12 +373,10 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
|
|||
if (!blktrans_notifier.list.next)
|
||||
register_mtd_user(&blktrans_notifier);
|
||||
|
||||
tr->blkcore_priv = kmalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
|
||||
tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
|
||||
if (!tr->blkcore_priv)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(tr->blkcore_priv, 0, sizeof(*tr->blkcore_priv));
|
||||
|
||||
mutex_lock(&mtd_table_mutex);
|
||||
|
||||
ret = register_blkdev(tr->major, tr->name);
|
||||
|
@ -401,6 +400,8 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
|
|||
}
|
||||
|
||||
tr->blkcore_priv->rq->queuedata = tr;
|
||||
blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
|
||||
tr->blkshift = ffs(tr->blksize) - 1;
|
||||
|
||||
ret = kernel_thread(mtd_blktrans_thread, tr, CLONE_KERNEL);
|
||||
if (ret < 0) {
|
||||
|
|
|
@ -278,11 +278,10 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
|
|||
}
|
||||
|
||||
/* OK, it's not open. Create cache info for it */
|
||||
mtdblk = kmalloc(sizeof(struct mtdblk_dev), GFP_KERNEL);
|
||||
mtdblk = kzalloc(sizeof(struct mtdblk_dev), GFP_KERNEL);
|
||||
if (!mtdblk)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(mtdblk, 0, sizeof(*mtdblk));
|
||||
mtdblk->count = 1;
|
||||
mtdblk->mtd = mtd;
|
||||
|
||||
|
@ -339,16 +338,14 @@ static int mtdblock_flush(struct mtd_blktrans_dev *dev)
|
|||
|
||||
static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
|
||||
{
|
||||
struct mtd_blktrans_dev *dev = kmalloc(sizeof(*dev), GFP_KERNEL);
|
||||
struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
|
||||
|
||||
if (!dev)
|
||||
return;
|
||||
|
||||
memset(dev, 0, sizeof(*dev));
|
||||
|
||||
dev->mtd = mtd;
|
||||
dev->devnum = mtd->index;
|
||||
dev->blksize = 512;
|
||||
|
||||
dev->size = mtd->size >> 9;
|
||||
dev->tr = tr;
|
||||
|
||||
|
@ -368,6 +365,7 @@ static struct mtd_blktrans_ops mtdblock_tr = {
|
|||
.name = "mtdblock",
|
||||
.major = 31,
|
||||
.part_bits = 0,
|
||||
.blksize = 512,
|
||||
.open = mtdblock_open,
|
||||
.flush = mtdblock_flush,
|
||||
.release = mtdblock_release,
|
||||
|
|
|
@ -33,16 +33,14 @@ static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
|
|||
|
||||
static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
|
||||
{
|
||||
struct mtd_blktrans_dev *dev = kmalloc(sizeof(*dev), GFP_KERNEL);
|
||||
struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
|
||||
|
||||
if (!dev)
|
||||
return;
|
||||
|
||||
memset(dev, 0, sizeof(*dev));
|
||||
|
||||
dev->mtd = mtd;
|
||||
dev->devnum = mtd->index;
|
||||
dev->blksize = 512;
|
||||
|
||||
dev->size = mtd->size >> 9;
|
||||
dev->tr = tr;
|
||||
dev->readonly = 1;
|
||||
|
@ -60,6 +58,7 @@ static struct mtd_blktrans_ops mtdblock_tr = {
|
|||
.name = "mtdblock",
|
||||
.major = 31,
|
||||
.part_bits = 0,
|
||||
.blksize = 512,
|
||||
.readsect = mtdblock_readsect,
|
||||
.writesect = mtdblock_writesect,
|
||||
.add_mtd = mtdblock_add_mtd,
|
||||
|
|
|
@ -7,6 +7,7 @@
|
|||
|
||||
#include <linux/device.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
|
@ -100,8 +101,8 @@ static int mtd_open(struct inode *inode, struct file *file)
|
|||
|
||||
mtd = get_mtd_device(NULL, devnum);
|
||||
|
||||
if (!mtd)
|
||||
return -ENODEV;
|
||||
if (IS_ERR(mtd))
|
||||
return PTR_ERR(mtd);
|
||||
|
||||
if (MTD_ABSENT == mtd->type) {
|
||||
put_mtd_device(mtd);
|
||||
|
@ -431,7 +432,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
|
|||
if(!(file->f_mode & 2))
|
||||
return -EPERM;
|
||||
|
||||
erase=kmalloc(sizeof(struct erase_info),GFP_KERNEL);
|
||||
erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
|
||||
if (!erase)
|
||||
ret = -ENOMEM;
|
||||
else {
|
||||
|
@ -440,7 +441,6 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
|
|||
|
||||
init_waitqueue_head(&waitq);
|
||||
|
||||
memset (erase,0,sizeof(struct erase_info));
|
||||
if (copy_from_user(&erase->addr, argp,
|
||||
sizeof(struct erase_info_user))) {
|
||||
kfree(erase);
|
||||
|
@ -499,13 +499,12 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
ops.len = buf.length;
|
||||
ops.ooblen = buf.length;
|
||||
ops.ooboffs = buf.start & (mtd->oobsize - 1);
|
||||
ops.datbuf = NULL;
|
||||
ops.mode = MTD_OOB_PLACE;
|
||||
|
||||
if (ops.ooboffs && ops.len > (mtd->oobsize - ops.ooboffs))
|
||||
if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
|
||||
return -EINVAL;
|
||||
|
||||
ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
|
||||
|
@ -520,7 +519,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
|
|||
buf.start &= ~(mtd->oobsize - 1);
|
||||
ret = mtd->write_oob(mtd, buf.start, &ops);
|
||||
|
||||
if (copy_to_user(argp + sizeof(uint32_t), &ops.retlen,
|
||||
if (copy_to_user(argp + sizeof(uint32_t), &ops.oobretlen,
|
||||
sizeof(uint32_t)))
|
||||
ret = -EFAULT;
|
||||
|
||||
|
@ -548,7 +547,6 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
ops.len = buf.length;
|
||||
ops.ooblen = buf.length;
|
||||
ops.ooboffs = buf.start & (mtd->oobsize - 1);
|
||||
ops.datbuf = NULL;
|
||||
|
@ -564,10 +562,10 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
|
|||
buf.start &= ~(mtd->oobsize - 1);
|
||||
ret = mtd->read_oob(mtd, buf.start, &ops);
|
||||
|
||||
if (put_user(ops.retlen, (uint32_t __user *)argp))
|
||||
if (put_user(ops.oobretlen, (uint32_t __user *)argp))
|
||||
ret = -EFAULT;
|
||||
else if (ops.retlen && copy_to_user(buf.ptr, ops.oobbuf,
|
||||
ops.retlen))
|
||||
else if (ops.oobretlen && copy_to_user(buf.ptr, ops.oobbuf,
|
||||
ops.oobretlen))
|
||||
ret = -EFAULT;
|
||||
|
||||
kfree(ops.oobbuf);
|
||||
|
@ -616,6 +614,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
|
|||
memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
|
||||
memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
|
||||
sizeof(oi.oobfree));
|
||||
oi.eccbytes = mtd->ecclayout->eccbytes;
|
||||
|
||||
if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
|
||||
return -EFAULT;
|
||||
|
@ -715,7 +714,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
|
|||
if (!mtd->ecclayout)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (copy_to_user(argp, &mtd->ecclayout,
|
||||
if (copy_to_user(argp, mtd->ecclayout,
|
||||
sizeof(struct nand_ecclayout)))
|
||||
return -EFAULT;
|
||||
break;
|
||||
|
|
|
@ -247,7 +247,7 @@ concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
|
|||
struct mtd_oob_ops devops = *ops;
|
||||
int i, err, ret = 0;
|
||||
|
||||
ops->retlen = 0;
|
||||
ops->retlen = ops->oobretlen = 0;
|
||||
|
||||
for (i = 0; i < concat->num_subdev; i++) {
|
||||
struct mtd_info *subdev = concat->subdev[i];
|
||||
|
@ -263,6 +263,7 @@ concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
|
|||
|
||||
err = subdev->read_oob(subdev, from, &devops);
|
||||
ops->retlen += devops.retlen;
|
||||
ops->oobretlen += devops.oobretlen;
|
||||
|
||||
/* Save information about bitflips! */
|
||||
if (unlikely(err)) {
|
||||
|
@ -278,14 +279,18 @@ concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
|
|||
return err;
|
||||
}
|
||||
|
||||
devops.len = ops->len - ops->retlen;
|
||||
if (!devops.len)
|
||||
return ret;
|
||||
|
||||
if (devops.datbuf)
|
||||
if (devops.datbuf) {
|
||||
devops.len = ops->len - ops->retlen;
|
||||
if (!devops.len)
|
||||
return ret;
|
||||
devops.datbuf += devops.retlen;
|
||||
if (devops.oobbuf)
|
||||
devops.oobbuf += devops.ooblen;
|
||||
}
|
||||
if (devops.oobbuf) {
|
||||
devops.ooblen = ops->ooblen - ops->oobretlen;
|
||||
if (!devops.ooblen)
|
||||
return ret;
|
||||
devops.oobbuf += ops->oobretlen;
|
||||
}
|
||||
|
||||
from = 0;
|
||||
}
|
||||
|
@ -321,14 +326,18 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
devops.len = ops->len - ops->retlen;
|
||||
if (!devops.len)
|
||||
return 0;
|
||||
|
||||
if (devops.datbuf)
|
||||
if (devops.datbuf) {
|
||||
devops.len = ops->len - ops->retlen;
|
||||
if (!devops.len)
|
||||
return 0;
|
||||
devops.datbuf += devops.retlen;
|
||||
if (devops.oobbuf)
|
||||
devops.oobbuf += devops.ooblen;
|
||||
}
|
||||
if (devops.oobbuf) {
|
||||
devops.ooblen = ops->ooblen - ops->oobretlen;
|
||||
if (!devops.ooblen)
|
||||
return 0;
|
||||
devops.oobbuf += devops.oobretlen;
|
||||
}
|
||||
to = 0;
|
||||
}
|
||||
return -EINVAL;
|
||||
|
@ -699,14 +708,13 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
|
|||
|
||||
/* allocate the device structure */
|
||||
size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
|
||||
concat = kmalloc(size, GFP_KERNEL);
|
||||
concat = kzalloc(size, GFP_KERNEL);
|
||||
if (!concat) {
|
||||
printk
|
||||
("memory allocation error while creating concatenated device \"%s\"\n",
|
||||
name);
|
||||
return NULL;
|
||||
}
|
||||
memset(concat, 0, size);
|
||||
concat->subdev = (struct mtd_info **) (concat + 1);
|
||||
|
||||
/*
|
||||
|
@ -764,6 +772,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
|
|||
concat->mtd.ecc_stats.badblocks +=
|
||||
subdev[i]->ecc_stats.badblocks;
|
||||
if (concat->mtd.writesize != subdev[i]->writesize ||
|
||||
concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
|
||||
concat->mtd.oobsize != subdev[i]->oobsize ||
|
||||
concat->mtd.ecctype != subdev[i]->ecctype ||
|
||||
concat->mtd.eccsize != subdev[i]->eccsize ||
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
#include <linux/timer.h>
|
||||
#include <linux/major.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/ioctl.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/mtd/compatmac.h>
|
||||
|
@ -192,14 +193,14 @@ int unregister_mtd_user (struct mtd_notifier *old)
|
|||
* Given a number and NULL address, return the num'th entry in the device
|
||||
* table, if any. Given an address and num == -1, search the device table
|
||||
* for a device with that address and return if it's still present. Given
|
||||
* both, return the num'th driver only if its address matches. Return NULL
|
||||
* if not.
|
||||
* both, return the num'th driver only if its address matches. Return
|
||||
* error code if not.
|
||||
*/
|
||||
|
||||
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
|
||||
{
|
||||
struct mtd_info *ret = NULL;
|
||||
int i;
|
||||
int i, err = -ENODEV;
|
||||
|
||||
mutex_lock(&mtd_table_mutex);
|
||||
|
||||
|
@ -213,14 +214,73 @@ struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
|
|||
ret = NULL;
|
||||
}
|
||||
|
||||
if (ret && !try_module_get(ret->owner))
|
||||
ret = NULL;
|
||||
if (!ret)
|
||||
goto out_unlock;
|
||||
|
||||
if (ret)
|
||||
ret->usecount++;
|
||||
if (!try_module_get(ret->owner))
|
||||
goto out_unlock;
|
||||
|
||||
if (ret->get_device) {
|
||||
err = ret->get_device(ret);
|
||||
if (err)
|
||||
goto out_put;
|
||||
}
|
||||
|
||||
ret->usecount++;
|
||||
mutex_unlock(&mtd_table_mutex);
|
||||
return ret;
|
||||
|
||||
out_put:
|
||||
module_put(ret->owner);
|
||||
out_unlock:
|
||||
mutex_unlock(&mtd_table_mutex);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
/**
|
||||
* get_mtd_device_nm - obtain a validated handle for an MTD device by
|
||||
* device name
|
||||
* @name: MTD device name to open
|
||||
*
|
||||
* This function returns MTD device description structure in case of
|
||||
* success and an error code in case of failure.
|
||||
*/
|
||||
|
||||
struct mtd_info *get_mtd_device_nm(const char *name)
|
||||
{
|
||||
int i, err = -ENODEV;
|
||||
struct mtd_info *mtd = NULL;
|
||||
|
||||
mutex_lock(&mtd_table_mutex);
|
||||
|
||||
for (i = 0; i < MAX_MTD_DEVICES; i++) {
|
||||
if (mtd_table[i] && !strcmp(name, mtd_table[i]->name)) {
|
||||
mtd = mtd_table[i];
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!mtd)
|
||||
goto out_unlock;
|
||||
|
||||
if (!try_module_get(mtd->owner))
|
||||
goto out_unlock;
|
||||
|
||||
if (mtd->get_device) {
|
||||
err = mtd->get_device(mtd);
|
||||
if (err)
|
||||
goto out_put;
|
||||
}
|
||||
|
||||
mtd->usecount++;
|
||||
mutex_unlock(&mtd_table_mutex);
|
||||
return mtd;
|
||||
|
||||
out_put:
|
||||
module_put(mtd->owner);
|
||||
out_unlock:
|
||||
mutex_unlock(&mtd_table_mutex);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
void put_mtd_device(struct mtd_info *mtd)
|
||||
|
@ -229,6 +289,8 @@ void put_mtd_device(struct mtd_info *mtd)
|
|||
|
||||
mutex_lock(&mtd_table_mutex);
|
||||
c = --mtd->usecount;
|
||||
if (mtd->put_device)
|
||||
mtd->put_device(mtd);
|
||||
mutex_unlock(&mtd_table_mutex);
|
||||
BUG_ON(c < 0);
|
||||
|
||||
|
@ -236,7 +298,7 @@ void put_mtd_device(struct mtd_info *mtd)
|
|||
}
|
||||
|
||||
/* default_mtd_writev - default mtd writev method for MTD devices that
|
||||
* dont implement their own
|
||||
* don't implement their own
|
||||
*/
|
||||
|
||||
int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
|
||||
|
@ -264,13 +326,14 @@ int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
|
|||
return ret;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(add_mtd_device);
|
||||
EXPORT_SYMBOL(del_mtd_device);
|
||||
EXPORT_SYMBOL(get_mtd_device);
|
||||
EXPORT_SYMBOL(put_mtd_device);
|
||||
EXPORT_SYMBOL(register_mtd_user);
|
||||
EXPORT_SYMBOL(unregister_mtd_user);
|
||||
EXPORT_SYMBOL(default_mtd_writev);
|
||||
EXPORT_SYMBOL_GPL(add_mtd_device);
|
||||
EXPORT_SYMBOL_GPL(del_mtd_device);
|
||||
EXPORT_SYMBOL_GPL(get_mtd_device);
|
||||
EXPORT_SYMBOL_GPL(get_mtd_device_nm);
|
||||
EXPORT_SYMBOL_GPL(put_mtd_device);
|
||||
EXPORT_SYMBOL_GPL(register_mtd_user);
|
||||
EXPORT_SYMBOL_GPL(unregister_mtd_user);
|
||||
EXPORT_SYMBOL_GPL(default_mtd_writev);
|
||||
|
||||
#ifdef CONFIG_PROC_FS
|
||||
|
||||
|
|
|
@ -94,7 +94,7 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
|
|||
|
||||
if (from >= mtd->size)
|
||||
return -EINVAL;
|
||||
if (from + ops->len > mtd->size)
|
||||
if (ops->datbuf && from + ops->len > mtd->size)
|
||||
return -EINVAL;
|
||||
res = part->master->read_oob(part->master, from + part->offset, ops);
|
||||
|
||||
|
@ -161,7 +161,7 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to,
|
|||
|
||||
if (to >= mtd->size)
|
||||
return -EINVAL;
|
||||
if (to + ops->len > mtd->size)
|
||||
if (ops->datbuf && to + ops->len > mtd->size)
|
||||
return -EINVAL;
|
||||
return part->master->write_oob(part->master, to + part->offset, ops);
|
||||
}
|
||||
|
@ -323,14 +323,13 @@ int add_mtd_partitions(struct mtd_info *master,
|
|||
for (i = 0; i < nbparts; i++) {
|
||||
|
||||
/* allocate the partition structure */
|
||||
slave = kmalloc (sizeof(*slave), GFP_KERNEL);
|
||||
slave = kzalloc (sizeof(*slave), GFP_KERNEL);
|
||||
if (!slave) {
|
||||
printk ("memory allocation error while creating partitions for \"%s\"\n",
|
||||
master->name);
|
||||
del_mtd_partitions(master);
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset(slave, 0, sizeof(*slave));
|
||||
list_add(&slave->list, &mtd_partitions);
|
||||
|
||||
/* set up the MTD object for this partition */
|
||||
|
@ -341,6 +340,7 @@ int add_mtd_partitions(struct mtd_info *master,
|
|||
slave->mtd.oobsize = master->oobsize;
|
||||
slave->mtd.ecctype = master->ecctype;
|
||||
slave->mtd.eccsize = master->eccsize;
|
||||
slave->mtd.subpage_sft = master->subpage_sft;
|
||||
|
||||
slave->mtd.name = parts[i].name;
|
||||
slave->mtd.bank_size = master->bank_size;
|
||||
|
|
|
@ -90,6 +90,7 @@ config MTD_NAND_RTC_FROM4
|
|||
depends on MTD_NAND && SH_SOLUTION_ENGINE
|
||||
select REED_SOLOMON
|
||||
select REED_SOLOMON_DEC8
|
||||
select BITREVERSE
|
||||
help
|
||||
This enables the driver for the Renesas Technology AG-AND
|
||||
flash interface board (FROM_BOARD4)
|
||||
|
@ -132,6 +133,7 @@ config MTD_NAND_S3C2410_HWECC
|
|||
config MTD_NAND_NDFC
|
||||
tristate "NDFC NanD Flash Controller"
|
||||
depends on MTD_NAND && 44x
|
||||
select MTD_NAND_ECC_SMC
|
||||
help
|
||||
NDFC Nand Flash Controllers are integrated in EP44x SoCs
|
||||
|
||||
|
@ -219,6 +221,13 @@ config MTD_NAND_SHARPSL
|
|||
tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
|
||||
depends on MTD_NAND && ARCH_PXA
|
||||
|
||||
config MTD_NAND_CAFE
|
||||
tristate "NAND support for OLPC CAFÉ chip"
|
||||
depends on PCI
|
||||
help
|
||||
Use NAND flash attached to the CAFÉ chip designed for the $100
|
||||
laptop.
|
||||
|
||||
config MTD_NAND_CS553X
|
||||
tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
|
||||
depends on MTD_NAND && X86_32 && (X86_PC || X86_GENERICARCH)
|
||||
|
@ -232,6 +241,13 @@ config MTD_NAND_CS553X
|
|||
|
||||
If you say "m", the module will be called "cs553x_nand.ko".
|
||||
|
||||
config MTD_NAND_AT91
|
||||
bool "Support for NAND Flash / SmartMedia on AT91"
|
||||
depends on MTD_NAND && ARCH_AT91
|
||||
help
|
||||
Enables support for NAND Flash / Smart Media Card interface
|
||||
on Atmel AT91 processors.
|
||||
|
||||
config MTD_NAND_NANDSIM
|
||||
tristate "Support for NAND Flash Simulator"
|
||||
depends on MTD_NAND && MTD_PARTITIONS
|
||||
|
|
|
@ -6,6 +6,7 @@
|
|||
obj-$(CONFIG_MTD_NAND) += nand.o nand_ecc.o
|
||||
obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
|
||||
|
||||
obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o
|
||||
obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
|
||||
obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o
|
||||
obj-$(CONFIG_MTD_NAND_TOTO) += toto.o
|
||||
|
@ -22,5 +23,7 @@ obj-$(CONFIG_MTD_NAND_TS7250) += ts7250.o
|
|||
obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
|
||||
obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
|
||||
obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
|
||||
obj-$(CONFIG_MTD_NAND_AT91) += at91_nand.o
|
||||
|
||||
nand-objs = nand_base.o nand_bbt.o
|
||||
nand-objs := nand_base.o nand_bbt.o
|
||||
cafe_nand-objs := cafe.o cafe_ecc.o
|
||||
|
|
|
@ -0,0 +1,223 @@
|
|||
/*
|
||||
* drivers/mtd/nand/at91_nand.c
|
||||
*
|
||||
* Copyright (C) 2003 Rick Bronson
|
||||
*
|
||||
* Derived from drivers/mtd/nand/autcpu12.c
|
||||
* Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
|
||||
*
|
||||
* Derived from drivers/mtd/spia.c
|
||||
* Copyright (C) 2000 Steven J. Hill (sjhill@cotw.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/slab.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/mtd/mtd.h>
|
||||
#include <linux/mtd/nand.h>
|
||||
#include <linux/mtd/partitions.h>
|
||||
|
||||
#include <asm/io.h>
|
||||
#include <asm/sizes.h>
|
||||
|
||||
#include <asm/hardware.h>
|
||||
#include <asm/arch/board.h>
|
||||
#include <asm/arch/gpio.h>
|
||||
|
||||
struct at91_nand_host {
|
||||
struct nand_chip nand_chip;
|
||||
struct mtd_info mtd;
|
||||
void __iomem *io_base;
|
||||
struct at91_nand_data *board;
|
||||
};
|
||||
|
||||
/*
|
||||
* Hardware specific access to control-lines
|
||||
*/
|
||||
static void at91_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
|
||||
{
|
||||
struct nand_chip *nand_chip = mtd->priv;
|
||||
struct at91_nand_host *host = nand_chip->priv;
|
||||
|
||||
if (cmd == NAND_CMD_NONE)
|
||||
return;
|
||||
|
||||
if (ctrl & NAND_CLE)
|
||||
writeb(cmd, host->io_base + (1 << host->board->cle));
|
||||
else
|
||||
writeb(cmd, host->io_base + (1 << host->board->ale));
|
||||
}
|
||||
|
||||
/*
|
||||
* Read the Device Ready pin.
|
||||
*/
|
||||
static int at91_nand_device_ready(struct mtd_info *mtd)
|
||||
{
|
||||
struct nand_chip *nand_chip = mtd->priv;
|
||||
struct at91_nand_host *host = nand_chip->priv;
|
||||
|
||||
return at91_get_gpio_value(host->board->rdy_pin);
|
||||
}
|
||||
|
||||
/*
|
||||
* Enable NAND.
|
||||
*/
|
||||
static void at91_nand_enable(struct at91_nand_host *host)
|
||||
{
|
||||
if (host->board->enable_pin)
|
||||
at91_set_gpio_value(host->board->enable_pin, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Disable NAND.
|
||||
*/
|
||||
static void at91_nand_disable(struct at91_nand_host *host)
|
||||
{
|
||||
if (host->board->enable_pin)
|
||||
at91_set_gpio_value(host->board->enable_pin, 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Probe for the NAND device.
|
||||
*/
|
||||
static int __init at91_nand_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct at91_nand_host *host;
|
||||
struct mtd_info *mtd;
|
||||
struct nand_chip *nand_chip;
|
||||
int res;
|
||||
|
||||
#ifdef CONFIG_MTD_PARTITIONS
|
||||
struct mtd_partition *partitions = NULL;
|
||||
int num_partitions = 0;
|
||||
#endif
|
||||
|
||||
/* Allocate memory for the device structure (and zero it) */
|
||||
host = kzalloc(sizeof(struct at91_nand_host), GFP_KERNEL);
|
||||
if (!host) {
|
||||
printk(KERN_ERR "at91_nand: failed to allocate device structure.\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
host->io_base = ioremap(pdev->resource[0].start,
|
||||
pdev->resource[0].end - pdev->resource[0].start + 1);
|
||||
if (host->io_base == NULL) {
|
||||
printk(KERN_ERR "at91_nand: ioremap failed\n");
|
||||
kfree(host);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
mtd = &host->mtd;
|
||||
nand_chip = &host->nand_chip;
|
||||
host->board = pdev->dev.platform_data;
|
||||
|
||||
nand_chip->priv = host; /* link the private data structures */
|
||||
mtd->priv = nand_chip;
|
||||
mtd->owner = THIS_MODULE;
|
||||
|
||||
/* Set address of NAND IO lines */
|
||||
nand_chip->IO_ADDR_R = host->io_base;
|
||||
nand_chip->IO_ADDR_W = host->io_base;
|
||||
nand_chip->cmd_ctrl = at91_nand_cmd_ctrl;
|
||||
nand_chip->dev_ready = at91_nand_device_ready;
|
||||
nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */
|
||||
nand_chip->chip_delay = 20; /* 20us command delay time */
|
||||
|
||||
if (host->board->bus_width_16) /* 16-bit bus width */
|
||||
nand_chip->options |= NAND_BUSWIDTH_16;
|
||||
|
||||
platform_set_drvdata(pdev, host);
|
||||
at91_nand_enable(host);
|
||||
|
||||
if (host->board->det_pin) {
|
||||
if (at91_get_gpio_value(host->board->det_pin)) {
|
||||
printk ("No SmartMedia card inserted.\n");
|
||||
res = -ENXIO;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
/* Scan to find existence of the device */
|
||||
if (nand_scan(mtd, 1)) {
|
||||
res = -ENXIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MTD_PARTITIONS
|
||||
if (host->board->partition_info)
|
||||
partitions = host->board->partition_info(mtd->size, &num_partitions);
|
||||
|
||||
if ((!partitions) || (num_partitions == 0)) {
|
||||
printk(KERN_ERR "at91_nand: No parititions defined, or unsupported device.\n");
|
||||
res = -ENXIO;
|
||||
goto release;
|
||||
}
|
||||
|
||||
res = add_mtd_partitions(mtd, partitions, num_partitions);
|
||||
#else
|
||||
res = add_mtd_device(mtd);
|
||||
#endif
|
||||
|
||||
if (!res)
|
||||
return res;
|
||||
|
||||
release:
|
||||
nand_release(mtd);
|
||||
out:
|
||||
at91_nand_disable(host);
|
||||
platform_set_drvdata(pdev, NULL);
|
||||
iounmap(host->io_base);
|
||||
kfree(host);
|
||||
return res;
|
||||
}
|
||||
|
||||
/*
|
||||
* Remove a NAND device.
|
||||
*/
|
||||
static int __devexit at91_nand_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct at91_nand_host *host = platform_get_drvdata(pdev);
|
||||
struct mtd_info *mtd = &host->mtd;
|
||||
|
||||
nand_release(mtd);
|
||||
|
||||
at91_nand_disable(host);
|
||||
|
||||
iounmap(host->io_base);
|
||||
kfree(host);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct platform_driver at91_nand_driver = {
|
||||
.probe = at91_nand_probe,
|
||||
.remove = at91_nand_remove,
|
||||
.driver = {
|
||||
.name = "at91_nand",
|
||||
.owner = THIS_MODULE,
|
||||
},
|
||||
};
|
||||
|
||||
static int __init at91_nand_init(void)
|
||||
{
|
||||
return platform_driver_register(&at91_nand_driver);
|
||||
}
|
||||
|
||||
|
||||
static void __exit at91_nand_exit(void)
|
||||
{
|
||||
platform_driver_unregister(&at91_nand_driver);
|
||||
}
|
||||
|
||||
|
||||
module_init(at91_nand_init);
|
||||
module_exit(at91_nand_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Rick Bronson");
|
||||
MODULE_DESCRIPTION("NAND/SmartMedia driver for AT91RM9200");
|
|
@ -0,0 +1,770 @@
|
|||
/*
|
||||
* Driver for One Laptop Per Child ‘CAFÉ’ controller, aka Marvell 88ALP01
|
||||
*
|
||||
* Copyright © 2006 Red Hat, Inc.
|
||||
* Copyright © 2006 David Woodhouse <dwmw2@infradead.org>
|
||||
*/
|
||||
|
||||
#define DEBUG
|
||||
|
||||
#include <linux/device.h>
|
||||
#undef DEBUG
|
||||
#include <linux/mtd/mtd.h>
|
||||
#include <linux/mtd/nand.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
#define CAFE_NAND_CTRL1 0x00
|
||||
#define CAFE_NAND_CTRL2 0x04
|
||||
#define CAFE_NAND_CTRL3 0x08
|
||||
#define CAFE_NAND_STATUS 0x0c
|
||||
#define CAFE_NAND_IRQ 0x10
|
||||
#define CAFE_NAND_IRQ_MASK 0x14
|
||||
#define CAFE_NAND_DATA_LEN 0x18
|
||||
#define CAFE_NAND_ADDR1 0x1c
|
||||
#define CAFE_NAND_ADDR2 0x20
|
||||
#define CAFE_NAND_TIMING1 0x24
|
||||
#define CAFE_NAND_TIMING2 0x28
|
||||
#define CAFE_NAND_TIMING3 0x2c
|
||||
#define CAFE_NAND_NONMEM 0x30
|
||||
#define CAFE_NAND_ECC_RESULT 0x3C
|
||||
#define CAFE_NAND_DMA_CTRL 0x40
|
||||
#define CAFE_NAND_DMA_ADDR0 0x44
|
||||
#define CAFE_NAND_DMA_ADDR1 0x48
|
||||
#define CAFE_NAND_ECC_SYN01 0x50
|
||||
#define CAFE_NAND_ECC_SYN23 0x54
|
||||
#define CAFE_NAND_ECC_SYN45 0x58
|
||||
#define CAFE_NAND_ECC_SYN67 0x5c
|
||||
#define CAFE_NAND_READ_DATA 0x1000
|
||||
#define CAFE_NAND_WRITE_DATA 0x2000
|
||||
|
||||
#define CAFE_GLOBAL_CTRL 0x3004
|
||||
#define CAFE_GLOBAL_IRQ 0x3008
|
||||
#define CAFE_GLOBAL_IRQ_MASK 0x300c
|
||||
#define CAFE_NAND_RESET 0x3034
|
||||
|
||||
int cafe_correct_ecc(unsigned char *buf,
|
||||
unsigned short *chk_syndrome_list);
|
||||
|
||||
struct cafe_priv {
|
||||
struct nand_chip nand;
|
||||
struct pci_dev *pdev;
|
||||
void __iomem *mmio;
|
||||
uint32_t ctl1;
|
||||
uint32_t ctl2;
|
||||
int datalen;
|
||||
int nr_data;
|
||||
int data_pos;
|
||||
int page_addr;
|
||||
dma_addr_t dmaaddr;
|
||||
unsigned char *dmabuf;
|
||||
};
|
||||
|
||||
static int usedma = 1;
|
||||
module_param(usedma, int, 0644);
|
||||
|
||||
static int skipbbt = 0;
|
||||
module_param(skipbbt, int, 0644);
|
||||
|
||||
static int debug = 0;
|
||||
module_param(debug, int, 0644);
|
||||
|
||||
static int regdebug = 0;
|
||||
module_param(regdebug, int, 0644);
|
||||
|
||||
static int checkecc = 1;
|
||||
module_param(checkecc, int, 0644);
|
||||
|
||||
static int slowtiming = 0;
|
||||
module_param(slowtiming, int, 0644);
|
||||
|
||||
/* Hrm. Why isn't this already conditional on something in the struct device? */
|
||||
#define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0)
|
||||
|
||||
/* Make it easier to switch to PIO if we need to */
|
||||
#define cafe_readl(cafe, addr) readl((cafe)->mmio + CAFE_##addr)
|
||||
#define cafe_writel(cafe, datum, addr) writel(datum, (cafe)->mmio + CAFE_##addr)
|
||||
|
||||
static int cafe_device_ready(struct mtd_info *mtd)
|
||||
{
|
||||
struct cafe_priv *cafe = mtd->priv;
|
||||
int result = !!(cafe_readl(cafe, NAND_STATUS) | 0x40000000);
|
||||
uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
|
||||
|
||||
cafe_writel(cafe, irqs, NAND_IRQ);
|
||||
|
||||
cafe_dev_dbg(&cafe->pdev->dev, "NAND device is%s ready, IRQ %x (%x) (%x,%x)\n",
|
||||
result?"":" not", irqs, cafe_readl(cafe, NAND_IRQ),
|
||||
cafe_readl(cafe, GLOBAL_IRQ), cafe_readl(cafe, GLOBAL_IRQ_MASK));
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
static void cafe_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
|
||||
{
|
||||
struct cafe_priv *cafe = mtd->priv;
|
||||
|
||||
if (usedma)
|
||||
memcpy(cafe->dmabuf + cafe->datalen, buf, len);
|
||||
else
|
||||
memcpy_toio(cafe->mmio + CAFE_NAND_WRITE_DATA + cafe->datalen, buf, len);
|
||||
|
||||
cafe->datalen += len;
|
||||
|
||||
cafe_dev_dbg(&cafe->pdev->dev, "Copy 0x%x bytes to write buffer. datalen 0x%x\n",
|
||||
len, cafe->datalen);
|
||||
}
|
||||
|
||||
static void cafe_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
|
||||
{
|
||||
struct cafe_priv *cafe = mtd->priv;
|
||||
|
||||
if (usedma)
|
||||
memcpy(buf, cafe->dmabuf + cafe->datalen, len);
|
||||
else
|
||||
memcpy_fromio(buf, cafe->mmio + CAFE_NAND_READ_DATA + cafe->datalen, len);
|
||||
|
||||
cafe_dev_dbg(&cafe->pdev->dev, "Copy 0x%x bytes from position 0x%x in read buffer.\n",
|
||||
len, cafe->datalen);
|
||||
cafe->datalen += len;
|
||||
}
|
||||
|
||||
static uint8_t cafe_read_byte(struct mtd_info *mtd)
|
||||
{
|
||||
struct cafe_priv *cafe = mtd->priv;
|
||||
uint8_t d;
|
||||
|
||||
cafe_read_buf(mtd, &d, 1);
|
||||
cafe_dev_dbg(&cafe->pdev->dev, "Read %02x\n", d);
|
||||
|
||||
return d;
|
||||
}
|
||||
|
||||
static void cafe_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
|
||||
int column, int page_addr)
|
||||
{
|
||||
struct cafe_priv *cafe = mtd->priv;
|
||||
int adrbytes = 0;
|
||||
uint32_t ctl1;
|
||||
uint32_t doneint = 0x80000000;
|
||||
|
||||
cafe_dev_dbg(&cafe->pdev->dev, "cmdfunc %02x, 0x%x, 0x%x\n",
|
||||
command, column, page_addr);
|
||||
|
||||
if (command == NAND_CMD_ERASE2 || command == NAND_CMD_PAGEPROG) {
|
||||
/* Second half of a command we already calculated */
|
||||
cafe_writel(cafe, cafe->ctl2 | 0x100 | command, NAND_CTRL2);
|
||||
ctl1 = cafe->ctl1;
|
||||
cafe->ctl2 &= ~(1<<30);
|
||||
cafe_dev_dbg(&cafe->pdev->dev, "Continue command, ctl1 %08x, #data %d\n",
|
||||
cafe->ctl1, cafe->nr_data);
|
||||
goto do_command;
|
||||
}
|
||||
/* Reset ECC engine */
|
||||
cafe_writel(cafe, 0, NAND_CTRL2);
|
||||
|
||||
/* Emulate NAND_CMD_READOOB on large-page chips */
|
||||
if (mtd->writesize > 512 &&
|
||||
command == NAND_CMD_READOOB) {
|
||||
column += mtd->writesize;
|
||||
command = NAND_CMD_READ0;
|
||||
}
|
||||
|
||||
/* FIXME: Do we need to send read command before sending data
|
||||
for small-page chips, to position the buffer correctly? */
|
||||
|
||||
if (column != -1) {
|
||||
cafe_writel(cafe, column, NAND_ADDR1);
|
||||
adrbytes = 2;
|
||||
if (page_addr != -1)
|
||||
goto write_adr2;
|
||||
} else if (page_addr != -1) {
|
||||
cafe_writel(cafe, page_addr & 0xffff, NAND_ADDR1);
|
||||
page_addr >>= 16;
|
||||
write_adr2:
|
||||
cafe_writel(cafe, page_addr, NAND_ADDR2);
|
||||
adrbytes += 2;
|
||||
if (mtd->size > mtd->writesize << 16)
|
||||
adrbytes++;
|
||||
}
|
||||
|
||||
cafe->data_pos = cafe->datalen = 0;
|
||||
|
||||
/* Set command valid bit */
|
||||
ctl1 = 0x80000000 | command;
|
||||
|
||||
/* Set RD or WR bits as appropriate */
|
||||
if (command == NAND_CMD_READID || command == NAND_CMD_STATUS) {
|
||||
ctl1 |= (1<<26); /* rd */
|
||||
/* Always 5 bytes, for now */
|
||||
cafe->datalen = 4;
|
||||
/* And one address cycle -- even for STATUS, since the controller doesn't work without */
|
||||
adrbytes = 1;
|
||||
} else if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 ||
|
||||
command == NAND_CMD_READOOB || command == NAND_CMD_RNDOUT) {
|
||||
ctl1 |= 1<<26; /* rd */
|
||||
/* For now, assume just read to end of page */
|
||||
cafe->datalen = mtd->writesize + mtd->oobsize - column;
|
||||
} else if (command == NAND_CMD_SEQIN)
|
||||
ctl1 |= 1<<25; /* wr */
|
||||
|
||||
/* Set number of address bytes */
|
||||
if (adrbytes)
|
||||
ctl1 |= ((adrbytes-1)|8) << 27;
|
||||
|
||||
if (command == NAND_CMD_SEQIN || command == NAND_CMD_ERASE1) {
|
||||
/* Ignore the first command of a pair; the hardware
|
||||
deals with them both at once, later */
|
||||
cafe->ctl1 = ctl1;
|
||||
cafe_dev_dbg(&cafe->pdev->dev, "Setup for delayed command, ctl1 %08x, dlen %x\n",
|
||||
cafe->ctl1, cafe->datalen);
|
||||
return;
|
||||
}
|
||||
/* RNDOUT and READ0 commands need a following byte */
|
||||
if (command == NAND_CMD_RNDOUT)
|
||||
cafe_writel(cafe, cafe->ctl2 | 0x100 | NAND_CMD_RNDOUTSTART, NAND_CTRL2);
|
||||
else if (command == NAND_CMD_READ0 && mtd->writesize > 512)
|
||||
cafe_writel(cafe, cafe->ctl2 | 0x100 | NAND_CMD_READSTART, NAND_CTRL2);
|
||||
|
||||
do_command:
|
||||
cafe_dev_dbg(&cafe->pdev->dev, "dlen %x, ctl1 %x, ctl2 %x\n",
|
||||
cafe->datalen, ctl1, cafe_readl(cafe, NAND_CTRL2));
|
||||
|
||||
/* NB: The datasheet lies -- we really should be subtracting 1 here */
|
||||
cafe_writel(cafe, cafe->datalen, NAND_DATA_LEN);
|
||||
cafe_writel(cafe, 0x90000000, NAND_IRQ);
|
||||
if (usedma && (ctl1 & (3<<25))) {
|
||||
uint32_t dmactl = 0xc0000000 + cafe->datalen;
|
||||
/* If WR or RD bits set, set up DMA */
|
||||
if (ctl1 & (1<<26)) {
|
||||
/* It's a read */
|
||||
dmactl |= (1<<29);
|
||||
/* ... so it's done when the DMA is done, not just
|
||||
the command. */
|
||||
doneint = 0x10000000;
|
||||
}
|
||||
cafe_writel(cafe, dmactl, NAND_DMA_CTRL);
|
||||
}
|
||||
cafe->datalen = 0;
|
||||
|
||||
if (unlikely(regdebug)) {
|
||||
int i;
|
||||
printk("About to write command %08x to register 0\n", ctl1);
|
||||
for (i=4; i< 0x5c; i+=4)
|
||||
printk("Register %x: %08x\n", i, readl(cafe->mmio + i));
|
||||
}
|
||||
|
||||
cafe_writel(cafe, ctl1, NAND_CTRL1);
|
||||
/* Apply this short delay always to ensure that we do wait tWB in
|
||||
* any case on any machine. */
|
||||
ndelay(100);
|
||||
|
||||
if (1) {
|
||||
int c = 500000;
|
||||
uint32_t irqs;
|
||||
|
||||
while (c--) {
|
||||
irqs = cafe_readl(cafe, NAND_IRQ);
|
||||
if (irqs & doneint)
|
||||
break;
|
||||
udelay(1);
|
||||
if (!(c % 100000))
|
||||
cafe_dev_dbg(&cafe->pdev->dev, "Wait for ready, IRQ %x\n", irqs);
|
||||
cpu_relax();
|
||||
}
|
||||
cafe_writel(cafe, doneint, NAND_IRQ);
|
||||
cafe_dev_dbg(&cafe->pdev->dev, "Command %x completed after %d usec, irqs %x (%x)\n",
|
||||
command, 500000-c, irqs, cafe_readl(cafe, NAND_IRQ));
|
||||
}
|
||||
|
||||
WARN_ON(cafe->ctl2 & (1<<30));
|
||||
|
||||
switch (command) {
|
||||
|
||||
case NAND_CMD_CACHEDPROG:
|
||||
case NAND_CMD_PAGEPROG:
|
||||
case NAND_CMD_ERASE1:
|
||||
case NAND_CMD_ERASE2:
|
||||
case NAND_CMD_SEQIN:
|
||||
case NAND_CMD_RNDIN:
|
||||
case NAND_CMD_STATUS:
|
||||
case NAND_CMD_DEPLETE1:
|
||||
case NAND_CMD_RNDOUT:
|
||||
case NAND_CMD_STATUS_ERROR:
|
||||
case NAND_CMD_STATUS_ERROR0:
|
||||
case NAND_CMD_STATUS_ERROR1:
|
||||
case NAND_CMD_STATUS_ERROR2:
|
||||
case NAND_CMD_STATUS_ERROR3:
|
||||
cafe_writel(cafe, cafe->ctl2, NAND_CTRL2);
|
||||
return;
|
||||
}
|
||||
nand_wait_ready(mtd);
|
||||
cafe_writel(cafe, cafe->ctl2, NAND_CTRL2);
|
||||
}
|
||||
|
||||
static void cafe_select_chip(struct mtd_info *mtd, int chipnr)
|
||||
{
|
||||
//struct cafe_priv *cafe = mtd->priv;
|
||||
// cafe_dev_dbg(&cafe->pdev->dev, "select_chip %d\n", chipnr);
|
||||
}
|
||||
|
||||
static int cafe_nand_interrupt(int irq, void *id)
|
||||
{
|
||||
struct mtd_info *mtd = id;
|
||||
struct cafe_priv *cafe = mtd->priv;
|
||||
uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
|
||||
cafe_writel(cafe, irqs & ~0x90000000, NAND_IRQ);
|
||||
if (!irqs)
|
||||
return IRQ_NONE;
|
||||
|
||||
cafe_dev_dbg(&cafe->pdev->dev, "irq, bits %x (%x)\n", irqs, cafe_readl(cafe, NAND_IRQ));
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static void cafe_nand_bug(struct mtd_info *mtd)
|
||||
{
|
||||
BUG();
|
||||
}
|
||||
|
||||
static int cafe_nand_write_oob(struct mtd_info *mtd,
|
||||
struct nand_chip *chip, int page)
|
||||
{
|
||||
int status = 0;
|
||||
|
||||
chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
|
||||
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
|
||||
chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
|
||||
status = chip->waitfunc(mtd, chip);
|
||||
|
||||
return status & NAND_STATUS_FAIL ? -EIO : 0;
|
||||
}
|
||||
|
||||
/* Don't use -- use nand_read_oob_std for now */
|
||||
static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
|
||||
int page, int sndcmd)
|
||||
{
|
||||
chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
|
||||
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
|
||||
return 1;
|
||||
}
|
||||
/**
|
||||
* cafe_nand_read_page_syndrome - [REPLACABLE] hardware ecc syndrom based page read
|
||||
* @mtd: mtd info structure
|
||||
* @chip: nand chip info structure
|
||||
* @buf: buffer to store read data
|
||||
*
|
||||
* The hw generator calculates the error syndrome automatically. Therefore
|
||||
* we need a special oob layout and handling.
|
||||
*/
|
||||
static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
|
||||
uint8_t *buf)
|
||||
{
|
||||
struct cafe_priv *cafe = mtd->priv;
|
||||
|
||||
cafe_dev_dbg(&cafe->pdev->dev, "ECC result %08x SYN1,2 %08x\n",
|
||||
cafe_readl(cafe, NAND_ECC_RESULT),
|
||||
cafe_readl(cafe, NAND_ECC_SYN01));
|
||||
|
||||
chip->read_buf(mtd, buf, mtd->writesize);
|
||||
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
|
||||
|
||||
if (checkecc && cafe_readl(cafe, NAND_ECC_RESULT) & (1<<18)) {
|
||||
unsigned short syn[8];
|
||||
int i;
|
||||
|
||||
for (i=0; i<8; i+=2) {
|
||||
uint32_t tmp = cafe_readl(cafe, NAND_ECC_SYN01 + (i*2));
|
||||
syn[i] = tmp & 0xfff;
|
||||
syn[i+1] = (tmp >> 16) & 0xfff;
|
||||
}
|
||||
|
||||
if ((i = cafe_correct_ecc(buf, syn)) < 0) {
|
||||
dev_dbg(&cafe->pdev->dev, "Failed to correct ECC at %08x\n",
|
||||
cafe_readl(cafe, NAND_ADDR2) * 2048);
|
||||
for (i=0; i< 0x5c; i+=4)
|
||||
printk("Register %x: %08x\n", i, readl(cafe->mmio + i));
|
||||
mtd->ecc_stats.failed++;
|
||||
} else {
|
||||
dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", i);
|
||||
mtd->ecc_stats.corrected += i;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct nand_ecclayout cafe_oobinfo_2048 = {
|
||||
.eccbytes = 14,
|
||||
.eccpos = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13},
|
||||
.oobfree = {{14, 50}}
|
||||
};
|
||||
|
||||
/* Ick. The BBT code really ought to be able to work this bit out
|
||||
for itself from the above, at least for the 2KiB case */
|
||||
static uint8_t cafe_bbt_pattern_2048[] = { 'B', 'b', 't', '0' };
|
||||
static uint8_t cafe_mirror_pattern_2048[] = { '1', 't', 'b', 'B' };
|
||||
|
||||
static uint8_t cafe_bbt_pattern_512[] = { 0xBB };
|
||||
static uint8_t cafe_mirror_pattern_512[] = { 0xBC };
|
||||
|
||||
|
||||
static struct nand_bbt_descr cafe_bbt_main_descr_2048 = {
|
||||
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
|
||||
| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
|
||||
.offs = 14,
|
||||
.len = 4,
|
||||
.veroffs = 18,
|
||||
.maxblocks = 4,
|
||||
.pattern = cafe_bbt_pattern_2048
|
||||
};
|
||||
|
||||
static struct nand_bbt_descr cafe_bbt_mirror_descr_2048 = {
|
||||
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
|
||||
| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
|
||||
.offs = 14,
|
||||
.len = 4,
|
||||
.veroffs = 18,
|
||||
.maxblocks = 4,
|
||||
.pattern = cafe_mirror_pattern_2048
|
||||
};
|
||||
|
||||
static struct nand_ecclayout cafe_oobinfo_512 = {
|
||||
.eccbytes = 14,
|
||||
.eccpos = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13},
|
||||
.oobfree = {{14, 2}}
|
||||
};
|
||||
|
||||
static struct nand_bbt_descr cafe_bbt_main_descr_512 = {
|
||||
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
|
||||
| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
|
||||
.offs = 14,
|
||||
.len = 1,
|
||||
.veroffs = 15,
|
||||
.maxblocks = 4,
|
||||
.pattern = cafe_bbt_pattern_512
|
||||
};
|
||||
|
||||
static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
|
||||
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
|
||||
| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
|
||||
.offs = 14,
|
||||
.len = 1,
|
||||
.veroffs = 15,
|
||||
.maxblocks = 4,
|
||||
.pattern = cafe_mirror_pattern_512
|
||||
};
|
||||
|
||||
|
||||
static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
|
||||
struct nand_chip *chip, const uint8_t *buf)
|
||||
{
|
||||
struct cafe_priv *cafe = mtd->priv;
|
||||
|
||||
chip->write_buf(mtd, buf, mtd->writesize);
|
||||
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
|
||||
|
||||
/* Set up ECC autogeneration */
|
||||
cafe->ctl2 |= (1<<30);
|
||||
}
|
||||
|
||||
static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
|
||||
const uint8_t *buf, int page, int cached, int raw)
|
||||
{
|
||||
int status;
|
||||
|
||||
chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
|
||||
|
||||
if (unlikely(raw))
|
||||
chip->ecc.write_page_raw(mtd, chip, buf);
|
||||
else
|
||||
chip->ecc.write_page(mtd, chip, buf);
|
||||
|
||||
/*
|
||||
* Cached programming disabled for now. Not sure if it's worth the
|
||||
* trouble. The speed gain is not very impressive. (2.3->2.6Mib/s)
|
||||
*/
|
||||
cached = 0;
|
||||
|
||||
if (!cached || !(chip->options & NAND_CACHEPRG)) {
|
||||
|
||||
chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
|
||||
status = chip->waitfunc(mtd, chip);
|
||||
/*
|
||||
* See if operation failed and additional status checks are
|
||||
* available
|
||||
*/
|
||||
if ((status & NAND_STATUS_FAIL) && (chip->errstat))
|
||||
status = chip->errstat(mtd, chip, FL_WRITING, status,
|
||||
page);
|
||||
|
||||
if (status & NAND_STATUS_FAIL)
|
||||
return -EIO;
|
||||
} else {
|
||||
chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
|
||||
status = chip->waitfunc(mtd, chip);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
|
||||
/* Send command to read back the data */
|
||||
chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
|
||||
|
||||
if (chip->verify_buf(mtd, buf, mtd->writesize))
|
||||
return -EIO;
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __devinit cafe_nand_probe(struct pci_dev *pdev,
|
||||
const struct pci_device_id *ent)
|
||||
{
|
||||
struct mtd_info *mtd;
|
||||
struct cafe_priv *cafe;
|
||||
uint32_t ctrl;
|
||||
int err = 0;
|
||||
|
||||
err = pci_enable_device(pdev);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
pci_set_master(pdev);
|
||||
|
||||
mtd = kzalloc(sizeof(*mtd) + sizeof(struct cafe_priv), GFP_KERNEL);
|
||||
if (!mtd) {
|
||||
dev_warn(&pdev->dev, "failed to alloc mtd_info\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
cafe = (void *)(&mtd[1]);
|
||||
|
||||
mtd->priv = cafe;
|
||||
mtd->owner = THIS_MODULE;
|
||||
|
||||
cafe->pdev = pdev;
|
||||
cafe->mmio = pci_iomap(pdev, 0, 0);
|
||||
if (!cafe->mmio) {
|
||||
dev_warn(&pdev->dev, "failed to iomap\n");
|
||||
err = -ENOMEM;
|
||||
goto out_free_mtd;
|
||||
}
|
||||
cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev, 2112 + sizeof(struct nand_buffers),
|
||||
&cafe->dmaaddr, GFP_KERNEL);
|
||||
if (!cafe->dmabuf) {
|
||||
err = -ENOMEM;
|
||||
goto out_ior;
|
||||
}
|
||||
cafe->nand.buffers = (void *)cafe->dmabuf + 2112;
|
||||
|
||||
cafe->nand.cmdfunc = cafe_nand_cmdfunc;
|
||||
cafe->nand.dev_ready = cafe_device_ready;
|
||||
cafe->nand.read_byte = cafe_read_byte;
|
||||
cafe->nand.read_buf = cafe_read_buf;
|
||||
cafe->nand.write_buf = cafe_write_buf;
|
||||
cafe->nand.select_chip = cafe_select_chip;
|
||||
|
||||
cafe->nand.chip_delay = 0;
|
||||
|
||||
/* Enable the following for a flash based bad block table */
|
||||
cafe->nand.options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR | NAND_OWN_BUFFERS;
|
||||
|
||||
if (skipbbt) {
|
||||
cafe->nand.options |= NAND_SKIP_BBTSCAN;
|
||||
cafe->nand.block_bad = cafe_nand_block_bad;
|
||||
}
|
||||
|
||||
/* Start off by resetting the NAND controller completely */
|
||||
cafe_writel(cafe, 1, NAND_RESET);
|
||||
cafe_writel(cafe, 0, NAND_RESET);
|
||||
|
||||
cafe_writel(cafe, 0xffffffff, NAND_IRQ_MASK);
|
||||
|
||||
/* Timings from Marvell's test code (not verified or calculated by us) */
|
||||
if (!slowtiming) {
|
||||
cafe_writel(cafe, 0x01010a0a, NAND_TIMING1);
|
||||
cafe_writel(cafe, 0x24121212, NAND_TIMING2);
|
||||
cafe_writel(cafe, 0x11000000, NAND_TIMING3);
|
||||
} else {
|
||||
cafe_writel(cafe, 0xffffffff, NAND_TIMING1);
|
||||
cafe_writel(cafe, 0xffffffff, NAND_TIMING2);
|
||||
cafe_writel(cafe, 0xffffffff, NAND_TIMING3);
|
||||
}
|
||||
cafe_writel(cafe, 0xffffffff, NAND_IRQ_MASK);
|
||||
err = request_irq(pdev->irq, &cafe_nand_interrupt, SA_SHIRQ, "CAFE NAND", mtd);
|
||||
if (err) {
|
||||
dev_warn(&pdev->dev, "Could not register IRQ %d\n", pdev->irq);
|
||||
|
||||
goto out_free_dma;
|
||||
}
|
||||
#if 1
|
||||
/* Disable master reset, enable NAND clock */
|
||||
ctrl = cafe_readl(cafe, GLOBAL_CTRL);
|
||||
ctrl &= 0xffffeff0;
|
||||
ctrl |= 0x00007000;
|
||||
cafe_writel(cafe, ctrl | 0x05, GLOBAL_CTRL);
|
||||
cafe_writel(cafe, ctrl | 0x0a, GLOBAL_CTRL);
|
||||
cafe_writel(cafe, 0, NAND_DMA_CTRL);
|
||||
|
||||
cafe_writel(cafe, 0x7006, GLOBAL_CTRL);
|
||||
cafe_writel(cafe, 0x700a, GLOBAL_CTRL);
|
||||
|
||||
/* Set up DMA address */
|
||||
cafe_writel(cafe, cafe->dmaaddr & 0xffffffff, NAND_DMA_ADDR0);
|
||||
if (sizeof(cafe->dmaaddr) > 4)
|
||||
/* Shift in two parts to shut the compiler up */
|
||||
cafe_writel(cafe, (cafe->dmaaddr >> 16) >> 16, NAND_DMA_ADDR1);
|
||||
else
|
||||
cafe_writel(cafe, 0, NAND_DMA_ADDR1);
|
||||
|
||||
cafe_dev_dbg(&cafe->pdev->dev, "Set DMA address to %x (virt %p)\n",
|
||||
cafe_readl(cafe, NAND_DMA_ADDR0), cafe->dmabuf);
|
||||
|
||||
/* Enable NAND IRQ in global IRQ mask register */
|
||||
cafe_writel(cafe, 0x80000007, GLOBAL_IRQ_MASK);
|
||||
cafe_dev_dbg(&cafe->pdev->dev, "Control %x, IRQ mask %x\n",
|
||||
cafe_readl(cafe, GLOBAL_CTRL), cafe_readl(cafe, GLOBAL_IRQ_MASK));
|
||||
#endif
|
||||
#if 1
|
||||
mtd->writesize=2048;
|
||||
mtd->oobsize = 0x40;
|
||||
memset(cafe->dmabuf, 0x5a, 2112);
|
||||
cafe->nand.cmdfunc(mtd, NAND_CMD_READID, 0, -1);
|
||||
cafe->nand.read_byte(mtd);
|
||||
cafe->nand.read_byte(mtd);
|
||||
cafe->nand.read_byte(mtd);
|
||||
cafe->nand.read_byte(mtd);
|
||||
cafe->nand.read_byte(mtd);
|
||||
#endif
|
||||
#if 0
|
||||
cafe->nand.cmdfunc(mtd, NAND_CMD_READ0, 0, 0);
|
||||
// nand_wait_ready(mtd);
|
||||
cafe->nand.read_byte(mtd);
|
||||
cafe->nand.read_byte(mtd);
|
||||
cafe->nand.read_byte(mtd);
|
||||
cafe->nand.read_byte(mtd);
|
||||
#endif
|
||||
#if 0
|
||||
writel(0x84600070, cafe->mmio);
|
||||
udelay(10);
|
||||
cafe_dev_dbg(&cafe->pdev->dev, "Status %x\n", cafe_readl(cafe, NAND_NONMEM));
|
||||
#endif
|
||||
/* Scan to find existence of the device */
|
||||
if (nand_scan_ident(mtd, 1)) {
|
||||
err = -ENXIO;
|
||||
goto out_irq;
|
||||
}
|
||||
|
||||
cafe->ctl2 = 1<<27; /* Reed-Solomon ECC */
|
||||
if (mtd->writesize == 2048)
|
||||
cafe->ctl2 |= 1<<29; /* 2KiB page size */
|
||||
|
||||
/* Set up ECC according to the type of chip we found */
|
||||
if (mtd->writesize == 2048) {
|
||||
cafe->nand.ecc.layout = &cafe_oobinfo_2048;
|
||||
cafe->nand.bbt_td = &cafe_bbt_main_descr_2048;
|
||||
cafe->nand.bbt_md = &cafe_bbt_mirror_descr_2048;
|
||||
} else if (mtd->writesize == 512) {
|
||||
cafe->nand.ecc.layout = &cafe_oobinfo_512;
|
||||
cafe->nand.bbt_td = &cafe_bbt_main_descr_512;
|
||||
cafe->nand.bbt_md = &cafe_bbt_mirror_descr_512;
|
||||
} else {
|
||||
printk(KERN_WARNING "Unexpected NAND flash writesize %d. Aborting\n",
|
||||
mtd->writesize);
|
||||
goto out_irq;
|
||||
}
|
||||
cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
|
||||
cafe->nand.ecc.size = mtd->writesize;
|
||||
cafe->nand.ecc.bytes = 14;
|
||||
cafe->nand.ecc.hwctl = (void *)cafe_nand_bug;
|
||||
cafe->nand.ecc.calculate = (void *)cafe_nand_bug;
|
||||
cafe->nand.ecc.correct = (void *)cafe_nand_bug;
|
||||
cafe->nand.write_page = cafe_nand_write_page;
|
||||
cafe->nand.ecc.write_page = cafe_nand_write_page_lowlevel;
|
||||
cafe->nand.ecc.write_oob = cafe_nand_write_oob;
|
||||
cafe->nand.ecc.read_page = cafe_nand_read_page;
|
||||
cafe->nand.ecc.read_oob = cafe_nand_read_oob;
|
||||
|
||||
err = nand_scan_tail(mtd);
|
||||
if (err)
|
||||
goto out_irq;
|
||||
|
||||
pci_set_drvdata(pdev, mtd);
|
||||
add_mtd_device(mtd);
|
||||
goto out;
|
||||
|
||||
out_irq:
|
||||
/* Disable NAND IRQ in global IRQ mask register */
|
||||
cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
|
||||
free_irq(pdev->irq, mtd);
|
||||
out_free_dma:
|
||||
dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
|
||||
out_ior:
|
||||
pci_iounmap(pdev, cafe->mmio);
|
||||
out_free_mtd:
|
||||
kfree(mtd);
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void __devexit cafe_nand_remove(struct pci_dev *pdev)
|
||||
{
|
||||
struct mtd_info *mtd = pci_get_drvdata(pdev);
|
||||
struct cafe_priv *cafe = mtd->priv;
|
||||
|
||||
del_mtd_device(mtd);
|
||||
/* Disable NAND IRQ in global IRQ mask register */
|
||||
cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
|
||||
free_irq(pdev->irq, mtd);
|
||||
nand_release(mtd);
|
||||
pci_iounmap(pdev, cafe->mmio);
|
||||
dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
|
||||
kfree(mtd);
|
||||
}
|
||||
|
||||
static struct pci_device_id cafe_nand_tbl[] = {
|
||||
{ 0x11ab, 0x4100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_MEMORY_FLASH << 8, 0xFFFF0 }
|
||||
};
|
||||
|
||||
MODULE_DEVICE_TABLE(pci, cafe_nand_tbl);
|
||||
|
||||
static struct pci_driver cafe_nand_pci_driver = {
|
||||
.name = "CAFÉ NAND",
|
||||
.id_table = cafe_nand_tbl,
|
||||
.probe = cafe_nand_probe,
|
||||
.remove = __devexit_p(cafe_nand_remove),
|
||||
#ifdef CONFIG_PMx
|
||||
.suspend = cafe_nand_suspend,
|
||||
.resume = cafe_nand_resume,
|
||||
#endif
|
||||
};
|
||||
|
||||
static int cafe_nand_init(void)
|
||||
{
|
||||
return pci_register_driver(&cafe_nand_pci_driver);
|
||||
}
|
||||
|
||||
static void cafe_nand_exit(void)
|
||||
{
|
||||
pci_unregister_driver(&cafe_nand_pci_driver);
|
||||
}
|
||||
module_init(cafe_nand_init);
|
||||
module_exit(cafe_nand_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
|
||||
MODULE_DESCRIPTION("NAND flash driver for OLPC CAFE chip");
|
||||
|
||||
/* Correct ECC for 2048 bytes of 0xff:
|
||||
41 a0 71 65 54 27 f3 93 ec a9 be ed 0b a1 */
|
||||
|
||||
/* dwmw2's B-test board, in case of completely screwing it:
|
||||
Bad eraseblock 2394 at 0x12b40000
|
||||
Bad eraseblock 2627 at 0x14860000
|
||||
Bad eraseblock 3349 at 0x1a2a0000
|
||||
*/
|
File diff suppressed because it is too large
|
@ -11,7 +11,7 @@
|
|||
* published by the Free Software Foundation.
|
||||
*
|
||||
* Overview:
|
||||
* This is a device driver for the NAND flash controller found on
|
||||
* This is a device driver for the NAND flash controller found on
|
||||
* the AMD CS5535/CS5536 companion chipsets for the Geode processor.
|
||||
*
|
||||
*/
|
||||
|
@ -303,7 +303,7 @@ static int __init cs553x_init(void)
|
|||
err = cs553x_init_one(i, !!(val & FLSH_MEM_IO), val & 0xFFFFFFFF);
|
||||
}
|
||||
|
||||
/* Register all devices together here. This means we can easily hack it to
|
||||
/* Register all devices together here. This means we can easily hack it to
|
||||
do mtdconcat etc. if we want to. */
|
||||
for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
|
||||
if (cs553x_mtd[i]) {
|
||||
|
|
|
@ -1635,13 +1635,12 @@ static int __init doc_probe(unsigned long physadr)
|
|||
|
||||
len = sizeof(struct mtd_info) +
|
||||
sizeof(struct nand_chip) + sizeof(struct doc_priv) + (2 * sizeof(struct nand_bbt_descr));
|
||||
mtd = kmalloc(len, GFP_KERNEL);
|
||||
mtd = kzalloc(len, GFP_KERNEL);
|
||||
if (!mtd) {
|
||||
printk(KERN_ERR "DiskOnChip kmalloc (%d bytes) failed!\n", len);
|
||||
ret = -ENOMEM;
|
||||
goto fail;
|
||||
}
|
||||
memset(mtd, 0, len);
|
||||
|
||||
nand = (struct nand_chip *) (mtd + 1);
|
||||
doc = (struct doc_priv *) (nand + 1);
|
||||
|
|
|
@ -362,7 +362,7 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
|
|||
* access
|
||||
*/
|
||||
ofs += mtd->oobsize;
|
||||
chip->ops.len = 2;
|
||||
chip->ops.len = chip->ops.ooblen = 2;
|
||||
chip->ops.datbuf = NULL;
|
||||
chip->ops.oobbuf = buf;
|
||||
chip->ops.ooboffs = chip->badblockpos & ~0x01;
|
||||
|
@ -755,7 +755,7 @@ static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
|
|||
}
|
||||
|
||||
/**
|
||||
* nand_read_page_swecc - {REPLACABLE] software ecc based page read function
|
||||
* nand_read_page_swecc - [REPLACABLE] software ecc based page read function
|
||||
* @mtd: mtd info structure
|
||||
* @chip: nand chip info structure
|
||||
* @buf: buffer to store read data
|
||||
|
@ -795,7 +795,7 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
|
|||
}
|
||||
|
||||
/**
|
||||
* nand_read_page_hwecc - {REPLACABLE] hardware ecc based page read function
|
||||
* nand_read_page_hwecc - [REPLACABLE] hardware ecc based page read function
|
||||
* @mtd: mtd info structure
|
||||
* @chip: nand chip info structure
|
||||
* @buf: buffer to store read data
|
||||
|
@ -839,7 +839,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
|
|||
}
|
||||
|
||||
/**
|
||||
* nand_read_page_syndrome - {REPLACABLE] hardware ecc syndrom based page read
|
||||
* nand_read_page_syndrome - [REPLACABLE] hardware ecc syndrom based page read
|
||||
* @mtd: mtd info structure
|
||||
* @chip: nand chip info structure
|
||||
* @buf: buffer to store read data
|
||||
|
@ -897,12 +897,11 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
|
|||
* @chip: nand chip structure
|
||||
* @oob: oob destination address
|
||||
* @ops: oob ops structure
|
||||
* @len: size of oob to transfer
|
||||
*/
|
||||
static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
|
||||
struct mtd_oob_ops *ops)
|
||||
struct mtd_oob_ops *ops, size_t len)
|
||||
{
|
||||
size_t len = ops->ooblen;
|
||||
|
||||
switch(ops->mode) {
|
||||
|
||||
case MTD_OOB_PLACE:
|
||||
|
@ -960,6 +959,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
|
|||
int sndcmd = 1;
|
||||
int ret = 0;
|
||||
uint32_t readlen = ops->len;
|
||||
uint32_t oobreadlen = ops->ooblen;
|
||||
uint8_t *bufpoi, *oob, *buf;
|
||||
|
||||
stats = mtd->ecc_stats;
|
||||
|
@ -971,7 +971,6 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
|
|||
page = realpage & chip->pagemask;
|
||||
|
||||
col = (int)(from & (mtd->writesize - 1));
|
||||
chip->oob_poi = chip->buffers->oobrbuf;
|
||||
|
||||
buf = ops->datbuf;
|
||||
oob = ops->oobbuf;
|
||||
|
@ -1007,10 +1006,17 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
|
|||
|
||||
if (unlikely(oob)) {
|
||||
/* Raw mode does data:oob:data:oob */
|
||||
if (ops->mode != MTD_OOB_RAW)
|
||||
oob = nand_transfer_oob(chip, oob, ops);
|
||||
else
|
||||
buf = nand_transfer_oob(chip, buf, ops);
|
||||
if (ops->mode != MTD_OOB_RAW) {
|
||||
int toread = min(oobreadlen,
|
||||
chip->ecc.layout->oobavail);
|
||||
if (toread) {
|
||||
oob = nand_transfer_oob(chip,
|
||||
oob, ops, toread);
|
||||
oobreadlen -= toread;
|
||||
}
|
||||
} else
|
||||
buf = nand_transfer_oob(chip,
|
||||
buf, ops, mtd->oobsize);
|
||||
}
|
||||
|
||||
if (!(chip->options & NAND_NO_READRDY)) {
|
||||
|
@ -1057,6 +1063,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
|
|||
}
|
||||
|
||||
ops->retlen = ops->len - (size_t) readlen;
|
||||
if (oob)
|
||||
ops->oobretlen = ops->ooblen - oobreadlen;
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -1257,12 +1265,18 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
|
|||
int page, realpage, chipnr, sndcmd = 1;
|
||||
struct nand_chip *chip = mtd->priv;
|
||||
int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
|
||||
int readlen = ops->len;
|
||||
int readlen = ops->ooblen;
|
||||
int len;
|
||||
uint8_t *buf = ops->oobbuf;
|
||||
|
||||
DEBUG(MTD_DEBUG_LEVEL3, "nand_read_oob: from = 0x%08Lx, len = %i\n",
|
||||
(unsigned long long)from, readlen);
|
||||
|
||||
if (ops->mode == MTD_OOB_RAW)
|
||||
len = mtd->oobsize;
|
||||
else
|
||||
len = chip->ecc.layout->oobavail;
|
||||
|
||||
chipnr = (int)(from >> chip->chip_shift);
|
||||
chip->select_chip(mtd, chipnr);
|
||||
|
||||
|
@ -1270,11 +1284,11 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
|
|||
realpage = (int)(from >> chip->page_shift);
|
||||
page = realpage & chip->pagemask;
|
||||
|
||||
chip->oob_poi = chip->buffers->oobrbuf;
|
||||
|
||||
while(1) {
|
||||
sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
|
||||
buf = nand_transfer_oob(chip, buf, ops);
|
||||
|
||||
len = min(len, readlen);
|
||||
buf = nand_transfer_oob(chip, buf, ops, len);
|
||||
|
||||
if (!(chip->options & NAND_NO_READRDY)) {
|
||||
/*
|
||||
|
@ -1289,7 +1303,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
|
|||
nand_wait_ready(mtd);
|
||||
}
|
||||
|
||||
readlen -= ops->ooblen;
|
||||
readlen -= len;
|
||||
if (!readlen)
|
||||
break;
|
||||
|
||||
|
@ -1311,7 +1325,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
|
|||
sndcmd = 1;
|
||||
}
|
||||
|
||||
ops->retlen = ops->len;
|
||||
ops->oobretlen = ops->ooblen;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1332,7 +1346,7 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
|
|||
ops->retlen = 0;
|
||||
|
||||
/* Do not allow reads past end of device */
|
||||
if ((from + ops->len) > mtd->size) {
|
||||
if (ops->datbuf && (from + ops->len) > mtd->size) {
|
||||
DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: "
|
||||
"Attempt read beyond end of device\n");
|
||||
return -EINVAL;
|
||||
|
@ -1375,7 +1389,7 @@ static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
|
|||
}
|
||||
|
||||
/**
|
||||
* nand_write_page_swecc - {REPLACABLE] software ecc based page write function
|
||||
* nand_write_page_swecc - [REPLACABLE] software ecc based page write function
|
||||
* @mtd: mtd info structure
|
||||
* @chip: nand chip info structure
|
||||
* @buf: data buffer
|
||||
|
@ -1401,7 +1415,7 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
|
|||
}
|
||||
|
||||
/**
|
||||
* nand_write_page_hwecc - {REPLACABLE] hardware ecc based page write function
|
||||
* nand_write_page_hwecc - [REPLACABLE] hardware ecc based page write function
|
||||
* @mtd: mtd info structure
|
||||
* @chip: nand chip info structure
|
||||
* @buf: data buffer
|
||||
|
@ -1429,7 +1443,7 @@ static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
|
|||
}
|
||||
|
||||
/**
|
||||
* nand_write_page_syndrome - {REPLACABLE] hardware ecc syndrom based page write
|
||||
* nand_write_page_syndrome - [REPLACABLE] hardware ecc syndrom based page write
|
||||
* @mtd: mtd info structure
|
||||
* @chip: nand chip info structure
|
||||
* @buf: data buffer
|
||||
|
@ -1577,7 +1591,7 @@ static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
#define NOTALIGNED(x) (x & (mtd->writesize-1)) != 0
|
||||
#define NOTALIGNED(x) (x & (chip->subpagesize - 1)) != 0
|
||||
|
||||
/**
|
||||
* nand_do_write_ops - [Internal] NAND write with ECC
|
||||
|
@ -1590,15 +1604,16 @@ static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob,
|
|||
static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
|
||||
struct mtd_oob_ops *ops)
|
||||
{
|
||||
int chipnr, realpage, page, blockmask;
|
||||
int chipnr, realpage, page, blockmask, column;
|
||||
struct nand_chip *chip = mtd->priv;
|
||||
uint32_t writelen = ops->len;
|
||||
uint8_t *oob = ops->oobbuf;
|
||||
uint8_t *buf = ops->datbuf;
|
||||
int bytes = mtd->writesize;
|
||||
int ret;
|
||||
int ret, subpage;
|
||||
|
||||
ops->retlen = 0;
|
||||
if (!writelen)
|
||||
return 0;
|
||||
|
||||
/* reject writes, which are not page aligned */
|
||||
if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
|
||||
|
@ -1607,8 +1622,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!writelen)
|
||||
return 0;
|
||||
column = to & (mtd->writesize - 1);
|
||||
subpage = column || (writelen & (mtd->writesize - 1));
|
||||
|
||||
if (subpage && oob)
|
||||
return -EINVAL;
|
||||
|
||||
chipnr = (int)(to >> chip->chip_shift);
|
||||
chip->select_chip(mtd, chipnr);
|
||||
|
@ -1626,15 +1644,29 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
|
|||
(chip->pagebuf << chip->page_shift) < (to + ops->len))
|
||||
chip->pagebuf = -1;
|
||||
|
||||
chip->oob_poi = chip->buffers->oobwbuf;
|
||||
/* If we're not given explicit OOB data, let it be 0xFF */
|
||||
if (likely(!oob))
|
||||
memset(chip->oob_poi, 0xff, mtd->oobsize);
|
||||
|
||||
while(1) {
|
||||
int bytes = mtd->writesize;
|
||||
int cached = writelen > bytes && page != blockmask;
|
||||
uint8_t *wbuf = buf;
|
||||
|
||||
/* Partial page write ? */
|
||||
if (unlikely(column || writelen < (mtd->writesize - 1))) {
|
||||
cached = 0;
|
||||
bytes = min_t(int, bytes - column, (int) writelen);
|
||||
chip->pagebuf = -1;
|
||||
memset(chip->buffers->databuf, 0xff, mtd->writesize);
|
||||
memcpy(&chip->buffers->databuf[column], buf, bytes);
|
||||
wbuf = chip->buffers->databuf;
|
||||
}
|
||||
|
||||
if (unlikely(oob))
|
||||
oob = nand_fill_oob(chip, oob, ops);
|
||||
|
||||
ret = chip->write_page(mtd, chip, buf, page, cached,
|
||||
ret = chip->write_page(mtd, chip, wbuf, page, cached,
|
||||
(ops->mode == MTD_OOB_RAW));
|
||||
if (ret)
|
||||
break;
|
||||
|
@ -1643,6 +1675,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
|
|||
if (!writelen)
|
||||
break;
|
||||
|
||||
column = 0;
|
||||
buf += bytes;
|
||||
realpage++;
|
||||
|
||||
|
@ -1655,10 +1688,9 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
|
|||
}
|
||||
}
|
||||
|
||||
if (unlikely(oob))
|
||||
memset(chip->oob_poi, 0xff, mtd->oobsize);
|
||||
|
||||
ops->retlen = ops->len - writelen;
|
||||
if (unlikely(oob))
|
||||
ops->oobretlen = ops->ooblen;
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -1714,10 +1746,10 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
|
|||
struct nand_chip *chip = mtd->priv;
|
||||
|
||||
DEBUG(MTD_DEBUG_LEVEL3, "nand_write_oob: to = 0x%08x, len = %i\n",
|
||||
(unsigned int)to, (int)ops->len);
|
||||
(unsigned int)to, (int)ops->ooblen);
|
||||
|
||||
/* Do not allow write past end of page */
|
||||
if ((ops->ooboffs + ops->len) > mtd->oobsize) {
|
||||
if ((ops->ooboffs + ops->ooblen) > mtd->oobsize) {
|
||||
DEBUG(MTD_DEBUG_LEVEL0, "nand_write_oob: "
|
||||
"Attempt to write past end of page\n");
|
||||
return -EINVAL;
|
||||
|
@ -1745,7 +1777,6 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
|
|||
if (page == chip->pagebuf)
|
||||
chip->pagebuf = -1;
|
||||
|
||||
chip->oob_poi = chip->buffers->oobwbuf;
|
||||
memset(chip->oob_poi, 0xff, mtd->oobsize);
|
||||
nand_fill_oob(chip, ops->oobbuf, ops);
|
||||
status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
|
||||
|
@ -1754,7 +1785,7 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
|
|||
if (status)
|
||||
return status;
|
||||
|
||||
ops->retlen = ops->len;
|
||||
ops->oobretlen = ops->ooblen;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1774,7 +1805,7 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
|
|||
ops->retlen = 0;
|
||||
|
||||
/* Do not allow writes past end of device */
|
||||
if ((to + ops->len) > mtd->size) {
|
||||
if (ops->datbuf && (to + ops->len) > mtd->size) {
|
||||
DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: "
|
||||
"Attempt read beyond end of device\n");
|
||||
return -EINVAL;
|
||||
|
@ -2188,8 +2219,8 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
|
|||
/* Newer devices have all the information in additional id bytes */
|
||||
if (!type->pagesize) {
|
||||
int extid;
|
||||
/* The 3rd id byte contains non relevant data ATM */
|
||||
extid = chip->read_byte(mtd);
|
||||
/* The 3rd id byte holds MLC / multichip data */
|
||||
chip->cellinfo = chip->read_byte(mtd);
|
||||
/* The 4th id byte is the important one */
|
||||
extid = chip->read_byte(mtd);
|
||||
/* Calc pagesize */
|
||||
|
@ -2349,8 +2380,8 @@ int nand_scan_tail(struct mtd_info *mtd)
|
|||
if (!chip->buffers)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Preset the internal oob write buffer */
|
||||
memset(chip->buffers->oobwbuf, 0xff, mtd->oobsize);
|
||||
/* Set the internal oob buffer location, just after the page data */
|
||||
chip->oob_poi = chip->buffers->databuf + mtd->writesize;
|
||||
|
||||
/*
|
||||
* If no default placement scheme is given, select an appropriate one
|
||||
|
@ -2469,6 +2500,24 @@ int nand_scan_tail(struct mtd_info *mtd)
|
|||
}
|
||||
chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
|
||||
|
||||
/*
|
||||
* Allow subpage writes up to ecc.steps. Not possible for MLC
|
||||
* FLASH.
|
||||
*/
|
||||
if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
|
||||
!(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
|
||||
switch(chip->ecc.steps) {
|
||||
case 2:
|
||||
mtd->subpage_sft = 1;
|
||||
break;
|
||||
case 4:
|
||||
case 8:
|
||||
mtd->subpage_sft = 2;
|
||||
break;
|
||||
}
|
||||
}
|
||||
chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
|
||||
|
||||
/* Initialize state */
|
||||
chip->state = FL_READY;
|
||||
|
||||
|
|
|
@@ -333,7 +333,6 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
struct mtd_oob_ops ops;
int j, ret;

ops.len = mtd->oobsize;
ops.ooblen = mtd->oobsize;
ops.oobbuf = buf;
ops.ooboffs = 0;
@@ -676,10 +675,10 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
"bad block table\n");
}
/* Read oob data */
ops.len = (len >> this->page_shift) * mtd->oobsize;
ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
ops.oobbuf = &buf[len];
res = mtd->read_oob(mtd, to + mtd->writesize, &ops);
if (res < 0 || ops.retlen != ops.len)
if (res < 0 || ops.oobretlen != ops.ooblen)
goto outerr;

/* Calc the byte offset in the buffer */
@@ -961,14 +960,12 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
struct nand_bbt_descr *md = this->bbt_md;

len = mtd->size >> (this->bbt_erase_shift + 2);
/* Allocate memory (2bit per block) */
this->bbt = kmalloc(len, GFP_KERNEL);
/* Allocate memory (2bit per block) and clear the memory bad block table */
this->bbt = kzalloc(len, GFP_KERNEL);
if (!this->bbt) {
printk(KERN_ERR "nand_scan_bbt: Out of memory\n");
return -ENOMEM;
}
/* Clear the memory bad block table */
memset(this->bbt, 0x00, len);

/* If no primary table decriptor is given, scan the device
* to build a memory based bad block table

@@ -112,7 +112,7 @@ int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
tmp2 |= (reg2 & 0x01) << 0; /* B7 -> B0 */

/* Calculate final ECC code */
#ifdef CONFIG_NAND_ECC_SMC
#ifdef CONFIG_MTD_NAND_ECC_SMC
ecc_code[0] = ~tmp2;
ecc_code[1] = ~tmp1;
#else
@@ -148,7 +148,7 @@ int nand_correct_data(struct mtd_info *mtd, u_char *dat,
{
uint8_t s0, s1, s2;

#ifdef CONFIG_NAND_ECC_SMC
#ifdef CONFIG_MTD_NAND_ECC_SMC
s0 = calc_ecc[0] ^ read_ecc[0];
s1 = calc_ecc[1] ^ read_ecc[1];
s2 = calc_ecc[2] ^ read_ecc[2];

@@ -37,10 +37,6 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/delay.h>
#ifdef CONFIG_NS_ABS_POS
#include <asm/io.h>
#endif


/* Default simulator parameters values */
#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
@@ -164,7 +160,7 @@ MODULE_PARM_DESC(dbg, "Output debug information if not zero");
/* After a command is input, the simulator goes to one of the following states */
#define STATE_CMD_READ0 0x00000001 /* read data from the beginning of page */
#define STATE_CMD_READ1 0x00000002 /* read data from the second half of page */
#define STATE_CMD_READSTART 0x00000003 /* read data second command (large page devices) */
#define STATE_CMD_READSTART 0x00000003 /* read data second command (large page devices) */
#define STATE_CMD_PAGEPROG 0x00000004 /* start page programm */
#define STATE_CMD_READOOB 0x00000005 /* read OOB area */
#define STATE_CMD_ERASE1 0x00000006 /* sector erase first command */
@@ -230,6 +226,14 @@ MODULE_PARM_DESC(dbg, "Output debug information if not zero");
*/
#define NS_MAX_PREVSTATES 1

/*
* A union to represent flash memory contents and flash buffer.
*/
union ns_mem {
u_char *byte; /* for byte access */
uint16_t *word; /* for 16-bit word access */
};

/*
* The structure which describes all the internal simulator data.
*/
@@ -247,17 +251,11 @@ struct nandsim {
uint16_t npstates; /* number of previous states saved */
uint16_t stateidx; /* current state index */

/* The simulated NAND flash image */
union flash_media {
u_char *byte;
uint16_t *word;
} mem;
/* The simulated NAND flash pages array */
union ns_mem *pages;

/* Internal buffer of page + OOB size bytes */
union internal_buffer {
u_char *byte; /* for byte access */
uint16_t *word; /* for 16-bit word access */
} buf;
union ns_mem buf;

/* NAND flash "geometry" */
struct nandsin_geometry {
@@ -345,13 +343,50 @@ static struct mtd_info *nsmtd;

static u_char ns_verify_buf[NS_LARGEST_PAGE_SIZE];

/*
* Allocate array of page pointers and initialize the array to NULL
* pointers.
*
* RETURNS: 0 if success, -ENOMEM if memory alloc fails.
*/
static int alloc_device(struct nandsim *ns)
{
int i;

ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem));
if (!ns->pages) {
NS_ERR("alloc_map: unable to allocate page array\n");
return -ENOMEM;
}
for (i = 0; i < ns->geom.pgnum; i++) {
ns->pages[i].byte = NULL;
}

return 0;
}

/*
* Free any allocated pages, and free the array of page pointers.
*/
static void free_device(struct nandsim *ns)
{
int i;

if (ns->pages) {
for (i = 0; i < ns->geom.pgnum; i++) {
if (ns->pages[i].byte)
kfree(ns->pages[i].byte);
}
vfree(ns->pages);
}
}

/*
* Initialize the nandsim structure.
*
* RETURNS: 0 if success, -ERRNO if failure.
*/
static int
init_nandsim(struct mtd_info *mtd)
static int init_nandsim(struct mtd_info *mtd)
{
struct nand_chip *chip = (struct nand_chip *)mtd->priv;
struct nandsim *ns = (struct nandsim *)(chip->priv);
@@ -405,7 +440,7 @@ init_nandsim(struct mtd_info *mtd)
}
} else {
if (ns->geom.totsz <= (128 << 20)) {
ns->geom.pgaddrbytes = 5;
ns->geom.pgaddrbytes = 4;
ns->geom.secaddrbytes = 2;
} else {
ns->geom.pgaddrbytes = 5;
@@ -439,23 +474,8 @@ init_nandsim(struct mtd_info *mtd)
printk("sector address bytes: %u\n", ns->geom.secaddrbytes);
printk("options: %#x\n", ns->options);

/* Map / allocate and initialize the flash image */
#ifdef CONFIG_NS_ABS_POS
ns->mem.byte = ioremap(CONFIG_NS_ABS_POS, ns->geom.totszoob);
if (!ns->mem.byte) {
NS_ERR("init_nandsim: failed to map the NAND flash image at address %p\n",
(void *)CONFIG_NS_ABS_POS);
return -ENOMEM;
}
#else
ns->mem.byte = vmalloc(ns->geom.totszoob);
if (!ns->mem.byte) {
NS_ERR("init_nandsim: unable to allocate %u bytes for flash image\n",
ns->geom.totszoob);
return -ENOMEM;
}
memset(ns->mem.byte, 0xFF, ns->geom.totszoob);
#endif
if (alloc_device(ns) != 0)
goto error;

/* Allocate / initialize the internal buffer */
ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
@@ -474,11 +494,7 @@ init_nandsim(struct mtd_info *mtd)
return 0;

error:
#ifdef CONFIG_NS_ABS_POS
iounmap(ns->mem.byte);
#else
vfree(ns->mem.byte);
#endif
free_device(ns);

return -ENOMEM;
}
@@ -486,16 +502,10 @@ error:
/*
* Free the nandsim structure.
*/
static void
free_nandsim(struct nandsim *ns)
static void free_nandsim(struct nandsim *ns)
{
kfree(ns->buf.byte);

#ifdef CONFIG_NS_ABS_POS
iounmap(ns->mem.byte);
#else
vfree(ns->mem.byte);
#endif
free_device(ns);

return;
}
@@ -503,8 +513,7 @@ free_nandsim(struct nandsim *ns)
/*
* Returns the string representation of 'state' state.
*/
static char *
get_state_name(uint32_t state)
static char *get_state_name(uint32_t state)
{
switch (NS_STATE(state)) {
case STATE_CMD_READ0:
@@ -562,8 +571,7 @@ get_state_name(uint32_t state)
*
* RETURNS: 1 if wrong command, 0 if right.
*/
static int
check_command(int cmd)
static int check_command(int cmd)
{
switch (cmd) {

@@ -589,8 +597,7 @@ check_command(int cmd)
/*
* Returns state after command is accepted by command number.
*/
static uint32_t
get_state_by_command(unsigned command)
static uint32_t get_state_by_command(unsigned command)
{
switch (command) {
case NAND_CMD_READ0:
@@ -626,8 +633,7 @@ get_state_by_command(unsigned command)
/*
* Move an address byte to the correspondent internal register.
*/
static inline void
accept_addr_byte(struct nandsim *ns, u_char bt)
static inline void accept_addr_byte(struct nandsim *ns, u_char bt)
{
uint byte = (uint)bt;

@@ -645,8 +651,7 @@ accept_addr_byte(struct nandsim *ns, u_char bt)
/*
* Switch to STATE_READY state.
*/
static inline void
switch_to_ready_state(struct nandsim *ns, u_char status)
static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
{
NS_DBG("switch_to_ready_state: switch to %s state\n", get_state_name(STATE_READY));

@@ -705,8 +710,7 @@ switch_to_ready_state(struct nandsim *ns, u_char status)
* -1 - several matches.
* 0 - operation is found.
*/
static int
find_operation(struct nandsim *ns, uint32_t flag)
static int find_operation(struct nandsim *ns, uint32_t flag)
{
int opsfound = 0;
int i, j, idx = 0;
@@ -790,15 +794,94 @@ find_operation(struct nandsim *ns, uint32_t flag)
return -1;
}

/*
* Returns a pointer to the current page.
*/
static inline union ns_mem *NS_GET_PAGE(struct nandsim *ns)
{
return &(ns->pages[ns->regs.row]);
}

/*
* Retuns a pointer to the current byte, within the current page.
*/
static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
{
return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
}

/*
* Fill the NAND buffer with data read from the specified page.
*/
static void read_page(struct nandsim *ns, int num)
{
union ns_mem *mypage;

mypage = NS_GET_PAGE(ns);
if (mypage->byte == NULL) {
NS_DBG("read_page: page %d not allocated\n", ns->regs.row);
memset(ns->buf.byte, 0xFF, num);
} else {
NS_DBG("read_page: page %d allocated, reading from %d\n",
ns->regs.row, ns->regs.column + ns->regs.off);
memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
}
}

/*
* Erase all pages in the specified sector.
*/
static void erase_sector(struct nandsim *ns)
{
union ns_mem *mypage;
int i;

mypage = NS_GET_PAGE(ns);
for (i = 0; i < ns->geom.pgsec; i++) {
if (mypage->byte != NULL) {
NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i);
kfree(mypage->byte);
mypage->byte = NULL;
}
mypage++;
}
}

/*
* Program the specified page with the contents from the NAND buffer.
*/
static int prog_page(struct nandsim *ns, int num)
{
int i;
union ns_mem *mypage;
u_char *pg_off;

mypage = NS_GET_PAGE(ns);
if (mypage->byte == NULL) {
NS_DBG("prog_page: allocating page %d\n", ns->regs.row);
mypage->byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
if (mypage->byte == NULL) {
NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row);
return -1;
}
memset(mypage->byte, 0xFF, ns->geom.pgszoob);
}

pg_off = NS_PAGE_BYTE_OFF(ns);
for (i = 0; i < num; i++)
pg_off[i] &= ns->buf.byte[i];

return 0;
}

/*
* If state has any action bit, perform this action.
*
* RETURNS: 0 if success, -1 if error.
*/
static int
do_state_action(struct nandsim *ns, uint32_t action)
static int do_state_action(struct nandsim *ns, uint32_t action)
{
int i, num;
int num;
int busdiv = ns->busw == 8 ? 1 : 2;

action &= ACTION_MASK;
@@ -822,7 +905,7 @@ do_state_action(struct nandsim *ns, uint32_t action)
break;
}
num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
memcpy(ns->buf.byte, ns->mem.byte + NS_RAW_OFFSET(ns) + ns->regs.off, num);
read_page(ns, num);

NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n",
num, NS_RAW_OFFSET(ns) + ns->regs.off);
@@ -863,7 +946,7 @@ do_state_action(struct nandsim *ns, uint32_t action)
ns->regs.row, NS_RAW_OFFSET(ns));
NS_LOG("erase sector %d\n", ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift));

memset(ns->mem.byte + NS_RAW_OFFSET(ns), 0xFF, ns->geom.secszoob);
erase_sector(ns);

NS_MDELAY(erase_delay);

@@ -886,8 +969,8 @@ do_state_action(struct nandsim *ns, uint32_t action)
return -1;
}

for (i = 0; i < num; i++)
ns->mem.byte[NS_RAW_OFFSET(ns) + ns->regs.off + i] &= ns->buf.byte[i];
if (prog_page(ns, num) == -1)
return -1;

NS_DBG("do_state_action: copy %d bytes from int buf to (%#x, %#x), raw off = %d\n",
num, ns->regs.row, ns->regs.column, NS_RAW_OFFSET(ns) + ns->regs.off);
@@ -928,8 +1011,7 @@ do_state_action(struct nandsim *ns, uint32_t action)
/*
* Switch simulator's state.
*/
static void
switch_state(struct nandsim *ns)
static void switch_state(struct nandsim *ns)
{
if (ns->op) {
/*
@@ -1070,8 +1152,7 @@ switch_state(struct nandsim *ns)
}
}

static u_char
ns_nand_read_byte(struct mtd_info *mtd)
static u_char ns_nand_read_byte(struct mtd_info *mtd)
{
struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
u_char outb = 0x00;
@@ -1144,8 +1225,7 @@ ns_nand_read_byte(struct mtd_info *mtd)
return outb;
}

static void
ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
{
struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;

@@ -1308,15 +1388,13 @@ static void ns_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int bitmask)
ns_nand_write_byte(mtd, cmd);
}

static int
ns_device_ready(struct mtd_info *mtd)
static int ns_device_ready(struct mtd_info *mtd)
{
NS_DBG("device_ready\n");
return 1;
}

static uint16_t
ns_nand_read_word(struct mtd_info *mtd)
static uint16_t ns_nand_read_word(struct mtd_info *mtd)
{
struct nand_chip *chip = (struct nand_chip *)mtd->priv;

@@ -1325,8 +1403,7 @@ ns_nand_read_word(struct mtd_info *mtd)
return chip->read_byte(mtd) | (chip->read_byte(mtd) << 8);
}

static void
ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
static void ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;

@@ -1353,8 +1430,7 @@ ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
}
}

static void
ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
{
struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;

@@ -1407,8 +1483,7 @@ ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
return;
}

static int
ns_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
static int ns_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
ns_nand_read_buf(mtd, (u_char *)&ns_verify_buf[0], len);

@@ -1436,14 +1511,12 @@ static int __init ns_init_module(void)
}

/* Allocate and initialize mtd_info, nand_chip and nandsim structures */
nsmtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip)
nsmtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip)
+ sizeof(struct nandsim), GFP_KERNEL);
if (!nsmtd) {
NS_ERR("unable to allocate core structures.\n");
return -ENOMEM;
}
memset(nsmtd, 0, sizeof(struct mtd_info) + sizeof(struct nand_chip) +
sizeof(struct nandsim));
chip = (struct nand_chip *)(nsmtd + 1);
nsmtd->priv = (void *)chip;
nand = (struct nandsim *)(chip + 1);

@@ -56,7 +56,7 @@ static void ndfc_select_chip(struct mtd_info *mtd, int chip)
ccr |= NDFC_CCR_BS(chip + pchip->chip_offset);
} else
ccr |= NDFC_CCR_RESET_CE;
writel(ccr, ndfc->ndfcbase + NDFC_CCR);
__raw_writel(ccr, ndfc->ndfcbase + NDFC_CCR);
}

static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)

@@ -24,6 +24,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rslib.h>
#include <linux/bitrev.h>
#include <linux/module.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/mtd.h>
@@ -152,47 +153,6 @@ static struct nand_ecclayout rtc_from4_nand_oobinfo = {
.oobfree = {{32, 32}}
};

/* Aargh. I missed the reversed bit order, when I
* was talking to Renesas about the FPGA.
*
* The table is used for bit reordering and inversion
* of the ecc byte which we get from the FPGA
*/
static uint8_t revbits[256] = {
0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
};

#endif

/*
@@ -397,7 +357,7 @@ static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_cha
/* Read the syndrom pattern from the FPGA and correct the bitorder */
rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC);
for (i = 0; i < 8; i++) {
ecc[i] = revbits[(*rs_ecc) & 0xFF];
ecc[i] = bitrev8(*rs_ecc);
rs_ecc++;
}

@@ -496,7 +456,7 @@ static int rtc_from4_errstat(struct mtd_info *mtd, struct nand_chip *this,
rtn = nand_do_read(mtd, page, len, &retlen, buf);

/* if read failed or > 1-bit error corrected */
if (rtn || (mtd->ecc_stats.corrected - corrected) > 1) {
if (rtn || (mtd->ecc_stats.corrected - corrected) > 1)
er_stat |= 1 << 1;
kfree(buf);
}

@@ -283,7 +283,7 @@ static void s3c2410_nand_hwcontrol(struct mtd_info *mtd, int cmd,
unsigned int ctrl)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);


if (cmd == NAND_CMD_NONE)
return;

@@ -57,17 +57,16 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)

DEBUG(MTD_DEBUG_LEVEL1, "NFTL: add_mtd for %s\n", mtd->name);

nftl = kmalloc(sizeof(struct NFTLrecord), GFP_KERNEL);
nftl = kzalloc(sizeof(struct NFTLrecord), GFP_KERNEL);

if (!nftl) {
printk(KERN_WARNING "NFTL: out of memory for data structures\n");
return;
}
memset(nftl, 0, sizeof(*nftl));

nftl->mbd.mtd = mtd;
nftl->mbd.devnum = -1;
nftl->mbd.blksize = 512;

nftl->mbd.tr = tr;

if (NFTL_mount(nftl) < 0) {
@@ -147,10 +146,9 @@ int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
ops.ooblen = len;
ops.oobbuf = buf;
ops.datbuf = NULL;
ops.len = len;

res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
*retlen = ops.retlen;
*retlen = ops.oobretlen;
return res;
}

@@ -168,10 +166,9 @@ int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
ops.ooblen = len;
ops.oobbuf = buf;
ops.datbuf = NULL;
ops.len = len;

res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
*retlen = ops.retlen;
*retlen = ops.oobretlen;
return res;
}

@@ -797,6 +794,7 @@ static struct mtd_blktrans_ops nftl_tr = {
.name = "nftl",
.major = NFTL_MAJOR,
.part_bits = NFTL_PARTN_BITS,
.blksize = 512,
.getgeo = nftl_getgeo,
.readsect = nftl_readblock,
#ifdef CONFIG_NFTL_RW

@@ -45,12 +45,10 @@ static int __devinit generic_onenand_probe(struct device *dev)
unsigned long size = res->end - res->start + 1;
int err;

info = kmalloc(sizeof(struct onenand_info), GFP_KERNEL);
info = kzalloc(sizeof(struct onenand_info), GFP_KERNEL);
if (!info)
return -ENOMEM;

memset(info, 0, sizeof(struct onenand_info));

if (!request_mem_region(res->start, size, dev->driver->name)) {
err = -EBUSY;
goto out_free_info;
@@ -63,6 +61,7 @@ static int __devinit generic_onenand_probe(struct device *dev)
}

info->onenand.mmcontrol = pdata->mmcontrol;
info->onenand.irq = platform_get_irq(pdev, 0);

info->mtd.name = pdev->dev.bus_id;
info->mtd.priv = &info->onenand;

@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
@@ -191,8 +192,6 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
struct onenand_chip *this = mtd->priv;
int value, readcmd = 0, block_cmd = 0;
int block, page;
/* Now we use page size operation */
int sectors = 4, count = 4;

/* Address translation */
switch (cmd) {
@@ -244,6 +243,8 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
}

if (page != -1) {
/* Now we use page size operation */
int sectors = 4, count = 4;
int dataram;

switch (cmd) {
@@ -297,7 +298,7 @@ static int onenand_wait(struct mtd_info *mtd, int state)
unsigned long timeout;
unsigned int flags = ONENAND_INT_MASTER;
unsigned int interrupt = 0;
unsigned int ctrl, ecc;
unsigned int ctrl;

/* The 20 msec is enough */
timeout = jiffies + msecs_to_jiffies(20);
@@ -309,7 +310,6 @@ static int onenand_wait(struct mtd_info *mtd, int state)

if (state != FL_READING)
cond_resched();
touch_softlockup_watchdog();
}
/* To get correct interrupt status in timeout case */
interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
@@ -317,28 +317,126 @@ static int onenand_wait(struct mtd_info *mtd, int state)
ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);

if (ctrl & ONENAND_CTRL_ERROR) {
/* It maybe occur at initial bad block */
DEBUG(MTD_DEBUG_LEVEL0, "onenand_wait: controller error = 0x%04x\n", ctrl);
/* Clear other interrupt bits for preventing ECC error */
interrupt &= ONENAND_INT_MASTER;
}

if (ctrl & ONENAND_CTRL_LOCK) {
DEBUG(MTD_DEBUG_LEVEL0, "onenand_wait: it's locked error = 0x%04x\n", ctrl);
return -EACCES;
if (ctrl & ONENAND_CTRL_LOCK)
DEBUG(MTD_DEBUG_LEVEL0, "onenand_wait: it's locked error.\n");
return ctrl;
}

if (interrupt & ONENAND_INT_READ) {
ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS);
if (ecc & ONENAND_ECC_2BIT_ALL) {
int ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS);
if (ecc) {
DEBUG(MTD_DEBUG_LEVEL0, "onenand_wait: ECC error = 0x%04x\n", ecc);
return -EBADMSG;
if (ecc & ONENAND_ECC_2BIT_ALL) {
mtd->ecc_stats.failed++;
return ecc;
} else if (ecc & ONENAND_ECC_1BIT_ALL)
mtd->ecc_stats.corrected++;
}
}

return 0;
}

/*
* onenand_interrupt - [DEFAULT] onenand interrupt handler
* @param irq onenand interrupt number
* @param dev_id interrupt data
*
* complete the work
*/
static irqreturn_t onenand_interrupt(int irq, void *data)
{
struct onenand_chip *this = (struct onenand_chip *) data;

/* To handle shared interrupt */
if (!this->complete.done)
complete(&this->complete);

return IRQ_HANDLED;
}

/*
* onenand_interrupt_wait - [DEFAULT] wait until the command is done
* @param mtd MTD device structure
* @param state state to select the max. timeout value
*
* Wait for command done.
*/
static int onenand_interrupt_wait(struct mtd_info *mtd, int state)
{
struct onenand_chip *this = mtd->priv;

wait_for_completion(&this->complete);

return onenand_wait(mtd, state);
}

/*
* onenand_try_interrupt_wait - [DEFAULT] try interrupt wait
* @param mtd MTD device structure
* @param state state to select the max. timeout value
*
* Try interrupt based wait (It is used one-time)
*/
static int onenand_try_interrupt_wait(struct mtd_info *mtd, int state)
{
struct onenand_chip *this = mtd->priv;
unsigned long remain, timeout;

/* We use interrupt wait first */
this->wait = onenand_interrupt_wait;

timeout = msecs_to_jiffies(100);
remain = wait_for_completion_timeout(&this->complete, timeout);
if (!remain) {
printk(KERN_INFO "OneNAND: There's no interrupt. "
"We use the normal wait\n");

/* Release the irq */
free_irq(this->irq, this);

this->wait = onenand_wait;
}

return onenand_wait(mtd, state);
}

/*
* onenand_setup_wait - [OneNAND Interface] setup onenand wait method
* @param mtd MTD device structure
*
* There's two method to wait onenand work
* 1. polling - read interrupt status register
* 2. interrupt - use the kernel interrupt method
*/
static void onenand_setup_wait(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
int syscfg;

init_completion(&this->complete);

if (this->irq <= 0) {
this->wait = onenand_wait;
return;
}

if (request_irq(this->irq, &onenand_interrupt,
IRQF_SHARED, "onenand", this)) {
/* If we can't get irq, use the normal wait */
this->wait = onenand_wait;
return;
}

/* Enable interrupt */
syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
syscfg |= ONENAND_SYS_CFG1_IOBE;
this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);

this->wait = onenand_try_interrupt_wait;
}

/**
* onenand_bufferram_offset - [DEFAULT] BufferRAM offset
* @param mtd MTD data structure
@@ -609,9 +707,10 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct onenand_chip *this = mtd->priv;
struct mtd_ecc_stats stats;
int read = 0, column;
int thislen;
int ret = 0;
int ret = 0, boundary = 0;

DEBUG(MTD_DEBUG_LEVEL3, "onenand_read: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);

@@ -627,38 +726,61 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,

/* TODO handling oob */

while (read < len) {
thislen = min_t(int, mtd->writesize, len - read);
stats = mtd->ecc_stats;

column = from & (mtd->writesize - 1);
if (column + thislen > mtd->writesize)
thislen = mtd->writesize - column;
/* Read-while-load method */

if (!onenand_check_bufferram(mtd, from)) {
this->command(mtd, ONENAND_CMD_READ, from, mtd->writesize);
/* Do first load to bufferRAM */
if (read < len) {
if (!onenand_check_bufferram(mtd, from)) {
this->command(mtd, ONENAND_CMD_READ, from, mtd->writesize);
ret = this->wait(mtd, FL_READING);
onenand_update_bufferram(mtd, from, !ret);
}
}

ret = this->wait(mtd, FL_READING);
/* First copy data and check return value for ECC handling */
onenand_update_bufferram(mtd, from, 1);
}
thislen = min_t(int, mtd->writesize, len - read);
column = from & (mtd->writesize - 1);
if (column + thislen > mtd->writesize)
thislen = mtd->writesize - column;

this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
while (!ret) {
/* If there is more to load then start next load */
from += thislen;
if (read + thislen < len) {
this->command(mtd, ONENAND_CMD_READ, from, mtd->writesize);
/*
* Chip boundary handling in DDP
* Now we issued chip 1 read and pointed chip 1
* bufferam so we have to point chip 0 bufferam.
*/
if (this->device_id & ONENAND_DEVICE_IS_DDP &&
unlikely(from == (this->chipsize >> 1))) {
this->write_word(0, this->base + ONENAND_REG_START_ADDRESS2);
boundary = 1;
} else
boundary = 0;
ONENAND_SET_PREV_BUFFERRAM(this);
}
/* While load is going, read from last bufferRAM */
this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
/* See if we are done */
read += thislen;
if (read == len)
break;
/* Set up for next read from bufferRAM */
if (unlikely(boundary))
this->write_word(0x8000, this->base + ONENAND_REG_START_ADDRESS2);
ONENAND_SET_NEXT_BUFFERRAM(this);
buf += thislen;
thislen = min_t(int, mtd->writesize, len - read);
column = 0;
cond_resched();
/* Now wait for load */
ret = this->wait(mtd, FL_READING);
onenand_update_bufferram(mtd, from, !ret);
}

read += thislen;

if (read == len)
break;

if (ret) {
DEBUG(MTD_DEBUG_LEVEL0, "onenand_read: read failed = %d\n", ret);
goto out;
}

from += thislen;
buf += thislen;
}

out:
/* Deselect and wake up anyone waiting on the device */
onenand_release_device(mtd);

@@ -668,7 +790,14 @@ out:
* retlen == desired len and result == -EBADMSG
*/
*retlen = read;
return ret;

if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;

if (ret)
return ret;

return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
}

/**
@@ -705,6 +834,8 @@ int onenand_do_read_oob(struct mtd_info *mtd, loff_t from, size_t len,
column = from & (mtd->oobsize - 1);

while (read < len) {
cond_resched();

thislen = mtd->oobsize - column;
thislen = min_t(int, thislen, len);

@@ -717,16 +848,16 @@ int onenand_do_read_oob(struct mtd_info *mtd, loff_t from, size_t len,

this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen);

if (ret) {
DEBUG(MTD_DEBUG_LEVEL0, "onenand_read_oob: read failed = 0x%x\n", ret);
goto out;
}

read += thislen;

if (read == len)
break;

if (ret) {
DEBUG(MTD_DEBUG_LEVEL0, "onenand_read_oob: read failed = %d\n", ret);
goto out;
}

buf += thislen;

/* Read more? */
@@ -756,8 +887,8 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
{
BUG_ON(ops->mode != MTD_OOB_PLACE);

return onenand_do_read_oob(mtd, from + ops->ooboffs, ops->len,
&ops->retlen, ops->oobbuf);
return onenand_do_read_oob(mtd, from + ops->ooboffs, ops->ooblen,
&ops->oobretlen, ops->oobbuf);
}

#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
@@ -804,6 +935,10 @@ static int onenand_verify_page(struct mtd_info *mtd, u_char *buf, loff_t addr)
void __iomem *dataram0, *dataram1;
int ret = 0;

/* In partial page write, just skip it */
if ((addr & (mtd->writesize - 1)) != 0)
return 0;

this->command(mtd, ONENAND_CMD_READ, addr, mtd->writesize);

ret = this->wait(mtd, FL_READING);
@@ -826,7 +961,7 @@ static int onenand_verify_page(struct mtd_info *mtd, u_char *buf, loff_t addr)
#define onenand_verify_oob(...) (0)
#endif

#define NOTALIGNED(x) ((x & (mtd->writesize - 1)) != 0)
#define NOTALIGNED(x) ((x & (this->subpagesize - 1)) != 0)

/**
* onenand_write - [MTD Interface] write buffer to FLASH
@@ -844,6 +979,7 @@ static int onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
struct onenand_chip *this = mtd->priv;
int written = 0;
int ret = 0;
int column, subpage;

DEBUG(MTD_DEBUG_LEVEL3, "onenand_write: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len);

@@ -862,45 +998,63 @@ static int onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
return -EINVAL;
}

column = to & (mtd->writesize - 1);
subpage = column || (len & (mtd->writesize - 1));

/* Grab the lock and see if the device is available */
onenand_get_device(mtd, FL_WRITING);

/* Loop until all data write */
while (written < len) {
int thislen = min_t(int, mtd->writesize, len - written);
int bytes = mtd->writesize;
int thislen = min_t(int, bytes, len - written);
u_char *wbuf = (u_char *) buf;

this->command(mtd, ONENAND_CMD_BUFFERRAM, to, mtd->writesize);
cond_resched();

this->write_bufferram(mtd, ONENAND_DATARAM, buf, 0, thislen);
this->command(mtd, ONENAND_CMD_BUFFERRAM, to, bytes);

/* Partial page write */
if (subpage) {
bytes = min_t(int, bytes - column, (int) len);
memset(this->page_buf, 0xff, mtd->writesize);
memcpy(this->page_buf + column, buf, bytes);
wbuf = this->page_buf;
/* Even though partial write, we need page size */
thislen = mtd->writesize;
}

this->write_bufferram(mtd, ONENAND_DATARAM, wbuf, 0, thislen);
this->write_bufferram(mtd, ONENAND_SPARERAM, ffchars, 0, mtd->oobsize);

this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize);

onenand_update_bufferram(mtd, to, 1);
/* In partial page write we don't update bufferram */
onenand_update_bufferram(mtd, to, !subpage);

ret = this->wait(mtd, FL_WRITING);
if (ret) {
DEBUG(MTD_DEBUG_LEVEL0, "onenand_write: write filaed %d\n", ret);
goto out;
break;
}

/* Only check verify write turn on */
ret = onenand_verify_page(mtd, (u_char *) wbuf, to);
if (ret) {
DEBUG(MTD_DEBUG_LEVEL0, "onenand_write: verify failed %d\n", ret);
break;
}

written += thislen;

/* Only check verify write turn on */
ret = onenand_verify_page(mtd, (u_char *) buf, to);
if (ret) {
DEBUG(MTD_DEBUG_LEVEL0, "onenand_write: verify failed %d\n", ret);
goto out;
}

if (written == len)
break;

column = 0;
to += thislen;
buf += thislen;
}

out:
/* Deselect and wake up anyone waiting on the device */
onenand_release_device(mtd);

@@ -944,6 +1098,8 @@ static int onenand_do_write_oob(struct mtd_info *mtd, loff_t to, size_t len,
while (written < len) {
int thislen = min_t(int, mtd->oobsize, len - written);

cond_resched();

column = to & (mtd->oobsize - 1);

this->command(mtd, ONENAND_CMD_BUFFERRAM, to, mtd->oobsize);
@@ -999,8 +1155,8 @@ static int onenand_write_oob(struct mtd_info *mtd, loff_t to,
{
BUG_ON(ops->mode != MTD_OOB_PLACE);

return onenand_do_write_oob(mtd, to + ops->ooboffs, ops->len,
&ops->retlen, ops->oobbuf);
return onenand_do_write_oob(mtd, to + ops->ooboffs, ops->ooblen,
&ops->oobretlen, ops->oobbuf);
}

/**
@@ -1071,6 +1227,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
instr->state = MTD_ERASING;

while (len) {
cond_resched();

/* Check if we have a bad block, we do not erase bad blocks */
if (onenand_block_checkbad(mtd, addr, 0, 0)) {
@@ -1084,10 +1241,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
ret = this->wait(mtd, FL_ERASING);
/* Check, if it is write protected */
if (ret) {
if (ret == -EPERM)
DEBUG(MTD_DEBUG_LEVEL0, "onenand_erase: Device is write protected!!!\n");
else
DEBUG(MTD_DEBUG_LEVEL0, "onenand_erase: Failed erase, block %d\n", (unsigned) (addr >> this->erase_shift));
DEBUG(MTD_DEBUG_LEVEL0, "onenand_erase: Failed erase, block %d\n", (unsigned) (addr >> this->erase_shift));
instr->state = MTD_ERASE_FAILED;
instr->fail_addr = addr;
goto erase_exit;
@@ -1129,7 +1283,6 @@ static void onenand_sync(struct mtd_info *mtd)
onenand_release_device(mtd);
}


/**
* onenand_block_isbad - [MTD Interface] Check whether the block at the given offset is bad
* @param mtd MTD device structure
@@ -1196,32 +1349,38 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
}

/**
* onenand_unlock - [MTD Interface] Unlock block(s)
* onenand_do_lock_cmd - [OneNAND Interface] Lock or unlock block(s)
* @param mtd MTD device structure
* @param ofs offset relative to mtd start
* @param len number of bytes to unlock
* @param len number of bytes to lock or unlock
*
* Unlock one or more blocks
* Lock or unlock one or more blocks
*/
static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int cmd)
{
struct onenand_chip *this = mtd->priv;
int start, end, block, value, status;
int wp_status_mask;

start = ofs >> this->erase_shift;
end = len >> this->erase_shift;

if (cmd == ONENAND_CMD_LOCK)
wp_status_mask = ONENAND_WP_LS;
else
wp_status_mask = ONENAND_WP_US;

/* Continuous lock scheme */
if (this->options & ONENAND_HAS_CONT_LOCK) {
/* Set start block address */
this->write_word(start, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
/* Set end block address */
this->write_word(start + end - 1, this->base + ONENAND_REG_END_BLOCK_ADDRESS);
/* Write unlock command */
this->command(mtd, ONENAND_CMD_UNLOCK, 0, 0);
/* Write lock command */
this->command(mtd, cmd, 0, 0);

/* There's no return value */
this->wait(mtd, FL_UNLOCKING);
this->wait(mtd, FL_LOCKING);

/* Sanity check */
while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS)
@@ -1230,7 +1389,7 @@ static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)

/* Check lock status */
status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
if (!(status & ONENAND_WP_US))
if (!(status & wp_status_mask))
printk(KERN_ERR "wp status = 0x%x\n", status);

return 0;
@@ -1246,11 +1405,11 @@ static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
/* Set start block address */
this->write_word(block, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
/* Write unlock command */
this->command(mtd, ONENAND_CMD_UNLOCK, 0, 0);
/* Write lock command */
this->command(mtd, cmd, 0, 0);

/* There's no return value */
this->wait(mtd, FL_UNLOCKING);
this->wait(mtd, FL_LOCKING);

/* Sanity check */
while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS)
@@ -1259,13 +1418,39 @@ static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)

/* Check lock status */
status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
if (!(status & ONENAND_WP_US))
if (!(status & wp_status_mask))
printk(KERN_ERR "block = %d, wp status = 0x%x\n", block, status);
}

return 0;
}

/**
* onenand_lock - [MTD Interface] Lock block(s)
* @param mtd MTD device structure
* @param ofs offset relative to mtd start
* @param len number of bytes to unlock
*
* Lock one or more blocks
*/
static int onenand_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
return onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_LOCK);
}

/**
* onenand_unlock - [MTD Interface] Unlock block(s)
* @param mtd MTD device structure
* @param ofs offset relative to mtd start
* @param len number of bytes to unlock
*
* Unlock one or more blocks
*/
static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
return onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
}

/**
* onenand_check_lock_status - [OneNAND Interface] Check lock status
* @param this onenand chip data structure
@@ -1310,7 +1495,7 @@ static int onenand_unlock_all(struct mtd_info *mtd)
this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0);

/* There's no return value */
this->wait(mtd, FL_UNLOCKING);
this->wait(mtd, FL_LOCKING);

/* Sanity check */
while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS)
@@ -1334,7 +1519,7 @@ static int onenand_unlock_all(struct mtd_info *mtd)
return 0;
}

mtd->unlock(mtd, 0x0, this->chipsize);
onenand_unlock(mtd, 0x0, this->chipsize);

return 0;
}
@@ -1762,7 +1947,7 @@ static int onenand_probe(struct mtd_info *mtd)
/* Read manufacturer and device IDs from Register */
maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
ver_id= this->read_word(this->base + ONENAND_REG_VERSION_ID);
ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID);

/* Check OneNAND device */
if (maf_id != bram_maf_id || dev_id != bram_dev_id)
@@ -1846,7 +2031,7 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
if (!this->command)
this->command = onenand_command;
if (!this->wait)
this->wait = onenand_wait;
onenand_setup_wait(mtd);

if (!this->read_bufferram)
this->read_bufferram = onenand_read_bufferram;
@@ -1883,23 +2068,30 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
init_waitqueue_head(&this->wq);
spin_lock_init(&this->chip_lock);

/*
* Allow subpage writes up to oobsize.
*/
switch (mtd->oobsize) {
case 64:
this->ecclayout = &onenand_oob_64;
mtd->subpage_sft = 2;
break;

case 32:
this->ecclayout = &onenand_oob_32;
mtd->subpage_sft = 1;
break;

default:
printk(KERN_WARNING "No OOB scheme defined for oobsize %d\n",
mtd->oobsize);
mtd->subpage_sft = 0;
/* To prevent kernel oops */
this->ecclayout = &onenand_oob_32;
break;
}

this->subpagesize = mtd->writesize >> mtd->subpage_sft;
mtd->ecclayout = this->ecclayout;

/* Fill in remaining MTD driver data */
@@ -1922,7 +2114,7 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
mtd->lock_user_prot_reg = onenand_lock_user_prot_reg;
#endif
mtd->sync = onenand_sync;
mtd->lock = NULL;
mtd->lock = onenand_lock;
mtd->unlock = onenand_unlock;
mtd->suspend = onenand_suspend;
mtd->resume = onenand_resume;

@@ -93,13 +93,15 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
ret = onenand_do_read_oob(mtd, from + j * mtd->writesize + bd->offs,
readlen, &retlen, &buf[0]);

if (ret)
/* If it is a initial bad block, just ignore it */
if (ret && !(ret & ONENAND_CTRL_LOAD))
return ret;

if (check_short_pattern(&buf[j * scanlen], scanlen, mtd->writesize, bd)) {
bbm->bbt[i >> 3] |= 0x03 << (i & 0x6);
printk(KERN_WARNING "Bad eraseblock %d at 0x%08x\n",
i >> 1, (unsigned int) from);
mtd->ecc_stats.badblocks++;
break;
}
}
@@ -177,14 +179,12 @@ int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
int len, ret = 0;

len = mtd->size >> (this->erase_shift + 2);
/* Allocate memory (2bit per block) */
bbm->bbt = kmalloc(len, GFP_KERNEL);
/* Allocate memory (2bit per block) and clear the memory bad block table */
bbm->bbt = kzalloc(len, GFP_KERNEL);
if (!bbm->bbt) {
printk(KERN_ERR "onenand_scan_bbt: Out of memory\n");
return -ENOMEM;
}
/* Clear the memory bad block table */
memset(bbm->bbt, 0x00, len);

/* Set the bad block position */
bbm->badblockpos = ONENAND_BADBLOCK_POS;
@@ -230,14 +230,12 @@ int onenand_default_bbt(struct mtd_info *mtd)
struct onenand_chip *this = mtd->priv;
struct bbm_info *bbm;

this->bbm = kmalloc(sizeof(struct bbm_info), GFP_KERNEL);
this->bbm = kzalloc(sizeof(struct bbm_info), GFP_KERNEL);
if (!this->bbm)
return -ENOMEM;

bbm = this->bbm;

memset(bbm, 0, sizeof(struct bbm_info));

/* 1KB page has same configuration as 2KB page */
if (!bbm->badblock_pattern)
bbm->badblock_pattern = &largepage_memorybased;

@@ -96,7 +96,19 @@ static int parse_redboot_partitions(struct mtd_info *master,
*/
if (swab32(buf[i].size) == master->erasesize) {
int j;
for (j = 0; j < numslots && buf[j].name[0] != 0xff; ++j) {
for (j = 0; j < numslots; ++j) {

/* A single 0xff denotes a deleted entry.
* Two of them in a row is the end of the table.
*/
if (buf[j].name[0] == 0xff) {
if (buf[j].name[1] == 0xff) {
break;
} else {
continue;
}
}

/* The unsigned long fields were written with the
* wrong byte sex, name and pad have no byte sex.
*/
@@ -110,6 +122,9 @@ static int parse_redboot_partitions(struct mtd_info *master,
}
}
break;
} else {
/* re-calculate of real numslots */
numslots = buf[i].size / sizeof(struct fis_image_desc);
}
}
if (i == numslots) {
@@ -123,8 +138,13 @@ static int parse_redboot_partitions(struct mtd_info *master,
for (i = 0; i < numslots; i++) {
struct fis_list *new_fl, **prev;

if (buf[i].name[0] == 0xff)
continue;
if (buf[i].name[0] == 0xff) {
if (buf[i].name[1] == 0xff) {
break;
} else {
continue;
}
}
if (!redboot_checksum(&buf[i]))
break;

@@ -165,15 +185,13 @@ static int parse_redboot_partitions(struct mtd_info *master,
}
}
#endif
parts = kmalloc(sizeof(*parts)*nrparts + nulllen + namelen, GFP_KERNEL);
parts = kzalloc(sizeof(*parts)*nrparts + nulllen + namelen, GFP_KERNEL);

if (!parts) {
ret = -ENOMEM;
goto out;
}

memset(parts, 0, sizeof(*parts)*nrparts + nulllen + namelen);

nullname = (char *)&parts[nrparts];
#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
if (nulllen > 0) {

@@ -787,7 +787,6 @@ static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)

if (scan_header(part) == 0) {
part->mbd.size = part->sector_count;
part->mbd.blksize = SECTOR_SIZE;
part->mbd.tr = tr;
part->mbd.devnum = -1;
if (!(mtd->flags & MTD_WRITEABLE))
@@ -829,6 +828,8 @@ struct mtd_blktrans_ops rfd_ftl_tr = {
.name = "rfd",
.major = RFD_FTL_MAJOR,
.part_bits = PART_BITS,
.blksize = SECTOR_SIZE,

.readsect = rfd_ftl_readsect,
.writesect = rfd_ftl_writesect,
.getgeo = rfd_ftl_getgeo,