2005-11-07 14:15:31 +03:00
|
|
|
/*
|
2010-08-08 23:58:20 +04:00
|
|
|
* Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> et al.
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License as published by
|
|
|
|
* the Free Software Foundation; either version 2 of the License, or
|
|
|
|
* (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program; if not, write to the Free Software
|
|
|
|
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
2005-04-17 02:20:36 +04:00
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef __MTD_MTD_H__
|
|
|
|
#define __MTD_MTD_H__
|
|
|
|
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/uio.h>
|
2005-04-01 05:59:56 +04:00
|
|
|
#include <linux/notifier.h>
|
2009-03-26 10:42:41 +03:00
|
|
|
#include <linux/device.h>
|
2017-02-09 12:21:07 +03:00
|
|
|
#include <linux/of.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
#include <mtd/mtd-abi.h>
|
|
|
|
|
2008-12-10 16:37:21 +03:00
|
|
|
#include <asm/div64.h>
|
|
|
|
|
|
|
|
#define MTD_FAIL_ADDR_UNKNOWN -1LL
|
2008-08-12 13:40:50 +04:00
|
|
|
|
2018-02-13 00:03:10 +03:00
|
|
|
struct mtd_info;
|
|
|
|
|
2011-08-24 04:17:36 +04:00
|
|
|
/*
 * struct erase_info - operands of an MTD erase request.
 *
 * If the erase fails, fail_addr might indicate exactly which block failed. If
 * fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level
 * or was not specific to any particular block.
 */
struct erase_info {
	uint64_t addr;		/* start of the region to erase, in bytes */
	uint64_t len;		/* number of bytes to erase */
	uint64_t fail_addr;	/* failing block, or MTD_FAIL_ADDR_UNKNOWN */
};
|
|
|
|
|
|
|
|
/*
 * struct mtd_erase_region_info - one region of uniform eraseblock size.
 * Devices with a non-uniform layout describe themselves as an array of
 * these (see mtd_info->eraseregions / numeraseregions).
 */
struct mtd_erase_region_info {
	uint64_t offset;	/* At which this region starts, from the beginning of the MTD */
	uint32_t erasesize;	/* For this region */
	uint32_t numblocks;	/* Number of blocks of erasesize in this region */
	unsigned long *lockmap;	/* If keeping bitmap of locks */
};
|
|
|
|
|
2006-05-29 05:26:58 +04:00
|
|
|
/**
 * struct mtd_oob_ops - oob operation operands
 * @mode: operation mode
 * @len: number of data bytes to write/read
 * @retlen: number of data bytes written/read
 * @ooblen: number of oob bytes to write/read
 * @oobretlen: number of oob bytes written/read
 * @ooboffs: offset of oob data in the oob area (only relevant when
 *	     mode = MTD_OPS_PLACE_OOB or MTD_OPS_RAW)
 * @datbuf: data buffer - if NULL only oob data are read/written
 * @oobbuf: oob data buffer
 *
 * Note, it is allowed to read more than one OOB area at one go, but not write.
 * The interface assumes that the OOB write requests program only one page's
 * OOB area.
 */
struct mtd_oob_ops {
	unsigned int mode;
	size_t len;
	size_t retlen;
	size_t ooblen;
	size_t oobretlen;
	uint32_t ooboffs;
	uint8_t *datbuf;
	uint8_t *oobbuf;
};
|
|
|
|
|
2010-08-25 05:12:00 +04:00
|
|
|
#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32
|
2012-10-09 14:44:53 +04:00
|
|
|
#define MTD_MAX_ECCPOS_ENTRIES_LARGE 640
|
2016-02-04 11:52:30 +03:00
|
|
|
/**
 * struct mtd_oob_region - oob region definition
 * @offset: region offset
 * @length: region length
 *
 * This structure describes a region of the OOB area, and is used
 * to retrieve ECC or free bytes sections.
 * Each section is defined by an offset within the OOB area and a
 * length.
 */
struct mtd_oob_region {
	u32 offset;
	u32 length;
};
|
|
|
|
|
2016-02-03 21:01:31 +03:00
|
|
|
/**
 * struct mtd_ooblayout_ops - NAND OOB layout operations
 * @ecc: function returning an ECC region in the OOB area.
 *	 Should return -ERANGE if %section exceeds the total number of
 *	 ECC sections.
 * @free: function returning a free region in the OOB area.
 *	  Should return -ERANGE if %section exceeds the total number of
 *	  free sections.
 */
struct mtd_ooblayout_ops {
	int (*ecc)(struct mtd_info *mtd, int section,
		   struct mtd_oob_region *oobecc);
	int (*free)(struct mtd_info *mtd, int section,
		    struct mtd_oob_region *oobfree);
};
|
|
|
|
|
2015-11-16 17:53:13 +03:00
|
|
|
/**
 * struct mtd_pairing_info - page pairing information
 *
 * @pair: pair id
 * @group: group id
 *
 * The term "pair" is used here, even though TLC NANDs might group pages by 3
 * (3 bits in a single cell). A pair should regroup all pages that are sharing
 * the same cell. Pairs are then indexed in ascending order.
 *
 * @group is defining the position of a page in a given pair. It can also be
 * seen as the bit position in the cell: page attached to bit 0 belongs to
 * group 0, page attached to bit 1 belongs to group 1, etc.
 *
 * Example:
 * The H27UCG8T2BTR-BC datasheet describes the following pairing scheme:
 *
 *		group-0		group-1
 *
 *  pair-0	page-0		page-4
 *  pair-1	page-1		page-5
 *  pair-2	page-2		page-8
 *  ...
 *  pair-127	page-251	page-255
 *
 * Note that the "group" and "pair" terms were extracted from Samsung and
 * Hynix datasheets, and might be referenced under other names in other
 * datasheets (Micron is describing this concept as "shared pages").
 */
struct mtd_pairing_info {
	int pair;
	int group;
};
|
|
|
|
|
|
|
|
/**
 * struct mtd_pairing_scheme - page pairing scheme description
 *
 * @ngroups: number of groups. Should be related to the number of bits
 *	     per cell.
 * @get_info: converts a write-unit (page number within an erase block) into
 *	      mtd_pairing information (pair + group). This function should
 *	      fill the info parameter based on the wunit index or return
 *	      -EINVAL if the wunit parameter is invalid.
 * @get_wunit: converts pairing information into a write-unit (page) number.
 *	       This function should return the wunit index pointed by the
 *	       pairing information described in the info argument. It should
 *	       return -EINVAL, if there's no wunit corresponding to the
 *	       passed pairing information.
 *
 * See mtd_pairing_info documentation for a detailed explanation of the
 * pair and group concepts.
 *
 * The mtd_pairing_scheme structure provides a generic solution to represent
 * NAND page pairing scheme. Instead of exposing two big tables to do the
 * write-unit <-> (pair + group) conversions, we ask the MTD drivers to
 * implement the ->get_info() and ->get_wunit() functions.
 *
 * MTD users will then be able to query these information by using the
 * mtd_pairing_info_to_wunit() and mtd_wunit_to_pairing_info() helpers.
 *
 * @ngroups is here to help MTD users iterating over all the pages in a
 * given pair. This value can be retrieved by MTD users using the
 * mtd_pairing_groups() helper.
 *
 * Examples are given in the mtd_pairing_info_to_wunit() and
 * mtd_wunit_to_pairing_info() documentation.
 */
struct mtd_pairing_scheme {
	int ngroups;
	int (*get_info)(struct mtd_info *mtd, int wunit,
			struct mtd_pairing_info *info);
	int (*get_wunit)(struct mtd_info *mtd,
			 const struct mtd_pairing_info *info);
};
|
|
|
|
|
2011-05-26 21:46:22 +04:00
|
|
|
struct module; /* only needed for owner field in mtd_info */
|
|
|
|
|
2017-05-29 14:38:41 +03:00
|
|
|
/**
 * struct mtd_debug_info - debugging information for an MTD device.
 *
 * @dfs_dir: direntry object of the MTD device debugfs directory
 */
struct mtd_debug_info {
	struct dentry *dfs_dir;
};
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
 * struct mtd_info - in-kernel description of one MTD device.
 * Drivers fill this in and register it; users go through the mtd_*()
 * wrapper functions rather than the _-prefixed method pointers below.
 */
struct mtd_info {
	u_char type;		/* MTD_* device type (see mtd-abi.h) */
	uint32_t flags;		/* MTD_* capability flags (see mtd-abi.h) */
	uint64_t size;		// Total size of the MTD

	/* "Major" erase size for the device. Naïve users may take this
	 * to be the only erase size available, or may use the more detailed
	 * information below if they desire
	 */
	uint32_t erasesize;
	/* Minimal writable flash unit size. In case of NOR flash it is 1 (even
	 * though individual bits can be cleared), in case of NAND flash it is
	 * one NAND page (or half, or one-fourths of it), in case of ECC-ed NOR
	 * it is of ECC block size, etc. It is illegal to have writesize = 0.
	 * Any driver registering a struct mtd_info must ensure a writesize of
	 * 1 or larger.
	 */
	uint32_t writesize;

	/*
	 * Size of the write buffer used by the MTD. MTD devices having a write
	 * buffer can write multiple writesize chunks at a time. E.g. while
	 * writing 4 * writesize bytes to a device with 2 * writesize bytes
	 * buffer the MTD driver can (but doesn't have to) do 2 writesize
	 * operations, but not 4. Currently, all NANDs have writebufsize
	 * equivalent to writesize (NAND page size). Some NOR flashes do have
	 * writebufsize greater than writesize.
	 */
	uint32_t writebufsize;

	uint32_t oobsize;   // Amount of OOB data per block (e.g. 16)
	uint32_t oobavail;  // Available OOB bytes per block

	/*
	 * If erasesize is a power of 2 then the shift is stored in
	 * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize.
	 */
	unsigned int erasesize_shift;
	unsigned int writesize_shift;
	/* Masks based on erasesize_shift and writesize_shift */
	unsigned int erasesize_mask;
	unsigned int writesize_mask;

	/*
	 * read ops return -EUCLEAN if max number of bitflips corrected on any
	 * one region comprising an ecc step equals or exceeds this value.
	 * Settable by driver, else defaults to ecc_strength. User can override
	 * in sysfs. N.B. The meaning of the -EUCLEAN return code has changed;
	 * see Documentation/ABI/testing/sysfs-class-mtd for more detail.
	 */
	unsigned int bitflip_threshold;

	/* Kernel-only stuff starts here. */
	const char *name;	/* device name shown to the user */
	int index;		/* /dev/mtdN index assigned at registration */

	/* OOB layout description */
	const struct mtd_ooblayout_ops *ooblayout;

	/* NAND pairing scheme, only provided for MLC/TLC NANDs */
	const struct mtd_pairing_scheme *pairing;

	/* the ecc step size. */
	unsigned int ecc_step_size;

	/* max number of correctible bit errors per ecc step */
	unsigned int ecc_strength;

	/* Data for variable erase regions. If numeraseregions is zero,
	 * it means that the whole device has erasesize as given above.
	 */
	int numeraseregions;
	struct mtd_erase_region_info *eraseregions;

	/*
	 * Do not call via these pointers, use corresponding mtd_*()
	 * wrappers instead.
	 */
	int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
	int (*_point) (struct mtd_info *mtd, loff_t from, size_t len,
		       size_t *retlen, void **virt, resource_size_t *phys);
	int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
	int (*_read) (struct mtd_info *mtd, loff_t from, size_t len,
		      size_t *retlen, u_char *buf);
	int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
		       size_t *retlen, const u_char *buf);
	int (*_panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
			     size_t *retlen, const u_char *buf);
	int (*_read_oob) (struct mtd_info *mtd, loff_t from,
			  struct mtd_oob_ops *ops);
	int (*_write_oob) (struct mtd_info *mtd, loff_t to,
			   struct mtd_oob_ops *ops);
	int (*_get_fact_prot_info) (struct mtd_info *mtd, size_t len,
				    size_t *retlen, struct otp_info *buf);
	int (*_read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len, size_t *retlen, u_char *buf);
	int (*_get_user_prot_info) (struct mtd_info *mtd, size_t len,
				    size_t *retlen, struct otp_info *buf);
	int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len, size_t *retlen, u_char *buf);
	int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to,
				     size_t len, size_t *retlen, u_char *buf);
	int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len);
	int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs,
			unsigned long count, loff_t to, size_t *retlen);
	void (*_sync) (struct mtd_info *mtd);
	int (*_lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs);
	int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
	int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
	int (*_max_bad_blocks) (struct mtd_info *mtd, loff_t ofs, size_t len);
	int (*_suspend) (struct mtd_info *mtd);
	void (*_resume) (struct mtd_info *mtd);
	void (*_reboot) (struct mtd_info *mtd);
	/*
	 * If the driver is something smart, like UBI, it may need to maintain
	 * its own reference counting. The below functions are only for driver.
	 */
	int (*_get_device) (struct mtd_info *mtd);
	void (*_put_device) (struct mtd_info *mtd);

	struct notifier_block reboot_notifier;  /* default mode before reboot */

	/* ECC status information */
	struct mtd_ecc_stats ecc_stats;
	/* Subpage shift (NAND) */
	int subpage_sft;

	void *priv;		/* driver-private data */

	struct module *owner;	/* module providing this device */
	struct device dev;	/* embedded device-model device */
	int usecount;		/* in-kernel reference count */

	struct mtd_debug_info dbg;	/* debugfs state */
};
|
|
|
|
|
2016-02-04 11:52:30 +03:00
|
|
|
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
|
|
|
|
struct mtd_oob_region *oobecc);
|
|
|
|
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
|
|
|
|
int *section,
|
|
|
|
struct mtd_oob_region *oobregion);
|
|
|
|
int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
|
|
|
|
const u8 *oobbuf, int start, int nbytes);
|
|
|
|
int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
|
|
|
|
u8 *oobbuf, int start, int nbytes);
|
|
|
|
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
|
|
|
|
struct mtd_oob_region *oobfree);
|
|
|
|
int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
|
|
|
|
const u8 *oobbuf, int start, int nbytes);
|
|
|
|
int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
|
|
|
|
u8 *oobbuf, int start, int nbytes);
|
|
|
|
int mtd_ooblayout_count_freebytes(struct mtd_info *mtd);
|
|
|
|
int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd);
|
|
|
|
|
2016-02-03 21:01:31 +03:00
|
|
|
/* Attach the OOB layout operations used to query ECC/free OOB regions. */
static inline void mtd_set_ooblayout(struct mtd_info *mtd,
				     const struct mtd_ooblayout_ops *ooblayout)
{
	mtd->ooblayout = ooblayout;
}
|
|
|
|
|
2015-11-16 17:53:13 +03:00
|
|
|
/* Attach a NAND page pairing scheme (MLC/TLC devices only). */
static inline void mtd_set_pairing_scheme(struct mtd_info *mtd,
					  const struct mtd_pairing_scheme *pairing)
{
	mtd->pairing = pairing;
}
|
|
|
|
|
2015-10-31 06:33:20 +03:00
|
|
|
/*
 * Bind a device-tree node to the MTD device. If the device has no name
 * yet, fall back to the DT "label" property (a missing property leaves
 * the name NULL; the read's return value is deliberately ignored).
 */
static inline void mtd_set_of_node(struct mtd_info *mtd,
				   struct device_node *np)
{
	mtd->dev.of_node = np;
	if (!mtd->name)
		of_property_read_string(np, "label", &mtd->name);
}
|
|
|
|
|
|
|
|
/* Return the device-tree node bound to this MTD device (may be NULL). */
static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
{
	return dev_of_node(&mtd->dev);
}
|
|
|
|
|
2016-03-07 12:46:52 +03:00
|
|
|
static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
|
|
|
|
{
|
|
|
|
return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
|
|
|
|
}
|
|
|
|
|
2017-01-10 22:30:17 +03:00
|
|
|
static inline int mtd_max_bad_blocks(struct mtd_info *mtd,
|
|
|
|
loff_t ofs, size_t len)
|
|
|
|
{
|
|
|
|
if (!mtd->_max_bad_blocks)
|
|
|
|
return -ENOTSUPP;
|
|
|
|
|
|
|
|
if (mtd->size < (len + ofs) || ofs < 0)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return mtd->_max_bad_blocks(mtd, ofs, len);
|
|
|
|
}
|
|
|
|
|
2015-11-16 17:53:13 +03:00
|
|
|
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
|
|
|
|
struct mtd_pairing_info *info);
|
|
|
|
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
|
|
|
|
const struct mtd_pairing_info *info);
|
|
|
|
int mtd_pairing_groups(struct mtd_info *mtd);
|
2012-02-03 16:34:14 +04:00
|
|
|
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
|
|
|
|
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
|
|
|
|
void **virt, resource_size_t *phys);
|
|
|
|
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
|
|
|
|
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
|
|
|
|
unsigned long offset, unsigned long flags);
|
|
|
|
int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
|
|
|
|
u_char *buf);
|
|
|
|
int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
|
|
|
|
const u_char *buf);
|
|
|
|
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
|
|
|
|
const u_char *buf);
|
2011-12-23 20:03:17 +04:00
|
|
|
|
2012-06-23 03:35:38 +04:00
|
|
|
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops);
|
2016-04-12 23:46:39 +03:00
|
|
|
int mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops);
|
2011-12-23 20:29:55 +04:00
|
|
|
|
2014-01-28 12:29:44 +04:00
|
|
|
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
|
|
|
|
struct otp_info *buf);
|
2012-02-08 18:37:14 +04:00
|
|
|
int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
|
|
|
|
size_t *retlen, u_char *buf);
|
2014-01-28 12:29:44 +04:00
|
|
|
int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
|
|
|
|
struct otp_info *buf);
|
2012-02-08 18:37:14 +04:00
|
|
|
int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
|
|
|
|
size_t *retlen, u_char *buf);
|
|
|
|
int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
|
|
|
|
size_t *retlen, u_char *buf);
|
|
|
|
int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len);
|
2011-12-23 20:55:49 +04:00
|
|
|
|
2011-12-30 18:23:41 +04:00
|
|
|
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
|
|
|
|
unsigned long count, loff_t to, size_t *retlen);
|
2011-12-23 20:59:12 +04:00
|
|
|
|
2011-12-23 21:03:12 +04:00
|
|
|
/* Flush pending device activity; a no-op when the driver has no _sync hook. */
static inline void mtd_sync(struct mtd_info *mtd)
{
	if (mtd->_sync)
		mtd->_sync(mtd);
}
|
|
|
|
|
2012-02-03 16:34:14 +04:00
|
|
|
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
|
|
|
|
int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
|
|
|
|
int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
|
2014-05-22 02:06:12 +04:00
|
|
|
int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs);
|
2012-02-03 16:34:14 +04:00
|
|
|
int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs);
|
|
|
|
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs);
|
2011-12-23 21:21:16 +04:00
|
|
|
|
2011-12-23 21:25:16 +04:00
|
|
|
static inline int mtd_suspend(struct mtd_info *mtd)
|
|
|
|
{
|
2012-01-30 16:58:32 +04:00
|
|
|
return mtd->_suspend ? mtd->_suspend(mtd) : 0;
|
2011-12-23 21:25:16 +04:00
|
|
|
}
|
|
|
|
|
2011-12-23 21:31:25 +04:00
|
|
|
/* Invoke the driver's _resume hook, if any; pairs with mtd_suspend(). */
static inline void mtd_resume(struct mtd_info *mtd)
{
	if (mtd->_resume)
		mtd->_resume(mtd);
}
|
|
|
|
|
2008-12-10 17:08:12 +03:00
|
|
|
/*
 * Divide @sz by the eraseblock size: a cheap shift when erasesize is a
 * power of two (erasesize_shift != 0), a 64-bit division otherwise.
 */
static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->erasesize_shift)
		return sz >> mtd->erasesize_shift;
	/* do_div() divides sz in place; the quotient is left in sz. */
	do_div(sz, mtd->erasesize);
	return sz;
}
|
|
|
|
|
2008-12-10 17:08:12 +03:00
|
|
|
/*
 * Reduce @sz modulo the eraseblock size: a mask when erasesize is a
 * power of two, otherwise the remainder returned by do_div().
 */
static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->erasesize_shift)
		return sz & mtd->erasesize_mask;
	return do_div(sz, mtd->erasesize);
}
|
|
|
|
|
2017-12-15 15:39:52 +03:00
|
|
|
/**
 * mtd_align_erase_req - Adjust an erase request to align things on eraseblock
 *			 boundaries.
 * @mtd: the MTD device this erase request applies on
 * @req: the erase request to adjust
 *
 * This function will adjust @req->addr and @req->len to align them on
 * @mtd->erasesize. Of course we expect @mtd->erasesize to be != 0.
 */
static inline void mtd_align_erase_req(struct mtd_info *mtd,
				       struct erase_info *req)
{
	u32 mod;

	if (WARN_ON(!mtd->erasesize))
		return;

	/* Round the start down to an eraseblock boundary, extending the
	 * length so the end of the request stays where it was. */
	mod = mtd_mod_by_eb(req->addr, mtd);
	if (mod) {
		req->addr -= mod;
		req->len += mod;
	}

	/* Round the end up to the next eraseblock boundary. */
	mod = mtd_mod_by_eb(req->addr + req->len, mtd);
	if (mod)
		req->len += mtd->erasesize - mod;
}
|
|
|
|
|
2008-12-10 17:08:12 +03:00
|
|
|
/*
 * Divide @sz by the write unit (page) size; shift fast path when the
 * writesize is a power of two, 64-bit division otherwise.
 */
static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->writesize_shift)
		return sz >> mtd->writesize_shift;
	/* do_div() divides sz in place; the quotient is left in sz. */
	do_div(sz, mtd->writesize);
	return sz;
}
|
|
|
|
|
2008-12-10 17:08:12 +03:00
|
|
|
/*
 * Reduce @sz modulo the write unit (page) size; mask fast path when the
 * writesize is a power of two, otherwise the remainder from do_div().
 */
static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->writesize_shift)
		return sz & mtd->writesize_mask;
	return do_div(sz, mtd->writesize);
}
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2015-11-16 17:53:13 +03:00
|
|
|
/* Number of write units (pages) per eraseblock. */
static inline int mtd_wunit_per_eb(struct mtd_info *mtd)
{
	return mtd->erasesize / mtd->writesize;
}
|
|
|
|
|
|
|
|
/* Convert an absolute offset into a write-unit index within its eraseblock. */
static inline int mtd_offset_to_wunit(struct mtd_info *mtd, loff_t offs)
{
	return mtd_div_by_ws(mtd_mod_by_eb(offs, mtd), mtd);
}
|
|
|
|
|
|
|
|
/*
 * Convert a write-unit index back into an absolute offset, relative to
 * @base (normally the start of the eraseblock). Inverse of
 * mtd_offset_to_wunit() for valid wunit values.
 */
static inline loff_t mtd_wunit_to_offset(struct mtd_info *mtd, loff_t base,
					 int wunit)
{
	return base + (wunit * mtd->writesize);
}
|
|
|
|
|
|
|
|
|
2011-12-28 20:35:07 +04:00
|
|
|
static inline int mtd_has_oob(const struct mtd_info *mtd)
|
|
|
|
{
|
2012-01-30 16:58:32 +04:00
|
|
|
return mtd->_read_oob && mtd->_write_oob;
|
2011-12-28 20:35:07 +04:00
|
|
|
}
|
|
|
|
|
2013-09-25 10:58:17 +04:00
|
|
|
static inline int mtd_type_is_nand(const struct mtd_info *mtd)
|
|
|
|
{
|
|
|
|
return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH;
|
|
|
|
}
|
|
|
|
|
2012-01-02 15:48:54 +04:00
|
|
|
/* Nonzero when the driver supports bad-block queries (_block_isbad set). */
static inline int mtd_can_have_bb(const struct mtd_info *mtd)
{
	return !!mtd->_block_isbad;
}
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/* Kernel-side ioctl definitions */
|
|
|
|
|
2011-05-23 20:15:46 +04:00
|
|
|
struct mtd_partition;
|
2011-06-10 18:18:28 +04:00
|
|
|
struct mtd_part_parser_data;
|
2011-05-23 20:15:46 +04:00
|
|
|
|
2011-03-25 22:26:25 +03:00
|
|
|
extern int mtd_device_parse_register(struct mtd_info *mtd,
|
2013-03-11 17:38:48 +04:00
|
|
|
const char * const *part_probe_types,
|
|
|
|
struct mtd_part_parser_data *parser_data,
|
|
|
|
const struct mtd_partition *defparts,
|
|
|
|
int defnr_parts);
|
2011-06-23 15:33:15 +04:00
|
|
|
#define mtd_device_register(master, parts, nr_parts) \
|
|
|
|
mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)
|
2011-05-23 20:15:46 +04:00
|
|
|
extern int mtd_device_unregister(struct mtd_info *master);
|
2005-04-17 02:20:36 +04:00
|
|
|
extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
|
2010-02-22 21:39:28 +03:00
|
|
|
extern int __get_mtd_device(struct mtd_info *mtd);
|
|
|
|
extern void __put_mtd_device(struct mtd_info *mtd);
|
2006-10-11 15:52:44 +04:00
|
|
|
extern struct mtd_info *get_mtd_device_nm(const char *name);
|
2005-04-17 02:20:36 +04:00
|
|
|
extern void put_mtd_device(struct mtd_info *mtd);
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * struct mtd_notifier - callbacks registered via register_mtd_user().
 * @add: invoked for each MTD device (NOTE(review): presumably when devices
 *	 are registered and for those already present — confirm in mtdcore)
 * @remove: counterpart of @add for device removal
 * @list: internal linkage used by the MTD core; do not touch
 */
struct mtd_notifier {
	void (*add)(struct mtd_info *mtd);
	void (*remove)(struct mtd_info *mtd);
	struct list_head list;
};
|
|
|
|
|
|
|
|
|
|
|
|
extern void register_mtd_user (struct mtd_notifier *new);
|
|
|
|
extern int unregister_mtd_user (struct mtd_notifier *old);
|
mtd: create function to perform large allocations
Introduce a common function to handle large, contiguous kmalloc buffer
allocations by exponentially backing off on the size of the requested
kernel transfer buffer until it succeeds or until the requested
transfer buffer size falls below the page size.
This helps ensure the operation can succeed under low-memory, highly-
fragmented situations albeit somewhat more slowly.
Artem: so this patch solves the problem that the kernel tries to kmalloc too
large buffers, which (a) may fail and does fail - people complain about this,
and (b) slows down the system in case of high memory fragmentation, because
the kernel starts dropping caches, writing back, swapping, etc. But we do not
really have to allocate a lot of memory to do the I/O, we may do this even with
as little as one min. I/O unit (NAND page) of RAM. So the idea of this patch is
that if the user asks to read or write a lot, we try to kmalloc a lot, with GFP
flags which make the kernel _not_ drop caches, etc. If we can allocate it - good,
if not - we try to allocate twice as less, and so on, until we reach the min.
I/O unit size, which is our last resort allocation and use the normal
GFP_KERNEL flag.
Artem: re-write the allocation function so that it makes sure the allocated
buffer is aligned to the min. I/O size of the flash.
Signed-off-by: Grant Erickson <marathon96@gmail.com>
Tested-by: Ben Gardiner <bengardiner@nanometrics.ca>
Tested-by: Stefano Babic <sbabic@denx.de>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
2011-04-08 19:51:32 +04:00
|
|
|
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);
|
|
|
|
|
2011-09-21 05:30:51 +04:00
|
|
|
/* True when @ret reports corrected bitflips (data OK, media degrading). */
static inline int mtd_is_bitflip(int ret)
{
	return ret == -EUCLEAN;
}

/* True when @ret reports an uncorrectable ECC error (data is bad). */
static inline int mtd_is_eccerr(int ret)
{
	return ret == -EBADMSG;
}

/* True for either ECC-related status code (-EUCLEAN or -EBADMSG). */
static inline int mtd_is_bitflip_or_eccerr(int ret)
{
	return ret == -EUCLEAN || ret == -EBADMSG;
}
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2015-01-14 12:42:32 +03:00
|
|
|
unsigned mtd_mmap_capabilities(struct mtd_info *mtd);
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
#endif /* __MTD_MTD_H__ */
|