/*
 * linux/include/linux/mmc/core.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef LINUX_MMC_CORE_H
#define LINUX_MMC_CORE_H

#include <linux/completion.h>
#include <linux/types.h>

struct mmc_data;
struct mmc_request;

enum mmc_blk_status {
	MMC_BLK_SUCCESS = 0,
	MMC_BLK_PARTIAL,
	MMC_BLK_CMD_ERR,
	MMC_BLK_RETRY,
	MMC_BLK_ABORT,
	MMC_BLK_DATA_ERR,
	MMC_BLK_ECC_ERR,
	MMC_BLK_NOMEDIUM,
	MMC_BLK_NEW_REQUEST,
};

struct mmc_command {
	u32			opcode;
	u32			arg;
#define MMC_CMD23_ARG_REL_WR	(1 << 31)
#define MMC_CMD23_ARG_PACKED	((0 << 31) | (1 << 30))
#define MMC_CMD23_ARG_TAG_REQ	(1 << 29)
	u32			resp[4];
	unsigned int		flags;		/* expected response type */
#define MMC_RSP_PRESENT	(1 << 0)
#define MMC_RSP_136	(1 << 1)		/* 136 bit response */
#define MMC_RSP_CRC	(1 << 2)		/* expect valid crc */
#define MMC_RSP_BUSY	(1 << 3)		/* card may send busy */
#define MMC_RSP_OPCODE	(1 << 4)		/* response contains opcode */

#define MMC_CMD_MASK	(3 << 5)		/* non-SPI command type */
#define MMC_CMD_AC	(0 << 5)
#define MMC_CMD_ADTC	(1 << 5)
#define MMC_CMD_BC	(2 << 5)
#define MMC_CMD_BCR	(3 << 5)

#define MMC_RSP_SPI_S1	(1 << 7)		/* one status byte */
#define MMC_RSP_SPI_S2	(1 << 8)		/* second byte */
#define MMC_RSP_SPI_B4	(1 << 9)		/* four data bytes */
#define MMC_RSP_SPI_BUSY (1 << 10)		/* card may send busy */

/*
 * These are the native response types, and correspond to valid bit
 * patterns of the above flags.  One additional valid pattern
 * is all zeros, which means we don't expect a response.
 */
#define MMC_RSP_NONE	(0)
#define MMC_RSP_R1	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
#define MMC_RSP_R1B	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE|MMC_RSP_BUSY)
#define MMC_RSP_R2	(MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC)
#define MMC_RSP_R3	(MMC_RSP_PRESENT)
#define MMC_RSP_R4	(MMC_RSP_PRESENT)
#define MMC_RSP_R5	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
#define MMC_RSP_R6	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
#define MMC_RSP_R7	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)

/* Can be used by core to poll after switch to MMC HS mode */
#define MMC_RSP_R1_NO_CRC	(MMC_RSP_PRESENT|MMC_RSP_OPCODE)

#define mmc_resp_type(cmd)	((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE))

/*
 * These are the SPI response types for MMC, SD, and SDIO cards.
 * Commands return R1, with maybe more info.  Zero is an error type;
 * callers must always provide the appropriate MMC_RSP_SPI_Rx flags.
 */
#define MMC_RSP_SPI_R1	(MMC_RSP_SPI_S1)
#define MMC_RSP_SPI_R1B	(MMC_RSP_SPI_S1|MMC_RSP_SPI_BUSY)
#define MMC_RSP_SPI_R2	(MMC_RSP_SPI_S1|MMC_RSP_SPI_S2)
#define MMC_RSP_SPI_R3	(MMC_RSP_SPI_S1|MMC_RSP_SPI_B4)
#define MMC_RSP_SPI_R4	(MMC_RSP_SPI_S1|MMC_RSP_SPI_B4)
#define MMC_RSP_SPI_R5	(MMC_RSP_SPI_S1|MMC_RSP_SPI_S2)
#define MMC_RSP_SPI_R7	(MMC_RSP_SPI_S1|MMC_RSP_SPI_B4)

#define mmc_spi_resp_type(cmd)	((cmd)->flags & \
		(MMC_RSP_SPI_S1|MMC_RSP_SPI_BUSY|MMC_RSP_SPI_S2|MMC_RSP_SPI_B4))

/*
 * These are the command types.
 */
#define mmc_cmd_type(cmd)	((cmd)->flags & MMC_CMD_MASK)

	unsigned int		retries;	/* max number of retries */
	int			error;		/* command error */

/*
 * Standard errno values are used for errors, but some have specific
 * meaning in the MMC layer:
 *
 * ETIMEDOUT	Card took too long to respond
 * EILSEQ	Basic format problem with the received or sent data
 *		(e.g. CRC check failed, incorrect opcode in response
 *		or bad end bit)
 * EINVAL	Request cannot be performed because of restrictions
 *		in hardware and/or the driver
 * ENOMEDIUM	Host can determine that the slot is empty and is
 *		actively failing requests
 */

	unsigned int		busy_timeout;	/* busy detect timeout in ms */
	/* Set this flag only for blocking sanitize request */
	bool			sanitize_busy;

	struct mmc_data		*data;		/* data segment associated with cmd */
	struct mmc_request	*mrq;		/* associated request */
};
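
/*
 * Illustrative sketch, not part of the mainline header: one way a caller
 * might fill in a simple no-data command.  SEND_STATUS (CMD13) takes the
 * card's RCA in the upper 16 bits of the argument and expects an R1
 * response.  The helper name and retry count are arbitrary choices for
 * the example; *cmd is assumed to be zero-initialised by the caller.
 */
static inline void mmc_example_prepare_status_cmd(struct mmc_command *cmd,
						  u32 rca)
{
	cmd->opcode = 13;			/* SEND_STATUS */
	cmd->arg = rca << 16;			/* [31:16] hold the RCA */
	cmd->flags = MMC_RSP_R1 | MMC_CMD_AC;	/* short response, no data */
	cmd->retries = 3;			/* let the core retry on error */
}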

struct mmc_data {
	unsigned int		timeout_ns;	/* data timeout (in ns, max 80ms) */
	unsigned int		timeout_clks;	/* data timeout (in clocks) */
	unsigned int		blksz;		/* data block size */
	unsigned int		blocks;		/* number of blocks */
	unsigned int		blk_addr;	/* block address */
	int			error;		/* data error */
	unsigned int		flags;

#define MMC_DATA_WRITE		BIT(8)
#define MMC_DATA_READ		BIT(9)
/* Extra flags used by CQE */
#define MMC_DATA_QBR		BIT(10)		/* CQE queue barrier */
#define MMC_DATA_PRIO		BIT(11)		/* CQE high priority */
#define MMC_DATA_REL_WR		BIT(12)		/* Reliable write */
#define MMC_DATA_DAT_TAG	BIT(13)		/* Tag request */
#define MMC_DATA_FORCED_PRG	BIT(14)		/* Forced programming */

	unsigned int		bytes_xfered;

	struct mmc_command	*stop;		/* stop command */
	struct mmc_request	*mrq;		/* associated request */

	unsigned int		sg_len;		/* size of scatter list */
	int			sg_count;	/* mapped sg entries */
	struct scatterlist	*sg;		/* I/O scatter list */
	s32			host_cookie;	/* host private data */
};
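
/*
 * Illustrative sketch, not part of the mainline header: filling in an
 * mmc_data descriptor for a single 512-byte read.  The scatterlist is
 * assumed to have been prepared by the caller (e.g. with sg_init_one());
 * the helper name is made up for the example.
 */
static inline void mmc_example_prepare_read_data(struct mmc_data *data,
						 struct scatterlist *sg,
						 unsigned int sg_len)
{
	data->blksz = 512;		/* one sector per block */
	data->blocks = 1;		/* single-block transfer */
	data->flags = MMC_DATA_READ;	/* direction: card to host */
	data->sg = sg;			/* caller-owned scatterlist */
	data->sg_len = sg_len;		/* number of entries in sg */
}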

struct mmc_host;
struct mmc_request {
	struct mmc_command	*sbc;		/* SET_BLOCK_COUNT for multiblock */
	struct mmc_command	*cmd;
	struct mmc_data		*data;
	struct mmc_command	*stop;

	struct completion	completion;
	struct completion	cmd_completion;
	void			(*done)(struct mmc_request *); /* completion function */
	/*
	 * Notify upper layers (e.g. mmc block driver) that recovery is needed
	 * due to an error associated with the mmc_request. Currently used only
	 * by CQE.
	 */
	void			(*recovery_notifier)(struct mmc_request *);
	struct mmc_host		*host;

	/* Allow other commands during this ongoing data transfer or busy wait */
	bool			cap_cmd_during_tfr;

	int			tag;
};

struct mmc_card;

void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq);
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd,
		int retries);

int mmc_hw_reset(struct mmc_host *host);
int mmc_sw_reset(struct mmc_host *host);
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card);
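
/*
 * Illustrative sketch, not part of the mainline header: assembling a
 * complete single-block read and submitting it synchronously.
 * READ_SINGLE_BLOCK (CMD17) takes the sector address as its argument on
 * block-addressed cards and expects an R1 response.  The function name is
 * made up for the example; claiming and releasing the host around the
 * request is omitted here.
 */
static inline int mmc_example_read_one_block(struct mmc_host *host,
					     struct mmc_card *card,
					     struct scatterlist *sg,
					     u32 blk_addr)
{
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct mmc_request mrq = {};

	cmd.opcode = 17;			/* READ_SINGLE_BLOCK */
	cmd.arg = blk_addr;			/* sector to read */
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;	/* data transfer command */

	data.blksz = 512;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = sg;
	data.sg_len = 1;

	mrq.cmd = &cmd;				/* cmd and data are cross- */
	mrq.data = &data;			/* linked by the core */

	mmc_set_data_timeout(&data, card);	/* derive timeouts from the card */
	mmc_wait_for_req(host, &mrq);		/* blocks until the request is done */

	return cmd.error ? cmd.error : data.error;
}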

#endif /* LINUX_MMC_CORE_H */