Merge tag 'mmc-v4.13' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc

Pull MMC updates from Ulf Hansson:

 "MMC core:
   - Add support to enable irq wake for slot gpio
   - Remove MMC_CAP2_HC_ERASE_SZ and make it the default behaviour
   - Improve R1 response error checks for stop commands
   - Cleanup and clarify some MMC specific code
   - Keep card runtime resumed while adding SDIO function devices
   - Use device_property_read instead of of_property_read in mmc_of_parse()
   - Move boot partition locking into a driver op to enable proper I/O scheduling
   - Move multi/single-ioctl() to use block layer to enable proper I/O scheduling
   - Delete bounce buffer Kconfig option
   - Improve the eMMC HW reset support provided via the eMMC pwrseq
   - Add host API to manage SDIO IRQs from a workqueue

  MMC host:
   - dw_mmc: Drop support for multiple slots
   - dw_mmc: Use device_property_read instead of of_property_read
   - dw_mmc-rockchip: Optional improved tuning to greatly decrease tuning time
   - dw_mmc: Prevent rpm suspend for SDIO IRQs instead of always for SDIO cards
   - dw_mmc: Convert to use MMC_CAP2_SDIO_IRQ_NOTHREAD for SDIO IRQs
   - omap_hsmmc: Convert to mmc regulator APIs to consolidate code
   - omap_hsmmc: Deprecate "vmmc_aux" in DT and use "vqmmc" instead
   - tmio: make sure SDIO gets reinitialized after resume
   - sdhi: add CMD23 support to R-Car Gen2 & Gen3
   - tmio: add CMD23 support
   - sdhi/tmio: Refactor code and rename files to simplify Kconfig options
   - sdhci-pci: Enable card detect wake for Intel BYT-related SD controllers
   - sdhci-pci: Add support for Intel CNP
   - sdhci-esdhc-imx: Remove ENGcm07207 workaround - allow multi block transfers
   - sdhci-esdhc-imx: Allow all supported prescaler values
   - sdhci-esdhc-imx: Fix DAT line software reset
   - sdhci-esdhc: Add SDHCI_QUIRK_32BIT_DMA_ADDR
   - atmel-mci: Drop AVR32 support"

* tag 'mmc-v4.13' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc: (86 commits)
  mmc: dw_mmc: remove the unnecessary slot variable
  mmc: dw_mmc: use the 'slot' instead of 'cur_slot'
  mmc: dw_mmc: remove the 'id' arguments about functions relevant to slot
  mmc: dw_mmc: change the array of slots
  mmc: dw_mmc: remove the loop about finding slots
  mmc: dw_mmc: deprecated the "num-slots" property
  mmc: dw_mmc-rockchip: parse rockchip,desired-num-phases from DT
  dt-bindings: rockchip-dw-mshc: add optional rockchip,desired-num-phases
  mmc: renesas-sdhi: improve checkpatch cleanness
  mmc: tmio: improve checkpatch cleanness
  mmc: sdhci-pci: Enable card detect wake for Intel BYT-related SD controllers
  mmc: slot-gpio: Add support to enable irq wake on cd_irq
  mmc: core: Remove MMC_CAP2_HC_ERASE_SZ
  mmc: core: for data errors, take response of stop cmd into account
  mmc: core: check also R1 response for stop commands
  mmc: core: Clarify code for sending CSD
  mmc: core: Drop mmc_all_send_cid() and use mmc_send_cxd_native() instead
  mmc: core: Re-factor code for sending CID
  mmc: core: Remove redundant code in mmc_send_cid()
  mmc: core: Make mmc_can_reset() static
  ...
This commit is contained in:

Commit 17ece345a0
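Two of the core changes above share one pattern: boot-partition locking and the single/multi ioctl() paths are no longer issued by claiming the host directly, but are wrapped in block-layer requests so they get scheduled against ordinary I/O. The sketch below is condensed from the drivers/mmc/core/block.c hunks further down in this diff; the helper name mmc_blk_lock_boot_wp() and the error check on blk_get_request() are illustrative additions, not code from the merge itself.

```c
#include <linux/blkdev.h>
#include <linux/err.h>
#include "queue.h"	/* struct mmc_queue, req_to_mmc_queue_req(), MMC_DRV_OP_* */

/* Dispatch a driver-private operation through the card's block queue. */
static int mmc_blk_lock_boot_wp(struct mmc_queue *mq)
{
	struct request *req;
	int ret;

	/* Allocate an "out" driver request on the block queue */
	req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Tag it with the driver operation to perform */
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;

	/* The block layer queues and issues it; this waits for completion */
	blk_execute_rq(mq->queue, NULL, req, 0);

	/* mmc_blk_issue_drv_op() stored its result here */
	ret = req_to_mmc_queue_req(req)->drv_op_result;

	blk_put_request(req);
	return ret;
}
```

The issuing side is the new mmc_blk_issue_drv_op(), visible later in the diff, which switches on drv_op (MMC_DRV_OP_IOCTL, MMC_DRV_OP_BOOT_WP) once the request reaches the front of the queue.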
@@ -15,6 +15,7 @@ Required Properties:
	- "rockchip,rk3288-dw-mshc": for Rockchip RK3288
	- "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RV1108
	- "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036
	- "rockchip,rk3328-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3328
	- "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368
	- "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399

@@ -31,6 +32,10 @@ Optional Properties:
  probing, low speeds or in case where all phases work at tuning time.
  If not specified 0 deg will be used.

* rockchip,desired-num-phases: The desired number of times that the host
  execute tuning when needed. If not specified, the host will do tuning
  for 360 times, namely tuning for each degree.

Example:

	rkdwmmc0@12200000 {
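The new optional rockchip,desired-num-phases property caps how many phases the host samples while tuning, which is what makes the "greatly decrease tuning time" item in the changelog possible. A minimal sketch of how a host driver might parse it with the documented 360-samples default; the rockchip_priv structure and field name are placeholders, not necessarily what dw_mmc-rockchip uses.

```c
#include <linux/of.h>
#include <linux/types.h>

struct rockchip_priv {
	u32 num_phases;
};

static void rockchip_parse_num_phases(struct device_node *np,
				      struct rockchip_priv *priv)
{
	/* Fall back to one tuning pass per degree when the property is absent */
	if (of_property_read_u32(np, "rockchip,desired-num-phases",
				 &priv->num_phases))
		priv->num_phases = 360;
}
```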
@@ -18,7 +18,7 @@ Required properties:
Optional properties:
ti,dual-volt: boolean, supports dual voltage cards
<supply-name>-supply: phandle to the regulator device tree node
"supply-name" examples are "vmmc", "vmmc_aux" etc
"supply-name" examples are "vmmc", "vmmc_aux"(deprecated)/"vqmmc" etc
ti,non-removable: non-removable slot (like eMMC)
ti,needs-special-reset: Requires a special softreset sequence
ti,needs-special-hs-handling: HSMMC IP needs special setting for handling High Speed
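omap_hsmmc's move to the common mmc regulator APIs is what deprecates "vmmc_aux" in favour of the standard "vqmmc" supply above. The sketch below shows one way a driver could honour both names during the transition; it is an illustration of the binding change only — the in-tree conversion goes through the core mmc regulator helpers, and the hsmmc_get_vqmmc() name is assumed.

```c
#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Prefer the standard "vqmmc" supply, fall back to the legacy "vmmc_aux". */
static struct regulator *hsmmc_get_vqmmc(struct device *dev)
{
	struct regulator *reg;

	reg = devm_regulator_get_optional(dev, "vqmmc");
	if (!IS_ERR(reg))
		return reg;

	/* Older device trees still use the deprecated name */
	return devm_regulator_get_optional(dev, "vmmc_aux");
}
```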
@@ -12915,7 +12915,7 @@ M:	Wolfram Sang <wsa+renesas@sang-engineering.com>
L:	linux-mmc@vger.kernel.org
S:	Supported
F:	drivers/mmc/host/tmio_mmc*
F:	drivers/mmc/host/sh_mobile_sdhi.c
F:	drivers/mmc/host/renesas_sdhi*
F:	include/linux/mfd/tmio.h

TMP401 HARDWARE MONITOR DRIVER
@@ -74,16 +74,6 @@
		gpio = <&gpio1 16 GPIO_ACTIVE_HIGH>;	/* gpio_16: WiFi nReset */
		startup-delay-us = <10000>;
	};

	/* Regulator to trigger the nReset signal of the Bluetooth module */
	w3cbw003c_bt_nreset: regulator-w3cbw003c-bt-nreset {
		compatible = "regulator-fixed";
		regulator-name = "regulator-w3cbw003c-bt-nreset";
		regulator-min-microvolt = <3300000>;
		regulator-max-microvolt = <3300000>;
		gpio = <&gpio6 4 GPIO_ACTIVE_HIGH>;	/* gpio_164: BT nReset */
		startup-delay-us = <10000>;
	};
};

&omap3_pmx_core {

@@ -191,7 +181,6 @@
	pinctrl-names = "default";
	pinctrl-0 = <&mmc2_pins>;
	vmmc-supply = <&w3cbw003c_npoweron>;
	vqmmc-supply = <&w3cbw003c_bt_nreset>;
	vmmc_aux-supply = <&w3cbw003c_wifi_nreset>;
	bus-width = <4>;
	cap-sdio-irq;
@@ -61,24 +61,6 @@ config MMC_BLOCK_MINORS

	  If unsure, say 8 here.

config MMC_BLOCK_BOUNCE
	bool "Use bounce buffer for simple hosts"
	depends on MMC_BLOCK
	default y
	help
	  SD/MMC is a high latency protocol where it is crucial to
	  send large requests in order to get high performance. Many
	  controllers, however, are restricted to continuous memory
	  (i.e. they can't do scatter-gather), something the kernel
	  rarely can provide.

	  Say Y here to help these restricted hosts by bouncing
	  requests back and forth from a large buffer. You will get
	  a big performance gain at the cost of up to 64 KiB of
	  physical memory.

	  If unsure, say Y here.

config SDIO_UART
	tristate "SDIO UART/GPS class support"
	depends on TTY
@ -127,14 +127,6 @@ MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
|
|||
|
||||
static inline int mmc_blk_part_switch(struct mmc_card *card,
|
||||
struct mmc_blk_data *md);
|
||||
static int get_card_status(struct mmc_card *card, u32 *status, int retries);
|
||||
|
||||
static void mmc_blk_requeue(struct request_queue *q, struct request *req)
|
||||
{
|
||||
spin_lock_irq(q->queue_lock);
|
||||
blk_requeue_request(q, req);
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
}
|
||||
|
||||
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
|
||||
{
|
||||
|
@ -197,6 +189,8 @@ static ssize_t power_ro_lock_store(struct device *dev,
|
|||
int ret;
|
||||
struct mmc_blk_data *md, *part_md;
|
||||
struct mmc_card *card;
|
||||
struct mmc_queue *mq;
|
||||
struct request *req;
|
||||
unsigned long set;
|
||||
|
||||
if (kstrtoul(buf, 0, &set))
|
||||
|
@ -206,20 +200,14 @@ static ssize_t power_ro_lock_store(struct device *dev,
|
|||
return count;
|
||||
|
||||
md = mmc_blk_get(dev_to_disk(dev));
|
||||
mq = &md->queue;
|
||||
card = md->queue.card;
|
||||
|
||||
mmc_get_card(card);
|
||||
|
||||
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
|
||||
card->ext_csd.boot_ro_lock |
|
||||
EXT_CSD_BOOT_WP_B_PWR_WP_EN,
|
||||
card->ext_csd.part_time);
|
||||
if (ret)
|
||||
pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
|
||||
else
|
||||
card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
|
||||
|
||||
mmc_put_card(card);
|
||||
/* Dispatch locking to the block layer */
|
||||
req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM);
|
||||
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
|
||||
blk_execute_rq(mq->queue, NULL, req, 0);
|
||||
ret = req_to_mmc_queue_req(req)->drv_op_result;
|
||||
|
||||
if (!ret) {
|
||||
pr_info("%s: Locking boot partition ro until next power on\n",
|
||||
|
@ -392,7 +380,7 @@ static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
|
|||
return -EINVAL;
|
||||
|
||||
do {
|
||||
err = get_card_status(card, status, 5);
|
||||
err = __mmc_send_status(card, status, 5);
|
||||
if (err)
|
||||
break;
|
||||
|
||||
|
@ -450,7 +438,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
|
|||
struct mmc_request mrq = {};
|
||||
struct scatterlist sg;
|
||||
int err;
|
||||
int is_rpmb = false;
|
||||
bool is_rpmb = false;
|
||||
u32 status = 0;
|
||||
|
||||
if (!card || !md || !idata)
|
||||
|
@ -570,9 +558,12 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
|
|||
struct mmc_ioc_cmd __user *ic_ptr)
|
||||
{
|
||||
struct mmc_blk_ioc_data *idata;
|
||||
struct mmc_blk_ioc_data *idatas[1];
|
||||
struct mmc_blk_data *md;
|
||||
struct mmc_queue *mq;
|
||||
struct mmc_card *card;
|
||||
int err = 0, ioc_err = 0;
|
||||
struct request *req;
|
||||
|
||||
/*
|
||||
* The caller must have CAP_SYS_RAWIO, and must be calling this on the
|
||||
|
@ -598,17 +589,21 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
|
|||
goto cmd_done;
|
||||
}
|
||||
|
||||
mmc_get_card(card);
|
||||
|
||||
ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
|
||||
|
||||
/* Always switch back to main area after RPMB access */
|
||||
if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
|
||||
mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
|
||||
|
||||
mmc_put_card(card);
|
||||
|
||||
/*
|
||||
* Dispatch the ioctl() into the block request queue.
|
||||
*/
|
||||
mq = &md->queue;
|
||||
req = blk_get_request(mq->queue,
|
||||
idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
|
||||
__GFP_RECLAIM);
|
||||
idatas[0] = idata;
|
||||
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
|
||||
req_to_mmc_queue_req(req)->idata = idatas;
|
||||
req_to_mmc_queue_req(req)->ioc_count = 1;
|
||||
blk_execute_rq(mq->queue, NULL, req, 0);
|
||||
ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
|
||||
err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
|
||||
blk_put_request(req);
|
||||
|
||||
cmd_done:
|
||||
mmc_blk_put(md);
|
||||
|
@ -625,8 +620,10 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
|
|||
struct mmc_ioc_cmd __user *cmds = user->cmds;
|
||||
struct mmc_card *card;
|
||||
struct mmc_blk_data *md;
|
||||
struct mmc_queue *mq;
|
||||
int i, err = 0, ioc_err = 0;
|
||||
__u64 num_of_cmds;
|
||||
struct request *req;
|
||||
|
||||
/*
|
||||
* The caller must have CAP_SYS_RAWIO, and must be calling this on the
|
||||
|
@ -668,21 +665,26 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
|
|||
goto cmd_done;
|
||||
}
|
||||
|
||||
mmc_get_card(card);
|
||||
|
||||
for (i = 0; i < num_of_cmds && !ioc_err; i++)
|
||||
ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
|
||||
|
||||
/* Always switch back to main area after RPMB access */
|
||||
if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
|
||||
mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
|
||||
|
||||
mmc_put_card(card);
|
||||
/*
|
||||
* Dispatch the ioctl()s into the block request queue.
|
||||
*/
|
||||
mq = &md->queue;
|
||||
req = blk_get_request(mq->queue,
|
||||
idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
|
||||
__GFP_RECLAIM);
|
||||
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
|
||||
req_to_mmc_queue_req(req)->idata = idata;
|
||||
req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
|
||||
blk_execute_rq(mq->queue, NULL, req, 0);
|
||||
ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
|
||||
|
||||
/* copy to user if data and response */
|
||||
for (i = 0; i < num_of_cmds && !err; i++)
|
||||
err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
|
||||
|
||||
blk_put_request(req);
|
||||
|
||||
cmd_done:
|
||||
mmc_blk_put(md);
|
||||
cmd_err:
|
||||
|
@ -852,21 +854,6 @@ static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int get_card_status(struct mmc_card *card, u32 *status, int retries)
|
||||
{
|
||||
struct mmc_command cmd = {};
|
||||
int err;
|
||||
|
||||
cmd.opcode = MMC_SEND_STATUS;
|
||||
if (!mmc_host_is_spi(card->host))
|
||||
cmd.arg = card->rca << 16;
|
||||
cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
|
||||
err = mmc_wait_for_cmd(card->host, &cmd, retries);
|
||||
if (err == 0)
|
||||
*status = cmd.resp[0];
|
||||
return err;
|
||||
}
|
||||
|
||||
static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
|
||||
bool hw_busy_detect, struct request *req, bool *gen_err)
|
||||
{
|
||||
|
@ -875,7 +862,7 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
|
|||
u32 status;
|
||||
|
||||
do {
|
||||
err = get_card_status(card, &status, 5);
|
||||
err = __mmc_send_status(card, &status, 5);
|
||||
if (err) {
|
||||
pr_err("%s: error %d requesting status\n",
|
||||
req->rq_disk->disk_name, err);
|
||||
|
@ -1043,7 +1030,7 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
|
|||
* we can't be sure the returned status is for the r/w command.
|
||||
*/
|
||||
for (retry = 2; retry >= 0; retry--) {
|
||||
err = get_card_status(card, &status, 0);
|
||||
err = __mmc_send_status(card, &status, 0);
|
||||
if (!err)
|
||||
break;
|
||||
|
||||
|
@ -1178,6 +1165,54 @@ int mmc_access_rpmb(struct mmc_queue *mq)
|
|||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* The non-block commands come back from the block layer after it queued it and
|
||||
* processed it with all other requests and then they get issued in this
|
||||
* function.
|
||||
*/
|
||||
static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
|
||||
{
|
||||
struct mmc_queue_req *mq_rq;
|
||||
struct mmc_card *card = mq->card;
|
||||
struct mmc_blk_data *md = mq->blkdata;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
mq_rq = req_to_mmc_queue_req(req);
|
||||
|
||||
switch (mq_rq->drv_op) {
|
||||
case MMC_DRV_OP_IOCTL:
|
||||
for (i = 0; i < mq_rq->ioc_count; i++) {
|
||||
ret = __mmc_blk_ioctl_cmd(card, md, mq_rq->idata[i]);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
/* Always switch back to main area after RPMB access */
|
||||
if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
|
||||
mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
|
||||
break;
|
||||
case MMC_DRV_OP_BOOT_WP:
|
||||
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
|
||||
card->ext_csd.boot_ro_lock |
|
||||
EXT_CSD_BOOT_WP_B_PWR_WP_EN,
|
||||
card->ext_csd.part_time);
|
||||
if (ret)
|
||||
pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
|
||||
md->disk->disk_name, ret);
|
||||
else
|
||||
card->ext_csd.boot_ro_lock |=
|
||||
EXT_CSD_BOOT_WP_B_PWR_WP_EN;
|
||||
break;
|
||||
default:
|
||||
pr_err("%s: unknown driver specific operation\n",
|
||||
md->disk->disk_name);
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
mq_rq->drv_op_result = ret;
|
||||
blk_end_request_all(req, ret);
|
||||
}
|
||||
|
||||
static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
|
||||
{
|
||||
struct mmc_blk_data *md = mq->blkdata;
|
||||
|
@ -1329,16 +1364,25 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
|
|||
R1_ADDRESS_ERROR | /* Misaligned address */ \
|
||||
R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
|
||||
R1_WP_VIOLATION | /* Tried to write to protected block */ \
|
||||
R1_CARD_ECC_FAILED | /* Card ECC failed */ \
|
||||
R1_CC_ERROR | /* Card controller error */ \
|
||||
R1_ERROR) /* General/unknown error */
|
||||
|
||||
static bool mmc_blk_has_cmd_err(struct mmc_command *cmd)
|
||||
{
|
||||
if (!cmd->error && cmd->resp[0] & CMD_ERRORS)
|
||||
cmd->error = -EIO;
|
||||
|
||||
return cmd->error;
|
||||
}
|
||||
|
||||
static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
|
||||
struct mmc_async_req *areq)
|
||||
{
|
||||
struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
|
||||
areq);
|
||||
struct mmc_blk_request *brq = &mq_mrq->brq;
|
||||
struct request *req = mq_mrq->req;
|
||||
struct request *req = mmc_queue_req_to_req(mq_mrq);
|
||||
int need_retune = card->host->need_retune;
|
||||
bool ecc_err = false;
|
||||
bool gen_err = false;
|
||||
|
@ -1353,7 +1397,7 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
|
|||
* stop.error indicates a problem with the stop command. Data
|
||||
* may have been transferred, or may still be transferring.
|
||||
*/
|
||||
if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
|
||||
if (brq->sbc.error || brq->cmd.error || mmc_blk_has_cmd_err(&brq->stop) ||
|
||||
brq->data.error) {
|
||||
switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
|
||||
case ERR_RETRY:
|
||||
|
@ -1407,7 +1451,8 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
|
|||
return MMC_BLK_RETRY;
|
||||
}
|
||||
|
||||
if (brq->data.error) {
|
||||
/* Some errors (ECC) are flagged on the next commmand, so check stop, too */
|
||||
if (brq->data.error || brq->stop.error) {
|
||||
if (need_retune && !brq->retune_retry_done) {
|
||||
pr_debug("%s: retrying because a re-tune was needed\n",
|
||||
req->rq_disk->disk_name);
|
||||
|
@ -1415,7 +1460,7 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
|
|||
return MMC_BLK_RETRY;
|
||||
}
|
||||
pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
|
||||
req->rq_disk->disk_name, brq->data.error,
|
||||
req->rq_disk->disk_name, brq->data.error ?: brq->stop.error,
|
||||
(unsigned)blk_rq_pos(req),
|
||||
(unsigned)blk_rq_sectors(req),
|
||||
brq->cmd.resp[0], brq->stop.resp[0]);
|
||||
|
@ -1445,7 +1490,7 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
|
|||
struct mmc_blk_data *md = mq->blkdata;
|
||||
struct mmc_card *card = md->queue.card;
|
||||
struct mmc_blk_request *brq = &mqrq->brq;
|
||||
struct request *req = mqrq->req;
|
||||
struct request *req = mmc_queue_req_to_req(mqrq);
|
||||
|
||||
/*
|
||||
* Reliable writes are used to implement Forced Unit Access and
|
||||
|
@ -1550,7 +1595,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
|
|||
{
|
||||
u32 readcmd, writecmd;
|
||||
struct mmc_blk_request *brq = &mqrq->brq;
|
||||
struct request *req = mqrq->req;
|
||||
struct request *req = mmc_queue_req_to_req(mqrq);
|
||||
struct mmc_blk_data *md = mq->blkdata;
|
||||
bool do_rel_wr, do_data_tag;
|
||||
|
||||
|
@ -1647,7 +1692,7 @@ static void mmc_blk_rw_cmd_abort(struct mmc_queue *mq, struct mmc_card *card,
|
|||
if (mmc_card_removed(card))
|
||||
req->rq_flags |= RQF_QUIET;
|
||||
while (blk_end_request(req, BLK_STS_IOERR, blk_rq_cur_bytes(req)));
|
||||
mmc_queue_req_free(mq, mqrq);
|
||||
mq->qcnt--;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1667,7 +1712,7 @@ static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req,
|
|||
if (mmc_card_removed(mq->card)) {
|
||||
req->rq_flags |= RQF_QUIET;
|
||||
blk_end_request_all(req, BLK_STS_IOERR);
|
||||
mmc_queue_req_free(mq, mqrq);
|
||||
mq->qcnt--; /* FIXME: just set to 0? */
|
||||
return;
|
||||
}
|
||||
/* Else proceed and try to restart the current async request */
|
||||
|
@ -1690,12 +1735,8 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
|
|||
bool req_pending = true;
|
||||
|
||||
if (new_req) {
|
||||
mqrq_cur = mmc_queue_req_find(mq, new_req);
|
||||
if (!mqrq_cur) {
|
||||
WARN_ON(1);
|
||||
mmc_blk_requeue(mq->queue, new_req);
|
||||
new_req = NULL;
|
||||
}
|
||||
mqrq_cur = req_to_mmc_queue_req(new_req);
|
||||
mq->qcnt++;
|
||||
}
|
||||
|
||||
if (!mq->qcnt)
|
||||
|
@ -1736,7 +1777,7 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
|
|||
*/
|
||||
mq_rq = container_of(old_areq, struct mmc_queue_req, areq);
|
||||
brq = &mq_rq->brq;
|
||||
old_req = mq_rq->req;
|
||||
old_req = mmc_queue_req_to_req(mq_rq);
|
||||
type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
|
||||
mmc_queue_bounce_post(mq_rq);
|
||||
|
||||
|
@ -1769,12 +1810,12 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
|
|||
if (req_pending)
|
||||
mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
|
||||
else
|
||||
mmc_queue_req_free(mq, mq_rq);
|
||||
mq->qcnt--;
|
||||
mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
|
||||
return;
|
||||
}
|
||||
if (!req_pending) {
|
||||
mmc_queue_req_free(mq, mq_rq);
|
||||
mq->qcnt--;
|
||||
mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
|
||||
return;
|
||||
}
|
||||
|
@ -1819,7 +1860,7 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
|
|||
req_pending = blk_end_request(old_req, BLK_STS_IOERR,
|
||||
brq->data.blksz);
|
||||
if (!req_pending) {
|
||||
mmc_queue_req_free(mq, mq_rq);
|
||||
mq->qcnt--;
|
||||
mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
|
||||
return;
|
||||
}
|
||||
|
@ -1849,7 +1890,7 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
|
|||
}
|
||||
} while (req_pending);
|
||||
|
||||
mmc_queue_req_free(mq, mq_rq);
|
||||
mq->qcnt--;
|
||||
}
|
||||
|
||||
void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
|
||||
|
@ -1870,23 +1911,54 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
|
|||
goto out;
|
||||
}
|
||||
|
||||
if (req && req_op(req) == REQ_OP_DISCARD) {
|
||||
/* complete ongoing async transfer before issuing discard */
|
||||
if (mq->qcnt)
|
||||
mmc_blk_issue_rw_rq(mq, NULL);
|
||||
mmc_blk_issue_discard_rq(mq, req);
|
||||
} else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
|
||||
/* complete ongoing async transfer before issuing secure erase*/
|
||||
if (mq->qcnt)
|
||||
mmc_blk_issue_rw_rq(mq, NULL);
|
||||
mmc_blk_issue_secdiscard_rq(mq, req);
|
||||
} else if (req && req_op(req) == REQ_OP_FLUSH) {
|
||||
/* complete ongoing async transfer before issuing flush */
|
||||
if (mq->qcnt)
|
||||
mmc_blk_issue_rw_rq(mq, NULL);
|
||||
mmc_blk_issue_flush(mq, req);
|
||||
if (req) {
|
||||
switch (req_op(req)) {
|
||||
case REQ_OP_DRV_IN:
|
||||
case REQ_OP_DRV_OUT:
|
||||
/*
|
||||
* Complete ongoing async transfer before issuing
|
||||
* ioctl()s
|
||||
*/
|
||||
if (mq->qcnt)
|
||||
mmc_blk_issue_rw_rq(mq, NULL);
|
||||
mmc_blk_issue_drv_op(mq, req);
|
||||
break;
|
||||
case REQ_OP_DISCARD:
|
||||
/*
|
||||
* Complete ongoing async transfer before issuing
|
||||
* discard.
|
||||
*/
|
||||
if (mq->qcnt)
|
||||
mmc_blk_issue_rw_rq(mq, NULL);
|
||||
mmc_blk_issue_discard_rq(mq, req);
|
||||
break;
|
||||
case REQ_OP_SECURE_ERASE:
|
||||
/*
|
||||
* Complete ongoing async transfer before issuing
|
||||
* secure erase.
|
||||
*/
|
||||
if (mq->qcnt)
|
||||
mmc_blk_issue_rw_rq(mq, NULL);
|
||||
mmc_blk_issue_secdiscard_rq(mq, req);
|
||||
break;
|
||||
case REQ_OP_FLUSH:
|
||||
/*
|
||||
* Complete ongoing async transfer before issuing
|
||||
* flush.
|
||||
*/
|
||||
if (mq->qcnt)
|
||||
mmc_blk_issue_rw_rq(mq, NULL);
|
||||
mmc_blk_issue_flush(mq, req);
|
||||
break;
|
||||
default:
|
||||
/* Normal request, just issue it */
|
||||
mmc_blk_issue_rw_rq(mq, req);
|
||||
card->host->context_info.is_waiting_last_req = false;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
mmc_blk_issue_rw_rq(mq, req);
|
||||
/* No request, flushing the pipeline with NULL */
|
||||
mmc_blk_issue_rw_rq(mq, NULL);
|
||||
card->host->context_info.is_waiting_last_req = false;
|
||||
}
|
||||
|
||||
|
@ -2171,7 +2243,6 @@ static int mmc_blk_probe(struct mmc_card *card)
|
|||
{
|
||||
struct mmc_blk_data *md, *part_md;
|
||||
char cap_str[10];
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Check that the card supports the command class(es) we need.
|
||||
|
@ -2181,15 +2252,9 @@ static int mmc_blk_probe(struct mmc_card *card)
|
|||
|
||||
mmc_fixup_device(card, mmc_blk_fixups);
|
||||
|
||||
ret = mmc_queue_alloc_shared_queue(card);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
md = mmc_blk_alloc(card);
|
||||
if (IS_ERR(md)) {
|
||||
mmc_queue_free_shared_queue(card);
|
||||
if (IS_ERR(md))
|
||||
return PTR_ERR(md);
|
||||
}
|
||||
|
||||
string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
|
||||
cap_str, sizeof(cap_str));
|
||||
|
@ -2227,7 +2292,6 @@ static int mmc_blk_probe(struct mmc_card *card)
|
|||
out:
|
||||
mmc_blk_remove_parts(card, md);
|
||||
mmc_blk_remove_req(md);
|
||||
mmc_queue_free_shared_queue(card);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2245,7 +2309,6 @@ static void mmc_blk_remove(struct mmc_card *card)
|
|||
pm_runtime_put_noidle(&card->dev);
|
||||
mmc_blk_remove_req(md);
|
||||
dev_set_drvdata(&card->dev, NULL);
|
||||
mmc_queue_free_shared_queue(card);
|
||||
}
|
||||
|
||||
static int _mmc_blk_suspend(struct mmc_card *card)
|
||||
|
|
|
@ -53,12 +53,6 @@
|
|||
/* If the device is not responding */
|
||||
#define MMC_CORE_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
|
||||
|
||||
/*
|
||||
* Background operations can take a long time, depending on the housekeeping
|
||||
* operations the card has to perform.
|
||||
*/
|
||||
#define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */
|
||||
|
||||
/* The max erase timeout, used when host->max_busy_timeout isn't specified */
|
||||
#define MMC_ERASE_TIMEOUT_MS (60 * 1000) /* 60 s */
|
||||
|
||||
|
@ -362,74 +356,6 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* mmc_start_bkops - start BKOPS for supported cards
|
||||
* @card: MMC card to start BKOPS
|
||||
* @form_exception: A flag to indicate if this function was
|
||||
* called due to an exception raised by the card
|
||||
*
|
||||
* Start background operations whenever requested.
|
||||
* When the urgent BKOPS bit is set in a R1 command response
|
||||
* then background operations should be started immediately.
|
||||
*/
|
||||
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
|
||||
{
|
||||
int err;
|
||||
int timeout;
|
||||
bool use_busy_signal;
|
||||
|
||||
if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
|
||||
return;
|
||||
|
||||
err = mmc_read_bkops_status(card);
|
||||
if (err) {
|
||||
pr_err("%s: Failed to read bkops status: %d\n",
|
||||
mmc_hostname(card->host), err);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!card->ext_csd.raw_bkops_status)
|
||||
return;
|
||||
|
||||
if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
|
||||
from_exception)
|
||||
return;
|
||||
|
||||
mmc_claim_host(card->host);
|
||||
if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
|
||||
timeout = MMC_BKOPS_MAX_TIMEOUT;
|
||||
use_busy_signal = true;
|
||||
} else {
|
||||
timeout = 0;
|
||||
use_busy_signal = false;
|
||||
}
|
||||
|
||||
mmc_retune_hold(card->host);
|
||||
|
||||
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
|
||||
EXT_CSD_BKOPS_START, 1, timeout, 0,
|
||||
use_busy_signal, true, false);
|
||||
if (err) {
|
||||
pr_warn("%s: Error %d starting bkops\n",
|
||||
mmc_hostname(card->host), err);
|
||||
mmc_retune_release(card->host);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* For urgent bkops status (LEVEL_2 and more)
|
||||
* bkops executed synchronously, otherwise
|
||||
* the operation is in progress
|
||||
*/
|
||||
if (!use_busy_signal)
|
||||
mmc_card_set_doing_bkops(card);
|
||||
else
|
||||
mmc_retune_release(card->host);
|
||||
out:
|
||||
mmc_release_host(card->host);
|
||||
}
|
||||
EXPORT_SYMBOL(mmc_start_bkops);
|
||||
|
||||
/*
|
||||
* mmc_wait_data_done() - done callback for data request
|
||||
* @mrq: done data request
|
||||
|
@ -748,71 +674,6 @@ void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
|
|||
}
|
||||
EXPORT_SYMBOL(mmc_wait_for_req);
|
||||
|
||||
/**
|
||||
* mmc_interrupt_hpi - Issue for High priority Interrupt
|
||||
* @card: the MMC card associated with the HPI transfer
|
||||
*
|
||||
* Issued High Priority Interrupt, and check for card status
|
||||
* until out-of prg-state.
|
||||
*/
|
||||
int mmc_interrupt_hpi(struct mmc_card *card)
|
||||
{
|
||||
int err;
|
||||
u32 status;
|
||||
unsigned long prg_wait;
|
||||
|
||||
if (!card->ext_csd.hpi_en) {
|
||||
pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
|
||||
return 1;
|
||||
}
|
||||
|
||||
mmc_claim_host(card->host);
|
||||
err = mmc_send_status(card, &status);
|
||||
if (err) {
|
||||
pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
|
||||
goto out;
|
||||
}
|
||||
|
||||
switch (R1_CURRENT_STATE(status)) {
|
||||
case R1_STATE_IDLE:
|
||||
case R1_STATE_READY:
|
||||
case R1_STATE_STBY:
|
||||
case R1_STATE_TRAN:
|
||||
/*
|
||||
* In idle and transfer states, HPI is not needed and the caller
|
||||
* can issue the next intended command immediately
|
||||
*/
|
||||
goto out;
|
||||
case R1_STATE_PRG:
|
||||
break;
|
||||
default:
|
||||
/* In all other states, it's illegal to issue HPI */
|
||||
pr_debug("%s: HPI cannot be sent. Card state=%d\n",
|
||||
mmc_hostname(card->host), R1_CURRENT_STATE(status));
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = mmc_send_hpi_cmd(card, &status);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
|
||||
do {
|
||||
err = mmc_send_status(card, &status);
|
||||
|
||||
if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
|
||||
break;
|
||||
if (time_after(jiffies, prg_wait))
|
||||
err = -ETIMEDOUT;
|
||||
} while (!err);
|
||||
|
||||
out:
|
||||
mmc_release_host(card->host);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(mmc_interrupt_hpi);
|
||||
|
||||
/**
|
||||
* mmc_wait_for_cmd - start a command and wait for completion
|
||||
* @host: MMC host to start command
|
||||
|
@ -842,53 +703,6 @@ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries
|
|||
|
||||
EXPORT_SYMBOL(mmc_wait_for_cmd);
|
||||
|
||||
/**
|
||||
* mmc_stop_bkops - stop ongoing BKOPS
|
||||
* @card: MMC card to check BKOPS
|
||||
*
|
||||
* Send HPI command to stop ongoing background operations to
|
||||
* allow rapid servicing of foreground operations, e.g. read/
|
||||
* writes. Wait until the card comes out of the programming state
|
||||
* to avoid errors in servicing read/write requests.
|
||||
*/
|
||||
int mmc_stop_bkops(struct mmc_card *card)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
err = mmc_interrupt_hpi(card);
|
||||
|
||||
/*
|
||||
* If err is EINVAL, we can't issue an HPI.
|
||||
* It should complete the BKOPS.
|
||||
*/
|
||||
if (!err || (err == -EINVAL)) {
|
||||
mmc_card_clr_doing_bkops(card);
|
||||
mmc_retune_release(card->host);
|
||||
err = 0;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(mmc_stop_bkops);
|
||||
|
||||
int mmc_read_bkops_status(struct mmc_card *card)
|
||||
{
|
||||
int err;
|
||||
u8 *ext_csd;
|
||||
|
||||
mmc_claim_host(card->host);
|
||||
err = mmc_get_ext_csd(card, &ext_csd);
|
||||
mmc_release_host(card->host);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
|
||||
card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
|
||||
kfree(ext_csd);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(mmc_read_bkops_status);
|
||||
|
||||
/**
|
||||
* mmc_set_data_timeout - set the timeout for a data command
|
||||
* @data: data phase for command
|
||||
|
@ -2597,6 +2411,8 @@ EXPORT_SYMBOL(mmc_set_blockcount);
|
|||
|
||||
static void mmc_hw_reset_for_init(struct mmc_host *host)
|
||||
{
|
||||
mmc_pwrseq_reset(host);
|
||||
|
||||
if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
|
||||
return;
|
||||
host->ops->hw_reset(host);
|
||||
|
@ -2836,8 +2652,11 @@ void mmc_stop_host(struct mmc_host *host)
|
|||
host->removed = 1;
|
||||
spin_unlock_irqrestore(&host->lock, flags);
|
||||
#endif
|
||||
if (host->slot.cd_irq >= 0)
|
||||
if (host->slot.cd_irq >= 0) {
|
||||
if (host->slot.cd_wake_enabled)
|
||||
disable_irq_wake(host->slot.cd_irq);
|
||||
disable_irq(host->slot.cd_irq);
|
||||
}
|
||||
|
||||
host->rescan_disable = 1;
|
||||
cancel_delayed_work_sync(&host->detect);
|
||||
|
@ -2913,27 +2732,6 @@ int mmc_power_restore_host(struct mmc_host *host)
|
|||
}
|
||||
EXPORT_SYMBOL(mmc_power_restore_host);
|
||||
|
||||
/*
|
||||
* Flush the cache to the non-volatile storage.
|
||||
*/
|
||||
int mmc_flush_cache(struct mmc_card *card)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
if (mmc_card_mmc(card) &&
|
||||
(card->ext_csd.cache_size > 0) &&
|
||||
(card->ext_csd.cache_ctrl & 1)) {
|
||||
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
|
||||
EXT_CSD_FLUSH_CACHE, 1, 0);
|
||||
if (err)
|
||||
pr_err("%s: cache flush error %d\n",
|
||||
mmc_hostname(card->host), err);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(mmc_flush_cache);
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
/* Do the card removal on suspend if card is assumed removeable
|
||||
* Do that in pm notifier while userspace isn't yet frozen, so we will be able
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
#include "host.h"
|
||||
#include "slot-gpio.h"
|
||||
#include "pwrseq.h"
|
||||
#include "sdio_ops.h"
|
||||
|
||||
#define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev)
|
||||
|
||||
|
@ -176,19 +177,17 @@ static void mmc_retune_timer(unsigned long data)
|
|||
*/
|
||||
int mmc_of_parse(struct mmc_host *host)
|
||||
{
|
||||
struct device_node *np;
|
||||
struct device *dev = host->parent;
|
||||
u32 bus_width;
|
||||
int ret;
|
||||
bool cd_cap_invert, cd_gpio_invert = false;
|
||||
bool ro_cap_invert, ro_gpio_invert = false;
|
||||
|
||||
if (!host->parent || !host->parent->of_node)
|
||||
if (!dev || !dev_fwnode(dev))
|
||||
return 0;
|
||||
|
||||
np = host->parent->of_node;
|
||||
|
||||
/* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
|
||||
if (of_property_read_u32(np, "bus-width", &bus_width) < 0) {
|
||||
if (device_property_read_u32(dev, "bus-width", &bus_width) < 0) {
|
||||
dev_dbg(host->parent,
|
||||
"\"bus-width\" property is missing, assuming 1 bit.\n");
|
||||
bus_width = 1;
|
||||
|
@ -210,7 +209,7 @@ int mmc_of_parse(struct mmc_host *host)
|
|||
}
|
||||
|
||||
/* f_max is obtained from the optional "max-frequency" property */
|
||||
of_property_read_u32(np, "max-frequency", &host->f_max);
|
||||
device_property_read_u32(dev, "max-frequency", &host->f_max);
|
||||
|
||||
/*
|
||||
* Configure CD and WP pins. They are both by default active low to
|
||||
|
@ -225,12 +224,12 @@ int mmc_of_parse(struct mmc_host *host)
|
|||
*/
|
||||
|
||||
/* Parse Card Detection */
|
||||
if (of_property_read_bool(np, "non-removable")) {
|
||||
if (device_property_read_bool(dev, "non-removable")) {
|
||||
host->caps |= MMC_CAP_NONREMOVABLE;
|
||||
} else {
|
||||
cd_cap_invert = of_property_read_bool(np, "cd-inverted");
|
||||
cd_cap_invert = device_property_read_bool(dev, "cd-inverted");
|
||||
|
||||
if (of_property_read_bool(np, "broken-cd"))
|
||||
if (device_property_read_bool(dev, "broken-cd"))
|
||||
host->caps |= MMC_CAP_NEEDS_POLL;
|
||||
|
||||
ret = mmc_gpiod_request_cd(host, "cd", 0, true,
|
||||
|
@ -256,7 +255,7 @@ int mmc_of_parse(struct mmc_host *host)
|
|||
}
|
||||
|
||||
/* Parse Write Protection */
|
||||
ro_cap_invert = of_property_read_bool(np, "wp-inverted");
|
||||
ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
|
||||
|
||||
ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
|
||||
if (!ret)
|
||||
|
@ -264,64 +263,64 @@ int mmc_of_parse(struct mmc_host *host)
|
|||
else if (ret != -ENOENT && ret != -ENOSYS)
|
||||
return ret;
|
||||
|
||||
if (of_property_read_bool(np, "disable-wp"))
|
||||
if (device_property_read_bool(dev, "disable-wp"))
|
||||
host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
|
||||
|
||||
/* See the comment on CD inversion above */
|
||||
if (ro_cap_invert ^ ro_gpio_invert)
|
||||
host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
|
||||
|
||||
if (of_property_read_bool(np, "cap-sd-highspeed"))
|
||||
if (device_property_read_bool(dev, "cap-sd-highspeed"))
|
||||
host->caps |= MMC_CAP_SD_HIGHSPEED;
|
||||
if (of_property_read_bool(np, "cap-mmc-highspeed"))
|
||||
if (device_property_read_bool(dev, "cap-mmc-highspeed"))
|
||||
host->caps |= MMC_CAP_MMC_HIGHSPEED;
|
||||
if (of_property_read_bool(np, "sd-uhs-sdr12"))
|
||||
if (device_property_read_bool(dev, "sd-uhs-sdr12"))
|
||||
host->caps |= MMC_CAP_UHS_SDR12;
|
||||
if (of_property_read_bool(np, "sd-uhs-sdr25"))
|
||||
if (device_property_read_bool(dev, "sd-uhs-sdr25"))
|
||||
host->caps |= MMC_CAP_UHS_SDR25;
|
||||
if (of_property_read_bool(np, "sd-uhs-sdr50"))
|
||||
if (device_property_read_bool(dev, "sd-uhs-sdr50"))
|
||||
host->caps |= MMC_CAP_UHS_SDR50;
|
||||
if (of_property_read_bool(np, "sd-uhs-sdr104"))
|
||||
if (device_property_read_bool(dev, "sd-uhs-sdr104"))
|
||||
host->caps |= MMC_CAP_UHS_SDR104;
|
||||
if (of_property_read_bool(np, "sd-uhs-ddr50"))
|
||||
if (device_property_read_bool(dev, "sd-uhs-ddr50"))
|
||||
host->caps |= MMC_CAP_UHS_DDR50;
|
||||
if (of_property_read_bool(np, "cap-power-off-card"))
|
||||
if (device_property_read_bool(dev, "cap-power-off-card"))
|
||||
host->caps |= MMC_CAP_POWER_OFF_CARD;
|
||||
if (of_property_read_bool(np, "cap-mmc-hw-reset"))
|
||||
if (device_property_read_bool(dev, "cap-mmc-hw-reset"))
|
||||
host->caps |= MMC_CAP_HW_RESET;
|
||||
if (of_property_read_bool(np, "cap-sdio-irq"))
|
||||
if (device_property_read_bool(dev, "cap-sdio-irq"))
|
||||
host->caps |= MMC_CAP_SDIO_IRQ;
|
||||
if (of_property_read_bool(np, "full-pwr-cycle"))
|
||||
if (device_property_read_bool(dev, "full-pwr-cycle"))
|
||||
host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
|
||||
if (of_property_read_bool(np, "keep-power-in-suspend"))
|
||||
if (device_property_read_bool(dev, "keep-power-in-suspend"))
|
||||
host->pm_caps |= MMC_PM_KEEP_POWER;
|
||||
if (of_property_read_bool(np, "wakeup-source") ||
|
||||
of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */
|
||||
if (device_property_read_bool(dev, "wakeup-source") ||
|
||||
device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
|
||||
host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
|
||||
if (of_property_read_bool(np, "mmc-ddr-3_3v"))
|
||||
if (device_property_read_bool(dev, "mmc-ddr-3_3v"))
|
||||
host->caps |= MMC_CAP_3_3V_DDR;
|
||||
if (of_property_read_bool(np, "mmc-ddr-1_8v"))
|
||||
if (device_property_read_bool(dev, "mmc-ddr-1_8v"))
|
||||
host->caps |= MMC_CAP_1_8V_DDR;
|
||||
if (of_property_read_bool(np, "mmc-ddr-1_2v"))
|
||||
if (device_property_read_bool(dev, "mmc-ddr-1_2v"))
|
||||
host->caps |= MMC_CAP_1_2V_DDR;
|
||||
if (of_property_read_bool(np, "mmc-hs200-1_8v"))
|
||||
if (device_property_read_bool(dev, "mmc-hs200-1_8v"))
|
||||
host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
|
||||
if (of_property_read_bool(np, "mmc-hs200-1_2v"))
|
||||
if (device_property_read_bool(dev, "mmc-hs200-1_2v"))
|
||||
host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
|
||||
if (of_property_read_bool(np, "mmc-hs400-1_8v"))
|
||||
if (device_property_read_bool(dev, "mmc-hs400-1_8v"))
|
||||
host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
|
||||
if (of_property_read_bool(np, "mmc-hs400-1_2v"))
|
||||
if (device_property_read_bool(dev, "mmc-hs400-1_2v"))
|
||||
host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
|
||||
if (of_property_read_bool(np, "mmc-hs400-enhanced-strobe"))
|
||||
if (device_property_read_bool(dev, "mmc-hs400-enhanced-strobe"))
|
||||
host->caps2 |= MMC_CAP2_HS400_ES;
|
||||
if (of_property_read_bool(np, "no-sdio"))
|
||||
if (device_property_read_bool(dev, "no-sdio"))
|
||||
host->caps2 |= MMC_CAP2_NO_SDIO;
|
||||
if (of_property_read_bool(np, "no-sd"))
|
||||
if (device_property_read_bool(dev, "no-sd"))
|
||||
host->caps2 |= MMC_CAP2_NO_SD;
|
||||
if (of_property_read_bool(np, "no-mmc"))
|
||||
if (device_property_read_bool(dev, "no-mmc"))
|
||||
host->caps2 |= MMC_CAP2_NO_MMC;
|
||||
|
||||
host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
|
||||
host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
|
||||
if (host->dsr_req && (host->dsr & ~0xffff)) {
|
||||
dev_err(host->parent,
|
||||
"device tree specified broken value for DSR: 0x%x, ignoring\n",
|
||||
|
@ -379,6 +378,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
|
|||
spin_lock_init(&host->lock);
|
||||
init_waitqueue_head(&host->wq);
|
||||
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
|
||||
INIT_DELAYED_WORK(&host->sdio_irq_work, sdio_irq_work);
|
||||
setup_timer(&host->retune_timer, mmc_retune_timer, (unsigned long)host);
|
||||
|
||||
/*
|
||||
|
|
|
@ -27,6 +27,7 @@
|
|||
#include "mmc_ops.h"
|
||||
#include "quirks.h"
|
||||
#include "sd_ops.h"
|
||||
#include "pwrseq.h"
|
||||
|
||||
#define DEFAULT_CMD6_TIMEOUT_MS 500
|
||||
|
||||
|
@ -1555,10 +1556,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
|
|||
/*
|
||||
* Fetch CID from card.
|
||||
*/
|
||||
if (mmc_host_is_spi(host))
|
||||
err = mmc_send_cid(host, cid);
|
||||
else
|
||||
err = mmc_all_send_cid(host, cid);
|
||||
err = mmc_send_cid(host, cid);
|
||||
if (err)
|
||||
goto err;
|
||||
|
||||
|
@ -1653,12 +1651,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
|
|||
mmc_set_erase_size(card);
|
||||
}
|
||||
|
||||
/*
|
||||
* If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
|
||||
* bit. This bit will be lost every time after a reset or power off.
|
||||
*/
|
||||
if (card->ext_csd.partition_setting_completed ||
|
||||
(card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) {
|
||||
/* Enable ERASE_GRP_DEF. This bit is lost after a reset or power off. */
|
||||
if (card->ext_csd.rev >= 3) {
|
||||
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
|
||||
EXT_CSD_ERASE_GROUP_DEF, 1,
|
||||
card->ext_csd.generic_cmd6_time);
|
||||
|
@ -2096,7 +2090,7 @@ static int mmc_runtime_resume(struct mmc_host *host)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int mmc_can_reset(struct mmc_card *card)
|
||||
static int mmc_can_reset(struct mmc_card *card)
|
||||
{
|
||||
u8 rst_n_function;
|
||||
|
||||
|
@ -2105,7 +2099,6 @@ int mmc_can_reset(struct mmc_card *card)
|
|||
return 0;
|
||||
return 1;
|
||||
}
|
||||
EXPORT_SYMBOL(mmc_can_reset);
|
||||
|
||||
static int mmc_reset(struct mmc_host *host)
|
||||
{
|
||||
|
@ -2127,6 +2120,7 @@ static int mmc_reset(struct mmc_host *host)
|
|||
} else {
|
||||
/* Do a brute force power cycle */
|
||||
mmc_power_cycle(host, card->ocr);
|
||||
mmc_pwrseq_reset(host);
|
||||
}
|
||||
return mmc_init_card(host, card->ocr, card);
|
||||
}
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#include <linux/mmc/mmc.h>
|
||||
|
||||
#include "core.h"
|
||||
#include "card.h"
|
||||
#include "host.h"
|
||||
#include "mmc_ops.h"
|
||||
|
||||
|
@ -54,7 +55,7 @@ static const u8 tuning_blk_pattern_8bit[] = {
|
|||
0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
|
||||
};
|
||||
|
||||
int mmc_send_status(struct mmc_card *card, u32 *status)
|
||||
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
|
||||
{
|
||||
int err;
|
||||
struct mmc_command cmd = {};
|
||||
|
@ -64,7 +65,7 @@ int mmc_send_status(struct mmc_card *card, u32 *status)
|
|||
cmd.arg = card->rca << 16;
|
||||
cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
|
||||
|
||||
err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
|
||||
err = mmc_wait_for_cmd(card->host, &cmd, retries);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
|
@ -76,6 +77,12 @@ int mmc_send_status(struct mmc_card *card, u32 *status)
|
|||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__mmc_send_status);
|
||||
|
||||
int mmc_send_status(struct mmc_card *card, u32 *status)
|
||||
{
|
||||
return __mmc_send_status(card, status, MMC_CMD_RETRIES);
|
||||
}
|
||||
|
||||
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
|
||||
{
|
||||
|
@ -200,24 +207,6 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
|
|||
return err;
|
||||
}
|
||||
|
||||
int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
|
||||
{
|
||||
int err;
|
||||
struct mmc_command cmd = {};
|
||||
|
||||
cmd.opcode = MMC_ALL_SEND_CID;
|
||||
cmd.arg = 0;
|
||||
cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
|
||||
|
||||
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
memcpy(cid, cmd.resp, sizeof(u32) * 4);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mmc_set_relative_addr(struct mmc_card *card)
|
||||
{
|
||||
struct mmc_command cmd = {};
|
||||
|
@ -302,15 +291,11 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
|
|||
return 0;
|
||||
}
|
||||
|
||||
int mmc_send_csd(struct mmc_card *card, u32 *csd)
|
||||
static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
|
||||
{
|
||||
int ret, i;
|
||||
__be32 *csd_tmp;
|
||||
|
||||
if (!mmc_host_is_spi(card->host))
|
||||
return mmc_send_cxd_native(card->host, card->rca << 16,
|
||||
csd, MMC_SEND_CSD);
|
||||
|
||||
csd_tmp = kzalloc(16, GFP_KERNEL);
|
||||
if (!csd_tmp)
|
||||
return -ENOMEM;
|
||||
|
@ -327,18 +312,20 @@ err:
|
|||
return ret;
|
||||
}
|
||||
|
||||
int mmc_send_cid(struct mmc_host *host, u32 *cid)
|
||||
int mmc_send_csd(struct mmc_card *card, u32 *csd)
|
||||
{
|
||||
if (mmc_host_is_spi(card->host))
|
||||
return mmc_spi_send_csd(card, csd);
|
||||
|
||||
return mmc_send_cxd_native(card->host, card->rca << 16, csd,
|
||||
MMC_SEND_CSD);
|
||||
}
|
||||
|
||||
static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
|
||||
{
|
||||
int ret, i;
|
||||
__be32 *cid_tmp;
|
||||
|
||||
if (!mmc_host_is_spi(host)) {
|
||||
if (!host->card)
|
||||
return -EINVAL;
|
||||
return mmc_send_cxd_native(host, host->card->rca << 16,
|
||||
cid, MMC_SEND_CID);
|
||||
}
|
||||
|
||||
cid_tmp = kzalloc(16, GFP_KERNEL);
|
||||
if (!cid_tmp)
|
||||
return -ENOMEM;
|
||||
|
@ -355,6 +342,14 @@ err:
|
|||
return ret;
|
||||
}
|
||||
|
||||
int mmc_send_cid(struct mmc_host *host, u32 *cid)
|
||||
{
|
||||
if (mmc_host_is_spi(host))
|
||||
return mmc_spi_send_cid(host, cid);
|
||||
|
||||
return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
|
||||
}
|
||||
|
||||
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
|
||||
{
|
||||
int err;
|
||||
|
@ -800,7 +795,7 @@ int mmc_bus_test(struct mmc_card *card, u8 bus_width)
|
|||
return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
|
||||
}
|
||||
|
||||
int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
|
||||
static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
|
||||
{
|
||||
struct mmc_command cmd = {};
|
||||
unsigned int opcode;
|
||||
|
@ -834,11 +829,208 @@ int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* mmc_interrupt_hpi - Issue for High priority Interrupt
|
||||
* @card: the MMC card associated with the HPI transfer
|
||||
*
|
||||
* Issued High Priority Interrupt, and check for card status
|
||||
* until out-of prg-state.
|
||||
*/
|
||||
int mmc_interrupt_hpi(struct mmc_card *card)
|
||||
{
|
||||
int err;
|
||||
u32 status;
|
||||
unsigned long prg_wait;
|
||||
|
||||
if (!card->ext_csd.hpi_en) {
|
||||
pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
|
||||
return 1;
|
||||
}
|
||||
|
||||
mmc_claim_host(card->host);
|
||||
err = mmc_send_status(card, &status);
|
||||
if (err) {
|
||||
pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
|
||||
goto out;
|
||||
}
|
||||
|
||||
switch (R1_CURRENT_STATE(status)) {
|
||||
case R1_STATE_IDLE:
|
||||
case R1_STATE_READY:
|
||||
case R1_STATE_STBY:
|
||||
case R1_STATE_TRAN:
|
||||
/*
|
||||
* In idle and transfer states, HPI is not needed and the caller
|
||||
* can issue the next intended command immediately
|
||||
*/
|
||||
goto out;
|
||||
case R1_STATE_PRG:
|
||||
break;
|
||||
default:
|
||||
/* In all other states, it's illegal to issue HPI */
|
||||
pr_debug("%s: HPI cannot be sent. Card state=%d\n",
|
||||
mmc_hostname(card->host), R1_CURRENT_STATE(status));
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = mmc_send_hpi_cmd(card, &status);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
|
||||
do {
|
||||
err = mmc_send_status(card, &status);
|
||||
|
||||
if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
|
||||
break;
|
||||
if (time_after(jiffies, prg_wait))
|
||||
err = -ETIMEDOUT;
|
||||
} while (!err);
|
||||
|
||||
out:
|
||||
mmc_release_host(card->host);
|
||||
return err;
|
||||
}
|
||||
|
||||
int mmc_can_ext_csd(struct mmc_card *card)
|
||||
{
|
||||
return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
|
||||
}
|
||||
|
||||
/**
|
||||
* mmc_stop_bkops - stop ongoing BKOPS
|
||||
* @card: MMC card to check BKOPS
|
||||
*
|
||||
* Send HPI command to stop ongoing background operations to
|
||||
* allow rapid servicing of foreground operations, e.g. read/
|
||||
* writes. Wait until the card comes out of the programming state
|
||||
* to avoid errors in servicing read/write requests.
|
||||
*/
|
||||
int mmc_stop_bkops(struct mmc_card *card)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
err = mmc_interrupt_hpi(card);
|
||||
|
||||
/*
|
||||
* If err is EINVAL, we can't issue an HPI.
|
||||
* It should complete the BKOPS.
|
||||
*/
|
||||
if (!err || (err == -EINVAL)) {
|
||||
mmc_card_clr_doing_bkops(card);
|
||||
mmc_retune_release(card->host);
|
||||
err = 0;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int mmc_read_bkops_status(struct mmc_card *card)
|
||||
{
|
||||
int err;
|
||||
u8 *ext_csd;
|
||||
|
||||
mmc_claim_host(card->host);
|
||||
err = mmc_get_ext_csd(card, &ext_csd);
|
||||
mmc_release_host(card->host);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
|
||||
card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
|
||||
kfree(ext_csd);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* mmc_start_bkops - start BKOPS for supported cards
|
||||
* @card: MMC card to start BKOPS
|
||||
* @form_exception: A flag to indicate if this function was
|
||||
* called due to an exception raised by the card
|
||||
*
|
||||
* Start background operations whenever requested.
|
||||
* When the urgent BKOPS bit is set in a R1 command response
|
||||
* then background operations should be started immediately.
|
||||
*/
|
||||
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
|
||||
{
|
||||
int err;
|
||||
int timeout;
|
||||
bool use_busy_signal;
|
||||
|
||||
if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
|
||||
return;
|
||||
|
||||
err = mmc_read_bkops_status(card);
|
||||
if (err) {
|
||||
pr_err("%s: Failed to read bkops status: %d\n",
|
||||
mmc_hostname(card->host), err);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!card->ext_csd.raw_bkops_status)
|
||||
return;
|
||||
|
||||
if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
|
||||
from_exception)
|
||||
return;
|
||||
|
||||
mmc_claim_host(card->host);
|
||||
if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
|
||||
timeout = MMC_OPS_TIMEOUT_MS;
|
||||
use_busy_signal = true;
|
||||
} else {
|
||||
timeout = 0;
|
||||
use_busy_signal = false;
|
||||
}
|
||||
|
||||
mmc_retune_hold(card->host);
|
||||
|
||||
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
|
||||
EXT_CSD_BKOPS_START, 1, timeout, 0,
|
||||
use_busy_signal, true, false);
|
||||
if (err) {
|
||||
pr_warn("%s: Error %d starting bkops\n",
|
||||
mmc_hostname(card->host), err);
|
||||
mmc_retune_release(card->host);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* For urgent bkops status (LEVEL_2 and more)
|
||||
* bkops executed synchronously, otherwise
|
||||
* the operation is in progress
|
||||
*/
|
||||
if (!use_busy_signal)
|
||||
mmc_card_set_doing_bkops(card);
|
||||
else
|
||||
mmc_retune_release(card->host);
|
||||
out:
|
||||
mmc_release_host(card->host);
|
||||
}
|
||||
|
||||
/*
|
||||
* Flush the cache to the non-volatile storage.
|
||||
*/
|
||||
int mmc_flush_cache(struct mmc_card *card)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
if (mmc_card_mmc(card) &&
|
||||
(card->ext_csd.cache_size > 0) &&
|
||||
(card->ext_csd.cache_ctrl & 1)) {
|
||||
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
|
||||
EXT_CSD_FLUSH_CACHE, 1, 0);
|
||||
if (err)
|
||||
pr_err("%s: cache flush error %d\n",
|
||||
mmc_hostname(card->host), err);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(mmc_flush_cache);
|
||||
|
||||
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
|
||||
{
|
||||
u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
|
||||
|
|
|
@@ -22,15 +22,14 @@ int mmc_deselect_cards(struct mmc_host *host);
int mmc_set_dsr(struct mmc_host *host);
int mmc_go_idle(struct mmc_host *host);
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
int mmc_all_send_cid(struct mmc_host *host, u32 *cid);
int mmc_set_relative_addr(struct mmc_card *card);
int mmc_send_csd(struct mmc_card *card, u32 *csd);
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries);
int mmc_send_status(struct mmc_card *card, u32 *status);
int mmc_send_cid(struct mmc_host *host, u32 *cid);
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
int mmc_bus_test(struct mmc_card *card, u8 bus_width);
int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status);
int mmc_interrupt_hpi(struct mmc_card *card);
int mmc_can_ext_csd(struct mmc_card *card);
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);

@@ -42,9 +41,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms);
int mmc_stop_bkops(struct mmc_card *card);
int mmc_read_bkops_status(struct mmc_card *card);
void mmc_start_bkops(struct mmc_card *card, bool from_exception);
int mmc_can_reset(struct mmc_card *card);
int mmc_flush_cache(struct mmc_card *card);
int mmc_cmdq_enable(struct mmc_card *card);
int mmc_cmdq_disable(struct mmc_card *card);
@@ -3220,8 +3220,6 @@ static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
	df = kmalloc(sizeof(*df), GFP_KERNEL);
	if (!df) {
		debugfs_remove(file);
		dev_err(&card->dev,
			"Can't allocate memory for internal usage.\n");
		return -ENOMEM;
	}

@@ -76,6 +76,14 @@ void mmc_pwrseq_power_off(struct mmc_host *host)
		pwrseq->ops->power_off(host);
}

void mmc_pwrseq_reset(struct mmc_host *host)
{
	struct mmc_pwrseq *pwrseq = host->pwrseq;

	if (pwrseq && pwrseq->ops->reset)
		pwrseq->ops->reset(host);
}

void mmc_pwrseq_free(struct mmc_host *host)
{
	struct mmc_pwrseq *pwrseq = host->pwrseq;

@@ -18,6 +18,7 @@ struct mmc_pwrseq_ops {
	void (*pre_power_on)(struct mmc_host *host);
	void (*post_power_on)(struct mmc_host *host);
	void (*power_off)(struct mmc_host *host);
	void (*reset)(struct mmc_host *host);
};

struct mmc_pwrseq {

@@ -36,6 +37,7 @@ int mmc_pwrseq_alloc(struct mmc_host *host);
void mmc_pwrseq_pre_power_on(struct mmc_host *host);
void mmc_pwrseq_post_power_on(struct mmc_host *host);
void mmc_pwrseq_power_off(struct mmc_host *host);
void mmc_pwrseq_reset(struct mmc_host *host);
void mmc_pwrseq_free(struct mmc_host *host);

#else

@@ -49,6 +51,7 @@ static inline int mmc_pwrseq_alloc(struct mmc_host *host) { return 0; }
static inline void mmc_pwrseq_pre_power_on(struct mmc_host *host) {}
static inline void mmc_pwrseq_post_power_on(struct mmc_host *host) {}
static inline void mmc_pwrseq_power_off(struct mmc_host *host) {}
static inline void mmc_pwrseq_reset(struct mmc_host *host) {}
static inline void mmc_pwrseq_free(struct mmc_host *host) {}

#endif
@@ -56,7 +56,7 @@ static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
}

static const struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = {
	.post_power_on = mmc_pwrseq_emmc_reset,
	.reset = mmc_pwrseq_emmc_reset,
};

static int mmc_pwrseq_emmc_probe(struct platform_device *pdev)
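A rough sketch of how a pwrseq provider could wire the new .reset hook follows; the "foo" names, the GPIO field and the pulse timing below are illustrative assumptions, not code from this series.

#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/mmc/host.h>
#include "pwrseq.h"

/* Hypothetical provider state, for illustration only. */
struct foo_pwrseq {
	struct mmc_pwrseq pwrseq;
	struct gpio_desc *reset_gpio;
};

#define to_foo_pwrseq(p) container_of(p, struct foo_pwrseq, pwrseq)

/* Pulse a reset line whenever the core calls mmc_pwrseq_reset(host). */
static void foo_pwrseq_reset(struct mmc_host *host)
{
	struct foo_pwrseq *foo = to_foo_pwrseq(host->pwrseq);

	gpiod_set_value_cansleep(foo->reset_gpio, 1);
	udelay(10);
	gpiod_set_value_cansleep(foo->reset_gpio, 0);
}

static const struct mmc_pwrseq_ops foo_pwrseq_ops = {
	.reset = foo_pwrseq_reset,
};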
@@ -40,35 +40,6 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
	return BLKPREP_OK;
}

struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *mq,
					 struct request *req)
{
	struct mmc_queue_req *mqrq;
	int i = ffz(mq->qslots);

	if (i >= mq->qdepth)
		return NULL;

	mqrq = &mq->mqrq[i];
	WARN_ON(mqrq->req || mq->qcnt >= mq->qdepth ||
		test_bit(mqrq->task_id, &mq->qslots));
	mqrq->req = req;
	mq->qcnt += 1;
	__set_bit(mqrq->task_id, &mq->qslots);

	return mqrq;
}

void mmc_queue_req_free(struct mmc_queue *mq,
			struct mmc_queue_req *mqrq)
{
	WARN_ON(!mqrq->req || mq->qcnt < 1 ||
		!test_bit(mqrq->task_id, &mq->qslots));
	mqrq->req = NULL;
	mq->qcnt -= 1;
	__clear_bit(mqrq->task_id, &mq->qslots);
}

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;

@@ -149,11 +120,11 @@ static void mmc_request_fn(struct request_queue *q)
		wake_up_process(mq->thread);
}

static struct scatterlist *mmc_alloc_sg(int sg_len)
static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

@@ -179,86 +150,11 @@ static void mmc_queue_setup_discard(struct request_queue *q,
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
{
	kfree(mqrq->bounce_sg);
	mqrq->bounce_sg = NULL;

	kfree(mqrq->sg);
	mqrq->sg = NULL;

	kfree(mqrq->bounce_buf);
	mqrq->bounce_buf = NULL;
}

static void mmc_queue_reqs_free_bufs(struct mmc_queue_req *mqrq, int qdepth)
{
	int i;

	for (i = 0; i < qdepth; i++)
		mmc_queue_req_free_bufs(&mqrq[i]);
}

static void mmc_queue_free_mqrqs(struct mmc_queue_req *mqrq, int qdepth)
{
	mmc_queue_reqs_free_bufs(mqrq, qdepth);
	kfree(mqrq);
}

static struct mmc_queue_req *mmc_queue_alloc_mqrqs(int qdepth)
{
	struct mmc_queue_req *mqrq;
	int i;

	mqrq = kcalloc(qdepth, sizeof(*mqrq), GFP_KERNEL);
	if (mqrq) {
		for (i = 0; i < qdepth; i++)
			mqrq[i].task_id = i;
	}

	return mqrq;
}

#ifdef CONFIG_MMC_BLOCK_BOUNCE
static int mmc_queue_alloc_bounce_bufs(struct mmc_queue_req *mqrq, int qdepth,
				       unsigned int bouncesz)
{
	int i;

	for (i = 0; i < qdepth; i++) {
		mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mqrq[i].bounce_buf)
			return -ENOMEM;

		mqrq[i].sg = mmc_alloc_sg(1);
		if (!mqrq[i].sg)
			return -ENOMEM;

		mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512);
		if (!mqrq[i].bounce_sg)
			return -ENOMEM;
	}

	return 0;
}

static bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq, int qdepth,
				   unsigned int bouncesz)
{
	int ret;

	ret = mmc_queue_alloc_bounce_bufs(mqrq, qdepth, bouncesz);
	if (ret)
		mmc_queue_reqs_free_bufs(mqrq, qdepth);

	return !ret;
}

static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
{
	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;

	if (host->max_segs != 1)
	if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF))
		return 0;

	if (bouncesz > host->max_req_size)

@@ -273,84 +169,58 @@ static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)

	return bouncesz;
}
#else
static inline bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq,
					  int qdepth, unsigned int bouncesz)
{
	return false;
}

static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
/**
 * mmc_init_request() - initialize the MMC-specific per-request data
 * @q: the request queue
 * @req: the request
 * @gfp: memory allocation policy
 */
static int mmc_init_request(struct request_queue *q, struct request *req,
			    gfp_t gfp)
{
	return 0;
}
#endif
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

static int mmc_queue_alloc_sgs(struct mmc_queue_req *mqrq, int qdepth,
			       int max_segs)
{
	int i;

	for (i = 0; i < qdepth; i++) {
		mqrq[i].sg = mmc_alloc_sg(max_segs);
		if (!mqrq[i].sg)
	if (card->bouncesz) {
		mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp);
		if (!mq_rq->bounce_buf)
			return -ENOMEM;
		if (card->bouncesz > 512) {
			mq_rq->sg = mmc_alloc_sg(1, gfp);
			if (!mq_rq->sg)
				return -ENOMEM;
			mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512,
							gfp);
			if (!mq_rq->bounce_sg)
				return -ENOMEM;
		}
	} else {
		mq_rq->bounce_buf = NULL;
		mq_rq->bounce_sg = NULL;
		mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
		if (!mq_rq->sg)
			return -ENOMEM;
	}

	return 0;
}

void mmc_queue_free_shared_queue(struct mmc_card *card)
static void mmc_exit_request(struct request_queue *q, struct request *req)
{
	if (card->mqrq) {
		mmc_queue_free_mqrqs(card->mqrq, card->qdepth);
		card->mqrq = NULL;
	}
}
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

static int __mmc_queue_alloc_shared_queue(struct mmc_card *card, int qdepth)
{
	struct mmc_host *host = card->host;
	struct mmc_queue_req *mqrq;
	unsigned int bouncesz;
	int ret = 0;
	/* It is OK to kfree(NULL) so this will be smooth */
	kfree(mq_rq->bounce_sg);
	mq_rq->bounce_sg = NULL;

	if (card->mqrq)
		return -EINVAL;
	kfree(mq_rq->bounce_buf);
	mq_rq->bounce_buf = NULL;

	mqrq = mmc_queue_alloc_mqrqs(qdepth);
	if (!mqrq)
		return -ENOMEM;

	card->mqrq = mqrq;
	card->qdepth = qdepth;

	bouncesz = mmc_queue_calc_bouncesz(host);

	if (bouncesz && !mmc_queue_alloc_bounce(mqrq, qdepth, bouncesz)) {
		bouncesz = 0;
		pr_warn("%s: unable to allocate bounce buffers\n",
			mmc_card_name(card));
	}

	card->bouncesz = bouncesz;

	if (!bouncesz) {
		ret = mmc_queue_alloc_sgs(mqrq, qdepth, host->max_segs);
		if (ret)
			goto out_err;
	}

	return ret;

out_err:
	mmc_queue_free_shared_queue(card);
	return ret;
}

int mmc_queue_alloc_shared_queue(struct mmc_card *card)
{
	return __mmc_queue_alloc_shared_queue(card, 2);
	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

/**

@@ -373,13 +243,21 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
	limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	mq->queue = blk_alloc_queue(GFP_KERNEL);
	if (!mq->queue)
		return -ENOMEM;

	mq->mqrq = card->mqrq;
	mq->qdepth = card->qdepth;
	mq->queue->queue_lock = lock;
	mq->queue->request_fn = mmc_request_fn;
	mq->queue->init_rq_fn = mmc_init_request;
	mq->queue->exit_rq_fn = mmc_exit_request;
	mq->queue->cmd_size = sizeof(struct mmc_queue_req);
	mq->queue->queuedata = mq;
	mq->qcnt = 0;
	ret = blk_init_allocated_queue(mq->queue);
	if (ret) {
		blk_cleanup_queue(mq->queue);
		return ret;
	}

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);

@@ -387,6 +265,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	card->bouncesz = mmc_queue_calc_bouncesz(host);
	if (card->bouncesz) {
		blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
		blk_queue_max_segments(mq->queue, card->bouncesz / 512);

@@ -412,7 +291,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
	return 0;

 cleanup_queue:
	mq->mqrq = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}

@@ -434,7 +312,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
		blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	mq->mqrq = NULL;
	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

@@ -491,12 +368,13 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	struct request *req = mmc_queue_req_to_req(mqrq);
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
		return blk_rq_map_sg(mq->queue, req, mqrq->sg);

	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
	sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

@@ -518,7 +396,7 @@ void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
	if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,

@@ -534,7 +412,7 @@ void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
	if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
@@ -3,19 +3,25 @@

#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

static inline bool mmc_req_is_special(struct request *req)
static inline struct mmc_queue_req *req_to_mmc_queue_req(struct request *rq)
{
	return req &&
		(req_op(req) == REQ_OP_FLUSH ||
		 req_op(req) == REQ_OP_DISCARD ||
		 req_op(req) == REQ_OP_SECURE_ERASE);
	return blk_mq_rq_to_pdu(rq);
}

struct mmc_queue_req;

static inline struct request *mmc_queue_req_to_req(struct mmc_queue_req *mqr)
{
	return blk_mq_rq_from_pdu(mqr);
}

struct task_struct;
struct mmc_blk_data;
struct mmc_blk_ioc_data;

struct mmc_blk_request {
	struct mmc_request	mrq;

@@ -26,15 +32,27 @@ struct mmc_blk_request {
	int			retune_retry_done;
};

/**
 * enum mmc_drv_op - enumerates the operations in the mmc_queue_req
 * @MMC_DRV_OP_IOCTL: ioctl operation
 * @MMC_DRV_OP_BOOT_WP: write protect boot partitions
 */
enum mmc_drv_op {
	MMC_DRV_OP_IOCTL,
	MMC_DRV_OP_BOOT_WP,
};

struct mmc_queue_req {
	struct request		*req;
	struct mmc_blk_request	brq;
	struct scatterlist	*sg;
	char			*bounce_buf;
	struct scatterlist	*bounce_sg;
	unsigned int		bounce_sg_len;
	struct mmc_async_req	areq;
	int			task_id;
	enum mmc_drv_op		drv_op;
	int			drv_op_result;
	struct mmc_blk_ioc_data	**idata;
	unsigned int		ioc_count;
};

struct mmc_queue {

@@ -45,14 +63,15 @@ struct mmc_queue {
	bool			asleep;
	struct mmc_blk_data	*blkdata;
	struct request_queue	*queue;
	struct mmc_queue_req	*mqrq;
	int			qdepth;
	/*
	 * FIXME: this counter is not a very reliable way of keeping
	 * track of how many requests that are ongoing. Switch to just
	 * letting the block core keep track of requests and per-request
	 * associated mmc_queue_req data.
	 */
	int			qcnt;
	unsigned long		qslots;
};

extern int mmc_queue_alloc_shared_queue(struct mmc_card *card);
extern void mmc_queue_free_shared_queue(struct mmc_card *card);
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
			  const char *);
extern void mmc_cleanup_queue(struct mmc_queue *);

@@ -66,8 +85,4 @@ extern void mmc_queue_bounce_post(struct mmc_queue_req *);

extern int mmc_access_rpmb(struct mmc_queue *);

extern struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *,
						struct request *);
extern void mmc_queue_req_free(struct mmc_queue *, struct mmc_queue_req *);

#endif
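With the drv_op fields above, a driver operation can be pushed through the block layer so it is scheduled like any other request. A simplified, hedged sketch follows; the foo_issue_ioctl name and the single-command handling are assumptions for illustration, while the request and queue calls are the stock block-layer API of this kernel generation.

#include <linux/blkdev.h>
#include "queue.h"

static int foo_issue_ioctl(struct mmc_queue *mq, struct mmc_blk_ioc_data *idata)
{
	struct request_queue *q = mq->queue;
	struct mmc_queue_req *mq_rq;
	struct request *req;
	int err;

	req = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Stash the ioctl payload in the per-request area. */
	mq_rq = req_to_mmc_queue_req(req);
	mq_rq->drv_op = MMC_DRV_OP_IOCTL;
	mq_rq->idata = &idata;
	mq_rq->ioc_count = 1;

	/* Dispatch through the I/O scheduler and wait for completion. */
	blk_execute_rq(q, NULL, req, 0);
	err = mq_rq->drv_op_result;

	blk_put_request(req);
	return err;
}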
@@ -294,12 +294,8 @@ static int mmc_read_switch(struct mmc_card *card)
		err = -EIO;

	status = kmalloc(64, GFP_KERNEL);
	if (!status) {
		pr_err("%s: could not allocate a buffer for "
			"switch capabilities.\n",
			mmc_hostname(card->host));
	if (!status)
		return -ENOMEM;
	}

	/*
	 * Find out the card's support bits with a mode 0 operation.

@@ -359,11 +355,8 @@ int mmc_sd_switch_hs(struct mmc_card *card)
		return 0;

	status = kmalloc(64, GFP_KERNEL);
	if (!status) {
		pr_err("%s: could not allocate a buffer for "
			"switch capabilities.\n", mmc_hostname(card->host));
	if (!status)
		return -ENOMEM;
	}

	err = mmc_sd_switch(card, 1, 0, 1, status);
	if (err)

@@ -596,11 +589,8 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
		return 0;

	status = kmalloc(64, GFP_KERNEL);
	if (!status) {
		pr_err("%s: could not allocate a buffer for "
			"switch capabilities.\n", mmc_hostname(card->host));
	if (!status)
		return -ENOMEM;
	}

	/* Set 4-bit bus width */
	if ((card->host->caps & MMC_CAP_4_BIT_DATA) &&

@@ -798,11 +788,7 @@ try_again:
		}
	}

	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);

	err = mmc_send_cid(host, cid);
	return err;
}

@@ -1103,6 +1103,12 @@ int mmc_attach_sdio(struct mmc_host *host)
	 * Enable runtime PM only if supported by host+card+board
	 */
	if (host->caps & MMC_CAP_POWER_OFF_CARD) {
		/*
		 * Do not allow runtime suspend until after SDIO function
		 * devices are added.
		 */
		pm_runtime_get_noresume(&card->dev);

		/*
		 * Let runtime PM core know our card is active
		 */

@@ -1155,19 +1161,23 @@ int mmc_attach_sdio(struct mmc_host *host)
			goto remove_added;
	}

	if (host->caps & MMC_CAP_POWER_OFF_CARD)
		pm_runtime_put(&card->dev);

	mmc_claim_host(host);
	return 0;

remove_added:
	/* Remove without lock if the device has been added. */
	mmc_sdio_remove(host);
	mmc_claim_host(host);
remove:
	/* And with lock if it hasn't been added. */
	mmc_release_host(host);
	if (host->card)
		mmc_sdio_remove(host);
remove_added:
	/*
	 * The devices are being deleted so it is not necessary to disable
	 * runtime PM. Similarly we also don't pm_runtime_put() the SDIO card
	 * because it needs to be active to remove any function devices that
	 * were probed, and after that it gets deleted.
	 */
	mmc_sdio_remove(host);
	mmc_claim_host(host);
err:
	mmc_detach_bus(host);
@@ -95,12 +95,30 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
void sdio_run_irqs(struct mmc_host *host)
{
	mmc_claim_host(host);
	host->sdio_irq_pending = true;
	process_sdio_pending_irqs(host);
	if (host->sdio_irqs) {
		host->sdio_irq_pending = true;
		process_sdio_pending_irqs(host);
		if (host->ops->ack_sdio_irq)
			host->ops->ack_sdio_irq(host);
	}
	mmc_release_host(host);
}
EXPORT_SYMBOL_GPL(sdio_run_irqs);

void sdio_irq_work(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, sdio_irq_work.work);

	sdio_run_irqs(host);
}

void sdio_signal_irq(struct mmc_host *host)
{
	queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
}
EXPORT_SYMBOL_GPL(sdio_signal_irq);

static int sdio_irq_thread(void *_host)
{
	struct mmc_host *host = _host;
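For hosts that set MMC_CAP2_SDIO_IRQ_NOTHREAD, the hard interrupt handler only has to mask its SDIO interrupt and call sdio_signal_irq(); the core then runs sdio_irq_work() and calls back into .ack_sdio_irq once the pending function IRQs have been processed, at which point the host can unmask again. A minimal sketch, with the "foo" register helpers standing in for hypothetical hardware access:

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct foo_host *host = dev_id;

	if (foo_read_irq_status(host) & FOO_INT_SDIO) {
		/* Mask to stop the level-triggered interrupt from storming. */
		foo_mask_sdio_irq(host);
		/* Queues sdio_irq_work() on the system workqueue. */
		sdio_signal_irq(host->mmc);
	}
	return IRQ_HANDLED;
}

static void foo_ack_sdio_irq(struct mmc_host *mmc)
{
	/* Called from sdio_run_irqs(); safe to unmask the SDIO IRQ again. */
	foo_unmask_sdio_irq(mmc_priv(mmc));
}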
@@ -17,6 +17,7 @@

struct mmc_host;
struct mmc_card;
struct work_struct;

int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,

@@ -25,6 +26,7 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
	unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz);
int sdio_reset(struct mmc_host *host);
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz);
void sdio_irq_work(struct work_struct *work);

static inline bool sdio_is_io_busy(u32 opcode, u32 arg)
{
@@ -151,6 +151,8 @@ void mmc_gpiod_request_cd_irq(struct mmc_host *host)

	if (irq < 0)
		host->caps |= MMC_CAP_NEEDS_POLL;
	else if ((host->caps & MMC_CAP_CD_WAKE) && !enable_irq_wake(irq))
		host->slot.cd_wake_enabled = true;
}
EXPORT_SYMBOL(mmc_gpiod_request_cd_irq);

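A host driver opts in by setting MMC_CAP_CD_WAKE before requesting the card-detect GPIO, so the enable_irq_wake() call above is applied to its slot interrupt. A minimal, hypothetical probe-time sketch (the foo_setup_cd name and the "cd" con_id are assumptions):

static int foo_setup_cd(struct mmc_host *mmc)
{
	int ret;

	mmc->caps |= MMC_CAP_CD_WAKE;	/* request wake on card detect */

	ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
	if (ret)
		return ret;

	mmc_gpiod_request_cd_irq(mmc);
	return 0;
}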
@@ -408,11 +408,11 @@ config MMC_AU1X

config MMC_ATMELMCI
	tristate "Atmel SD/MMC Driver (Multimedia Card Interface)"
	depends on AVR32 || ARCH_AT91
	depends on ARCH_AT91
	help
	  This selects the Atmel Multimedia Card Interface driver. If
	  you have an AT32 (AVR32) or AT91 platform with a Multimedia
	  Card slot, say Y or M here.
	  This selects the Atmel Multimedia Card Interface driver.
	  If you have an AT91 platform with a Multimedia Card slot,
	  say Y or M here.

	  If unsure, say N.

@@ -571,13 +571,13 @@ config MMC_TMIO
	  T7L66XB and also HTC ASIC3

config MMC_SDHI
	tristate "SH-Mobile SDHI SD/SDIO controller support"
	tristate "Renesas SDHI SD/SDIO controller support"
	depends on SUPERH || ARM || ARM64
	depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
	select MMC_TMIO_CORE
	help
	  This provides support for the SDHI SD/SDIO controller found in
	  SuperH and ARM SH-Mobile SoCs
	  Renesas SuperH, ARM and ARM64 based SoCs

config MMC_CB710
	tristate "ENE CB710 MMC/SD Interface support"
@@ -36,9 +36,7 @@ obj-$(CONFIG_MMC_S3C) += s3cmci.o
obj-$(CONFIG_MMC_SDRICOH_CS)	+= sdricoh_cs.o
obj-$(CONFIG_MMC_TMIO)		+= tmio_mmc.o
obj-$(CONFIG_MMC_TMIO_CORE)	+= tmio_mmc_core.o
tmio_mmc_core-y			:= tmio_mmc_pio.o
tmio_mmc_core-$(subst m,y,$(CONFIG_MMC_SDHI))	+= tmio_mmc_dma.o
obj-$(CONFIG_MMC_SDHI)		+= sh_mobile_sdhi.o
obj-$(CONFIG_MMC_SDHI)		+= renesas_sdhi_core.o renesas_sdhi_sys_dmac.o
obj-$(CONFIG_MMC_CB710)		+= cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC)	+= via-sdmmc.o
obj-$(CONFIG_SDH_BFIN)		+= bfin_sdh.o

@ -44,7 +44,7 @@
|
|||
#include <asm/unaligned.h>
|
||||
|
||||
/*
|
||||
* Superset of MCI IP registers integrated in Atmel AVR32 and AT91 Processors
|
||||
* Superset of MCI IP registers integrated in Atmel AT91 Processor
|
||||
* Registers and bitfields marked with [2] are only available in MCI2
|
||||
*/
|
||||
|
||||
|
@ -172,13 +172,6 @@
|
|||
#define atmci_writel(port, reg, value) \
|
||||
__raw_writel((value), (port)->regs + reg)
|
||||
|
||||
/* On AVR chips the Peripheral DMA Controller is not connected to MCI. */
|
||||
#ifdef CONFIG_AVR32
|
||||
# define ATMCI_PDC_CONNECTED 0
|
||||
#else
|
||||
# define ATMCI_PDC_CONNECTED 1
|
||||
#endif
|
||||
|
||||
#define AUTOSUSPEND_DELAY 50
|
||||
|
||||
#define ATMCI_DATA_ERROR_FLAGS (ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE)
|
||||
|
@ -667,10 +660,8 @@ atmci_of_init(struct platform_device *pdev)
|
|||
}
|
||||
|
||||
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
|
||||
if (!pdata) {
|
||||
dev_err(&pdev->dev, "could not allocate memory for pdata\n");
|
||||
if (!pdata)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
for_each_child_of_node(np, cnp) {
|
||||
if (of_property_read_u32(cnp, "reg", &slot_id)) {
|
||||
|
@ -1549,21 +1540,8 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
|
|||
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
|
||||
break;
|
||||
default:
|
||||
/*
|
||||
* TODO: None of the currently available AVR32-based
|
||||
* boards allow MMC power to be turned off. Implement
|
||||
* power control when this can be tested properly.
|
||||
*
|
||||
* We also need to hook this into the clock management
|
||||
* somehow so that newly inserted cards aren't
|
||||
* subjected to a fast clock before we have a chance
|
||||
* to figure out what the maximum rate is. Currently,
|
||||
* there's no way to avoid this, and there never will
|
||||
* be for boards that don't support power control.
|
||||
*/
|
||||
break;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static int atmci_get_ro(struct mmc_host *mmc)
|
||||
|
@ -2464,7 +2442,7 @@ static void atmci_get_cap(struct atmel_mci *host)
|
|||
"version: 0x%x\n", version);
|
||||
|
||||
host->caps.has_dma_conf_reg = 0;
|
||||
host->caps.has_pdc = ATMCI_PDC_CONNECTED;
|
||||
host->caps.has_pdc = 1;
|
||||
host->caps.has_cfg_reg = 0;
|
||||
host->caps.has_cstor_reg = 0;
|
||||
host->caps.has_highspeed = 0;
|
||||
|
|
|
@ -1172,7 +1172,10 @@ static void bcm2835_request(struct mmc_host *mmc, struct mmc_request *mrq)
|
|||
if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
|
||||
dev_err(dev, "unsupported block size (%d bytes)\n",
|
||||
mrq->data->blksz);
|
||||
mrq->cmd->error = -EINVAL;
|
||||
|
||||
if (mrq->cmd)
|
||||
mrq->cmd->error = -EINVAL;
|
||||
|
||||
mmc_request_done(mmc, mrq);
|
||||
return;
|
||||
}
|
||||
|
@ -1194,7 +1197,10 @@ static void bcm2835_request(struct mmc_host *mmc, struct mmc_request *mrq)
|
|||
readl(host->ioaddr + SDCMD) & SDCMD_CMD_MASK,
|
||||
edm);
|
||||
bcm2835_dumpregs(host);
|
||||
mrq->cmd->error = -EILSEQ;
|
||||
|
||||
if (mrq->cmd)
|
||||
mrq->cmd->error = -EILSEQ;
|
||||
|
||||
bcm2835_finish_request(host);
|
||||
mutex_unlock(&host->mutex);
|
||||
return;
|
||||
|
@ -1207,7 +1213,7 @@ static void bcm2835_request(struct mmc_host *mmc, struct mmc_request *mrq)
|
|||
if (!host->use_busy)
|
||||
bcm2835_finish_command(host);
|
||||
}
|
||||
} else if (bcm2835_send_command(host, mrq->cmd)) {
|
||||
} else if (mrq->cmd && bcm2835_send_command(host, mrq->cmd)) {
|
||||
if (host->data && host->dma_desc) {
|
||||
/* DMA transfer starts now, PIO starts after irq */
|
||||
bcm2835_start_dma(host);
|
||||
|
|
|
@ -1035,10 +1035,12 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
|
|||
* We only have a 3.3v supply, we cannot support any
|
||||
* of the UHS modes. We do support the high speed DDR
|
||||
* modes up to 52MHz.
|
||||
*
|
||||
* Disable bounce buffers for max_segs = 1
|
||||
*/
|
||||
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
|
||||
MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD |
|
||||
MMC_CAP_3_3V_DDR;
|
||||
MMC_CAP_3_3V_DDR | MMC_CAP_NO_BOUNCE_BUFF;
|
||||
|
||||
if (host->use_sg)
|
||||
mmc->max_segs = 16;
|
||||
|
|
|
@ -157,8 +157,8 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
|
|||
* HOLD register should be bypassed in case there is no phase shift
|
||||
* applied on CMD/DATA that is sent to the card.
|
||||
*/
|
||||
if (!SDMMC_CLKSEL_GET_DRV_WD3(clksel) && host->cur_slot)
|
||||
set_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags);
|
||||
if (!SDMMC_CLKSEL_GET_DRV_WD3(clksel) && host->slot)
|
||||
set_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
|
|
|
@ -25,6 +25,7 @@ struct dw_mci_rockchip_priv_data {
|
|||
struct clk *drv_clk;
|
||||
struct clk *sample_clk;
|
||||
int default_sample_phase;
|
||||
int num_phases;
|
||||
};
|
||||
|
||||
static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
|
||||
|
@ -133,8 +134,8 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
|
|||
}
|
||||
}
|
||||
|
||||
#define NUM_PHASES 360
|
||||
#define TUNING_ITERATION_TO_PHASE(i) (DIV_ROUND_UP((i) * 360, NUM_PHASES))
|
||||
#define TUNING_ITERATION_TO_PHASE(i, num_phases) \
|
||||
(DIV_ROUND_UP((i) * 360, num_phases))
|
||||
|
||||
static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
|
||||
{
|
||||
|
@ -159,13 +160,15 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
|
|||
return -EIO;
|
||||
}
|
||||
|
||||
ranges = kmalloc_array(NUM_PHASES / 2 + 1, sizeof(*ranges), GFP_KERNEL);
|
||||
ranges = kmalloc_array(priv->num_phases / 2 + 1,
|
||||
sizeof(*ranges), GFP_KERNEL);
|
||||
if (!ranges)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Try each phase and extract good ranges */
|
||||
for (i = 0; i < NUM_PHASES; ) {
|
||||
clk_set_phase(priv->sample_clk, TUNING_ITERATION_TO_PHASE(i));
|
||||
for (i = 0; i < priv->num_phases; ) {
|
||||
clk_set_phase(priv->sample_clk,
|
||||
TUNING_ITERATION_TO_PHASE(i, priv->num_phases));
|
||||
|
||||
v = !mmc_send_tuning(mmc, opcode, NULL);
|
||||
|
||||
|
@ -179,7 +182,7 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
|
|||
if (v) {
|
||||
ranges[range_count-1].end = i;
|
||||
i++;
|
||||
} else if (i == NUM_PHASES - 1) {
|
||||
} else if (i == priv->num_phases - 1) {
|
||||
/* No extra skipping rules if we're at the end */
|
||||
i++;
|
||||
} else {
|
||||
|
@ -188,11 +191,11 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
|
|||
* one since testing bad phases is slow. Skip
|
||||
* 20 degrees.
|
||||
*/
|
||||
i += DIV_ROUND_UP(20 * NUM_PHASES, 360);
|
||||
i += DIV_ROUND_UP(20 * priv->num_phases, 360);
|
||||
|
||||
/* Always test the last one */
|
||||
if (i >= NUM_PHASES)
|
||||
i = NUM_PHASES - 1;
|
||||
if (i >= priv->num_phases)
|
||||
i = priv->num_phases - 1;
|
||||
}
|
||||
|
||||
prev_v = v;
|
||||
|
@ -210,7 +213,7 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
|
|||
range_count--;
|
||||
}
|
||||
|
||||
if (ranges[0].start == 0 && ranges[0].end == NUM_PHASES - 1) {
|
||||
if (ranges[0].start == 0 && ranges[0].end == priv->num_phases - 1) {
|
||||
clk_set_phase(priv->sample_clk, priv->default_sample_phase);
|
||||
dev_info(host->dev, "All phases work, using default phase %d.",
|
||||
priv->default_sample_phase);
|
||||
|
@ -222,7 +225,7 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
|
|||
int len = (ranges[i].end - ranges[i].start + 1);
|
||||
|
||||
if (len < 0)
|
||||
len += NUM_PHASES;
|
||||
len += priv->num_phases;
|
||||
|
||||
if (longest_range_len < len) {
|
||||
longest_range_len = len;
|
||||
|
@ -230,25 +233,30 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
|
|||
}
|
||||
|
||||
dev_dbg(host->dev, "Good phase range %d-%d (%d len)\n",
|
||||
TUNING_ITERATION_TO_PHASE(ranges[i].start),
|
||||
TUNING_ITERATION_TO_PHASE(ranges[i].end),
|
||||
TUNING_ITERATION_TO_PHASE(ranges[i].start,
|
||||
priv->num_phases),
|
||||
TUNING_ITERATION_TO_PHASE(ranges[i].end,
|
||||
priv->num_phases),
|
||||
len
|
||||
);
|
||||
}
|
||||
|
||||
dev_dbg(host->dev, "Best phase range %d-%d (%d len)\n",
|
||||
TUNING_ITERATION_TO_PHASE(ranges[longest_range].start),
|
||||
TUNING_ITERATION_TO_PHASE(ranges[longest_range].end),
|
||||
TUNING_ITERATION_TO_PHASE(ranges[longest_range].start,
|
||||
priv->num_phases),
|
||||
TUNING_ITERATION_TO_PHASE(ranges[longest_range].end,
|
||||
priv->num_phases),
|
||||
longest_range_len
|
||||
);
|
||||
|
||||
middle_phase = ranges[longest_range].start + longest_range_len / 2;
|
||||
middle_phase %= NUM_PHASES;
|
||||
middle_phase %= priv->num_phases;
|
||||
dev_info(host->dev, "Successfully tuned phase to %d\n",
|
||||
TUNING_ITERATION_TO_PHASE(middle_phase));
|
||||
TUNING_ITERATION_TO_PHASE(middle_phase, priv->num_phases));
|
||||
|
||||
clk_set_phase(priv->sample_clk,
|
||||
TUNING_ITERATION_TO_PHASE(middle_phase));
|
||||
TUNING_ITERATION_TO_PHASE(middle_phase,
|
||||
priv->num_phases));
|
||||
|
||||
free:
|
||||
kfree(ranges);
|
||||
|
@ -264,6 +272,10 @@ static int dw_mci_rk3288_parse_dt(struct dw_mci *host)
|
|||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
||||
if (of_property_read_u32(np, "rockchip,desired-num-phases",
|
||||
&priv->num_phases))
|
||||
priv->num_phases = 360;
|
||||
|
||||
if (of_property_read_u32(np, "rockchip,default-sample-phase",
|
||||
&priv->default_sample_phase))
|
||||
priv->default_sample_phase = 0;
|
||||
|
|
|
@ -392,7 +392,7 @@ static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
|
|||
cmdr = stop->opcode | SDMMC_CMD_STOP |
|
||||
SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
|
||||
|
||||
if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags))
|
||||
if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags))
|
||||
cmdr |= SDMMC_CMD_USE_HOLD_REG;
|
||||
|
||||
return cmdr;
|
||||
|
@ -480,7 +480,7 @@ static void dw_mci_dmac_complete_dma(void *arg)
|
|||
if ((host->use_dma == TRANS_MODE_EDMAC) &&
|
||||
data && (data->flags & MMC_DATA_READ))
|
||||
/* Invalidate cache after read */
|
||||
dma_sync_sg_for_cpu(mmc_dev(host->cur_slot->mmc),
|
||||
dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
|
||||
data->sg,
|
||||
data->sg_len,
|
||||
DMA_FROM_DEVICE);
|
||||
|
@ -820,7 +820,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host,
|
|||
|
||||
/* Flush cache before write */
|
||||
if (host->data->flags & MMC_DATA_WRITE)
|
||||
dma_sync_sg_for_device(mmc_dev(host->cur_slot->mmc), sgl,
|
||||
dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl,
|
||||
sg_elems, DMA_TO_DEVICE);
|
||||
|
||||
dma_async_issue_pending(host->dms->ch);
|
||||
|
@ -1282,7 +1282,6 @@ static void __dw_mci_start_request(struct dw_mci *host,
|
|||
|
||||
mrq = slot->mrq;
|
||||
|
||||
host->cur_slot = slot;
|
||||
host->mrq = mrq;
|
||||
|
||||
host->pending_events = 0;
|
||||
|
@ -1621,16 +1620,10 @@ static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
|
|||
|
||||
if (card->type == MMC_TYPE_SDIO ||
|
||||
card->type == MMC_TYPE_SD_COMBO) {
|
||||
if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) {
|
||||
pm_runtime_get_noresume(mmc->parent);
|
||||
set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
|
||||
}
|
||||
set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
|
||||
clk_en_a = clk_en_a_old & ~clken_low_pwr;
|
||||
} else {
|
||||
if (test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) {
|
||||
pm_runtime_put_noidle(mmc->parent);
|
||||
clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
|
||||
}
|
||||
clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
|
||||
clk_en_a = clk_en_a_old | clken_low_pwr;
|
||||
}
|
||||
|
||||
|
@ -1642,9 +1635,8 @@ static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
|
|||
}
|
||||
}
|
||||
|
||||
static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
|
||||
static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
|
||||
{
|
||||
struct dw_mci_slot *slot = mmc_priv(mmc);
|
||||
struct dw_mci *host = slot->host;
|
||||
unsigned long irqflags;
|
||||
u32 int_mask;
|
||||
|
@ -1662,6 +1654,27 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
|
|||
spin_unlock_irqrestore(&host->irq_lock, irqflags);
|
||||
}
|
||||
|
||||
static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
|
||||
{
|
||||
struct dw_mci_slot *slot = mmc_priv(mmc);
|
||||
struct dw_mci *host = slot->host;
|
||||
|
||||
__dw_mci_enable_sdio_irq(slot, enb);
|
||||
|
||||
/* Avoid runtime suspending the device when SDIO IRQ is enabled */
|
||||
if (enb)
|
||||
pm_runtime_get_noresume(host->dev);
|
||||
else
|
||||
pm_runtime_put_noidle(host->dev);
|
||||
}
|
||||
|
||||
static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
|
||||
{
|
||||
struct dw_mci_slot *slot = mmc_priv(mmc);
|
||||
|
||||
__dw_mci_enable_sdio_irq(slot, 1);
|
||||
}
|
||||
|
||||
static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
|
||||
{
|
||||
struct dw_mci_slot *slot = mmc_priv(mmc);
|
||||
|
@ -1749,7 +1762,7 @@ static bool dw_mci_reset(struct dw_mci *host)
|
|||
|
||||
ciu_out:
|
||||
/* After a CTRL reset we need to have CIU set clock registers */
|
||||
mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);
|
||||
mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -1763,6 +1776,7 @@ static const struct mmc_host_ops dw_mci_ops = {
|
|||
.get_cd = dw_mci_get_cd,
|
||||
.hw_reset = dw_mci_hw_reset,
|
||||
.enable_sdio_irq = dw_mci_enable_sdio_irq,
|
||||
.ack_sdio_irq = dw_mci_ack_sdio_irq,
|
||||
.execute_tuning = dw_mci_execute_tuning,
|
||||
.card_busy = dw_mci_card_busy,
|
||||
.start_signal_voltage_switch = dw_mci_switch_voltage,
|
||||
|
@ -1775,11 +1789,11 @@ static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
|
|||
__acquires(&host->lock)
|
||||
{
|
||||
struct dw_mci_slot *slot;
|
||||
struct mmc_host *prev_mmc = host->cur_slot->mmc;
|
||||
struct mmc_host *prev_mmc = host->slot->mmc;
|
||||
|
||||
WARN_ON(host->cmd || host->data);
|
||||
|
||||
host->cur_slot->mrq = NULL;
|
||||
host->slot->mrq = NULL;
|
||||
host->mrq = NULL;
|
||||
if (!list_empty(&host->queue)) {
|
||||
slot = list_entry(host->queue.next,
|
||||
|
@ -1929,7 +1943,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
|
|||
err = dw_mci_command_complete(host, cmd);
|
||||
if (cmd == mrq->sbc && !err) {
|
||||
prev_state = state = STATE_SENDING_CMD;
|
||||
__dw_mci_start_request(host, host->cur_slot,
|
||||
__dw_mci_start_request(host, host->slot,
|
||||
mrq->cmd);
|
||||
goto unlock;
|
||||
}
|
||||
|
@ -2548,26 +2562,19 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
|
|||
|
||||
static void dw_mci_handle_cd(struct dw_mci *host)
|
||||
{
|
||||
int i;
|
||||
struct dw_mci_slot *slot = host->slot;
|
||||
|
||||
for (i = 0; i < host->num_slots; i++) {
|
||||
struct dw_mci_slot *slot = host->slot[i];
|
||||
|
||||
if (!slot)
|
||||
continue;
|
||||
|
||||
if (slot->mmc->ops->card_event)
|
||||
slot->mmc->ops->card_event(slot->mmc);
|
||||
mmc_detect_change(slot->mmc,
|
||||
msecs_to_jiffies(host->pdata->detect_delay_ms));
|
||||
}
|
||||
if (slot->mmc->ops->card_event)
|
||||
slot->mmc->ops->card_event(slot->mmc);
|
||||
mmc_detect_change(slot->mmc,
|
||||
msecs_to_jiffies(host->pdata->detect_delay_ms));
|
||||
}
|
||||
|
||||
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
|
||||
{
|
||||
struct dw_mci *host = dev_id;
|
||||
u32 pending;
|
||||
int i;
|
||||
struct dw_mci_slot *slot = host->slot;
|
||||
|
||||
pending = mci_readl(host, MINTSTS); /* read-only mask reg */
|
||||
|
||||
|
@ -2644,18 +2651,11 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
|
|||
dw_mci_handle_cd(host);
|
||||
}
|
||||
|
||||
/* Handle SDIO Interrupts */
|
||||
for (i = 0; i < host->num_slots; i++) {
|
||||
struct dw_mci_slot *slot = host->slot[i];
|
||||
|
||||
if (!slot)
|
||||
continue;
|
||||
|
||||
if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
|
||||
mci_writel(host, RINTSTS,
|
||||
SDMMC_INT_SDIO(slot->sdio_id));
|
||||
mmc_signal_sdio_irq(slot->mmc);
|
||||
}
|
||||
if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
|
||||
mci_writel(host, RINTSTS,
|
||||
SDMMC_INT_SDIO(slot->sdio_id));
|
||||
__dw_mci_enable_sdio_irq(slot, 0);
|
||||
sdio_signal_irq(slot->mmc);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -2687,7 +2687,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
|
|||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
|
||||
static int dw_mci_init_slot(struct dw_mci *host)
|
||||
{
|
||||
struct mmc_host *mmc;
|
||||
struct dw_mci_slot *slot;
|
||||
|
@ -2700,15 +2700,15 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
|
|||
return -ENOMEM;
|
||||
|
||||
slot = mmc_priv(mmc);
|
||||
slot->id = id;
|
||||
slot->sdio_id = host->sdio_id0 + id;
|
||||
slot->id = 0;
|
||||
slot->sdio_id = host->sdio_id0 + slot->id;
|
||||
slot->mmc = mmc;
|
||||
slot->host = host;
|
||||
host->slot[id] = slot;
|
||||
host->slot = slot;
|
||||
|
||||
mmc->ops = &dw_mci_ops;
|
||||
if (of_property_read_u32_array(host->dev->of_node,
|
||||
"clock-freq-min-max", freq, 2)) {
|
||||
if (device_property_read_u32_array(host->dev, "clock-freq-min-max",
|
||||
freq, 2)) {
|
||||
mmc->f_min = DW_MCI_FREQ_MIN;
|
||||
mmc->f_max = DW_MCI_FREQ_MAX;
|
||||
} else {
|
||||
|
@ -2755,6 +2755,10 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
|
|||
if (ret)
|
||||
goto err_host_allocated;
|
||||
|
||||
/* Process SDIO IRQs through the sdio_irq_work. */
|
||||
if (mmc->caps & MMC_CAP_SDIO_IRQ)
|
||||
mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
|
||||
|
||||
/* Useful defaults if platform data is unset. */
|
||||
if (host->use_dma == TRANS_MODE_IDMAC) {
|
||||
mmc->max_segs = host->ring_size;
|
||||
|
@ -2796,11 +2800,11 @@ err_host_allocated:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
|
||||
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
|
||||
{
|
||||
/* Debugfs stuff is cleaned up by mmc core */
|
||||
mmc_remove_host(slot->mmc);
|
||||
slot->host->slot[id] = NULL;
|
||||
slot->host->slot = NULL;
|
||||
mmc_free_host(slot->mmc);
|
||||
}
|
||||
|
||||
|
@ -2808,7 +2812,6 @@ static void dw_mci_init_dma(struct dw_mci *host)
|
|||
{
|
||||
int addr_config;
|
||||
struct device *dev = host->dev;
|
||||
struct device_node *np = dev->of_node;
|
||||
|
||||
/*
|
||||
* Check tansfer mode from HCON[17:16]
|
||||
|
@ -2869,8 +2872,9 @@ static void dw_mci_init_dma(struct dw_mci *host)
|
|||
dev_info(host->dev, "Using internal DMA controller.\n");
|
||||
} else {
|
||||
/* TRANS_MODE_EDMAC: check dma bindings again */
|
||||
if ((of_property_count_strings(np, "dma-names") < 0) ||
|
||||
(!of_find_property(np, "dmas", NULL))) {
|
||||
if ((device_property_read_string_array(dev, "dma-names",
|
||||
NULL, 0) < 0) ||
|
||||
!device_property_present(dev, "dmas")) {
|
||||
goto no_dma;
|
||||
}
|
||||
host->dma_ops = &dw_mci_edmac_ops;
|
||||
|
@ -2937,7 +2941,6 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
|
|||
{
|
||||
struct dw_mci_board *pdata;
|
||||
struct device *dev = host->dev;
|
||||
struct device_node *np = dev->of_node;
|
||||
const struct dw_mci_drv_data *drv_data = host->drv_data;
|
||||
int ret;
|
||||
u32 clock_frequency;
|
||||
|
@ -2954,20 +2957,22 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
|
|||
}
|
||||
|
||||
/* find out number of slots supported */
|
||||
of_property_read_u32(np, "num-slots", &pdata->num_slots);
|
||||
if (device_property_read_u32(dev, "num-slots", &pdata->num_slots))
|
||||
dev_info(dev, "'num-slots' was deprecated.\n");
|
||||
|
||||
if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
|
||||
if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
|
||||
dev_info(dev,
|
||||
"fifo-depth property not found, using value of FIFOTH register as default\n");
|
||||
|
||||
of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
|
||||
device_property_read_u32(dev, "card-detect-delay",
|
||||
&pdata->detect_delay_ms);
|
||||
|
||||
of_property_read_u32(np, "data-addr", &host->data_addr_override);
|
||||
device_property_read_u32(dev, "data-addr", &host->data_addr_override);
|
||||
|
||||
if (of_get_property(np, "fifo-watermark-aligned", NULL))
|
||||
if (device_property_present(dev, "fifo-watermark-aligned"))
|
||||
host->wm_aligned = true;
|
||||
|
||||
if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
|
||||
if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
|
||||
pdata->bus_hz = clock_frequency;
|
||||
|
||||
if (drv_data && drv_data->parse_dt) {
|
||||
|
@ -2990,29 +2995,21 @@ static void dw_mci_enable_cd(struct dw_mci *host)
|
|||
{
|
||||
unsigned long irqflags;
|
||||
u32 temp;
|
||||
int i;
|
||||
struct dw_mci_slot *slot;
|
||||
|
||||
/*
|
||||
* No need for CD if all slots have a non-error GPIO
|
||||
* as well as broken card detection is found.
|
||||
*/
|
||||
for (i = 0; i < host->num_slots; i++) {
|
||||
slot = host->slot[i];
|
||||
if (slot->mmc->caps & MMC_CAP_NEEDS_POLL)
|
||||
return;
|
||||
|
||||
if (mmc_gpio_get_cd(slot->mmc) < 0)
|
||||
break;
|
||||
}
|
||||
if (i == host->num_slots)
|
||||
if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&host->irq_lock, irqflags);
|
||||
temp = mci_readl(host, INTMASK);
|
||||
temp |= SDMMC_INT_CD;
|
||||
mci_writel(host, INTMASK, temp);
|
||||
spin_unlock_irqrestore(&host->irq_lock, irqflags);
|
||||
if (mmc_gpio_get_cd(host->slot->mmc) < 0) {
|
||||
spin_lock_irqsave(&host->irq_lock, irqflags);
|
||||
temp = mci_readl(host, INTMASK);
|
||||
temp |= SDMMC_INT_CD;
|
||||
mci_writel(host, INTMASK, temp);
|
||||
spin_unlock_irqrestore(&host->irq_lock, irqflags);
|
||||
}
|
||||
}
|
||||
|
||||
int dw_mci_probe(struct dw_mci *host)
|
||||
|
@ -3020,7 +3017,6 @@ int dw_mci_probe(struct dw_mci *host)
|
|||
const struct dw_mci_drv_data *drv_data = host->drv_data;
|
||||
int width, i, ret = 0;
|
||||
u32 fifo_size;
|
||||
int init_slots = 0;
|
||||
|
||||
if (!host->pdata) {
|
||||
host->pdata = dw_mci_parse_dt(host);
|
||||
|
@ -3183,19 +3179,6 @@ int dw_mci_probe(struct dw_mci *host)
|
|||
if (ret)
|
||||
goto err_dmaunmap;
|
||||
|
||||
if (host->pdata->num_slots)
|
||||
host->num_slots = host->pdata->num_slots;
|
||||
else
|
||||
host->num_slots = 1;
|
||||
|
||||
if (host->num_slots < 1 ||
|
||||
host->num_slots > SDMMC_GET_SLOT_NUM(mci_readl(host, HCON))) {
|
||||
dev_err(host->dev,
|
||||
"Platform data must supply correct num_slots.\n");
|
||||
ret = -ENODEV;
|
||||
goto err_clk_ciu;
|
||||
}
|
||||
|
||||
/*
|
||||
* Enable interrupts for command done, data over, data empty,
|
||||
* receive ready and error such as transmit, receive timeout, crc error
|
||||
|
@ -3211,20 +3194,9 @@ int dw_mci_probe(struct dw_mci *host)
|
|||
host->irq, width, fifo_size);
|
||||
|
||||
/* We need at least one slot to succeed */
|
||||
for (i = 0; i < host->num_slots; i++) {
|
||||
ret = dw_mci_init_slot(host, i);
|
||||
if (ret)
|
||||
dev_dbg(host->dev, "slot %d init failed\n", i);
|
||||
else
|
||||
init_slots++;
|
||||
}
|
||||
|
||||
if (init_slots) {
|
||||
dev_info(host->dev, "%d slots initialized\n", init_slots);
|
||||
} else {
|
||||
dev_dbg(host->dev,
|
||||
"attempted to initialize %d slots, but failed on all\n",
|
||||
host->num_slots);
|
||||
ret = dw_mci_init_slot(host);
|
||||
if (ret) {
|
||||
dev_dbg(host->dev, "slot %d init failed\n", i);
|
||||
goto err_dmaunmap;
|
||||
}
|
||||
|
||||
|
@ -3252,13 +3224,9 @@ EXPORT_SYMBOL(dw_mci_probe);
|
|||
|
||||
void dw_mci_remove(struct dw_mci *host)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < host->num_slots; i++) {
|
||||
dev_dbg(host->dev, "remove slot %d\n", i);
|
||||
if (host->slot[i])
|
||||
dw_mci_cleanup_slot(host->slot[i], i);
|
||||
}
|
||||
dev_dbg(host->dev, "remove slot\n");
|
||||
if (host->slot)
|
||||
dw_mci_cleanup_slot(host->slot);
|
||||
|
||||
mci_writel(host, RINTSTS, 0xFFFFFFFF);
|
||||
mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
|
||||
|
@ -3290,9 +3258,9 @@ int dw_mci_runtime_suspend(struct device *dev)
|
|||
|
||||
clk_disable_unprepare(host->ciu_clk);
|
||||
|
||||
if (host->cur_slot &&
|
||||
(mmc_can_gpio_cd(host->cur_slot->mmc) ||
|
||||
!mmc_card_is_removable(host->cur_slot->mmc)))
|
||||
if (host->slot &&
|
||||
(mmc_can_gpio_cd(host->slot->mmc) ||
|
||||
!mmc_card_is_removable(host->slot->mmc)))
|
||||
clk_disable_unprepare(host->biu_clk);
|
||||
|
||||
return 0;
|
||||
|
@ -3301,12 +3269,12 @@ EXPORT_SYMBOL(dw_mci_runtime_suspend);
|
|||
|
||||
int dw_mci_runtime_resume(struct device *dev)
|
||||
{
|
||||
int i, ret = 0;
|
||||
int ret = 0;
|
||||
struct dw_mci *host = dev_get_drvdata(dev);
|
||||
|
||||
if (host->cur_slot &&
|
||||
(mmc_can_gpio_cd(host->cur_slot->mmc) ||
|
||||
!mmc_card_is_removable(host->cur_slot->mmc))) {
|
||||
if (host->slot &&
|
||||
(mmc_can_gpio_cd(host->slot->mmc) ||
|
||||
!mmc_card_is_removable(host->slot->mmc))) {
|
||||
ret = clk_prepare_enable(host->biu_clk);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -3341,17 +3309,12 @@ int dw_mci_runtime_resume(struct device *dev)
|
|||
DW_MCI_ERROR_FLAGS);
|
||||
mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
|
||||
|
||||
for (i = 0; i < host->num_slots; i++) {
|
||||
struct dw_mci_slot *slot = host->slot[i];
|
||||
|
||||
if (!slot)
|
||||
continue;
|
||||
if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
|
||||
dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
|
||||
if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
|
||||
dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);
|
||||
|
||||
/* Force setup bus to guarantee available clock output */
|
||||
dw_mci_setup_bus(slot, true);
|
||||
}
|
||||
/* Force setup bus to guarantee available clock output */
|
||||
dw_mci_setup_bus(host->slot, true);
|
||||
|
||||
/* Now that slots are all setup, we can enable card detect */
|
||||
dw_mci_enable_cd(host);
|
||||
|
@ -3359,9 +3322,9 @@ int dw_mci_runtime_resume(struct device *dev)
|
|||
return 0;
|
||||
|
||||
err:
|
||||
if (host->cur_slot &&
|
||||
(mmc_can_gpio_cd(host->cur_slot->mmc) ||
|
||||
!mmc_card_is_removable(host->cur_slot->mmc)))
|
||||
if (host->slot &&
|
||||
(mmc_can_gpio_cd(host->slot->mmc) ||
|
||||
!mmc_card_is_removable(host->slot->mmc)))
|
||||
clk_disable_unprepare(host->biu_clk);
|
||||
|
||||
return ret;
|
||||
|
|
|
@ -20,8 +20,6 @@
|
|||
#include <linux/reset.h>
|
||||
#include <linux/interrupt.h>
|
||||
|
||||
#define MAX_MCI_SLOTS 2
|
||||
|
||||
enum dw_mci_state {
|
||||
STATE_IDLE = 0,
|
||||
STATE_SENDING_CMD,
|
||||
|
@ -134,7 +132,6 @@ struct dw_mci_dma_slave {
|
|||
* =======
|
||||
*
|
||||
* @lock is a softirq-safe spinlock protecting @queue as well as
|
||||
* @cur_slot, @mrq and @state. These must always be updated
|
||||
* at the same time while holding @lock.
|
||||
*
|
||||
* @irq_lock is an irq-safe spinlock protecting the INTMASK register
|
||||
|
@ -170,7 +167,6 @@ struct dw_mci {
|
|||
struct scatterlist *sg;
|
||||
struct sg_mapping_iter sg_miter;
|
||||
|
||||
struct dw_mci_slot *cur_slot;
|
||||
struct mmc_request *mrq;
|
||||
struct mmc_command *cmd;
|
||||
struct mmc_data *data;
|
||||
|
@ -206,7 +202,6 @@ struct dw_mci {
|
|||
|
||||
u32 bus_hz;
|
||||
u32 current_speed;
|
||||
u32 num_slots;
|
||||
u32 fifoth_val;
|
||||
u16 verid;
|
||||
struct device *dev;
|
||||
|
@ -215,7 +210,7 @@ struct dw_mci {
|
|||
void *priv;
|
||||
struct clk *biu_clk;
|
||||
struct clk *ciu_clk;
|
||||
struct dw_mci_slot *slot[MAX_MCI_SLOTS];
|
||||
struct dw_mci_slot *slot;
|
||||
|
||||
/* FIFO push and pull */
|
||||
int fifo_depth;
|
||||
|
|
|
@ -1774,7 +1774,7 @@ static int msdc_drv_remove(struct platform_device *pdev)
|
|||
pm_runtime_disable(host->dev);
|
||||
pm_runtime_put_noidle(host->dev);
|
||||
dma_free_coherent(&pdev->dev,
|
||||
sizeof(struct mt_gpdma_desc),
|
||||
2 * sizeof(struct mt_gpdma_desc),
|
||||
host->dma.gpd, host->dma.gpd_addr);
|
||||
dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct mt_bdma_desc),
|
||||
host->dma.bd, host->dma.bd_addr);
|
||||
|
|
|
@ -250,14 +250,14 @@ static int omap_hsmmc_enable_supply(struct mmc_host *mmc)
|
|||
struct omap_hsmmc_host *host = mmc_priv(mmc);
|
||||
struct mmc_ios *ios = &mmc->ios;
|
||||
|
||||
if (mmc->supply.vmmc) {
|
||||
if (!IS_ERR(mmc->supply.vmmc)) {
|
||||
ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Enable interface voltage rail, if needed */
|
||||
if (mmc->supply.vqmmc && !host->vqmmc_enabled) {
|
||||
if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
|
||||
ret = regulator_enable(mmc->supply.vqmmc);
|
||||
if (ret) {
|
||||
dev_err(mmc_dev(mmc), "vmmc_aux reg enable failed\n");
|
||||
|
@ -269,7 +269,7 @@ static int omap_hsmmc_enable_supply(struct mmc_host *mmc)
|
|||
return 0;
|
||||
|
||||
err_vqmmc:
|
||||
if (mmc->supply.vmmc)
|
||||
if (!IS_ERR(mmc->supply.vmmc))
|
||||
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
|
||||
|
||||
return ret;
|
||||
|
@ -281,7 +281,7 @@ static int omap_hsmmc_disable_supply(struct mmc_host *mmc)
|
|||
int status;
|
||||
struct omap_hsmmc_host *host = mmc_priv(mmc);
|
||||
|
||||
if (mmc->supply.vqmmc && host->vqmmc_enabled) {
|
||||
if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
|
||||
ret = regulator_disable(mmc->supply.vqmmc);
|
||||
if (ret) {
|
||||
dev_err(mmc_dev(mmc), "vmmc_aux reg disable failed\n");
|
||||
|
@ -290,7 +290,7 @@ static int omap_hsmmc_disable_supply(struct mmc_host *mmc)
|
|||
host->vqmmc_enabled = 0;
|
||||
}
|
||||
|
||||
if (mmc->supply.vmmc) {
|
||||
if (!IS_ERR(mmc->supply.vmmc)) {
|
||||
ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
|
||||
if (ret)
|
||||
goto err_set_ocr;
|
||||
|
@ -299,7 +299,7 @@ static int omap_hsmmc_disable_supply(struct mmc_host *mmc)
|
|||
return 0;
|
||||
|
||||
err_set_ocr:
|
||||
if (mmc->supply.vqmmc) {
|
||||
if (!IS_ERR(mmc->supply.vqmmc)) {
|
||||
status = regulator_enable(mmc->supply.vqmmc);
|
||||
if (status)
|
||||
dev_err(mmc_dev(mmc), "vmmc_aux re-enable failed\n");
|
||||
|
@ -313,7 +313,7 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
|
|||
{
|
||||
int ret;
|
||||
|
||||
if (!host->pbias)
|
||||
if (IS_ERR(host->pbias))
|
||||
return 0;
|
||||
|
||||
if (power_on) {
|
||||
|
@ -363,7 +363,7 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
|
|||
* If we don't see a Vcc regulator, assume it's a fixed
|
||||
* voltage always-on regulator.
|
||||
*/
|
||||
if (!mmc->supply.vmmc)
|
||||
if (IS_ERR(mmc->supply.vmmc))
|
||||
return 0;
|
||||
|
||||
if (mmc_pdata(host)->before_set_reg)
|
||||
|
@ -415,7 +415,7 @@ static int omap_hsmmc_disable_boot_regulator(struct regulator *reg)
|
|||
{
|
||||
int ret;
|
||||
|
||||
if (!reg)
|
||||
if (IS_ERR(reg))
|
||||
return 0;
|
||||
|
||||
if (regulator_is_enabled(reg)) {
|
||||
|
@ -466,36 +466,27 @@ static int omap_hsmmc_disable_boot_regulators(struct omap_hsmmc_host *host)
|
|||
|
||||
static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
|
||||
{
|
||||
int ocr_value = 0;
|
||||
int ret;
|
||||
struct mmc_host *mmc = host->mmc;
|
||||
|
||||
if (mmc_pdata(host)->set_power)
|
||||
return 0;
|
||||
|
||||
mmc->supply.vmmc = devm_regulator_get_optional(host->dev, "vmmc");
|
||||
if (IS_ERR(mmc->supply.vmmc)) {
|
||||
ret = PTR_ERR(mmc->supply.vmmc);
|
||||
if ((ret != -ENODEV) && host->dev->of_node)
|
||||
return ret;
|
||||
dev_dbg(host->dev, "unable to get vmmc regulator %ld\n",
|
||||
PTR_ERR(mmc->supply.vmmc));
|
||||
mmc->supply.vmmc = NULL;
|
||||
} else {
|
||||
ocr_value = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
|
||||
if (ocr_value > 0)
|
||||
mmc_pdata(host)->ocr_mask = ocr_value;
|
||||
}
|
||||
ret = mmc_regulator_get_supply(mmc);
|
||||
if (ret == -EPROBE_DEFER)
|
||||
return ret;
|
||||
|
||||
/* Allow an aux regulator */
|
||||
mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, "vmmc_aux");
|
||||
if (IS_ERR(mmc->supply.vqmmc)) {
|
||||
ret = PTR_ERR(mmc->supply.vqmmc);
|
||||
if ((ret != -ENODEV) && host->dev->of_node)
|
||||
return ret;
|
||||
dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n",
|
||||
PTR_ERR(mmc->supply.vqmmc));
|
||||
mmc->supply.vqmmc = NULL;
|
||||
mmc->supply.vqmmc = devm_regulator_get_optional(host->dev,
|
||||
"vmmc_aux");
|
||||
if (IS_ERR(mmc->supply.vqmmc)) {
|
||||
ret = PTR_ERR(mmc->supply.vqmmc);
|
||||
if ((ret != -ENODEV) && host->dev->of_node)
|
||||
return ret;
|
||||
dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n",
|
||||
PTR_ERR(mmc->supply.vqmmc));
|
||||
}
|
||||
}
|
||||
|
||||
host->pbias = devm_regulator_get_optional(host->dev, "pbias");
|
||||
|
@ -508,7 +499,6 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
|
|||
}
|
||||
dev_dbg(host->dev, "unable to get pbias regulator %ld\n",
|
||||
PTR_ERR(host->pbias));
|
||||
host->pbias = NULL;
|
||||
}
|
||||
|
||||
/* For eMMC do not power off when not in sleep state */
|
||||
|
@@ -2146,7 +2136,8 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
if (ret)
goto err_irq;

mmc->ocr_avail = mmc_pdata(host)->ocr_mask;
if (!mmc->ocr_avail)
mmc->ocr_avail = mmc_pdata(host)->ocr_mask;

omap_hsmmc_disable_irq(host);

@@ -702,7 +702,11 @@ static int pxamci_probe(struct platform_device *pdev)

pxamci_init_ocr(host);

mmc->caps = 0;
/*
* This architecture used to disable bounce buffers through its
* defconfig, now it is done at runtime as a host property.
*/
mmc->caps = MMC_CAP_NO_BOUNCE_BUFF;
host->cmdat = 0;
if (!cpu_is_pxa25x()) {
mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
@@ -0,0 +1,39 @@
/*
* Renesas Mobile SDHI
*
* Copyright (C) 2017 Horms Solutions Ltd., Simon Horman
* Copyright (C) 2017 Renesas Electronics Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/

#ifndef RENESAS_SDHI_H
#define RENESAS_SDHI_H

#include <linux/platform_device.h>
#include "tmio_mmc.h"

struct renesas_sdhi_scc {
unsigned long clk_rate; /* clock rate for SDR104 */
u32 tap; /* sampling clock position for SDR104 */
};

struct renesas_sdhi_of_data {
unsigned long tmio_flags;
u32 tmio_ocr_mask;
unsigned long capabilities;
unsigned long capabilities2;
enum dma_slave_buswidth dma_buswidth;
dma_addr_t dma_rx_offset;
unsigned int bus_shift;
int scc_offset;
struct renesas_sdhi_scc *taps;
int taps_num;
};

int renesas_sdhi_probe(struct platform_device *pdev,
const struct tmio_mmc_dma_ops *dma_ops);
int renesas_sdhi_remove(struct platform_device *pdev);
#endif
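
As an aside, a minimal sketch of how a DMA backend is expected to plug into this split (mirroring what the sys-dmac driver further below does); the example_* identifiers are invented for illustration, only renesas_sdhi_probe(), renesas_sdhi_remove() and struct tmio_mmc_dma_ops come from the header above:

/* Hypothetical minimal DMA backend built on the interface above; the
 * example_* names are made up for illustration only. */
#include <linux/module.h>
#include <linux/platform_device.h>

#include "renesas_sdhi.h"
#include "tmio_mmc.h"

static const struct tmio_mmc_dma_ops example_sdhi_dma_ops = {
	/* .start, .enable, .request, .release and .abort would be
	 * filled in with this backend's DMA handling. */
};

static int example_sdhi_probe(struct platform_device *pdev)
{
	/* All common SDHI setup is delegated to the shared core. */
	return renesas_sdhi_probe(pdev, &example_sdhi_dma_ops);
}

static struct platform_driver example_sdhi_driver = {
	.driver = {
		.name = "example_sdhi",
	},
	.probe = example_sdhi_probe,
	.remove = renesas_sdhi_remove,
};
module_platform_driver(example_sdhi_driver);

MODULE_LICENSE("GPL v2");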
|
|
@ -1,8 +1,9 @@
|
|||
/*
|
||||
* SuperH Mobile SDHI
|
||||
* Renesas SDHI
|
||||
*
|
||||
* Copyright (C) 2016 Sang Engineering, Wolfram Sang
|
||||
* Copyright (C) 2015-16 Renesas Electronics Corporation
|
||||
* Copyright (C) 2015-17 Renesas Electronics Corporation
|
||||
* Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
|
||||
* Copyright (C) 2016-17 Horms Solutions, Simon Horman
|
||||
* Copyright (C) 2009 Magnus Damm
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
|
@ -23,8 +24,6 @@
|
|||
#include <linux/kernel.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/mod_devicetable.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/mmc/host.h>
|
||||
|
@ -35,6 +34,7 @@
|
|||
#include <linux/pinctrl/pinctrl-state.h>
|
||||
#include <linux/regulator/consumer.h>
|
||||
|
||||
#include "renesas_sdhi.h"
|
||||
#include "tmio_mmc.h"
|
||||
|
||||
#define EXT_ACC 0xe4
|
||||
|
@ -45,103 +45,10 @@
|
|||
#define SDHI_VER_GEN3_SD 0xcc10
|
||||
#define SDHI_VER_GEN3_SDMMC 0xcd10
|
||||
|
||||
#define host_to_priv(host) container_of((host)->pdata, struct sh_mobile_sdhi, mmc_data)
|
||||
#define host_to_priv(host) \
|
||||
container_of((host)->pdata, struct renesas_sdhi, mmc_data)
|
||||
|
||||
struct sh_mobile_sdhi_scc {
|
||||
unsigned long clk_rate; /* clock rate for SDR104 */
|
||||
u32 tap; /* sampling clock position for SDR104 */
|
||||
};
|
||||
|
||||
struct sh_mobile_sdhi_of_data {
|
||||
unsigned long tmio_flags;
|
||||
u32 tmio_ocr_mask;
|
||||
unsigned long capabilities;
|
||||
unsigned long capabilities2;
|
||||
enum dma_slave_buswidth dma_buswidth;
|
||||
dma_addr_t dma_rx_offset;
|
||||
unsigned bus_shift;
|
||||
int scc_offset;
|
||||
struct sh_mobile_sdhi_scc *taps;
|
||||
int taps_num;
|
||||
};
|
||||
|
||||
static const struct sh_mobile_sdhi_of_data of_default_cfg = {
|
||||
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
|
||||
};
|
||||
|
||||
static const struct sh_mobile_sdhi_of_data of_rz_compatible = {
|
||||
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_32BIT_DATA_PORT,
|
||||
.tmio_ocr_mask = MMC_VDD_32_33,
|
||||
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
|
||||
};
|
||||
|
||||
static const struct sh_mobile_sdhi_of_data of_rcar_gen1_compatible = {
|
||||
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
|
||||
TMIO_MMC_CLK_ACTUAL,
|
||||
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
|
||||
};
|
||||
|
||||
/* Definitions for sampling clocks */
|
||||
static struct sh_mobile_sdhi_scc rcar_gen2_scc_taps[] = {
|
||||
{
|
||||
.clk_rate = 156000000,
|
||||
.tap = 0x00000703,
|
||||
},
|
||||
{
|
||||
.clk_rate = 0,
|
||||
.tap = 0x00000300,
|
||||
},
|
||||
};
|
||||
|
||||
static const struct sh_mobile_sdhi_of_data of_rcar_gen2_compatible = {
|
||||
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
|
||||
TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2,
|
||||
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
|
||||
.dma_buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES,
|
||||
.dma_rx_offset = 0x2000,
|
||||
.scc_offset = 0x0300,
|
||||
.taps = rcar_gen2_scc_taps,
|
||||
.taps_num = ARRAY_SIZE(rcar_gen2_scc_taps),
|
||||
};
|
||||
|
||||
/* Definitions for sampling clocks */
|
||||
static struct sh_mobile_sdhi_scc rcar_gen3_scc_taps[] = {
|
||||
{
|
||||
.clk_rate = 0,
|
||||
.tap = 0x00000300,
|
||||
},
|
||||
};
|
||||
|
||||
static const struct sh_mobile_sdhi_of_data of_rcar_gen3_compatible = {
|
||||
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
|
||||
TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2,
|
||||
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
|
||||
.bus_shift = 2,
|
||||
.scc_offset = 0x1000,
|
||||
.taps = rcar_gen3_scc_taps,
|
||||
.taps_num = ARRAY_SIZE(rcar_gen3_scc_taps),
|
||||
};
|
||||
|
||||
static const struct of_device_id sh_mobile_sdhi_of_match[] = {
|
||||
{ .compatible = "renesas,sdhi-shmobile" },
|
||||
{ .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, },
|
||||
{ .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, },
|
||||
{ .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, },
|
||||
{ .compatible = "renesas,sdhi-r7s72100", .data = &of_rz_compatible, },
|
||||
{ .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, },
|
||||
{ .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, },
|
||||
{ .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, },
|
||||
{ .compatible = "renesas,sdhi-r8a7791", .data = &of_rcar_gen2_compatible, },
|
||||
{ .compatible = "renesas,sdhi-r8a7792", .data = &of_rcar_gen2_compatible, },
|
||||
{ .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, },
|
||||
{ .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
|
||||
{ .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
|
||||
{ .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
|
||||
|
||||
struct sh_mobile_sdhi {
|
||||
struct renesas_sdhi {
|
||||
struct clk *clk;
|
||||
struct clk *clk_cd;
|
||||
struct tmio_mmc_data mmc_data;
|
||||
|
@ -151,13 +58,13 @@ struct sh_mobile_sdhi {
|
|||
void __iomem *scc_ctl;
|
||||
};
|
||||
|
||||
static void sh_mobile_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
|
||||
static void renesas_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
/*
|
||||
* see also
|
||||
* sh_mobile_sdhi_of_data :: dma_buswidth
|
||||
* renesas_sdhi_of_data :: dma_buswidth
|
||||
*/
|
||||
switch (sd_ctrl_read16(host, CTL_VERSION)) {
|
||||
case SDHI_VER_GEN2_SDR50:
|
||||
|
@ -183,11 +90,12 @@ static void sh_mobile_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
|
|||
sd_ctrl_write16(host, EXT_ACC, val);
|
||||
}
|
||||
|
||||
static int sh_mobile_sdhi_clk_enable(struct tmio_mmc_host *host)
|
||||
static int renesas_sdhi_clk_enable(struct tmio_mmc_host *host)
|
||||
{
|
||||
struct mmc_host *mmc = host->mmc;
|
||||
struct sh_mobile_sdhi *priv = host_to_priv(host);
|
||||
struct renesas_sdhi *priv = host_to_priv(host);
|
||||
int ret = clk_prepare_enable(priv->clk);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
@ -213,19 +121,19 @@ static int sh_mobile_sdhi_clk_enable(struct tmio_mmc_host *host)
|
|||
mmc->f_min = max(clk_round_rate(priv->clk, 1) / 512, 1L);
|
||||
|
||||
/* enable 16bit data access on SDBUF as default */
|
||||
sh_mobile_sdhi_sdbuf_width(host, 16);
|
||||
renesas_sdhi_sdbuf_width(host, 16);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned int sh_mobile_sdhi_clk_update(struct tmio_mmc_host *host,
|
||||
unsigned int new_clock)
|
||||
static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
|
||||
unsigned int new_clock)
|
||||
{
|
||||
struct sh_mobile_sdhi *priv = host_to_priv(host);
|
||||
struct renesas_sdhi *priv = host_to_priv(host);
|
||||
unsigned int freq, diff, best_freq = 0, diff_min = ~0;
|
||||
int i, ret;
|
||||
|
||||
/* tested only on RCar Gen2+ currently; may work for others */
|
||||
/* tested only on R-Car Gen2+ currently; may work for others */
|
||||
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
|
||||
return clk_get_rate(priv->clk);
|
||||
|
||||
|
@ -257,26 +165,27 @@ static unsigned int sh_mobile_sdhi_clk_update(struct tmio_mmc_host *host,
|
|||
return ret == 0 ? best_freq : clk_get_rate(priv->clk);
|
||||
}
|
||||
|
||||
static void sh_mobile_sdhi_clk_disable(struct tmio_mmc_host *host)
|
||||
static void renesas_sdhi_clk_disable(struct tmio_mmc_host *host)
|
||||
{
|
||||
struct sh_mobile_sdhi *priv = host_to_priv(host);
|
||||
struct renesas_sdhi *priv = host_to_priv(host);
|
||||
|
||||
clk_disable_unprepare(priv->clk);
|
||||
clk_disable_unprepare(priv->clk_cd);
|
||||
}
|
||||
|
||||
static int sh_mobile_sdhi_card_busy(struct mmc_host *mmc)
|
||||
static int renesas_sdhi_card_busy(struct mmc_host *mmc)
|
||||
{
|
||||
struct tmio_mmc_host *host = mmc_priv(mmc);
|
||||
|
||||
return !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_DAT0);
|
||||
return !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) &
|
||||
TMIO_STAT_DAT0);
|
||||
}
|
||||
|
||||
static int sh_mobile_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
|
||||
struct mmc_ios *ios)
|
||||
static int renesas_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
|
||||
struct mmc_ios *ios)
|
||||
{
|
||||
struct tmio_mmc_host *host = mmc_priv(mmc);
|
||||
struct sh_mobile_sdhi *priv = host_to_priv(host);
|
||||
struct renesas_sdhi *priv = host_to_priv(host);
|
||||
struct pinctrl_state *pin_state;
|
||||
int ret;
|
||||
|
||||
|
@ -327,21 +236,21 @@ static int sh_mobile_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
|
|||
#define SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR BIT(2)
|
||||
|
||||
static inline u32 sd_scc_read32(struct tmio_mmc_host *host,
|
||||
struct sh_mobile_sdhi *priv, int addr)
|
||||
struct renesas_sdhi *priv, int addr)
|
||||
{
|
||||
return readl(priv->scc_ctl + (addr << host->bus_shift));
|
||||
}
|
||||
|
||||
static inline void sd_scc_write32(struct tmio_mmc_host *host,
|
||||
struct sh_mobile_sdhi *priv,
|
||||
struct renesas_sdhi *priv,
|
||||
int addr, u32 val)
|
||||
{
|
||||
writel(val, priv->scc_ctl + (addr << host->bus_shift));
|
||||
}
|
||||
|
||||
static unsigned int sh_mobile_sdhi_init_tuning(struct tmio_mmc_host *host)
|
||||
static unsigned int renesas_sdhi_init_tuning(struct tmio_mmc_host *host)
|
||||
{
|
||||
struct sh_mobile_sdhi *priv;
|
||||
struct renesas_sdhi *priv;
|
||||
|
||||
priv = host_to_priv(host);
|
||||
|
||||
|
@ -378,10 +287,10 @@ static unsigned int sh_mobile_sdhi_init_tuning(struct tmio_mmc_host *host)
|
|||
SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_MASK;
|
||||
}
|
||||
|
||||
static void sh_mobile_sdhi_prepare_tuning(struct tmio_mmc_host *host,
|
||||
unsigned long tap)
|
||||
static void renesas_sdhi_prepare_tuning(struct tmio_mmc_host *host,
|
||||
unsigned long tap)
|
||||
{
|
||||
struct sh_mobile_sdhi *priv = host_to_priv(host);
|
||||
struct renesas_sdhi *priv = host_to_priv(host);
|
||||
|
||||
/* Set sampling clock position */
|
||||
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, tap);
|
||||
|
@ -389,9 +298,9 @@ static void sh_mobile_sdhi_prepare_tuning(struct tmio_mmc_host *host,
|
|||
|
||||
#define SH_MOBILE_SDHI_MAX_TAP 3
|
||||
|
||||
static int sh_mobile_sdhi_select_tuning(struct tmio_mmc_host *host)
|
||||
static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
|
||||
{
|
||||
struct sh_mobile_sdhi *priv = host_to_priv(host);
|
||||
struct renesas_sdhi *priv = host_to_priv(host);
|
||||
unsigned long tap_cnt; /* counter of tuning success */
|
||||
unsigned long tap_set; /* tap position */
|
||||
unsigned long tap_start;/* start position of tuning success */
|
||||
|
@@ -412,9 +321,9 @@ static int sh_mobile_sdhi_select_tuning(struct tmio_mmc_host *host)
tap_start = 0;
tap_end = 0;
for (i = 0; i < host->tap_num * 2; i++) {
if (test_bit(i, host->taps))
if (test_bit(i, host->taps)) {
ntap++;
else {
} else {
if (ntap > tap_cnt) {
tap_start = i - ntap;
tap_end = i - 1;
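
As an aside, the loop above records runs of passing tuning taps; the selection idea, sketched as a hypothetical helper (kernel context assumed, not the driver's exact code, which also handles wrap-around and doubled tap counts), is to sample in the middle of the longest run of good taps:

/* Illustrative sketch only: pick the middle of the longest run of set
 * bits in a tap bitmap. */
static int example_pick_tap(const unsigned long *taps, int nr_taps)
{
	int best_len = 0, best_start = 0, run_len = 0, i;

	for (i = 0; i < nr_taps; i++) {
		if (test_bit(i, taps)) {
			run_len++;
		} else {
			if (run_len > best_len) {
				best_len = run_len;
				best_start = i - run_len;
			}
			run_len = 0;
		}
	}
	if (run_len > best_len) {
		best_len = run_len;
		best_start = nr_taps - run_len;
	}
	if (!best_len)
		return -EIO;

	/* Sample in the middle of the widest window of good taps. */
	return best_start + best_len / 2;
}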
@ -446,10 +355,9 @@ static int sh_mobile_sdhi_select_tuning(struct tmio_mmc_host *host)
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static bool sh_mobile_sdhi_check_scc_error(struct tmio_mmc_host *host)
|
||||
static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host)
|
||||
{
|
||||
struct sh_mobile_sdhi *priv = host_to_priv(host);
|
||||
struct renesas_sdhi *priv = host_to_priv(host);
|
||||
|
||||
/* Check SCC error */
|
||||
if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL) &
|
||||
|
@ -464,9 +372,9 @@ static bool sh_mobile_sdhi_check_scc_error(struct tmio_mmc_host *host)
|
|||
return false;
|
||||
}
|
||||
|
||||
static void sh_mobile_sdhi_hw_reset(struct tmio_mmc_host *host)
|
||||
static void renesas_sdhi_hw_reset(struct tmio_mmc_host *host)
|
||||
{
|
||||
struct sh_mobile_sdhi *priv;
|
||||
struct renesas_sdhi *priv;
|
||||
|
||||
priv = host_to_priv(host);
|
||||
|
||||
|
@ -490,7 +398,7 @@ static void sh_mobile_sdhi_hw_reset(struct tmio_mmc_host *host)
|
|||
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
|
||||
}
|
||||
|
||||
static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
|
||||
static int renesas_sdhi_wait_idle(struct tmio_mmc_host *host)
|
||||
{
|
||||
int timeout = 1000;
|
||||
|
||||
|
@ -506,10 +414,9 @@ static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int sh_mobile_sdhi_write16_hook(struct tmio_mmc_host *host, int addr)
|
||||
static int renesas_sdhi_write16_hook(struct tmio_mmc_host *host, int addr)
|
||||
{
|
||||
switch (addr)
|
||||
{
|
||||
switch (addr) {
|
||||
case CTL_SD_CMD:
|
||||
case CTL_STOP_INTERNAL_ACTION:
|
||||
case CTL_XFER_BLK_COUNT:
|
||||
|
@ -519,14 +426,14 @@ static int sh_mobile_sdhi_write16_hook(struct tmio_mmc_host *host, int addr)
|
|||
case CTL_TRANSACTION_CTL:
|
||||
case CTL_DMA_ENABLE:
|
||||
case EXT_ACC:
|
||||
return sh_mobile_sdhi_wait_idle(host);
|
||||
return renesas_sdhi_wait_idle(host);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sh_mobile_sdhi_multi_io_quirk(struct mmc_card *card,
|
||||
unsigned int direction, int blk_size)
|
||||
static int renesas_sdhi_multi_io_quirk(struct mmc_card *card,
|
||||
unsigned int direction, int blk_size)
|
||||
{
|
||||
/*
|
||||
* In Renesas controllers, when performing a
|
||||
|
@ -543,30 +450,34 @@ static int sh_mobile_sdhi_multi_io_quirk(struct mmc_card *card,
|
|||
return blk_size;
|
||||
}
|
||||
|
||||
static void sh_mobile_sdhi_enable_dma(struct tmio_mmc_host *host, bool enable)
|
||||
static void renesas_sdhi_enable_dma(struct tmio_mmc_host *host, bool enable)
|
||||
{
|
||||
sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0);
|
||||
|
||||
/* enable 32bit access in DMA mode if possible */
|
||||
sh_mobile_sdhi_sdbuf_width(host, enable ? 32 : 16);
|
||||
renesas_sdhi_sdbuf_width(host, enable ? 32 : 16);
|
||||
}
|
||||
|
||||
static int sh_mobile_sdhi_probe(struct platform_device *pdev)
|
||||
int renesas_sdhi_probe(struct platform_device *pdev,
|
||||
const struct tmio_mmc_dma_ops *dma_ops)
|
||||
{
|
||||
const struct sh_mobile_sdhi_of_data *of_data = of_device_get_match_data(&pdev->dev);
|
||||
struct sh_mobile_sdhi *priv;
|
||||
struct tmio_mmc_data *mmc_data;
|
||||
struct tmio_mmc_data *mmd = pdev->dev.platform_data;
|
||||
const struct renesas_sdhi_of_data *of_data;
|
||||
struct tmio_mmc_data *mmc_data;
|
||||
struct tmio_mmc_dma *dma_priv;
|
||||
struct tmio_mmc_host *host;
|
||||
struct renesas_sdhi *priv;
|
||||
struct resource *res;
|
||||
int irq, ret, i;
|
||||
struct tmio_mmc_dma *dma_priv;
|
||||
|
||||
of_data = of_device_get_match_data(&pdev->dev);
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
if (!res)
|
||||
return -EINVAL;
|
||||
|
||||
priv = devm_kzalloc(&pdev->dev, sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
|
||||
priv = devm_kzalloc(&pdev->dev, sizeof(struct renesas_sdhi),
|
||||
GFP_KERNEL);
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -609,7 +520,6 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
|
|||
goto eprobe;
|
||||
}
|
||||
|
||||
|
||||
if (of_data) {
|
||||
mmc_data->flags |= of_data->tmio_flags;
|
||||
mmc_data->ocr_mask = of_data->tmio_ocr_mask;
|
||||
|
@ -621,18 +531,18 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
|
|||
}
|
||||
|
||||
host->dma = dma_priv;
|
||||
host->write16_hook = sh_mobile_sdhi_write16_hook;
|
||||
host->clk_enable = sh_mobile_sdhi_clk_enable;
|
||||
host->clk_update = sh_mobile_sdhi_clk_update;
|
||||
host->clk_disable = sh_mobile_sdhi_clk_disable;
|
||||
host->multi_io_quirk = sh_mobile_sdhi_multi_io_quirk;
|
||||
host->write16_hook = renesas_sdhi_write16_hook;
|
||||
host->clk_enable = renesas_sdhi_clk_enable;
|
||||
host->clk_update = renesas_sdhi_clk_update;
|
||||
host->clk_disable = renesas_sdhi_clk_disable;
|
||||
host->multi_io_quirk = renesas_sdhi_multi_io_quirk;
|
||||
|
||||
/* SDR speeds are only available on Gen2+ */
|
||||
if (mmc_data->flags & TMIO_MMC_MIN_RCAR2) {
|
||||
/* card_busy caused issues on r8a73a4 (pre-Gen2) CD-less SDHI */
|
||||
host->card_busy = sh_mobile_sdhi_card_busy;
|
||||
host->card_busy = renesas_sdhi_card_busy;
|
||||
host->start_signal_voltage_switch =
|
||||
sh_mobile_sdhi_start_signal_voltage_switch;
|
||||
renesas_sdhi_start_signal_voltage_switch;
|
||||
}
|
||||
|
||||
/* Originally registers were 16 bit apart, could be 32 or 64 nowadays */
|
||||
|
@ -643,7 +553,7 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
|
|||
*mmc_data = *mmd;
|
||||
|
||||
dma_priv->filter = shdma_chan_filter;
|
||||
dma_priv->enable = sh_mobile_sdhi_enable_dma;
|
||||
dma_priv->enable = renesas_sdhi_enable_dma;
|
||||
|
||||
mmc_data->alignment_shift = 1; /* 2-byte alignment */
|
||||
mmc_data->capabilities |= MMC_CAP_MMC_HIGHSPEED;
|
||||
|
@ -659,15 +569,13 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
|
|||
*/
|
||||
mmc_data->flags |= TMIO_MMC_SDIO_IRQ;
|
||||
|
||||
/*
|
||||
* All SDHI have CMD12 controll bit
|
||||
*/
|
||||
/* All SDHI have CMD12 control bit */
|
||||
mmc_data->flags |= TMIO_MMC_HAVE_CMD12_CTRL;
|
||||
|
||||
/* All SDHI have SDIO status bits which must be 1 */
|
||||
mmc_data->flags |= TMIO_MMC_SDIO_STATUS_SETBITS;
|
||||
|
||||
ret = tmio_mmc_host_probe(host, mmc_data);
|
||||
ret = tmio_mmc_host_probe(host, mmc_data, dma_ops);
|
||||
if (ret < 0)
|
||||
goto efree;
|
||||
|
||||
|
@ -675,7 +583,7 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
|
|||
if (of_data && of_data->scc_offset &&
|
||||
(host->mmc->caps & MMC_CAP_UHS_SDR104 ||
|
||||
host->mmc->caps2 & MMC_CAP2_HS200_1_8V_SDR)) {
|
||||
const struct sh_mobile_sdhi_scc *taps = of_data->taps;
|
||||
const struct renesas_sdhi_scc *taps = of_data->taps;
|
||||
bool hit = false;
|
||||
|
||||
host->mmc->caps |= MMC_CAP_HW_RESET;
|
||||
|
@ -693,11 +601,11 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
|
|||
dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n");
|
||||
|
||||
priv->scc_ctl = host->ctl + of_data->scc_offset;
|
||||
host->init_tuning = sh_mobile_sdhi_init_tuning;
|
||||
host->prepare_tuning = sh_mobile_sdhi_prepare_tuning;
|
||||
host->select_tuning = sh_mobile_sdhi_select_tuning;
|
||||
host->check_scc_error = sh_mobile_sdhi_check_scc_error;
|
||||
host->hw_reset = sh_mobile_sdhi_hw_reset;
|
||||
host->init_tuning = renesas_sdhi_init_tuning;
|
||||
host->prepare_tuning = renesas_sdhi_prepare_tuning;
|
||||
host->select_tuning = renesas_sdhi_select_tuning;
|
||||
host->check_scc_error = renesas_sdhi_check_scc_error;
|
||||
host->hw_reset = renesas_sdhi_hw_reset;
|
||||
}
|
||||
|
||||
i = 0;
|
||||
|
@ -707,7 +615,7 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
|
|||
break;
|
||||
i++;
|
||||
ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
|
||||
dev_name(&pdev->dev), host);
|
||||
dev_name(&pdev->dev), host);
|
||||
if (ret)
|
||||
goto eirq;
|
||||
}
|
||||
|
@ -732,8 +640,9 @@ efree:
|
|||
eprobe:
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(renesas_sdhi_probe);
|
||||
|
||||
static int sh_mobile_sdhi_remove(struct platform_device *pdev)
|
||||
int renesas_sdhi_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct mmc_host *mmc = platform_get_drvdata(pdev);
|
||||
struct tmio_mmc_host *host = mmc_priv(mmc);
|
||||
|
@ -742,28 +651,4 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
|
||||
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
|
||||
pm_runtime_force_resume)
|
||||
SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
|
||||
tmio_mmc_host_runtime_resume,
|
||||
NULL)
|
||||
};
|
||||
|
||||
static struct platform_driver sh_mobile_sdhi_driver = {
|
||||
.driver = {
|
||||
.name = "sh_mobile_sdhi",
|
||||
.pm = &tmio_mmc_dev_pm_ops,
|
||||
.of_match_table = sh_mobile_sdhi_of_match,
|
||||
},
|
||||
.probe = sh_mobile_sdhi_probe,
|
||||
.remove = sh_mobile_sdhi_remove,
|
||||
};
|
||||
|
||||
module_platform_driver(sh_mobile_sdhi_driver);
|
||||
|
||||
MODULE_DESCRIPTION("SuperH Mobile SDHI driver");
|
||||
MODULE_AUTHOR("Magnus Damm");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_ALIAS("platform:sh_mobile_sdhi");
|
||||
EXPORT_SYMBOL_GPL(renesas_sdhi_remove);
@ -1,13 +1,14 @@
|
|||
/*
|
||||
* linux/drivers/mmc/tmio_mmc_dma.c
|
||||
* DMA function for TMIO MMC implementations
|
||||
*
|
||||
* Copyright (C) 2016-17 Renesas Electronics Corporation
|
||||
* Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
|
||||
* Copyright (C) 2017 Horms Solutions, Simon Horman
|
||||
* Copyright (C) 2010-2011 Guennadi Liakhovetski
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* DMA function for TMIO MMC implementations
|
||||
*/
|
||||
|
||||
#include <linux/device.h>
|
||||
|
@ -15,14 +16,96 @@
|
|||
#include <linux/dmaengine.h>
|
||||
#include <linux/mfd/tmio.h>
|
||||
#include <linux/mmc/host.h>
|
||||
#include <linux/mod_devicetable.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/scatterlist.h>
|
||||
|
||||
#include "renesas_sdhi.h"
|
||||
#include "tmio_mmc.h"
|
||||
|
||||
#define TMIO_MMC_MIN_DMA_LEN 8
|
||||
|
||||
void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
|
||||
static const struct renesas_sdhi_of_data of_default_cfg = {
|
||||
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
|
||||
};
|
||||
|
||||
static const struct renesas_sdhi_of_data of_rz_compatible = {
|
||||
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_32BIT_DATA_PORT,
|
||||
.tmio_ocr_mask = MMC_VDD_32_33,
|
||||
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
|
||||
};
|
||||
|
||||
static const struct renesas_sdhi_of_data of_rcar_gen1_compatible = {
|
||||
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
|
||||
TMIO_MMC_CLK_ACTUAL,
|
||||
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
|
||||
};
|
||||
|
||||
/* Definitions for sampling clocks */
|
||||
static struct renesas_sdhi_scc rcar_gen2_scc_taps[] = {
|
||||
{
|
||||
.clk_rate = 156000000,
|
||||
.tap = 0x00000703,
|
||||
},
|
||||
{
|
||||
.clk_rate = 0,
|
||||
.tap = 0x00000300,
|
||||
},
|
||||
};
|
||||
|
||||
static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = {
|
||||
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
|
||||
TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2,
|
||||
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
|
||||
MMC_CAP_CMD23,
|
||||
.dma_buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES,
|
||||
.dma_rx_offset = 0x2000,
|
||||
.scc_offset = 0x0300,
|
||||
.taps = rcar_gen2_scc_taps,
|
||||
.taps_num = ARRAY_SIZE(rcar_gen2_scc_taps),
|
||||
};
|
||||
|
||||
/* Definitions for sampling clocks */
|
||||
static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
|
||||
{
|
||||
.clk_rate = 0,
|
||||
.tap = 0x00000300,
|
||||
},
|
||||
};
|
||||
|
||||
static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
|
||||
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
|
||||
TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2,
|
||||
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
|
||||
MMC_CAP_CMD23,
|
||||
.bus_shift = 2,
|
||||
.scc_offset = 0x1000,
|
||||
.taps = rcar_gen3_scc_taps,
|
||||
.taps_num = ARRAY_SIZE(rcar_gen3_scc_taps),
|
||||
};
|
||||
|
||||
static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = {
|
||||
{ .compatible = "renesas,sdhi-shmobile" },
|
||||
{ .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, },
|
||||
{ .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, },
|
||||
{ .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, },
|
||||
{ .compatible = "renesas,sdhi-r7s72100", .data = &of_rz_compatible, },
|
||||
{ .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, },
|
||||
{ .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, },
|
||||
{ .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, },
|
||||
{ .compatible = "renesas,sdhi-r8a7791", .data = &of_rcar_gen2_compatible, },
|
||||
{ .compatible = "renesas,sdhi-r8a7792", .data = &of_rcar_gen2_compatible, },
|
||||
{ .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, },
|
||||
{ .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
|
||||
{ .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
|
||||
{ .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match);
|
||||
|
||||
static void renesas_sdhi_sys_dmac_enable_dma(struct tmio_mmc_host *host,
|
||||
bool enable)
|
||||
{
|
||||
if (!host->chan_tx || !host->chan_rx)
|
||||
return;
|
||||
|
@ -31,19 +114,19 @@ void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
|
|||
host->dma->enable(host, enable);
|
||||
}
|
||||
|
||||
void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
|
||||
static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host)
|
||||
{
|
||||
tmio_mmc_enable_dma(host, false);
|
||||
renesas_sdhi_sys_dmac_enable_dma(host, false);
|
||||
|
||||
if (host->chan_rx)
|
||||
dmaengine_terminate_all(host->chan_rx);
|
||||
if (host->chan_tx)
|
||||
dmaengine_terminate_all(host->chan_tx);
|
||||
|
||||
tmio_mmc_enable_dma(host, true);
|
||||
renesas_sdhi_sys_dmac_enable_dma(host, true);
|
||||
}
|
||||
|
||||
static void tmio_mmc_dma_callback(void *arg)
|
||||
static void renesas_sdhi_sys_dmac_dma_callback(void *arg)
|
||||
{
|
||||
struct tmio_mmc_host *host = arg;
|
||||
|
||||
|
@ -71,7 +154,7 @@ out:
|
|||
spin_unlock_irq(&host->lock);
|
||||
}
|
||||
|
||||
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
|
||||
static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host)
|
||||
{
|
||||
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
|
||||
struct dma_async_tx_descriptor *desc = NULL;
|
||||
|
@ -112,12 +195,12 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
|
|||
|
||||
ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
|
||||
if (ret > 0)
|
||||
desc = dmaengine_prep_slave_sg(chan, sg, ret,
|
||||
DMA_DEV_TO_MEM, DMA_CTRL_ACK);
|
||||
desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_DEV_TO_MEM,
|
||||
DMA_CTRL_ACK);
|
||||
|
||||
if (desc) {
|
||||
reinit_completion(&host->dma_dataend);
|
||||
desc->callback = tmio_mmc_dma_callback;
|
||||
desc->callback = renesas_sdhi_sys_dmac_dma_callback;
|
||||
desc->callback_param = host;
|
||||
|
||||
cookie = dmaengine_submit(desc);
|
||||
|
@ -129,7 +212,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
|
|||
pio:
|
||||
if (!desc) {
|
||||
/* DMA failed, fall back to PIO */
|
||||
tmio_mmc_enable_dma(host, false);
|
||||
renesas_sdhi_sys_dmac_enable_dma(host, false);
|
||||
if (ret >= 0)
|
||||
ret = -EIO;
|
||||
host->chan_rx = NULL;
|
||||
|
@ -145,7 +228,7 @@ pio:
|
|||
}
|
||||
}
|
||||
|
||||
static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
|
||||
static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
|
||||
{
|
||||
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
|
||||
struct dma_async_tx_descriptor *desc = NULL;
|
||||
|
@ -181,6 +264,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
|
|||
if (!aligned) {
|
||||
unsigned long flags;
|
||||
void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
|
||||
|
||||
sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
|
||||
memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
|
||||
tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
|
||||
|
@ -190,12 +274,12 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
|
|||
|
||||
ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
|
||||
if (ret > 0)
|
||||
desc = dmaengine_prep_slave_sg(chan, sg, ret,
|
||||
DMA_MEM_TO_DEV, DMA_CTRL_ACK);
|
||||
desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_MEM_TO_DEV,
|
||||
DMA_CTRL_ACK);
|
||||
|
||||
if (desc) {
|
||||
reinit_completion(&host->dma_dataend);
|
||||
desc->callback = tmio_mmc_dma_callback;
|
||||
desc->callback = renesas_sdhi_sys_dmac_dma_callback;
|
||||
desc->callback_param = host;
|
||||
|
||||
cookie = dmaengine_submit(desc);
|
||||
|
@ -207,7 +291,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
|
|||
pio:
|
||||
if (!desc) {
|
||||
/* DMA failed, fall back to PIO */
|
||||
tmio_mmc_enable_dma(host, false);
|
||||
renesas_sdhi_sys_dmac_enable_dma(host, false);
|
||||
if (ret >= 0)
|
||||
ret = -EIO;
|
||||
host->chan_tx = NULL;
|
||||
|
@ -223,19 +307,19 @@ pio:
|
|||
}
|
||||
}
|
||||
|
||||
void tmio_mmc_start_dma(struct tmio_mmc_host *host,
|
||||
struct mmc_data *data)
|
||||
static void renesas_sdhi_sys_dmac_start_dma(struct tmio_mmc_host *host,
|
||||
struct mmc_data *data)
|
||||
{
|
||||
if (data->flags & MMC_DATA_READ) {
|
||||
if (host->chan_rx)
|
||||
tmio_mmc_start_dma_rx(host);
|
||||
renesas_sdhi_sys_dmac_start_dma_rx(host);
|
||||
} else {
|
||||
if (host->chan_tx)
|
||||
tmio_mmc_start_dma_tx(host);
|
||||
renesas_sdhi_sys_dmac_start_dma_tx(host);
|
||||
}
|
||||
}
|
||||
|
||||
static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
|
||||
static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv)
|
||||
{
|
||||
struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
|
||||
struct dma_chan *chan = NULL;
|
||||
|
@ -257,11 +341,12 @@ static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
|
|||
dma_async_issue_pending(chan);
|
||||
}
|
||||
|
||||
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
|
||||
static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
|
||||
struct tmio_mmc_data *pdata)
|
||||
{
|
||||
/* We can only either use DMA for both Tx and Rx or not use it at all */
|
||||
if (!host->dma || (!host->pdev->dev.of_node &&
|
||||
(!pdata->chan_priv_tx || !pdata->chan_priv_rx)))
|
||||
(!pdata->chan_priv_tx || !pdata->chan_priv_rx)))
|
||||
return;
|
||||
|
||||
if (!host->chan_tx && !host->chan_rx) {
|
||||
|
@ -287,7 +372,8 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
|
|||
return;
|
||||
|
||||
cfg.direction = DMA_MEM_TO_DEV;
|
||||
cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->bus_shift);
|
||||
cfg.dst_addr = res->start +
|
||||
(CTL_SD_DATA_PORT << host->bus_shift);
|
||||
cfg.dst_addr_width = host->dma->dma_buswidth;
|
||||
if (!cfg.dst_addr_width)
|
||||
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
|
||||
|
@ -320,10 +406,12 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
|
|||
goto ebouncebuf;
|
||||
|
||||
init_completion(&host->dma_dataend);
|
||||
tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
|
||||
tasklet_init(&host->dma_issue,
|
||||
renesas_sdhi_sys_dmac_issue_tasklet_fn,
|
||||
(unsigned long)host);
|
||||
}
|
||||
|
||||
tmio_mmc_enable_dma(host, true);
|
||||
renesas_sdhi_sys_dmac_enable_dma(host, true);
|
||||
|
||||
return;
|
||||
|
||||
|
@ -337,15 +425,17 @@ ecfgtx:
|
|||
host->chan_tx = NULL;
|
||||
}
|
||||
|
||||
void tmio_mmc_release_dma(struct tmio_mmc_host *host)
|
||||
static void renesas_sdhi_sys_dmac_release_dma(struct tmio_mmc_host *host)
|
||||
{
|
||||
if (host->chan_tx) {
|
||||
struct dma_chan *chan = host->chan_tx;
|
||||
|
||||
host->chan_tx = NULL;
|
||||
dma_release_channel(chan);
|
||||
}
|
||||
if (host->chan_rx) {
|
||||
struct dma_chan *chan = host->chan_rx;
|
||||
|
||||
host->chan_rx = NULL;
|
||||
dma_release_channel(chan);
|
||||
}
|
||||
|
@ -354,3 +444,41 @@ void tmio_mmc_release_dma(struct tmio_mmc_host *host)
|
|||
host->bounce_buf = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static const struct tmio_mmc_dma_ops renesas_sdhi_sys_dmac_dma_ops = {
|
||||
.start = renesas_sdhi_sys_dmac_start_dma,
|
||||
.enable = renesas_sdhi_sys_dmac_enable_dma,
|
||||
.request = renesas_sdhi_sys_dmac_request_dma,
|
||||
.release = renesas_sdhi_sys_dmac_release_dma,
|
||||
.abort = renesas_sdhi_sys_dmac_abort_dma,
|
||||
};
|
||||
|
||||
static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
|
||||
{
|
||||
return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops);
|
||||
}
|
||||
|
||||
static const struct dev_pm_ops renesas_sdhi_sys_dmac_dev_pm_ops = {
|
||||
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
|
||||
pm_runtime_force_resume)
|
||||
SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
|
||||
tmio_mmc_host_runtime_resume,
|
||||
NULL)
|
||||
};
|
||||
|
||||
static struct platform_driver renesas_sys_dmac_sdhi_driver = {
|
||||
.driver = {
|
||||
.name = "sh_mobile_sdhi",
|
||||
.pm = &renesas_sdhi_sys_dmac_dev_pm_ops,
|
||||
.of_match_table = renesas_sdhi_sys_dmac_of_match,
|
||||
},
|
||||
.probe = renesas_sdhi_sys_dmac_probe,
|
||||
.remove = renesas_sdhi_remove,
|
||||
};
|
||||
|
||||
module_platform_driver(renesas_sys_dmac_sdhi_driver);
|
||||
|
||||
MODULE_DESCRIPTION("Renesas SDHI driver");
|
||||
MODULE_AUTHOR("Magnus Damm");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_ALIAS("platform:sh_mobile_sdhi");
|
|
@@ -274,7 +274,6 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
MMC_CAP_CMD_DURING_TFR | MMC_CAP_WAIT_WHILE_BUSY,
.caps2 = MMC_CAP2_HC_ERASE_SZ,
.flags = SDHCI_ACPI_RUNTIME_PM,
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |

@@ -396,9 +395,6 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (child->status.present && child->status.enabled)
acpi_device_fix_up_power(child);

if (acpi_bus_get_status(device) || !device->status.present)
return -ENODEV;

if (sdhci_acpi_byt_defer(dev))
return -EPROBE_DEFER;

@@ -89,9 +89,6 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
goto err_clk;
}

/* Enable MMC_CAP2_HC_ERASE_SZ for better max discard calculations */
host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;

sdhci_get_of_property(pdev);
mmc_of_parse(host->mmc);

@ -95,7 +95,7 @@
|
|||
#define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1)
|
||||
|
||||
/*
|
||||
* There is an INT DMA ERR mis-match between eSDHC and STD SDHC SPEC:
|
||||
* There is an INT DMA ERR mismatch between eSDHC and STD SDHC SPEC:
|
||||
* Bit25 is used in STD SPEC, and is reserved in fsl eSDHC design,
|
||||
* but bit28 is used as the INT DMA ERR in fsl eSDHC design.
|
||||
* Define this macro DMA error INT for fsl eSDHC
|
||||
|
@ -110,15 +110,10 @@
|
|||
* In exact block transfer, the controller doesn't complete the
|
||||
* operations automatically as required at the end of the
|
||||
* transfer and remains on hold if the abort command is not sent.
|
||||
* As a result, the TC flag is not asserted and SW received timeout
|
||||
* exeception. Bit1 of Vendor Spec registor is used to fix it.
|
||||
* As a result, the TC flag is not asserted and SW received timeout
|
||||
* exception. Bit1 of Vendor Spec register is used to fix it.
|
||||
*/
|
||||
#define ESDHC_FLAG_MULTIBLK_NO_INT BIT(1)
|
||||
/*
|
||||
* The flag enables the workaround for ESDHC errata ENGcm07207 which
|
||||
* affects i.MX25 and i.MX35.
|
||||
*/
|
||||
#define ESDHC_FLAG_ENGCM07207 BIT(2)
|
||||
/*
|
||||
* The flag tells that the ESDHC controller is an USDHC block that is
|
||||
* integrated on the i.MX6 series.
|
||||
|
@ -131,9 +126,11 @@
|
|||
/* The IP has SDHCI_CAPABILITIES_1 register */
|
||||
#define ESDHC_FLAG_HAVE_CAP1 BIT(6)
|
||||
/*
|
||||
* The IP has errata ERR004536
|
||||
* The IP has erratum ERR004536
|
||||
* uSDHC: ADMA Length Mismatch Error occurs if the AHB read access is slow,
|
||||
* when reading data from the card
|
||||
* This flag is also set for i.MX25 and i.MX35 in order to get
|
||||
* SDHCI_QUIRK_BROKEN_ADMA, but for different reasons (ADMA capability bits).
|
||||
*/
|
||||
#define ESDHC_FLAG_ERR004536 BIT(7)
|
||||
/* The IP supports HS200 mode */
|
||||
|
@ -141,7 +138,7 @@
|
|||
/* The IP supports HS400 mode */
|
||||
#define ESDHC_FLAG_HS400 BIT(9)
|
||||
|
||||
/* A higher clock ferquency than this rate requires strobell dll control */
|
||||
/* A clock frequency higher than this rate requires strobe dll control */
|
||||
#define ESDHC_STROBE_DLL_CLK_FREQ 100000000
|
||||
|
||||
struct esdhc_soc_data {
|
||||
|
@ -149,11 +146,11 @@ struct esdhc_soc_data {
|
|||
};
|
||||
|
||||
static struct esdhc_soc_data esdhc_imx25_data = {
|
||||
.flags = ESDHC_FLAG_ENGCM07207,
|
||||
.flags = ESDHC_FLAG_ERR004536,
|
||||
};
|
||||
|
||||
static struct esdhc_soc_data esdhc_imx35_data = {
|
||||
.flags = ESDHC_FLAG_ENGCM07207,
|
||||
.flags = ESDHC_FLAG_ERR004536,
|
||||
};
|
||||
|
||||
static struct esdhc_soc_data esdhc_imx51_data = {
|
||||
|
@ -197,7 +194,7 @@ struct pltfm_imx_data {
|
|||
struct clk *clk_ahb;
|
||||
struct clk *clk_per;
|
||||
enum {
|
||||
NO_CMD_PENDING, /* no multiblock command pending*/
|
||||
NO_CMD_PENDING, /* no multiblock command pending */
|
||||
MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */
|
||||
WAIT_FOR_INT, /* sent CMD12, waiting for response INT */
|
||||
} multiblock_status;
|
||||
|
@ -286,7 +283,7 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
|
|||
* ADMA2 capability of esdhc, but this bit is messed up on
|
||||
* some SOCs (e.g. on MX25, MX35 this bit is set, but they
|
||||
* don't actually support ADMA2). So set the BROKEN_ADMA
|
||||
* uirk on MX25/35 platforms.
|
||||
* quirk on MX25/35 platforms.
|
||||
*/
|
||||
|
||||
if (val & SDHCI_CAN_DO_ADMA1) {
|
||||
|
@ -351,7 +348,7 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
|
|||
if ((val & SDHCI_INT_CARD_INT) && !esdhc_is_usdhc(imx_data)) {
|
||||
/*
|
||||
* Clear and then set D3CD bit to avoid missing the
|
||||
* card interrupt. This is a eSDHC controller problem
|
||||
* card interrupt. This is an eSDHC controller problem
|
||||
* so we need to apply the following workaround: clear
|
||||
* and set D3CD bit will make eSDHC re-sample the card
|
||||
* interrupt. In case a card interrupt was lost,
|
||||
|
@ -579,7 +576,7 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
|
|||
{
|
||||
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
||||
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
|
||||
u32 new_val;
|
||||
u32 new_val = 0;
|
||||
u32 mask;
|
||||
|
||||
switch (reg) {
|
||||
|
@ -604,35 +601,52 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
|
|||
* Do not touch buswidth bits here. This is done in
|
||||
* esdhc_pltfm_bus_width.
|
||||
* Do not touch the D3CD bit either which is used for the
|
||||
* SDIO interrupt errata workaround.
|
||||
* SDIO interrupt erratum workaround.
|
||||
*/
|
||||
mask = 0xffff & ~(ESDHC_CTRL_BUSWIDTH_MASK | ESDHC_CTRL_D3CD);
|
||||
|
||||
esdhc_clrset_le(host, mask, new_val, reg);
|
||||
return;
|
||||
case SDHCI_SOFTWARE_RESET:
|
||||
if (val & SDHCI_RESET_DATA)
|
||||
new_val = readl(host->ioaddr + SDHCI_HOST_CONTROL);
|
||||
break;
|
||||
}
|
||||
esdhc_clrset_le(host, 0xff, val, reg);
|
||||
|
||||
/*
|
||||
* The esdhc has a design violation to SDHC spec which tells
|
||||
* that software reset should not affect card detection circuit.
|
||||
* But esdhc clears its SYSCTL register bits [0..2] during the
|
||||
* software reset. This will stop those clocks that card detection
|
||||
* circuit relies on. To work around it, we turn the clocks on back
|
||||
* to keep card detection circuit functional.
|
||||
*/
|
||||
if ((reg == SDHCI_SOFTWARE_RESET) && (val & 1)) {
|
||||
esdhc_clrset_le(host, 0x7, 0x7, ESDHC_SYSTEM_CONTROL);
|
||||
/*
|
||||
* The reset on usdhc fails to clear MIX_CTRL register.
|
||||
* Do it manually here.
|
||||
*/
|
||||
if (esdhc_is_usdhc(imx_data)) {
|
||||
/* the tuning bits should be kept during reset */
|
||||
new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
|
||||
writel(new_val & ESDHC_MIX_CTRL_TUNING_MASK,
|
||||
host->ioaddr + ESDHC_MIX_CTRL);
|
||||
imx_data->is_ddr = 0;
|
||||
if (reg == SDHCI_SOFTWARE_RESET) {
|
||||
if (val & SDHCI_RESET_ALL) {
|
||||
/*
|
||||
* The esdhc has a design violation to SDHC spec which
|
||||
* tells that software reset should not affect card
|
||||
* detection circuit. But esdhc clears its SYSCTL
|
||||
* register bits [0..2] during the software reset. This
|
||||
* will stop those clocks that card detection circuit
|
||||
* relies on. To work around it, we turn the clocks on
|
||||
* back to keep card detection circuit functional.
|
||||
*/
|
||||
esdhc_clrset_le(host, 0x7, 0x7, ESDHC_SYSTEM_CONTROL);
|
||||
/*
|
||||
* The reset on usdhc fails to clear MIX_CTRL register.
|
||||
* Do it manually here.
|
||||
*/
|
||||
if (esdhc_is_usdhc(imx_data)) {
|
||||
/*
|
||||
* the tuning bits should be kept during reset
|
||||
*/
|
||||
new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
|
||||
writel(new_val & ESDHC_MIX_CTRL_TUNING_MASK,
|
||||
host->ioaddr + ESDHC_MIX_CTRL);
|
||||
imx_data->is_ddr = 0;
|
||||
}
|
||||
} else if (val & SDHCI_RESET_DATA) {
|
||||
/*
|
||||
* The eSDHC DAT line software reset clears at least the
|
||||
* data transfer width on i.MX25, so make sure that the
|
||||
* Host Control register is unaffected.
|
||||
*/
|
||||
esdhc_clrset_le(host, 0xff, new_val,
|
||||
SDHCI_HOST_CONTROL);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -657,7 +671,8 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
unsigned int host_clock = pltfm_host->clock;
int pre_div = 2;
int ddr_pre_div = imx_data->is_ddr ? 2 : 1;
int pre_div = 1;
int div = 1;
u32 temp, val;

@@ -672,28 +687,23 @@
return;
}

if (esdhc_is_usdhc(imx_data) && !imx_data->is_ddr)
pre_div = 1;

temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
| ESDHC_CLOCK_MASK);
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);

while (host_clock / pre_div / 16 > clock && pre_div < 256)
while (host_clock / (16 * pre_div * ddr_pre_div) > clock &&
pre_div < 256)
pre_div *= 2;

while (host_clock / pre_div / div > clock && div < 16)
while (host_clock / (div * pre_div * ddr_pre_div) > clock && div < 16)
div++;

host->mmc->actual_clock = host_clock / pre_div / div;
host->mmc->actual_clock = host_clock / (div * pre_div * ddr_pre_div);
dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
clock, host->mmc->actual_clock);

if (imx_data->is_ddr)
pre_div >>= 2;
else
pre_div >>= 1;
pre_div >>= 1;
div--;

temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
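
As an aside, a rough standalone sketch of the divider arithmetic above, with a worked example; example_esdhc_actual_clock() is an invented helper that only reproduces the math, no registers are touched:

/* Example: host_clock = 200 MHz, target = 400 kHz, SDR (ddr_pre_div = 1)
 *   -> pre_div = 32, div = 16, actual = 200000000 / (16 * 32) = 390625 Hz.
 */
static unsigned int example_esdhc_actual_clock(unsigned int host_clock,
					       unsigned int clock, bool is_ddr)
{
	int ddr_pre_div = is_ddr ? 2 : 1;
	int pre_div = 1;
	int div = 1;

	/* Grow the power-of-two prescaler until 16 * pre_div is enough. */
	while (host_clock / (16 * pre_div * ddr_pre_div) > clock &&
	       pre_div < 256)
		pre_div *= 2;

	/* Then grow the linear divisor to reach or drop below the target. */
	while (host_clock / (div * pre_div * ddr_pre_div) > clock && div < 16)
		div++;

	return host_clock / (div * pre_div * ddr_pre_div);
}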
@ -763,7 +773,7 @@ static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
|
|||
writel(reg, host->ioaddr + ESDHC_MIX_CTRL);
|
||||
writel(val << 8, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
|
||||
dev_dbg(mmc_dev(host->mmc),
|
||||
"tunning with delay 0x%x ESDHC_TUNE_CTRL_STATUS 0x%x\n",
|
||||
"tuning with delay 0x%x ESDHC_TUNE_CTRL_STATUS 0x%x\n",
|
||||
val, readl(host->ioaddr + ESDHC_TUNE_CTRL_STATUS));
|
||||
}
|
||||
|
||||
|
@ -807,7 +817,7 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
|
|||
ret = mmc_send_tuning(host->mmc, opcode, NULL);
|
||||
esdhc_post_tuning(host);
|
||||
|
||||
dev_dbg(mmc_dev(host->mmc), "tunning %s at 0x%x ret %d\n",
|
||||
dev_dbg(mmc_dev(host->mmc), "tuning %s at 0x%x ret %d\n",
|
||||
ret ? "failed" : "passed", avg, ret);
|
||||
|
||||
return ret;
|
||||
|
@ -847,15 +857,15 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
|
|||
}
|
||||
|
||||
/*
|
||||
* For HS400 eMMC, there is a data_strobe line, this signal is generated
|
||||
* For HS400 eMMC, there is a data_strobe line. This signal is generated
|
||||
* by the device and used for data output and CRC status response output
|
||||
* in HS400 mode. The frequency of this signal follows the frequency of
|
||||
* CLK generated by host. Host receive the data which is aligned to the
|
||||
* CLK generated by host. The host receives the data which is aligned to the
|
||||
* edge of data_strobe line. Due to the time delay between CLK line and
|
||||
* data_strobe line, if the delay time is larger than one clock cycle,
|
||||
* then CLK and data_strobe line will misaligned, read error shows up.
|
||||
* then CLK and data_strobe line will be misaligned, read error shows up.
|
||||
* So when the CLK is higher than 100MHz, each clock cycle is short enough,
|
||||
* host should config the delay target.
|
||||
* host should configure the delay target.
|
||||
*/
|
||||
static void esdhc_set_strobe_dll(struct sdhci_host *host)
|
||||
{
|
||||
|
@ -895,7 +905,7 @@ static void esdhc_reset_tuning(struct sdhci_host *host)
|
|||
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
|
||||
u32 ctrl;
|
||||
|
||||
/* Rest the tuning circurt */
|
||||
/* Reset the tuning circuit */
|
||||
if (esdhc_is_usdhc(imx_data)) {
|
||||
if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
|
||||
ctrl = readl(host->ioaddr + ESDHC_MIX_CTRL);
|
||||
|
@ -976,7 +986,7 @@ static unsigned int esdhc_get_max_timeout_count(struct sdhci_host *host)
|
|||
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
||||
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
|
||||
|
||||
/* Doc Errata: the uSDHC actual maximum timeout count is 1 << 29 */
|
||||
/* Doc Erratum: the uSDHC actual maximum timeout count is 1 << 29 */
|
||||
return esdhc_is_usdhc(imx_data) ? 1 << 29 : 1 << 27;
|
||||
}
|
||||
|
||||
|
@ -1032,10 +1042,10 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
|
|||
|
||||
/*
|
||||
* ROM code will change the bit burst_length_enable setting
|
||||
* to zero if this usdhc is choosed to boot system. Change
|
||||
* to zero if this usdhc is chosen to boot system. Change
|
||||
* it back here, otherwise it will impact the performance a
|
||||
* lot. This bit is used to enable/disable the burst length
|
||||
* for the external AHB2AXI bridge, it's usefully especially
|
||||
* for the external AHB2AXI bridge. It's useful especially
|
||||
* for INCR transfer because without burst length indicator,
|
||||
* the AHB2AXI bridge does not know the burst length in
|
||||
* advance. And without burst length indicator, AHB INCR
|
||||
|
@ -1045,7 +1055,7 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
|
|||
| ESDHC_BURST_LEN_EN_INCR,
|
||||
host->ioaddr + SDHCI_HOST_CONTROL);
|
||||
/*
|
||||
* errata ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
|
||||
* erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
|
||||
* TO1.1, it's harmless for MX6SL
|
||||
*/
|
||||
writel(readl(host->ioaddr + 0x6c) | BIT(7),
|
||||
|
@ -1104,7 +1114,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
|
|||
|
||||
mmc_of_parse_voltage(np, &host->ocr_mask);
|
||||
|
||||
/* sdr50 and sdr104 needs work on 1.8v signal voltage */
|
||||
/* sdr50 and sdr104 need work on 1.8v signal voltage */
|
||||
if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
|
||||
!IS_ERR(imx_data->pins_default)) {
|
||||
imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
|
||||
|
@ -1116,7 +1126,8 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
|
|||
dev_warn(mmc_dev(host->mmc),
|
||||
"could not get ultra high speed state, work on normal mode\n");
|
||||
/*
|
||||
* fall back to not support uhs by specify no 1.8v quirk
|
||||
* fall back to not supporting uhs by specifying no
|
||||
* 1.8v quirk
|
||||
*/
|
||||
host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
|
||||
}
|
||||
|
@ -1250,14 +1261,20 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
|
|||
|
||||
pltfm_host->clk = imx_data->clk_per;
|
||||
pltfm_host->clock = clk_get_rate(pltfm_host->clk);
|
||||
clk_prepare_enable(imx_data->clk_per);
|
||||
clk_prepare_enable(imx_data->clk_ipg);
|
||||
clk_prepare_enable(imx_data->clk_ahb);
|
||||
err = clk_prepare_enable(imx_data->clk_per);
|
||||
if (err)
|
||||
goto free_sdhci;
|
||||
err = clk_prepare_enable(imx_data->clk_ipg);
|
||||
if (err)
|
||||
goto disable_per_clk;
|
||||
err = clk_prepare_enable(imx_data->clk_ahb);
|
||||
if (err)
|
||||
goto disable_ipg_clk;
|
||||
|
||||
imx_data->pinctrl = devm_pinctrl_get(&pdev->dev);
|
||||
if (IS_ERR(imx_data->pinctrl)) {
|
||||
err = PTR_ERR(imx_data->pinctrl);
|
||||
goto disable_clk;
|
||||
goto disable_ahb_clk;
|
||||
}
|
||||
|
||||
imx_data->pins_default = pinctrl_lookup_state(imx_data->pinctrl,
|
||||
|
@ -1265,11 +1282,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
|
|||
if (IS_ERR(imx_data->pins_default))
|
||||
dev_warn(mmc_dev(host->mmc), "could not get default state\n");
|
||||
|
||||
if (imx_data->socdata->flags & ESDHC_FLAG_ENGCM07207)
|
||||
/* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
|
||||
host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK
|
||||
| SDHCI_QUIRK_BROKEN_ADMA;
|
||||
|
||||
if (esdhc_is_usdhc(imx_data)) {
|
||||
host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
|
||||
host->mmc->caps |= MMC_CAP_1_8V_DDR;
|
||||
|
@ -1297,13 +1309,13 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
|
|||
else
|
||||
err = sdhci_esdhc_imx_probe_nondt(pdev, host, imx_data);
|
||||
if (err)
|
||||
goto disable_clk;
|
||||
goto disable_ahb_clk;
|
||||
|
||||
sdhci_esdhc_imx_hwinit(host);
|
||||
|
||||
err = sdhci_add_host(host);
|
||||
if (err)
|
||||
goto disable_clk;
|
||||
goto disable_ahb_clk;
|
||||
|
||||
pm_runtime_set_active(&pdev->dev);
|
||||
pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
|
||||
|
@ -1313,10 +1325,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
|
|||
|
||||
return 0;
|
||||
|
||||
disable_clk:
|
||||
clk_disable_unprepare(imx_data->clk_per);
|
||||
clk_disable_unprepare(imx_data->clk_ipg);
|
||||
disable_ahb_clk:
|
||||
clk_disable_unprepare(imx_data->clk_ahb);
|
||||
disable_ipg_clk:
|
||||
clk_disable_unprepare(imx_data->clk_ipg);
|
||||
disable_per_clk:
|
||||
clk_disable_unprepare(imx_data->clk_per);
|
||||
free_sdhci:
|
||||
sdhci_pltfm_free(pdev);
|
||||
return err;
|
||||
|
@ -1393,14 +1407,34 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
|
|||
struct sdhci_host *host = dev_get_drvdata(dev);
|
||||
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
||||
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
|
||||
int err;
|
||||
|
||||
if (!sdhci_sdio_irq_enabled(host)) {
|
||||
clk_prepare_enable(imx_data->clk_per);
|
||||
clk_prepare_enable(imx_data->clk_ipg);
|
||||
err = clk_prepare_enable(imx_data->clk_per);
|
||||
if (err)
|
||||
return err;
|
||||
err = clk_prepare_enable(imx_data->clk_ipg);
|
||||
if (err)
|
||||
goto disable_per_clk;
|
||||
}
|
||||
clk_prepare_enable(imx_data->clk_ahb);
|
||||
err = clk_prepare_enable(imx_data->clk_ahb);
|
||||
if (err)
|
||||
goto disable_ipg_clk;
|
||||
err = sdhci_runtime_resume_host(host);
|
||||
if (err)
|
||||
goto disable_ahb_clk;
|
||||
|
||||
return sdhci_runtime_resume_host(host);
|
||||
return 0;
|
||||
|
||||
disable_ahb_clk:
|
||||
clk_disable_unprepare(imx_data->clk_ahb);
|
||||
disable_ipg_clk:
|
||||
if (!sdhci_sdio_irq_enabled(host))
|
||||
clk_disable_unprepare(imx_data->clk_ipg);
|
||||
disable_per_clk:
|
||||
if (!sdhci_sdio_irq_enabled(host))
|
||||
clk_disable_unprepare(imx_data->clk_per);
|
||||
return err;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
*/
|
||||
|
||||
#define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \
|
||||
SDHCI_QUIRK_32BIT_DMA_ADDR | \
|
||||
SDHCI_QUIRK_NO_BUSY_IRQ | \
|
||||
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
|
||||
SDHCI_QUIRK_PIO_NEEDS_DELAY | \
|
||||
|
|
|
@ -638,7 +638,7 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
|
|||
|
||||
ret = mmc_of_parse(host->mmc);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "parsing dt failed (%u)\n", ret);
|
||||
dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
|
||||
goto unreg_clk;
|
||||
}
@ -347,8 +347,7 @@ static inline void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot)
|
|||
static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
|
||||
{
|
||||
slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
|
||||
slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC |
|
||||
MMC_CAP2_HC_ERASE_SZ;
|
||||
slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -542,6 +541,23 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
|
|||
}
|
||||
}
|
||||
|
||||
#define INTEL_HS400_ES_REG 0x78
|
||||
#define INTEL_HS400_ES_BIT BIT(0)
|
||||
|
||||
static void intel_hs400_enhanced_strobe(struct mmc_host *mmc,
|
||||
struct mmc_ios *ios)
|
||||
{
|
||||
struct sdhci_host *host = mmc_priv(mmc);
|
||||
u32 val;
|
||||
|
||||
val = sdhci_readl(host, INTEL_HS400_ES_REG);
|
||||
if (ios->enhanced_strobe)
|
||||
val |= INTEL_HS400_ES_BIT;
|
||||
else
|
||||
val &= ~INTEL_HS400_ES_BIT;
|
||||
sdhci_writel(host, val, INTEL_HS400_ES_REG);
|
||||
}
|
||||
|
||||
static const struct sdhci_ops sdhci_intel_byt_ops = {
|
||||
.set_clock = sdhci_set_clock,
|
||||
.set_power = sdhci_intel_set_power,
|
||||
|
@ -569,7 +585,6 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
|
|||
MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
|
||||
MMC_CAP_CMD_DURING_TFR |
|
||||
MMC_CAP_WAIT_WHILE_BUSY;
|
||||
slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
|
||||
slot->hw_reset = sdhci_pci_int_hw_reset;
|
||||
if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC)
|
||||
slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
|
||||
|
@@ -578,6 +593,19 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
	return 0;
}

static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
	int ret = byt_emmc_probe_slot(slot);

	if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
		slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES,
		slot->host->mmc_host_ops.hs400_enhanced_strobe =
						intel_hs400_enhanced_strobe;
	}

	return ret;
}

#ifdef CONFIG_ACPI
static int ni_set_max_freq(struct sdhci_pci_slot *slot)
{
@@ -630,7 +658,7 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
{
	byt_read_dsm(slot);
	slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
				 MMC_CAP_AGGRESSIVE_PM;
				 MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE;
	slot->cd_idx = 0;
	slot->cd_override_level = true;
	if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
@@ -653,6 +681,17 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
	.priv_size	= sizeof(struct intel_host),
};

static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
	.allow_runtime_pm	= true,
	.probe_slot		= glk_emmc_probe_slot,
	.quirks			= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.quirks2		= SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
				  SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
				  SDHCI_QUIRK2_STOP_WITH_TC,
	.ops			= &sdhci_intel_byt_ops,
	.priv_size		= sizeof(struct intel_host),
};

static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = {
	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.quirks2	= SDHCI_QUIRK2_HOST_OFF_CARD_ON |
@ -1170,554 +1209,79 @@ static const struct sdhci_pci_fixes sdhci_amd = {
|
|||
};
|
||||
|
||||
static const struct pci_device_id pci_ids[] = {
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_RICOH,
|
||||
.device = PCI_DEVICE_ID_RICOH_R5C822,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_ricoh,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_RICOH,
|
||||
.device = 0x843,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_RICOH,
|
||||
.device = 0xe822,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_RICOH,
|
||||
.device = 0xe823,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_ENE,
|
||||
.device = PCI_DEVICE_ID_ENE_CB712_SD,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_ene_712,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_ENE,
|
||||
.device = PCI_DEVICE_ID_ENE_CB712_SD_2,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_ene_712,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_ENE,
|
||||
.device = PCI_DEVICE_ID_ENE_CB714_SD,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_ene_714,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_ENE,
|
||||
.device = PCI_DEVICE_ID_ENE_CB714_SD_2,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_ene_714,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_MARVELL,
|
||||
.device = PCI_DEVICE_ID_MARVELL_88ALP01_SD,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_cafe,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_JMICRON,
|
||||
.device = PCI_DEVICE_ID_JMICRON_JMB38X_SD,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_jmicron,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_JMICRON,
|
||||
.device = PCI_DEVICE_ID_JMICRON_JMB38X_MMC,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_jmicron,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_JMICRON,
|
||||
.device = PCI_DEVICE_ID_JMICRON_JMB388_SD,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_jmicron,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_JMICRON,
|
||||
.device = PCI_DEVICE_ID_JMICRON_JMB388_ESD,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_jmicron,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_SYSKONNECT,
|
||||
.device = 0x8000,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_syskt,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_VIA,
|
||||
.device = 0x95d0,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_via,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_REALTEK,
|
||||
.device = 0x5250,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_rtsx,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_QRK_SD,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_qrk,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_MRST_SD0,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc0,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_MRST_SD1,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_MRST_SD2,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_MFD_SD,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sd,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_MFD_SDIO1,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_MFD_SDIO2,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_MFD_EMMC0,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_MFD_EMMC1,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_PCH_SDIO0,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_pch_sdio,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_PCH_SDIO1,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_pch_sdio,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_BYT_EMMC,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_BYT_SDIO,
|
||||
.subvendor = PCI_VENDOR_ID_NI,
|
||||
.subdevice = 0x7884,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_ni_byt_sdio,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_BYT_SDIO,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_BYT_SD,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_BYT_EMMC2,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_BSW_EMMC,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_BSW_SDIO,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_BSW_SD,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_CLV_SDIO0,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sd,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_CLV_SDIO1,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_CLV_SDIO2,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_CLV_EMMC0,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_CLV_EMMC1,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_MRFLD_MMC,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_mrfld_mmc,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_SPT_EMMC,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_SPT_SDIO,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_SPT_SD,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_DNV_EMMC,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_BXT_EMMC,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_BXT_SDIO,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_BXT_SD,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_BXTM_EMMC,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_BXTM_SDIO,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_BXTM_SD,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_APL_EMMC,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_APL_SDIO,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_APL_SD,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_GLK_EMMC,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_GLK_SDIO,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_GLK_SD,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_O2,
|
||||
.device = PCI_DEVICE_ID_O2_8120,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_o2,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_O2,
|
||||
.device = PCI_DEVICE_ID_O2_8220,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_o2,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_O2,
|
||||
.device = PCI_DEVICE_ID_O2_8221,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_o2,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_O2,
|
||||
.device = PCI_DEVICE_ID_O2_8320,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_o2,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_O2,
|
||||
.device = PCI_DEVICE_ID_O2_8321,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_o2,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_O2,
|
||||
.device = PCI_DEVICE_ID_O2_FUJIN2,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_o2,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_O2,
|
||||
.device = PCI_DEVICE_ID_O2_SDS0,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_o2,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_O2,
|
||||
.device = PCI_DEVICE_ID_O2_SDS1,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_o2,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_O2,
|
||||
.device = PCI_DEVICE_ID_O2_SEABIRD0,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_o2,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_O2,
|
||||
.device = PCI_DEVICE_ID_O2_SEABIRD1,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_o2,
|
||||
},
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_AMD,
|
||||
.device = PCI_ANY_ID,
|
||||
.class = PCI_CLASS_SYSTEM_SDHCI << 8,
|
||||
.class_mask = 0xFFFF00,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_amd,
|
||||
},
|
||||
{ /* Generic SD host controller */
|
||||
PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
|
||||
},
|
||||
|
||||
	SDHCI_PCI_DEVICE(RICOH, R5C822,  ricoh),
	SDHCI_PCI_DEVICE(RICOH, R5C843,  ricoh_mmc),
	SDHCI_PCI_DEVICE(RICOH, R5CE822, ricoh_mmc),
	SDHCI_PCI_DEVICE(RICOH, R5CE823, ricoh_mmc),
	SDHCI_PCI_DEVICE(ENE, CB712_SD,   ene_712),
	SDHCI_PCI_DEVICE(ENE, CB712_SD_2, ene_712),
	SDHCI_PCI_DEVICE(ENE, CB714_SD,   ene_714),
	SDHCI_PCI_DEVICE(ENE, CB714_SD_2, ene_714),
	SDHCI_PCI_DEVICE(MARVELL, 88ALP01_SD, cafe),
	SDHCI_PCI_DEVICE(JMICRON, JMB38X_SD,  jmicron),
	SDHCI_PCI_DEVICE(JMICRON, JMB38X_MMC, jmicron),
	SDHCI_PCI_DEVICE(JMICRON, JMB388_SD,  jmicron),
	SDHCI_PCI_DEVICE(JMICRON, JMB388_ESD, jmicron),
	SDHCI_PCI_DEVICE(SYSKONNECT, 8000, syskt),
	SDHCI_PCI_DEVICE(VIA, 95D0, via),
	SDHCI_PCI_DEVICE(REALTEK, 5250, rtsx),
	SDHCI_PCI_DEVICE(INTEL, QRK_SD,    intel_qrk),
	SDHCI_PCI_DEVICE(INTEL, MRST_SD0,  intel_mrst_hc0),
	SDHCI_PCI_DEVICE(INTEL, MRST_SD1,  intel_mrst_hc1_hc2),
	SDHCI_PCI_DEVICE(INTEL, MRST_SD2,  intel_mrst_hc1_hc2),
	SDHCI_PCI_DEVICE(INTEL, MFD_SD,    intel_mfd_sd),
	SDHCI_PCI_DEVICE(INTEL, MFD_SDIO1, intel_mfd_sdio),
	SDHCI_PCI_DEVICE(INTEL, MFD_SDIO2, intel_mfd_sdio),
	SDHCI_PCI_DEVICE(INTEL, MFD_EMMC0, intel_mfd_emmc),
	SDHCI_PCI_DEVICE(INTEL, MFD_EMMC1, intel_mfd_emmc),
	SDHCI_PCI_DEVICE(INTEL, PCH_SDIO0, intel_pch_sdio),
	SDHCI_PCI_DEVICE(INTEL, PCH_SDIO1, intel_pch_sdio),
	SDHCI_PCI_DEVICE(INTEL, BYT_EMMC,  intel_byt_emmc),
	SDHCI_PCI_SUBDEVICE(INTEL, BYT_SDIO, NI, 7884, ni_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, BYT_SDIO,  intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, BYT_SD,    intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, BYT_EMMC2, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, BSW_EMMC,  intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, BSW_SDIO,  intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, BSW_SD,    intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, CLV_SDIO0, intel_mfd_sd),
	SDHCI_PCI_DEVICE(INTEL, CLV_SDIO1, intel_mfd_sdio),
	SDHCI_PCI_DEVICE(INTEL, CLV_SDIO2, intel_mfd_sdio),
	SDHCI_PCI_DEVICE(INTEL, CLV_EMMC0, intel_mfd_emmc),
	SDHCI_PCI_DEVICE(INTEL, CLV_EMMC1, intel_mfd_emmc),
	SDHCI_PCI_DEVICE(INTEL, MRFLD_MMC, intel_mrfld_mmc),
	SDHCI_PCI_DEVICE(INTEL, SPT_EMMC,  intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, SPT_SDIO,  intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, SPT_SD,    intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, DNV_EMMC,  intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, BXT_EMMC,  intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, BXT_SDIO,  intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, BXT_SD,    intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, BXTM_EMMC, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, BXTM_SDIO, intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, BXTM_SD,   intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, APL_EMMC,  intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, APL_SDIO,  intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, APL_SD,    intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, GLK_EMMC,  intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, GLK_SDIO,  intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, GLK_SD,    intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, CNP_EMMC,  intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, CNP_SD,    intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, CNPH_SD,   intel_byt_sd),
	SDHCI_PCI_DEVICE(O2, 8120,     o2),
	SDHCI_PCI_DEVICE(O2, 8220,     o2),
	SDHCI_PCI_DEVICE(O2, 8221,     o2),
	SDHCI_PCI_DEVICE(O2, 8320,     o2),
	SDHCI_PCI_DEVICE(O2, 8321,     o2),
	SDHCI_PCI_DEVICE(O2, FUJIN2,   o2),
	SDHCI_PCI_DEVICE(O2, SDS0,     o2),
	SDHCI_PCI_DEVICE(O2, SDS1,     o2),
	SDHCI_PCI_DEVICE(O2, SEABIRD0, o2),
	SDHCI_PCI_DEVICE(O2, SEABIRD1, o2),
	SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd),
	/* Generic SD host controller */
	{PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)},
	{ /* end: all zeroes */ },
};

@@ -2,7 +2,7 @@
#define __SDHCI_PCI_H

/*
 * PCI device IDs
 * PCI device IDs, sub IDs
 */

#define PCI_DEVICE_ID_INTEL_PCH_SDIO0	0x8809

@@ -37,6 +37,50 @@
#define PCI_DEVICE_ID_INTEL_GLK_SD	0x31ca
#define PCI_DEVICE_ID_INTEL_GLK_EMMC	0x31cc
#define PCI_DEVICE_ID_INTEL_GLK_SDIO	0x31d0
#define PCI_DEVICE_ID_INTEL_CNP_EMMC	0x9dc4
#define PCI_DEVICE_ID_INTEL_CNP_SD	0x9df5
#define PCI_DEVICE_ID_INTEL_CNPH_SD	0xa375

#define PCI_DEVICE_ID_SYSKONNECT_8000	0x8000
#define PCI_DEVICE_ID_VIA_95D0		0x95d0
#define PCI_DEVICE_ID_REALTEK_5250	0x5250

#define PCI_SUBDEVICE_ID_NI_7884	0x7884

/*
 * PCI device class and mask
 */

#define SYSTEM_SDHCI		(PCI_CLASS_SYSTEM_SDHCI << 8)
#define PCI_CLASS_MASK		0xFFFF00

/*
 * Macros for PCI device-description
 */

#define _PCI_VEND(vend) PCI_VENDOR_ID_##vend
#define _PCI_DEV(vend, dev) PCI_DEVICE_ID_##vend##_##dev
#define _PCI_SUBDEV(subvend, subdev) PCI_SUBDEVICE_ID_##subvend##_##subdev

#define SDHCI_PCI_DEVICE(vend, dev, cfg) { \
	.vendor = _PCI_VEND(vend), .device = _PCI_DEV(vend, dev), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \
	.driver_data = (kernel_ulong_t)&(sdhci_##cfg) \
}

#define SDHCI_PCI_SUBDEVICE(vend, dev, subvend, subdev, cfg) { \
	.vendor = _PCI_VEND(vend), .device = _PCI_DEV(vend, dev), \
	.subvendor = _PCI_VEND(subvend), \
	.subdevice = _PCI_SUBDEV(subvend, subdev), \
	.driver_data = (kernel_ulong_t)&(sdhci_##cfg) \
}

#define SDHCI_PCI_DEVICE_CLASS(vend, cl, cl_msk, cfg) { \
	.vendor = _PCI_VEND(vend), .device = PCI_ANY_ID, \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \
	.class = (cl), .class_mask = (cl_msk), \
	.driver_data = (kernel_ulong_t)&(sdhci_##cfg) \
}
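For reference (not part of the patch itself): an entry written with these helpers expands to the same pci_device_id initializer that the old open-coded table entries spelled out by hand. For example:

/* Example only: SDHCI_PCI_DEVICE(INTEL, GLK_EMMC, intel_glk_emmc) expands to */
{
	.vendor      = PCI_VENDOR_ID_INTEL,
	.device      = PCI_DEVICE_ID_INTEL_GLK_EMMC,
	.subvendor   = PCI_ANY_ID,
	.subdevice   = PCI_ANY_ID,
	.driver_data = (kernel_ulong_t)&(sdhci_intel_glk_emmc)
},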

/*
 * PCI registers
@@ -256,9 +256,6 @@ static int sdricoh_blockio(struct sdricoh_host *host, int read,
		}
	}

	if (len)
		return -EIO;

	return 0;
}

@@ -1,16 +1,16 @@
/*
 * linux/drivers/mmc/host/tmio_mmc.c
 * Driver for the MMC / SD / SDIO cell found in:
 *
 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
 *
 * Copyright (C) 2017 Renesas Electronics Corporation
 * Copyright (C) 2017 Horms Solutions, Simon Horman
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO cell found in:
 *
 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
 */

#include <linux/device.h>
@@ -99,13 +99,13 @@ static int tmio_mmc_probe(struct platform_device *pdev)
	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
	host->bus_shift = resource_size(res) >> 10;

	ret = tmio_mmc_host_probe(host, pdata);
	ret = tmio_mmc_host_probe(host, pdata, NULL);
	if (ret)
		goto host_free;

	ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq,
			       IRQF_TRIGGER_FALLING,
			       dev_name(&pdev->dev), host);
			       IRQF_TRIGGER_FALLING,
			       dev_name(&pdev->dev), host);
	if (ret)
		goto host_remove;

@@ -132,6 +132,7 @@ static int tmio_mmc_remove(struct platform_device *pdev)

	if (mmc) {
		struct tmio_mmc_host *host = mmc_priv(mmc);

		tmio_mmc_host_remove(host);
		if (cell->disable)
			cell->disable(pdev);

@@ -145,8 +146,7 @@ static int tmio_mmc_remove(struct platform_device *pdev)
static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tmio_mmc_suspend, tmio_mmc_resume)
	SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
			   tmio_mmc_host_runtime_resume,
			   NULL)
			   tmio_mmc_host_runtime_resume, NULL)
};

static struct platform_driver tmio_mmc_driver = {
@@ -1,8 +1,11 @@
/*
 * linux/drivers/mmc/host/tmio_mmc.h
 * Driver for the MMC / SD / SDIO cell found in:
 *
 * Copyright (C) 2016 Sang Engineering, Wolfram Sang
 * Copyright (C) 2015-16 Renesas Electronics Corporation
 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
 *
 * Copyright (C) 2015-17 Renesas Electronics Corporation
 * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
 * Copyright (C) 2016-17 Horms Solutions, Simon Horman
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *

@@ -10,9 +13,6 @@
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO cell found in:
 *
 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
 */

#ifndef TMIO_MMC_H

@@ -115,6 +115,15 @@ struct tmio_mmc_dma {
	void (*enable)(struct tmio_mmc_host *host, bool enable);
};

struct tmio_mmc_dma_ops {
	void (*start)(struct tmio_mmc_host *host, struct mmc_data *data);
	void (*enable)(struct tmio_mmc_host *host, bool enable);
	void (*request)(struct tmio_mmc_host *host,
			struct tmio_mmc_data *pdata);
	void (*release)(struct tmio_mmc_host *host);
	void (*abort)(struct tmio_mmc_host *host);
};
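As a rough sketch (illustrative names, not part of the patch): a backend with DMA support would fill in one of these ops tables and hand it to tmio_mmc_host_probe(), whose updated prototype appears below; passing NULL keeps the host PIO-only.

/* Hypothetical example only: a backend wiring up its DMA callbacks */
static const struct tmio_mmc_dma_ops example_dma_ops = {
	.start   = example_dma_start,	/* illustrative callback names */
	.enable  = example_dma_enable,
	.request = example_dma_request,
	.release = example_dma_release,
	.abort   = example_dma_abort,
};

/* ...and passing them at probe time: */
ret = tmio_mmc_host_probe(host, pdata, &example_dma_ops);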
struct tmio_mmc_host {
	void __iomem *ctl;
	struct mmc_command      *cmd;

@@ -189,12 +198,15 @@ struct tmio_mmc_host {
	/* Tuning values: 1 for success, 0 for failure */
	DECLARE_BITMAP(taps, BITS_PER_BYTE * sizeof(long));
	unsigned int tap_num;

	const struct tmio_mmc_dma_ops *dma_ops;
};

struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev);
void tmio_mmc_host_free(struct tmio_mmc_host *host);
int tmio_mmc_host_probe(struct tmio_mmc_host *host,
			struct tmio_mmc_data *pdata);
			struct tmio_mmc_data *pdata,
			const struct tmio_mmc_dma_ops *dma_ops);
void tmio_mmc_host_remove(struct tmio_mmc_host *host);
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host);

@@ -216,38 +228,6 @@ static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg,
	local_irq_restore(*flags);
}

#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data);
void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable);
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata);
void tmio_mmc_release_dma(struct tmio_mmc_host *host);
void tmio_mmc_abort_dma(struct tmio_mmc_host *host);
#else
static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
				      struct mmc_data *data)
{
}

static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
}

static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
					struct tmio_mmc_data *pdata)
{
	host->chan_tx = NULL;
	host->chan_rx = NULL;
}

static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
}

static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
}
#endif

#ifdef CONFIG_PM
int tmio_mmc_host_runtime_suspend(struct device *dev);
int tmio_mmc_host_runtime_resume(struct device *dev);

@@ -259,24 +239,26 @@ static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
}

static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
				      u16 *buf, int count)
				      u16 *buf, int count)
{
	readsw(host->ctl + (addr << host->bus_shift), buf, count);
}

static inline u32 sd_ctrl_read16_and_16_as_32(struct tmio_mmc_host *host, int addr)
static inline u32 sd_ctrl_read16_and_16_as_32(struct tmio_mmc_host *host,
					      int addr)
{
	return readw(host->ctl + (addr << host->bus_shift)) |
	       readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}

static inline void sd_ctrl_read32_rep(struct tmio_mmc_host *host, int addr,
				      u32 *buf, int count)
				      u32 *buf, int count)
{
	readsl(host->ctl + (addr << host->bus_shift), buf, count);
}

static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
				   u16 val)
{
	/* If there is a hook and it returns non-zero then there
	 * is an error and the write should be skipped

@@ -287,19 +269,20 @@ static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val
}

static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
				       u16 *buf, int count)
				       u16 *buf, int count)
{
	writesw(host->ctl + (addr << host->bus_shift), buf, count);
}

static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host, int addr, u32 val)
static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host,
						int addr, u32 val)
{
	writew(val & 0xffff, host->ctl + (addr << host->bus_shift));
	writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}

static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr,
				       const u32 *buf, int count)
				       const u32 *buf, int count)
{
	writesl(host->ctl + (addr << host->bus_shift), buf, count);
}

@ -1,8 +1,11 @@
|
|||
/*
|
||||
* linux/drivers/mmc/host/tmio_mmc_pio.c
|
||||
* Driver for the MMC / SD / SDIO IP found in:
|
||||
*
|
||||
* Copyright (C) 2016 Sang Engineering, Wolfram Sang
|
||||
* Copyright (C) 2015-16 Renesas Electronics Corporation
|
||||
* TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
|
||||
*
|
||||
* Copyright (C) 2015-17 Renesas Electronics Corporation
|
||||
* Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
|
||||
* Copyright (C) 2017 Horms Solutions, Simon Horman
|
||||
* Copyright (C) 2011 Guennadi Liakhovetski
|
||||
* Copyright (C) 2007 Ian Molton
|
||||
* Copyright (C) 2004 Ian Molton
|
||||
|
@ -11,10 +14,6 @@
|
|||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* Driver for the MMC / SD / SDIO IP found in:
|
||||
*
|
||||
* TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
|
||||
*
|
||||
* This driver draws mainly on scattered spec sheets, Reverse engineering
|
||||
* of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
|
||||
* support). (Further 4 bit support from a later datasheet).
|
||||
|
@ -52,17 +51,55 @@
|
|||
|
||||
#include "tmio_mmc.h"
|
||||
|
||||
static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
|
||||
struct mmc_data *data)
|
||||
{
|
||||
if (host->dma_ops)
|
||||
host->dma_ops->start(host, data);
|
||||
}
|
||||
|
||||
static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
|
||||
{
|
||||
if (host->dma_ops)
|
||||
host->dma_ops->enable(host, enable);
|
||||
}
|
||||
|
||||
static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
|
||||
struct tmio_mmc_data *pdata)
|
||||
{
|
||||
if (host->dma_ops) {
|
||||
host->dma_ops->request(host, pdata);
|
||||
} else {
|
||||
host->chan_tx = NULL;
|
||||
host->chan_rx = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
|
||||
{
|
||||
if (host->dma_ops)
|
||||
host->dma_ops->release(host);
|
||||
}
|
||||
|
||||
static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
|
||||
{
|
||||
if (host->dma_ops)
|
||||
host->dma_ops->abort(host);
|
||||
}
|
||||
|
||||
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
|
||||
{
|
||||
host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
|
||||
sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tmio_mmc_enable_mmc_irqs);
|
||||
|
||||
void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
|
||||
{
|
||||
host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
|
||||
sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tmio_mmc_disable_mmc_irqs);
|
||||
|
||||
static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
|
||||
{
|
||||
|
@ -90,16 +127,17 @@ static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
|
|||
|
||||
#define STATUS_TO_TEXT(a, status, i) \
|
||||
do { \
|
||||
if (status & TMIO_STAT_##a) { \
|
||||
if (i++) \
|
||||
printk(" | "); \
|
||||
printk(#a); \
|
||||
if ((status) & TMIO_STAT_##a) { \
|
||||
if ((i)++) \
|
||||
printk(KERN_DEBUG " | "); \
|
||||
printk(KERN_DEBUG #a); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
static void pr_debug_status(u32 status)
|
||||
{
|
||||
int i = 0;
|
||||
|
||||
pr_debug("status: %08x = ", status);
|
||||
STATUS_TO_TEXT(CARD_REMOVE, status, i);
|
||||
STATUS_TO_TEXT(CARD_INSERT, status, i);
|
||||
|
@ -140,8 +178,7 @@ static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
|
|||
pm_runtime_get_sync(mmc_dev(mmc));
|
||||
|
||||
host->sdio_irq_enabled = true;
|
||||
host->sdio_irq_mask = TMIO_SDIO_MASK_ALL &
|
||||
~TMIO_SDIO_STAT_IOIRQ;
|
||||
host->sdio_irq_mask = TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ;
|
||||
|
||||
/* Clear obsolete interrupts before enabling */
|
||||
sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS) & ~TMIO_SDIO_MASK_ALL;
|
||||
|
@ -185,7 +222,7 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
|
|||
}
|
||||
|
||||
static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
|
||||
unsigned int new_clock)
|
||||
unsigned int new_clock)
|
||||
{
|
||||
u32 clk = 0, clock;
|
||||
|
||||
|
@ -229,6 +266,12 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
|
|||
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
|
||||
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
|
||||
msleep(10);
|
||||
|
||||
if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
|
||||
sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
|
||||
sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static void tmio_mmc_reset_work(struct work_struct *work)
|
||||
|
@ -246,16 +289,16 @@ static void tmio_mmc_reset_work(struct work_struct *work)
|
|||
* cancel_delayed_work(), it can happen, that a .set_ios() call preempts
|
||||
* us, so, have to check for IS_ERR(host->mrq)
|
||||
*/
|
||||
if (IS_ERR_OR_NULL(mrq)
|
||||
|| time_is_after_jiffies(host->last_req_ts +
|
||||
msecs_to_jiffies(CMDREQ_TIMEOUT))) {
|
||||
if (IS_ERR_OR_NULL(mrq) ||
|
||||
time_is_after_jiffies(host->last_req_ts +
|
||||
msecs_to_jiffies(CMDREQ_TIMEOUT))) {
|
||||
spin_unlock_irqrestore(&host->lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
dev_warn(&host->pdev->dev,
|
||||
"timeout waiting for hardware interrupt (CMD%u)\n",
|
||||
mrq->cmd->opcode);
|
||||
"timeout waiting for hardware interrupt (CMD%u)\n",
|
||||
mrq->cmd->opcode);
|
||||
|
||||
if (host->data)
|
||||
host->data->error = -ETIMEDOUT;
|
||||
|
@ -279,45 +322,6 @@ static void tmio_mmc_reset_work(struct work_struct *work)
|
|||
mmc_request_done(host->mmc, mrq);
|
||||
}
|
||||
|
||||
/* called with host->lock held, interrupts disabled */
|
||||
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
|
||||
{
|
||||
struct mmc_request *mrq;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&host->lock, flags);
|
||||
|
||||
mrq = host->mrq;
|
||||
if (IS_ERR_OR_NULL(mrq)) {
|
||||
spin_unlock_irqrestore(&host->lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
host->cmd = NULL;
|
||||
host->data = NULL;
|
||||
host->force_pio = false;
|
||||
|
||||
cancel_delayed_work(&host->delayed_reset_work);
|
||||
|
||||
host->mrq = NULL;
|
||||
spin_unlock_irqrestore(&host->lock, flags);
|
||||
|
||||
if (mrq->cmd->error || (mrq->data && mrq->data->error))
|
||||
tmio_mmc_abort_dma(host);
|
||||
|
||||
if (host->check_scc_error)
|
||||
host->check_scc_error(host);
|
||||
|
||||
mmc_request_done(host->mmc, mrq);
|
||||
}
|
||||
|
||||
static void tmio_mmc_done_work(struct work_struct *work)
|
||||
{
|
||||
struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
|
||||
done);
|
||||
tmio_mmc_finish_request(host);
|
||||
}
|
||||
|
||||
/* These are the bitmasks the tmio chip requires to implement the MMC response
|
||||
* types. Note that R1 and R6 are the same in this scheme. */
|
||||
#define APP_CMD 0x0040
|
||||
|
@ -332,7 +336,8 @@ static void tmio_mmc_done_work(struct work_struct *work)
|
|||
#define SECURITY_CMD 0x4000
|
||||
#define NO_CMD12_ISSUE 0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */
|
||||
|
||||
static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
|
||||
static int tmio_mmc_start_command(struct tmio_mmc_host *host,
|
||||
struct mmc_command *cmd)
|
||||
{
|
||||
struct mmc_data *data = host->data;
|
||||
int c = cmd->opcode;
|
||||
|
@ -371,11 +376,11 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command
|
|||
c |= TRANSFER_MULTI;
|
||||
|
||||
/*
|
||||
* Disable auto CMD12 at IO_RW_EXTENDED when
|
||||
* multiple block transfer
|
||||
* Disable auto CMD12 at IO_RW_EXTENDED and
|
||||
* SET_BLOCK_COUNT when doing multiple block transfer
|
||||
*/
|
||||
if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
|
||||
(cmd->opcode == SD_IO_RW_EXTENDED))
|
||||
(cmd->opcode == SD_IO_RW_EXTENDED || host->mrq->sbc))
|
||||
c |= NO_CMD12_ISSUE;
|
||||
}
|
||||
if (data->flags & MMC_DATA_READ)
|
||||
|
@ -497,8 +502,6 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
|
|||
|
||||
if (host->sg_off == host->sg_ptr->length)
|
||||
tmio_mmc_next_sg(host);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
|
||||
|
@ -506,6 +509,7 @@ static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
|
|||
if (host->sg_ptr == &host->bounce_sg) {
|
||||
unsigned long flags;
|
||||
void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
|
||||
|
||||
memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
|
||||
tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
|
||||
}
|
||||
|
@ -552,7 +556,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
|
|||
host->mrq);
|
||||
}
|
||||
|
||||
if (stop) {
|
||||
if (stop && !host->mrq->sbc) {
|
||||
if (stop->opcode != MMC_STOP_TRANSMISSION || stop->arg)
|
||||
dev_err(&host->pdev->dev, "unsupported stop: CMD%u,0x%x. We did CMD12,0\n",
|
||||
stop->opcode, stop->arg);
|
||||
|
@ -565,10 +569,12 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
|
|||
|
||||
schedule_work(&host->done);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tmio_mmc_do_data_irq);
|
||||
|
||||
static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
|
||||
{
|
||||
struct mmc_data *data;
|
||||
|
||||
spin_lock(&host->lock);
|
||||
data = host->data;
|
||||
|
||||
|
@ -613,8 +619,7 @@ out:
|
|||
spin_unlock(&host->lock);
|
||||
}
|
||||
|
||||
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
|
||||
unsigned int stat)
|
||||
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat)
|
||||
{
|
||||
struct mmc_command *cmd = host->cmd;
|
||||
int i, addr;
|
||||
|
@ -675,7 +680,7 @@ out:
|
|||
}
|
||||
|
||||
static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
|
||||
int ireg, int status)
|
||||
int ireg, int status)
|
||||
{
|
||||
struct mmc_host *mmc = host->mmc;
|
||||
|
||||
|
@ -693,14 +698,13 @@ static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
|
|||
return false;
|
||||
}
|
||||
|
||||
static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
|
||||
int ireg, int status)
|
||||
static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg,
|
||||
int status)
|
||||
{
|
||||
/* Command completion */
|
||||
if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
|
||||
tmio_mmc_ack_mmc_irqs(host,
|
||||
TMIO_STAT_CMDRESPEND |
|
||||
TMIO_STAT_CMDTIMEOUT);
|
||||
tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CMDRESPEND |
|
||||
TMIO_STAT_CMDTIMEOUT);
|
||||
tmio_mmc_cmd_irq(host, status);
|
||||
return true;
|
||||
}
|
||||
|
@ -768,10 +772,10 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
|
|||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
EXPORT_SYMBOL(tmio_mmc_irq);
|
||||
EXPORT_SYMBOL_GPL(tmio_mmc_irq);
|
||||
|
||||
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
|
||||
struct mmc_data *data)
|
||||
struct mmc_data *data)
|
||||
{
|
||||
struct tmio_mmc_data *pdata = host->pdata;
|
||||
|
||||
|
@ -826,7 +830,7 @@ static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
|
|||
|
||||
if (host->tap_num * 2 >= sizeof(host->taps) * BITS_PER_BYTE) {
|
||||
dev_warn_once(&host->pdev->dev,
|
||||
"Too many taps, skipping tuning. Please consider updating size of taps field of tmio_mmc_host\n");
|
||||
"Too many taps, skipping tuning. Please consider updating size of taps field of tmio_mmc_host\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -857,12 +861,43 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void tmio_process_mrq(struct tmio_mmc_host *host,
|
||||
struct mmc_request *mrq)
|
||||
{
|
||||
struct mmc_command *cmd;
|
||||
int ret;
|
||||
|
||||
if (mrq->sbc && host->cmd != mrq->sbc) {
|
||||
cmd = mrq->sbc;
|
||||
} else {
|
||||
cmd = mrq->cmd;
|
||||
if (mrq->data) {
|
||||
ret = tmio_mmc_start_data(host, mrq->data);
|
||||
if (ret)
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
ret = tmio_mmc_start_command(host, cmd);
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
schedule_delayed_work(&host->delayed_reset_work,
|
||||
msecs_to_jiffies(CMDREQ_TIMEOUT));
|
||||
return;
|
||||
|
||||
fail:
|
||||
host->force_pio = false;
|
||||
host->mrq = NULL;
|
||||
mrq->cmd->error = ret;
|
||||
mmc_request_done(host->mmc, mrq);
|
||||
}
|
||||
|
||||
/* Process requests from the MMC layer */
|
||||
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
|
||||
{
|
||||
struct tmio_mmc_host *host = mmc_priv(mmc);
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
spin_lock_irqsave(&host->lock, flags);
|
||||
|
||||
|
@ -882,24 +917,54 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
|
|||
|
||||
spin_unlock_irqrestore(&host->lock, flags);
|
||||
|
||||
if (mrq->data) {
|
||||
ret = tmio_mmc_start_data(host, mrq->data);
|
||||
if (ret)
|
||||
goto fail;
|
||||
}
|
||||
tmio_process_mrq(host, mrq);
|
||||
}
|
||||
|
||||
ret = tmio_mmc_start_command(host, mrq->cmd);
|
||||
if (!ret) {
|
||||
schedule_delayed_work(&host->delayed_reset_work,
|
||||
msecs_to_jiffies(CMDREQ_TIMEOUT));
|
||||
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
|
||||
{
|
||||
struct mmc_request *mrq;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&host->lock, flags);
|
||||
|
||||
mrq = host->mrq;
|
||||
if (IS_ERR_OR_NULL(mrq)) {
|
||||
spin_unlock_irqrestore(&host->lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
fail:
|
||||
host->force_pio = false;
|
||||
host->mrq = NULL;
|
||||
mrq->cmd->error = ret;
|
||||
mmc_request_done(mmc, mrq);
|
||||
/* If not SET_BLOCK_COUNT, clear old data */
|
||||
if (host->cmd != mrq->sbc) {
|
||||
host->cmd = NULL;
|
||||
host->data = NULL;
|
||||
host->force_pio = false;
|
||||
host->mrq = NULL;
|
||||
}
|
||||
|
||||
cancel_delayed_work(&host->delayed_reset_work);
|
||||
|
||||
spin_unlock_irqrestore(&host->lock, flags);
|
||||
|
||||
if (mrq->cmd->error || (mrq->data && mrq->data->error))
|
||||
tmio_mmc_abort_dma(host);
|
||||
|
||||
if (host->check_scc_error)
|
||||
host->check_scc_error(host);
|
||||
|
||||
/* If SET_BLOCK_COUNT, continue with main command */
|
||||
if (host->mrq) {
|
||||
tmio_process_mrq(host, mrq);
|
||||
return;
|
||||
}
|
||||
|
||||
mmc_request_done(host->mmc, mrq);
|
||||
}
|
||||
|
||||
static void tmio_mmc_done_work(struct work_struct *work)
|
||||
{
|
||||
struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
|
||||
done);
|
||||
tmio_mmc_finish_request(host);
|
||||
}
|
||||
|
||||
static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
|
||||
|
@ -965,7 +1030,7 @@ static void tmio_mmc_power_off(struct tmio_mmc_host *host)
|
|||
}
|
||||
|
||||
static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
|
||||
unsigned char bus_width)
|
||||
unsigned char bus_width)
|
||||
{
|
||||
u16 reg = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT)
|
||||
& ~(CARD_OPT_WIDTH | CARD_OPT_WIDTH8);
|
||||
|
@ -1005,7 +1070,8 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
|
|||
dev_dbg(dev,
|
||||
"%s.%d: CMD%u active since %lu, now %lu!\n",
|
||||
current->comm, task_pid_nr(current),
|
||||
host->mrq->cmd->opcode, host->last_req_ts, jiffies);
|
||||
host->mrq->cmd->opcode, host->last_req_ts,
|
||||
jiffies);
|
||||
}
|
||||
spin_unlock_irqrestore(&host->lock, flags);
|
||||
|
||||
|
@ -1052,6 +1118,7 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
|
|||
struct tmio_mmc_host *host = mmc_priv(mmc);
|
||||
struct tmio_mmc_data *pdata = host->pdata;
|
||||
int ret = mmc_gpio_get_ro(mmc);
|
||||
|
||||
if (ret >= 0)
|
||||
return ret;
|
||||
|
||||
|
@ -1108,6 +1175,7 @@ static void tmio_mmc_of_parse(struct platform_device *pdev,
|
|||
struct tmio_mmc_data *pdata)
|
||||
{
|
||||
const struct device_node *np = pdev->dev.of_node;
|
||||
|
||||
if (!np)
|
||||
return;
|
||||
|
||||
|
@ -1131,16 +1199,17 @@ tmio_mmc_host_alloc(struct platform_device *pdev)
|
|||
|
||||
return host;
|
||||
}
|
||||
EXPORT_SYMBOL(tmio_mmc_host_alloc);
|
||||
EXPORT_SYMBOL_GPL(tmio_mmc_host_alloc);
|
||||
|
||||
void tmio_mmc_host_free(struct tmio_mmc_host *host)
|
||||
{
|
||||
mmc_free_host(host->mmc);
|
||||
}
|
||||
EXPORT_SYMBOL(tmio_mmc_host_free);
|
||||
EXPORT_SYMBOL_GPL(tmio_mmc_host_free);
|
||||
|
||||
int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
|
||||
struct tmio_mmc_data *pdata)
|
||||
struct tmio_mmc_data *pdata,
|
||||
const struct tmio_mmc_dma_ops *dma_ops)
|
||||
{
|
||||
struct platform_device *pdev = _host->pdev;
|
||||
struct mmc_host *mmc = _host->mmc;
|
||||
|
@ -1177,7 +1246,8 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
|
|||
return -ENOMEM;
|
||||
|
||||
tmio_mmc_ops.card_busy = _host->card_busy;
|
||||
tmio_mmc_ops.start_signal_voltage_switch = _host->start_signal_voltage_switch;
|
||||
tmio_mmc_ops.start_signal_voltage_switch =
|
||||
_host->start_signal_voltage_switch;
|
||||
mmc->ops = &tmio_mmc_ops;
|
||||
|
||||
mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
|
||||
|
@ -1221,6 +1291,10 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
|
|||
if (_host->native_hotplug)
|
||||
pm_runtime_get_noresume(&pdev->dev);
|
||||
|
||||
_host->sdio_irq_enabled = false;
|
||||
if (pdata->flags & TMIO_MMC_SDIO_IRQ)
|
||||
_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
|
||||
|
||||
tmio_mmc_clk_stop(_host);
|
||||
tmio_mmc_reset(_host);
|
||||
|
||||
|
@ -1237,13 +1311,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
|
|||
|
||||
_host->sdcard_irq_mask &= ~irq_mask;
|
||||
|
||||
_host->sdio_irq_enabled = false;
|
||||
if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
|
||||
_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
|
||||
sd_ctrl_write16(_host, CTL_SDIO_IRQ_MASK, _host->sdio_irq_mask);
|
||||
sd_ctrl_write16(_host, CTL_TRANSACTION_CTL, 0x0001);
|
||||
}
|
||||
|
||||
spin_lock_init(&_host->lock);
|
||||
mutex_init(&_host->ios_lock);
|
||||
|
||||
|
@ -1252,6 +1319,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
|
|||
INIT_WORK(&_host->done, tmio_mmc_done_work);
|
||||
|
||||
/* See if we also get DMA */
|
||||
_host->dma_ops = dma_ops;
|
||||
tmio_mmc_request_dma(_host, pdata);
|
||||
|
||||
pm_runtime_set_active(&pdev->dev);
|
||||
|
@ -1278,7 +1346,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
|
|||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(tmio_mmc_host_probe);
|
||||
EXPORT_SYMBOL_GPL(tmio_mmc_host_probe);
|
||||
|
||||
void tmio_mmc_host_remove(struct tmio_mmc_host *host)
|
||||
{
|
||||
|
@ -1303,7 +1371,7 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
|
|||
|
||||
tmio_mmc_clk_disable(host);
|
||||
}
|
||||
EXPORT_SYMBOL(tmio_mmc_host_remove);
|
||||
EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
int tmio_mmc_host_runtime_suspend(struct device *dev)
|
||||
|
@ -1320,7 +1388,7 @@ int tmio_mmc_host_runtime_suspend(struct device *dev)
|
|||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);
|
||||
EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_suspend);
|
||||
|
||||
static bool tmio_mmc_can_retune(struct tmio_mmc_host *host)
|
||||
{
|
||||
|
@ -1345,7 +1413,7 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
|
|||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);
|
||||
EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_resume);
|
||||
#endif
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
|
@@ -2107,7 +2107,8 @@ static int vub300_probe(struct usb_interface *interface,
	usb_string(udev, udev->descriptor.iSerialNumber, serial_number,
		   sizeof(serial_number));
	dev_info(&udev->dev, "probing VID:PID(%04X:%04X) %s %s %s\n",
		 udev->descriptor.idVendor, udev->descriptor.idProduct,
		 le16_to_cpu(udev->descriptor.idVendor),
		 le16_to_cpu(udev->descriptor.idProduct),
		 manufacturer, product, serial_number);
	command_out_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!command_out_urb) {

@ -13,15 +13,15 @@
|
|||
#define tmio_ioread16(addr) readw(addr)
|
||||
#define tmio_ioread16_rep(r, b, l) readsw(r, b, l)
|
||||
#define tmio_ioread32(addr) \
|
||||
(((u32) readw((addr))) | (((u32) readw((addr) + 2)) << 16))
|
||||
(((u32)readw((addr))) | (((u32)readw((addr) + 2)) << 16))
|
||||
|
||||
#define tmio_iowrite8(val, addr) writeb((val), (addr))
|
||||
#define tmio_iowrite16(val, addr) writew((val), (addr))
|
||||
#define tmio_iowrite16_rep(r, b, l) writesw(r, b, l)
|
||||
#define tmio_iowrite32(val, addr) \
|
||||
do { \
|
||||
writew((val), (addr)); \
|
||||
writew((val) >> 16, (addr) + 2); \
|
||||
writew((val), (addr)); \
|
||||
writew((val) >> 16, (addr) + 2); \
|
||||
} while (0)
|
||||
|
||||
#define CNF_CMD 0x04
|
||||
|
@ -55,57 +55,57 @@
|
|||
} while (0)
|
||||
|
||||
/* tmio MMC platform flags */
|
||||
#define TMIO_MMC_WRPROTECT_DISABLE (1 << 0)
|
||||
#define TMIO_MMC_WRPROTECT_DISABLE BIT(0)
|
||||
/*
|
||||
* Some controllers can support a 2-byte block size when the bus width
|
||||
* is configured in 4-bit mode.
|
||||
*/
|
||||
#define TMIO_MMC_BLKSZ_2BYTES (1 << 1)
|
||||
#define TMIO_MMC_BLKSZ_2BYTES BIT(1)
|
||||
/*
|
||||
* Some controllers can support SDIO IRQ signalling.
|
||||
*/
|
||||
#define TMIO_MMC_SDIO_IRQ (1 << 2)
|
||||
#define TMIO_MMC_SDIO_IRQ BIT(2)
|
||||
|
||||
/* Some features are only available or tested on RCar Gen2 or later */
|
||||
#define TMIO_MMC_MIN_RCAR2 (1 << 3)
|
||||
/* Some features are only available or tested on R-Car Gen2 or later */
|
||||
#define TMIO_MMC_MIN_RCAR2 BIT(3)
|
||||
|
||||
/*
|
||||
* Some controllers require waiting for the SD bus to become
|
||||
* idle before writing to some registers.
|
||||
*/
|
||||
#define TMIO_MMC_HAS_IDLE_WAIT (1 << 4)
|
||||
#define TMIO_MMC_HAS_IDLE_WAIT BIT(4)
|
||||
/*
|
||||
* A GPIO is used for card hotplug detection. We need an extra flag for this,
|
||||
* because 0 is a valid GPIO number too, and requiring users to specify
|
||||
* cd_gpio < 0 to disable GPIO hotplug would break backwards compatibility.
|
||||
*/
|
||||
#define TMIO_MMC_USE_GPIO_CD (1 << 5)
|
||||
#define TMIO_MMC_USE_GPIO_CD BIT(5)
|
||||
|
||||
/*
|
||||
* Some controllers doesn't have over 0x100 register.
|
||||
* it is used to checking accessibility of
|
||||
* CTL_SD_CARD_CLK_CTL / CTL_CLK_AND_WAIT_CTL
|
||||
*/
|
||||
#define TMIO_MMC_HAVE_HIGH_REG (1 << 6)
|
||||
#define TMIO_MMC_HAVE_HIGH_REG BIT(6)
|
||||
|
||||
/*
|
||||
* Some controllers have CMD12 automatically
|
||||
* issue/non-issue register
|
||||
*/
|
||||
#define TMIO_MMC_HAVE_CMD12_CTRL (1 << 7)
|
||||
#define TMIO_MMC_HAVE_CMD12_CTRL BIT(7)
|
||||
|
||||
/* Controller has some SDIO status bits which must be 1 */
|
||||
#define TMIO_MMC_SDIO_STATUS_SETBITS (1 << 8)
|
||||
#define TMIO_MMC_SDIO_STATUS_SETBITS BIT(8)
|
||||
|
||||
/*
|
||||
* Some controllers have a 32-bit wide data port register
|
||||
*/
|
||||
#define TMIO_MMC_32BIT_DATA_PORT (1 << 9)
|
||||
#define TMIO_MMC_32BIT_DATA_PORT BIT(9)
|
||||
|
||||
/*
|
||||
* Some controllers allows to set SDx actual clock
|
||||
*/
|
||||
#define TMIO_MMC_CLK_ACTUAL (1 << 10)
|
||||
#define TMIO_MMC_CLK_ACTUAL BIT(10)
|
||||
|
||||
int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
|
||||
int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
|
||||
|
@ -146,9 +146,9 @@ struct tmio_nand_data {
|
|||
|
||||
struct tmio_fb_data {
|
||||
int (*lcd_set_power)(struct platform_device *fb_dev,
|
||||
bool on);
|
||||
bool on);
|
||||
int (*lcd_mode)(struct platform_device *fb_dev,
|
||||
const struct fb_videomode *mode);
|
||||
const struct fb_videomode *mode);
|
||||
int num_modes;
|
||||
struct fb_videomode *modes;
|
||||
|
||||
|
@ -157,5 +157,4 @@ struct tmio_fb_data {
|
|||
int width;
|
||||
};
|
||||
|
||||
|
||||
#endif
|
||||
|
|
|
@@ -305,9 +305,7 @@ struct mmc_card {
	struct mmc_part	part[MMC_NUM_PHY_PARTITION];	/* physical partitions */
	unsigned int	nr_parts;

	struct mmc_queue_req	*mqrq;		/* Shared queue structure */
	unsigned int		bouncesz;	/* Bounce buffer size */
	int			qdepth;		/* Shared queue depth */
};

static inline bool mmc_large_sector(struct mmc_card *card)

@@ -130,6 +130,7 @@ struct mmc_host_ops {
	int	(*get_cd)(struct mmc_host *host);

	void	(*enable_sdio_irq)(struct mmc_host *host, int enable);
	void	(*ack_sdio_irq)(struct mmc_host *host);

	/* optional callback for HC quirks */
	void	(*init_card)(struct mmc_host *host, struct mmc_card *card);

@@ -184,6 +185,7 @@
 */
struct mmc_slot {
	int cd_irq;
	bool cd_wake_enabled;
	void *handler_priv;
};

@@ -270,9 +272,11 @@ struct mmc_host {
#define MMC_CAP_UHS_SDR50	(1 << 18)	/* Host supports UHS SDR50 mode */
#define MMC_CAP_UHS_SDR104	(1 << 19)	/* Host supports UHS SDR104 mode */
#define MMC_CAP_UHS_DDR50	(1 << 20)	/* Host supports UHS DDR50 mode */
#define MMC_CAP_NO_BOUNCE_BUFF	(1 << 21)	/* Disable bounce buffers on host */
#define MMC_CAP_DRIVER_TYPE_A	(1 << 23)	/* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C	(1 << 24)	/* Host supports Driver Type C */
#define MMC_CAP_DRIVER_TYPE_D	(1 << 25)	/* Host supports Driver Type D */
#define MMC_CAP_CD_WAKE		(1 << 28)	/* Enable card detect wake */
#define MMC_CAP_CMD_DURING_TFR	(1 << 29)	/* Commands during data transfer */
#define MMC_CAP_CMD23		(1 << 30)	/* CMD23 supported. */
#define MMC_CAP_HW_RESET	(1 << 31)	/* Hardware reset */

@@ -285,7 +289,6 @@ struct mmc_host {
#define MMC_CAP2_HS200_1_2V_SDR	(1 << 6)	/* can support */
#define MMC_CAP2_HS200		(MMC_CAP2_HS200_1_8V_SDR | \
				 MMC_CAP2_HS200_1_2V_SDR)
#define MMC_CAP2_HC_ERASE_SZ	(1 << 9)	/* High-capacity erase size */
#define MMC_CAP2_CD_ACTIVE_HIGH	(1 << 10)	/* Card-detect signal active high */
#define MMC_CAP2_RO_ACTIVE_HIGH	(1 << 11)	/* Write-protect signal active high */
#define MMC_CAP2_PACKED_RD	(1 << 12)	/* Allow packed read */

@@ -358,6 +361,7 @@ struct mmc_host {

	unsigned int		sdio_irqs;
	struct task_struct	*sdio_irq_thread;
	struct delayed_work	sdio_irq_work;
	bool			sdio_irq_pending;
	atomic_t		sdio_irq_thread_abort;

@@ -428,6 +432,7 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host)
}

void sdio_run_irqs(struct mmc_host *host);
void sdio_signal_irq(struct mmc_host *host);

#ifdef CONFIG_REGULATOR
int mmc_regulator_get_ocrmask(struct regulator *supply);