
Merge tag 'mtd/fixes-for-4.17-rc5' of git://git.infradead.org/linux-mtd

Pull mtd fixes from Boris Brezillon:

 - make nand_soft_waitrdy() wait tWB before polling the status REG

 - fix BCH write in the Marvell NAND controller driver

 - fix wrong picosec to msec conversion in the Marvell NAND controller
   driver

 - fix DMA handling in the TI OneNAND controller driver

* tag 'mtd/fixes-for-4.17-rc5' of git://git.infradead.org/linux-mtd:
  mtd: rawnand: Make sure we wait tWB before polling the STATUS reg
  mtd: rawnand: marvell: fix command xtype in BCH write hook
  mtd: rawnand: marvell: pass ms delay to wait_op
  mtd: onenand: omap2: Disable DMA for HIGHMEM buffers
Linus Torvalds 2018-05-11 09:46:14 -07:00
Parent: ca30093dd7 3057fcef38
Commit: e03dc5d3d4
3 changed files with 52 additions and 70 deletions


@@ -375,56 +375,42 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
{
struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
struct onenand_chip *this = mtd->priv;
dma_addr_t dma_src, dma_dst;
int bram_offset;
struct device *dev = &c->pdev->dev;
void *buf = (void *)buffer;
dma_addr_t dma_src, dma_dst;
int bram_offset, err;
size_t xtra;
int ret;
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
/*
* If the buffer address is not DMA-able, len is not long enough to make
* DMA transfers profitable or panic_write() may be in an interrupt
* context fallback to PIO mode.
*/
if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
count < 384 || in_interrupt() || oops_in_progress )
goto out_copy;
/* panic_write() may be in an interrupt context */
if (in_interrupt() || oops_in_progress)
goto out_copy;
if (buf >= high_memory) {
struct page *p1;
if (((size_t)buf & PAGE_MASK) !=
((size_t)(buf + count - 1) & PAGE_MASK))
goto out_copy;
p1 = vmalloc_to_page(buf);
if (!p1)
goto out_copy;
buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
}
xtra = count & 3;
if (xtra) {
count -= xtra;
memcpy(buf + count, this->base + bram_offset + count, xtra);
}
dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
dma_src = c->phys_base + bram_offset;
dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
dev_err(&c->pdev->dev,
"Couldn't DMA map a %d byte buffer\n",
count);
if (dma_mapping_error(dev, dma_dst)) {
dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count);
goto out_copy;
}
ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);
if (!err)
return 0;
if (ret) {
dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
goto out_copy;
}
return 0;
dev_err(dev, "timeout waiting for DMA\n");
out_copy:
memcpy(buf, this->base + bram_offset, count);
@@ -437,49 +423,34 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
{
struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
struct onenand_chip *this = mtd->priv;
dma_addr_t dma_src, dma_dst;
int bram_offset;
struct device *dev = &c->pdev->dev;
void *buf = (void *)buffer;
int ret;
dma_addr_t dma_src, dma_dst;
int bram_offset, err;
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
/*
* If the buffer address is not DMA-able, len is not long enough to make
* DMA transfers profitable or panic_write() may be in an interrupt
* context fallback to PIO mode.
*/
if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
count < 384 || in_interrupt() || oops_in_progress )
goto out_copy;
/* panic_write() may be in an interrupt context */
if (in_interrupt() || oops_in_progress)
goto out_copy;
if (buf >= high_memory) {
struct page *p1;
if (((size_t)buf & PAGE_MASK) !=
((size_t)(buf + count - 1) & PAGE_MASK))
goto out_copy;
p1 = vmalloc_to_page(buf);
if (!p1)
goto out_copy;
buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
}
dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE);
dma_dst = c->phys_base + bram_offset;
if (dma_mapping_error(&c->pdev->dev, dma_src)) {
dev_err(&c->pdev->dev,
"Couldn't DMA map a %d byte buffer\n",
count);
return -1;
}
ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
if (ret) {
dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
if (dma_mapping_error(dev, dma_src)) {
dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count);
goto out_copy;
}
return 0;
err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
dma_unmap_page(dev, dma_src, count, DMA_TO_DEVICE);
if (!err)
return 0;
dev_err(dev, "timeout waiting for DMA\n");
out_copy:
memcpy(this->base + bram_offset, buf, count);
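The two omap2-onenand hunks above replace the old vmalloc/HIGHMEM page-lookup path with a single up-front check that falls back to PIO (a plain memcpy) whenever DMA is unsafe or not worthwhile. The minimal userspace sketch below models that decision; use_dma() and MIN_DMA_LEN are illustrative names only, and in the driver the first and last conditions are virt_addr_valid() and in_interrupt() || oops_in_progress.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_DMA_LEN 384	/* below this, DMA setup costs more than a memcpy */

/*
 * Illustrative model of the new check: DMA is only attempted for word-aligned,
 * directly mapped (lowmem) buffers of a useful size, outside atomic context
 * (panic_write() may run from an interrupt).
 */
static bool use_dma(bool lowmem_buf, uintptr_t buf, int bram_offset,
		    int count, bool atomic_ctx)
{
	if (!lowmem_buf || (bram_offset & 3) || (buf & 3) ||
	    count < MIN_DMA_LEN || atomic_ctx)
		return false;	/* fall back to memcpy() (PIO) */

	return true;
}

int main(void)
{
	printf("aligned lowmem, 2048 bytes: %s\n",
	       use_dma(true, 0x1000, 0, 2048, false) ? "DMA" : "PIO");
	printf("vmalloc/HIGHMEM buffer:     %s\n",
	       use_dma(false, 0x1000, 0, 2048, false) ? "DMA" : "PIO");
	return 0;
}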


@@ -1074,7 +1074,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
return ret;
ret = marvell_nfc_wait_op(chip,
chip->data_interface.timings.sdr.tPROG_max);
PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
return ret;
}
@@ -1408,6 +1408,7 @@ marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
u32 xtype;
int ret;
struct marvell_nfc_op nfc_op = {
.ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD,
@@ -1423,7 +1424,12 @@ marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
* last naked write.
*/
if (chunk == 0) {
nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_WRITE_DISPATCH) |
if (lt->nchunks == 1)
xtype = XTYPE_MONOLITHIC_RW;
else
xtype = XTYPE_WRITE_DISPATCH;
nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(xtype) |
NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
NDCB0_CMD1(NAND_CMD_SEQIN);
nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page);
@@ -1494,7 +1500,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd,
}
ret = marvell_nfc_wait_op(chip,
chip->data_interface.timings.sdr.tPROG_max);
PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
marvell_nfc_disable_hw_ecc(chip);
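Both marvell_nfc_wait_op() call sites above now wrap tPROG_max, a picosecond value taken from the SDR timings, in PSEC_TO_MSEC(), since the wait helper expects a millisecond timeout. The stand-alone sketch below shows the conversion; the macro body and the 600 us example value are assumptions for illustration, not copied from the driver.

#include <stdio.h>

/* Round up so a short but non-zero timing never truncates to a 0 ms timeout. */
#define PSEC_TO_MSEC(x) (((x) + 1000000000ULL - 1) / 1000000000ULL)

int main(void)
{
	/* Assumed example: tPROG_max = 600 us, expressed in picoseconds. */
	unsigned long long tPROG_max_ps = 600000000ULL;

	/* Before the fix the raw picosecond value was used as a ms timeout. */
	printf("raw: %llu \"ms\", converted: %llu ms\n",
	       tPROG_max_ps, PSEC_TO_MSEC(tPROG_max_ps));
	return 0;
}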


@@ -706,12 +706,17 @@ static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
*/
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
{
const struct nand_sdr_timings *timings;
u8 status = 0;
int ret;
if (!chip->exec_op)
return -ENOTSUPP;
/* Wait tWB before polling the STATUS reg. */
timings = nand_get_sdr_timings(&chip->data_interface);
ndelay(PSEC_TO_NSEC(timings->tWB_max));
ret = nand_status_op(chip, NULL);
if (ret)
return ret;
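The tWB_max value used above is likewise a picosecond figure from the SDR timings, converted to nanoseconds for ndelay() so the chip has time to assert busy before the first status poll. A small sketch of that arithmetic follows; the macro body and the 100 ns example value are assumptions for illustration, not taken from the patch.

#include <stdio.h>

/* Round up so the driver never waits less than the datasheet's tWB_max. */
#define PSEC_TO_NSEC(x) (((x) + 1000 - 1) / 1000)

int main(void)
{
	/* Assumed example: tWB_max = 100 ns, expressed in picoseconds. */
	unsigned long tWB_max_ps = 100000;

	printf("ndelay(%lu) before the first STATUS read\n",
	       PSEC_TO_NSEC(tWB_max_ps));
	return 0;
}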