intel_th: msu: Fix single mode with IOMMU

Currently, the pages that are allocated for the single mode of MSC are not
mapped into the device's DMA space, and the code incorrectly uses
*_to_phys() in place of a DMA address. This fails with IOMMU enabled and
is otherwise bad practice.
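For illustration, a minimal sketch of the broken pattern (the function and
names here are hypothetical, not the driver's): a CPU physical address is
handed to the device, which only happens to work when the device's view of
memory is identity-mapped.

#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/types.h>

/*
 * Broken pattern (sketch): program the device with a CPU physical
 * address. Without an IOMMU this works because bus and physical
 * addresses coincide; with an IOMMU the device sees an I/O virtual
 * address space, so page_to_phys() is not a valid DMA address.
 */
static dma_addr_t buf_addr_broken(unsigned int order)
{
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!page)
		return 0;

	/* wrong: a phys_addr_t silently reused as a dma_addr_t */
	return page_to_phys(page);
}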

Fix the single mode buffer allocation to map the pages into the device's
DMA space.
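The shape of the fix, as a minimal sketch (map_single_buf and dev are
illustrative names; the driver's actual code is in the diff below): describe
the physically contiguous buffer with a one-entry sg_table, map it with
dma_map_sg(), and program the device with the IOMMU-aware address from
sg_dma_address().

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

static int map_single_buf(struct device *dev, size_t size,
			  struct sg_table *sgt, dma_addr_t *dma)
{
	unsigned int order = get_order(size);
	struct page *page;
	int nents;

	if (sg_alloc_table(sgt, 1, GFP_KERNEL))
		return -ENOMEM;

	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!page) {
		sg_free_table(sgt);
		return -ENOMEM;
	}

	sg_set_buf(sgt->sgl, page_address(page), size);

	/* dma_map_sg() returns the number of mapped entries, 0 on error */
	nents = dma_map_sg(dev, sgt->sgl, 1, DMA_FROM_DEVICE);
	if (nents != 1) {
		__free_pages(page, order);
		sg_free_table(sgt);
		return -ENOMEM;
	}

	*dma = sg_dma_address(sgt->sgl);	/* device-visible address */
	return 0;
}

Teardown mirrors this: dma_unmap_sg(), sg_free_table(), then free the
pages, which is the order the free-path hunk below follows.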

Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Fixes: ba82664c13 ("intel_th: Add Memory Storage Unit driver")
Cc: stable@vger.kernel.org # v4.4+
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Alexander Shishkin 2019-05-03 11:44:34 +03:00, committed by Greg Kroah-Hartman
Parent 9a552e271e
Commit 4e0eaf239f
1 file changed: 32 insertions, 3 deletions


@@ -84,6 +84,7 @@ struct msc_iter {
  * @reg_base:		register window base address
  * @thdev:		intel_th_device pointer
  * @win_list:		list of windows in multiblock mode
+ * @single_sgt:		single mode buffer
  * @nr_pages:		total number of pages allocated for this buffer
  * @single_sz:		amount of data in single mode
  * @single_wrap:	single mode wrap occurred
@@ -104,6 +105,7 @@ struct msc {
 	struct intel_th_device	*thdev;
 
 	struct list_head	win_list;
+	struct sg_table		single_sgt;
 	unsigned long		nr_pages;
 	unsigned long		single_sz;
 	unsigned int		single_wrap : 1;
@@ -617,22 +619,45 @@ static void intel_th_msc_deactivate(struct intel_th_device *thdev)
  */
 static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
 {
+	unsigned long nr_pages = size >> PAGE_SHIFT;
 	unsigned int order = get_order(size);
 	struct page *page;
+	int ret;
 
 	if (!size)
 		return 0;
 
+	ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
+	if (ret)
+		goto err_out;
+
+	ret = -ENOMEM;
 	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!page)
-		return -ENOMEM;
+		goto err_free_sgt;
 
 	split_page(page, order);
-	msc->nr_pages = size >> PAGE_SHIFT;
+	sg_set_buf(msc->single_sgt.sgl, page_address(page), size);
+
+	ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
+			 DMA_FROM_DEVICE);
+	if (ret < 0)
+		goto err_free_pages;
+
+	msc->nr_pages = nr_pages;
 	msc->base = page_address(page);
-	msc->base_addr = page_to_phys(page);
+	msc->base_addr = sg_dma_address(msc->single_sgt.sgl);
 
 	return 0;
+
+err_free_pages:
+	__free_pages(page, order);
+
+err_free_sgt:
+	sg_free_table(&msc->single_sgt);
+
+err_out:
+	return ret;
 }
 
 /**
@@ -643,6 +668,10 @@ static void msc_buffer_contig_free(struct msc *msc)
 {
 	unsigned long off;
 
+	dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
+		     1, DMA_FROM_DEVICE);
+	sg_free_table(&msc->single_sgt);
+
 	for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
 		struct page *page = virt_to_page(msc->base + off);