Merge branch 'for-joerg/arm-smmu/updates' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into arm/smmu

Joerg Roedel 2018-07-27 14:49:40 +02:00
Parents: d72e90f33a b63b3439b8
Commit: d81dc82e0f
4 changed files with 36 additions and 15 deletions

View file

@@ -24,6 +24,7 @@
 #include <linux/acpi_iort.h>
 #include <linux/bitfield.h>
 #include <linux/bitops.h>
+#include <linux/crash_dump.h>
 #include <linux/delay.h>
 #include <linux/dma-iommu.h>
 #include <linux/err.h>
@@ -366,7 +367,7 @@
 #define MSI_IOVA_BASE 0x8000000
 #define MSI_IOVA_LENGTH 0x100000
 
-static bool disable_bypass;
+static bool disable_bypass = 1;
 module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
 MODULE_PARM_DESC(disable_bypass,
 	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
@@ -1301,6 +1302,7 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
 	/* Sync our overflow flag, as we believe we're up to speed */
 	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
+	writel(q->cons, q->cons_reg);
 	return IRQ_HANDLED;
 }
@@ -2211,8 +2213,12 @@ static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
 	reg &= ~clr;
 	reg |= set;
 	writel_relaxed(reg | GBPA_UPDATE, gbpa);
-	return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
-					  1, ARM_SMMU_POLL_TIMEOUT_US);
+	ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
+					 1, ARM_SMMU_POLL_TIMEOUT_US);
+
+	if (ret)
+		dev_err(smmu->dev, "GBPA not responding to update\n");
+	return ret;
 }
 
 static void arm_smmu_free_msis(void *data)
@@ -2392,8 +2398,15 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
 	/* Clear CR0 and sync (disables SMMU and queue processing) */
 	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
-	if (reg & CR0_SMMUEN)
+	if (reg & CR0_SMMUEN) {
+		if (is_kdump_kernel()) {
+			arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
+			arm_smmu_device_disable(smmu);
+			return -EBUSY;
+		}
+
 		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
+	}
 
 	ret = arm_smmu_device_disable(smmu);
 	if (ret)
@@ -2491,10 +2504,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
 		enables |= CR0_SMMUEN;
 	} else {
 		ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
-		if (ret) {
-			dev_err(smmu->dev, "GBPA not responding to update\n");
+		if (ret)
 			return ret;
-		}
 	}
 
 	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
 				      ARM_SMMU_CR0ACK);

View file

@@ -2103,12 +2103,16 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 	if (err)
 		return err;
 
-	if (smmu->version == ARM_SMMU_V2 &&
-	    smmu->num_context_banks != smmu->num_context_irqs) {
-		dev_err(dev,
-			"found only %d context interrupt(s) but %d required\n",
-			smmu->num_context_irqs, smmu->num_context_banks);
-		return -ENODEV;
+	if (smmu->version == ARM_SMMU_V2) {
+		if (smmu->num_context_banks > smmu->num_context_irqs) {
+			dev_err(dev,
+				"found only %d context irq(s) but %d required\n",
+				smmu->num_context_irqs, smmu->num_context_banks);
+			return -ENODEV;
+		}
+
+		/* Ignore superfluous interrupts */
+		smmu->num_context_irqs = smmu->num_context_banks;
 	}
 
 	for (i = 0; i < smmu->num_global_irqs; ++i) {

View file

@@ -192,6 +192,7 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
 {
 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 	struct device *dev = cfg->iommu_dev;
+	phys_addr_t phys;
 	dma_addr_t dma;
 	size_t size = ARM_V7S_TABLE_SIZE(lvl);
 	void *table = NULL;
@@ -200,6 +201,10 @@
 		table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size));
 	else if (lvl == 2)
 		table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA);
+	phys = virt_to_phys(table);
+	if (phys != (arm_v7s_iopte)phys)
+		/* Doesn't fit in PTE */
+		goto out_free;
 	if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
 		dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, dma))
@@ -209,7 +214,7 @@
 		 * address directly, so if the DMA layer suggests otherwise by
 		 * translating or truncating them, that bodes very badly...
 		 */
-		if (dma != virt_to_phys(table))
+		if (dma != phys)
 			goto out_unmap;
 	}
 	kmemleak_ignore(table);

View file

@@ -237,7 +237,8 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
 	void *pages;
 
 	VM_BUG_ON((gfp & __GFP_HIGHMEM));
-	p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
+	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
+			     gfp | __GFP_ZERO, order);
 	if (!p)
 		return NULL;