dmaengine: idxd: remove fault processing code
Kernel memory is pinned and will not cause faults. Since the driver does not support interrupts for user descriptors, no fault errors are expected to come through the misc interrupt. Remove dead code.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/162630502789.631986.10591230961790023856.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
Parent: 6e7f3ee97b
Commit: 0e96454ca2
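For context, after this removal both completion helpers are only ever called for normal completions; the irq_work_type argument disappears from the call sites. Below is a minimal sketch of the resulting loop in idxd_desc_process(), reconstructed from the final hunk of this patch; the declarations of rc, processed, total and irq_entry are omitted here.

	/*
	 * Drain the ordered work list first, then the pending llist;
	 * repeat while either helper reports descriptors still queued.
	 */
	do {
		rc = irq_process_work_list(irq_entry, &processed, 0);
		total += processed;
		if (rc != 0)
			continue;

		rc = irq_process_pending_llist(irq_entry, &processed, 0);
		total += processed;
	} while (rc != 0);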
@@ -23,10 +23,8 @@ struct idxd_fault {
 };
 
 static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
-				 enum irq_work_type wtype,
 				 int *processed, u64 data);
 static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
-				      enum irq_work_type wtype,
 				      int *processed, u64 data);
 
 static void idxd_device_reinit(struct work_struct *work)
@@ -62,46 +60,6 @@ static void idxd_device_reinit(struct work_struct *work)
 	idxd_device_clear_state(idxd);
 }
 
-static void idxd_device_fault_work(struct work_struct *work)
-{
-	struct idxd_fault *fault = container_of(work, struct idxd_fault, work);
-	struct idxd_irq_entry *ie;
-	int i;
-	int processed;
-	int irqcnt = fault->idxd->num_wq_irqs + 1;
-
-	for (i = 1; i < irqcnt; i++) {
-		ie = &fault->idxd->irq_entries[i];
-		irq_process_work_list(ie, IRQ_WORK_PROCESS_FAULT,
-				      &processed, fault->addr);
-		if (processed)
-			break;
-
-		irq_process_pending_llist(ie, IRQ_WORK_PROCESS_FAULT,
-					  &processed, fault->addr);
-		if (processed)
-			break;
-	}
-
-	kfree(fault);
-}
-
-static int idxd_device_schedule_fault_process(struct idxd_device *idxd,
-					      u64 fault_addr)
-{
-	struct idxd_fault *fault;
-
-	fault = kmalloc(sizeof(*fault), GFP_ATOMIC);
-	if (!fault)
-		return -ENOMEM;
-
-	fault->addr = fault_addr;
-	fault->idxd = idxd;
-	INIT_WORK(&fault->work, idxd_device_fault_work);
-	queue_work(idxd->wq, &fault->work);
-	return 0;
-}
-
 static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
 {
 	struct device *dev = &idxd->pdev->dev;
@@ -168,15 +126,6 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
 	if (!err)
 		return 0;
 
-	/*
-	 * This case should rarely happen and typically is due to software
-	 * programming error by the driver.
-	 */
-	if (idxd->sw_err.valid &&
-	    idxd->sw_err.desc_valid &&
-	    idxd->sw_err.fault_addr)
-		idxd_device_schedule_fault_process(idxd, idxd->sw_err.fault_addr);
-
 	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
 	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
 		idxd->state = IDXD_DEV_HALTED;
@@ -228,43 +177,19 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
 	return IRQ_HANDLED;
 }
 
-static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr)
-{
-	/*
-	 * Completion address can be bad as well. Check fault address match for descriptor
-	 * and completion address.
-	 */
-	if ((u64)desc->hw == fault_addr || (u64)desc->completion == fault_addr) {
-		struct idxd_device *idxd = desc->wq->idxd;
-		struct device *dev = &idxd->pdev->dev;
-
-		dev_warn(dev, "desc with fault address: %#llx\n", fault_addr);
-		return true;
-	}
-
-	return false;
-}
-
 static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
-				      enum irq_work_type wtype,
 				      int *processed, u64 data)
 {
 	struct idxd_desc *desc, *t;
 	struct llist_node *head;
 	int queued = 0;
 	unsigned long flags;
-	enum idxd_complete_type reason;
 
 	*processed = 0;
 	head = llist_del_all(&irq_entry->pending_llist);
 	if (!head)
 		goto out;
 
-	if (wtype == IRQ_WORK_NORMAL)
-		reason = IDXD_COMPLETE_NORMAL;
-	else
-		reason = IDXD_COMPLETE_DEV_FAIL;
-
 	llist_for_each_entry_safe(desc, t, head, llnode) {
 		u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
 
@@ -275,9 +200,7 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
 			continue;
 		}
 
-		if (unlikely(status != DSA_COMP_SUCCESS))
-			match_fault(desc, data);
-		complete_desc(desc, reason);
+		complete_desc(desc, IDXD_COMPLETE_NORMAL);
 		(*processed)++;
 	} else {
 		spin_lock_irqsave(&irq_entry->list_lock, flags);
@@ -293,20 +216,14 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
 }
 
 static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
-				 enum irq_work_type wtype,
 				 int *processed, u64 data)
 {
 	int queued = 0;
 	unsigned long flags;
 	LIST_HEAD(flist);
 	struct idxd_desc *desc, *n;
-	enum idxd_complete_type reason;
 
 	*processed = 0;
-	if (wtype == IRQ_WORK_NORMAL)
-		reason = IDXD_COMPLETE_NORMAL;
-	else
-		reason = IDXD_COMPLETE_DEV_FAIL;
 
 	/*
 	 * This lock protects list corruption from access of list outside of the irq handler
@@ -338,9 +255,7 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
 			continue;
 		}
 
-		if (unlikely(status != DSA_COMP_SUCCESS))
-			match_fault(desc, data);
-		complete_desc(desc, reason);
+		complete_desc(desc, IDXD_COMPLETE_NORMAL);
 	}
 
 	return queued;
@@ -370,14 +285,12 @@ static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
 	 * 5. Repeat until no more descriptors.
 	 */
 	do {
-		rc = irq_process_work_list(irq_entry, IRQ_WORK_NORMAL,
-					   &processed, 0);
+		rc = irq_process_work_list(irq_entry, &processed, 0);
 		total += processed;
 		if (rc != 0)
 			continue;
 
-		rc = irq_process_pending_llist(irq_entry, IRQ_WORK_NORMAL,
-					       &processed, 0);
+		rc = irq_process_pending_llist(irq_entry, &processed, 0);
 		total += processed;
 	} while (rc != 0);
 