irq_poll: make blk-iopoll available outside the block layer
The new name is irq_poll as iopoll is already taken. Better suggestions welcome.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Parent: a2dbb7b56f
Commit: 511cbce2ff
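For drivers, the change is a mechanical rename: struct blk_iopoll becomes struct irq_poll, the blk_iopoll_* calls become irq_poll_*, and BLOCK_IOPOLL_SOFTIRQ becomes IRQ_POLL_SOFTIRQ. A minimal sketch of the resulting calling convention, modeled on the be2iscsi and ipr conversions below; "my_dev", "my_dev_isr", "my_dev_poll", "my_dev_process_completions" and MY_DEV_POLL_WEIGHT are illustrative names, not part of this commit:

#include <linux/interrupt.h>
#include <linux/irq_poll.h>

#define MY_DEV_POLL_WEIGHT	64	/* per-instance completion budget */

struct my_dev {
	struct irq_poll iopoll;
	/* ... device state ... */
};

/* Assumed helper: reaps up to @budget completions, returns the count. */
static int my_dev_process_completions(struct my_dev *dev, int budget);

/* Poll callback, invoked from IRQ_POLL_SOFTIRQ context. */
static int my_dev_poll(struct irq_poll *iop, int budget)
{
	struct my_dev *dev = container_of(iop, struct my_dev, iopoll);
	int done = my_dev_process_completions(dev, budget);

	/* Fewer completions than budget: queue drained, leave polled mode. */
	if (done < budget)
		irq_poll_complete(iop);
	return done;
}

/* Hard interrupt handler: hand completion processing to the softirq. */
static irqreturn_t my_dev_isr(int irq, void *data)
{
	struct my_dev *dev = data;

	/* sched_prep returns 0 when we won the race to schedule this iop. */
	if (!irq_poll_sched_prep(&dev->iopoll))
		irq_poll_sched(&dev->iopoll);
	return IRQ_HANDLED;
}

/* At probe time:
 *	irq_poll_init(&dev->iopoll, MY_DEV_POLL_WEIGHT, my_dev_poll);
 *	irq_poll_enable(&dev->iopoll);
 */

The diff below applies exactly this substitution across the be2iscsi and ipr drivers and moves the implementation from block/blk-iopoll.c to lib/irq_poll.c behind a new CONFIG_IRQ_POLL option.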
@@ -90,7 +90,7 @@ BLOCK_SOFTIRQ: Do all of the following:
	from being initiated from tasks that might run on the CPU to
	be de-jittered. (It is OK to force this CPU offline and then
	bring it back online before you start your application.)
-BLOCK_IOPOLL_SOFTIRQ: Do all of the following:
+IRQ_POLL_SOFTIRQ: Do all of the following:
 1.	Force block-device interrupts onto some other CPU.
 2.	Initiate any block I/O and block-I/O polling on other CPUs.
 3.	Once your application has started, prevent CPU-hotplug operations
@@ -5,7 +5,7 @@
 obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
			blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
			blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
-			blk-iopoll.o blk-lib.o blk-mq.o blk-mq-tag.o \
+			blk-lib.o blk-mq.o blk-mq-tag.o \
			blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \
			genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
			partitions/
@@ -1102,6 +1102,7 @@ config SCSI_IPR
 	tristate "IBM Power Linux RAID adapter support"
 	depends on PCI && SCSI && ATA
 	select FW_LOADER
+	select IRQ_POLL
 	---help---
 	  This driver supports the IBM Power Linux family RAID adapters.
 	  This includes IBM pSeries 5712, 5703, 5709, and 570A, as well
@@ -3,6 +3,7 @@ config BE2ISCSI
 	depends on PCI && SCSI && NET
 	select SCSI_ISCSI_ATTRS
 	select ISCSI_BOOT_SYSFS
+	select IRQ_POLL

 	help
 	This driver implements the iSCSI functionality for Emulex
@@ -20,7 +20,7 @@

 #include <linux/pci.h>
 #include <linux/if_vlan.h>
-#include <linux/blk-iopoll.h>
+#include <linux/irq_poll.h>
 #define FW_VER_LEN	32
 #define MCC_Q_LEN	128
 #define MCC_CQ_LEN	256
@@ -101,7 +101,7 @@ struct be_eq_obj {
 	struct beiscsi_hba *phba;
 	struct be_queue_info *cq;
 	struct work_struct work_cqs; /* Work Item */
-	struct blk_iopoll iopoll;
+	struct irq_poll iopoll;
 };

 struct be_mcc_obj {
@@ -1292,9 +1292,9 @@ static void beiscsi_flush_cq(struct beiscsi_hba *phba)

 	for (i = 0; i < phba->num_cpus; i++) {
 		pbe_eq = &phwi_context->be_eq[i];
-		blk_iopoll_disable(&pbe_eq->iopoll);
+		irq_poll_disable(&pbe_eq->iopoll);
 		beiscsi_process_cq(pbe_eq);
-		blk_iopoll_enable(&pbe_eq->iopoll);
+		irq_poll_enable(&pbe_eq->iopoll);
 	}
 }
@@ -910,8 +910,8 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
 	num_eq_processed = 0;
 	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
 				& EQE_VALID_MASK) {
-		if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
-			blk_iopoll_sched(&pbe_eq->iopoll);
+		if (!irq_poll_sched_prep(&pbe_eq->iopoll))
+			irq_poll_sched(&pbe_eq->iopoll);

 		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
 		queue_tail_inc(eq);
@@ -972,8 +972,8 @@ static irqreturn_t be_isr(int irq, void *dev_id)
 			spin_unlock_irqrestore(&phba->isr_lock, flags);
 			num_mcceq_processed++;
 		} else {
-			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
-				blk_iopoll_sched(&pbe_eq->iopoll);
+			if (!irq_poll_sched_prep(&pbe_eq->iopoll))
+				irq_poll_sched(&pbe_eq->iopoll);
 			num_ioeq_processed++;
 		}
 		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
@@ -2295,7 +2295,7 @@ void beiscsi_process_all_cqs(struct work_struct *work)
 		hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
 }

-static int be_iopoll(struct blk_iopoll *iop, int budget)
+static int be_iopoll(struct irq_poll *iop, int budget)
 {
 	unsigned int ret;
 	struct beiscsi_hba *phba;
@@ -2306,7 +2306,7 @@ static int be_iopoll(struct irq_poll *iop, int budget)
 	pbe_eq->cq_count += ret;
 	if (ret < budget) {
 		phba = pbe_eq->phba;
-		blk_iopoll_complete(iop);
+		irq_poll_complete(iop);
 		beiscsi_log(phba, KERN_INFO,
 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
 			    "BM_%d : rearm pbe_eq->q.id =%d\n",
@@ -5293,7 +5293,7 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba,

 	for (i = 0; i < phba->num_cpus; i++) {
 		pbe_eq = &phwi_context->be_eq[i];
-		blk_iopoll_disable(&pbe_eq->iopoll);
+		irq_poll_disable(&pbe_eq->iopoll);
 	}

 	if (unload_state == BEISCSI_CLEAN_UNLOAD) {
@@ -5579,9 +5579,9 @@ static void beiscsi_eeh_resume(struct pci_dev *pdev)

 	for (i = 0; i < phba->num_cpus; i++) {
 		pbe_eq = &phwi_context->be_eq[i];
-		blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
+		irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget,
 				be_iopoll);
-		blk_iopoll_enable(&pbe_eq->iopoll);
+		irq_poll_enable(&pbe_eq->iopoll);
 	}

 	i = (phba->msix_enabled) ? i : 0;
@@ -5752,9 +5752,9 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,

 	for (i = 0; i < phba->num_cpus; i++) {
 		pbe_eq = &phwi_context->be_eq[i];
-		blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
+		irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget,
 				be_iopoll);
-		blk_iopoll_enable(&pbe_eq->iopoll);
+		irq_poll_enable(&pbe_eq->iopoll);
 	}

 	i = (phba->msix_enabled) ? i : 0;
@@ -5795,7 +5795,7 @@ free_blkenbld:
 	destroy_workqueue(phba->wq);
 	for (i = 0; i < phba->num_cpus; i++) {
 		pbe_eq = &phwi_context->be_eq[i];
-		blk_iopoll_disable(&pbe_eq->iopoll);
+		irq_poll_disable(&pbe_eq->iopoll);
 	}
 free_twq:
 	beiscsi_clean_port(phba);
@@ -3638,7 +3638,7 @@ static struct device_attribute ipr_ioa_reset_attr = {
 	.store = ipr_store_reset_adapter
 };

-static int ipr_iopoll(struct blk_iopoll *iop, int budget);
+static int ipr_iopoll(struct irq_poll *iop, int budget);
 /**
  * ipr_show_iopoll_weight - Show ipr polling mode
  * @dev:	class device struct
@@ -3681,34 +3681,34 @@ static ssize_t ipr_store_iopoll_weight(struct device *dev,
 	int i;

 	if (!ioa_cfg->sis64) {
-		dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
+		dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
 		return -EINVAL;
 	}
 	if (kstrtoul(buf, 10, &user_iopoll_weight))
 		return -EINVAL;

 	if (user_iopoll_weight > 256) {
-		dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n");
+		dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n");
 		return -EINVAL;
 	}

 	if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
-		dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n");
+		dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n");
 		return strlen(buf);
 	}

 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 		for (i = 1; i < ioa_cfg->hrrq_num; i++)
-			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
+			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
 	}

 	spin_lock_irqsave(shost->host_lock, lock_flags);
 	ioa_cfg->iopoll_weight = user_iopoll_weight;
 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
-			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
+			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
 					ioa_cfg->iopoll_weight, ipr_iopoll);
-			blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
+			irq_poll_enable(&ioa_cfg->hrrq[i].iopoll);
 		}
 	}
 	spin_unlock_irqrestore(shost->host_lock, lock_flags);
@@ -5569,7 +5569,7 @@ static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
 	return num_hrrq;
 }

-static int ipr_iopoll(struct blk_iopoll *iop, int budget)
+static int ipr_iopoll(struct irq_poll *iop, int budget)
 {
 	struct ipr_ioa_cfg *ioa_cfg;
 	struct ipr_hrr_queue *hrrq;
@@ -5585,7 +5585,7 @@ static int ipr_iopoll(struct irq_poll *iop, int budget)
 	completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);

 	if (completed_ops < budget)
-		blk_iopoll_complete(iop);
+		irq_poll_complete(iop);
 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

 	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
@@ -5693,8 +5693,8 @@ static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
 		       hrrq->toggle_bit) {
-			if (!blk_iopoll_sched_prep(&hrrq->iopoll))
-				blk_iopoll_sched(&hrrq->iopoll);
+			if (!irq_poll_sched_prep(&hrrq->iopoll))
+				irq_poll_sched(&hrrq->iopoll);
 			spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
 			return IRQ_HANDLED;
 		}
@@ -10405,9 +10405,9 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)

 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
-			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
+			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
 					ioa_cfg->iopoll_weight, ipr_iopoll);
-			blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
+			irq_poll_enable(&ioa_cfg->hrrq[i].iopoll);
 		}
 	}

@@ -10436,7 +10436,7 @@ static void ipr_shutdown(struct pci_dev *pdev)
 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 		ioa_cfg->iopoll_weight = 0;
 		for (i = 1; i < ioa_cfg->hrrq_num; i++)
-			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
+			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
 	}

 	while (ioa_cfg->in_reset_reload) {
@@ -32,7 +32,7 @@
 #include <linux/libata.h>
 #include <linux/list.h>
 #include <linux/kref.h>
-#include <linux/blk-iopoll.h>
+#include <linux/irq_poll.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>

@@ -517,7 +517,7 @@ struct ipr_hrr_queue {
 	u8 allow_cmds:1;
 	u8 removing_ioa:1;

-	struct blk_iopoll iopoll;
+	struct irq_poll iopoll;
 };

 /* Command packet structure */
@@ -1,46 +0,0 @@
-#ifndef BLK_IOPOLL_H
-#define BLK_IOPOLL_H
-
-struct blk_iopoll;
-typedef int (blk_iopoll_fn)(struct blk_iopoll *, int);
-
-struct blk_iopoll {
-	struct list_head list;
-	unsigned long state;
-	unsigned long data;
-	int weight;
-	int max;
-	blk_iopoll_fn *poll;
-};
-
-enum {
-	IOPOLL_F_SCHED		= 0,
-	IOPOLL_F_DISABLE	= 1,
-};
-
-/*
- * Returns 0 if we successfully set the IOPOLL_F_SCHED bit, indicating
- * that we were the first to acquire this iop for scheduling. If this iop
- * is currently disabled, return "failure".
- */
-static inline int blk_iopoll_sched_prep(struct blk_iopoll *iop)
-{
-	if (!test_bit(IOPOLL_F_DISABLE, &iop->state))
-		return test_and_set_bit(IOPOLL_F_SCHED, &iop->state);
-
-	return 1;
-}
-
-static inline int blk_iopoll_disable_pending(struct blk_iopoll *iop)
-{
-	return test_bit(IOPOLL_F_DISABLE, &iop->state);
-}
-
-extern void blk_iopoll_sched(struct blk_iopoll *);
-extern void blk_iopoll_init(struct blk_iopoll *, int, blk_iopoll_fn *);
-extern void blk_iopoll_complete(struct blk_iopoll *);
-extern void __blk_iopoll_complete(struct blk_iopoll *);
-extern void blk_iopoll_enable(struct blk_iopoll *);
-extern void blk_iopoll_disable(struct blk_iopoll *);
-
-#endif
@@ -412,7 +412,7 @@ enum
 	NET_TX_SOFTIRQ,
 	NET_RX_SOFTIRQ,
 	BLOCK_SOFTIRQ,
-	BLOCK_IOPOLL_SOFTIRQ,
+	IRQ_POLL_SOFTIRQ,
 	TASKLET_SOFTIRQ,
 	SCHED_SOFTIRQ,
 	HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
@@ -0,0 +1,46 @@
+#ifndef IRQ_POLL_H
+#define IRQ_POLL_H
+
+struct irq_poll;
+typedef int (irq_poll_fn)(struct irq_poll *, int);
+
+struct irq_poll {
+	struct list_head list;
+	unsigned long state;
+	unsigned long data;
+	int weight;
+	int max;
+	irq_poll_fn *poll;
+};
+
+enum {
+	IRQ_POLL_F_SCHED	= 0,
+	IRQ_POLL_F_DISABLE	= 1,
+};
+
+/*
+ * Returns 0 if we successfully set the IRQ_POLL_F_SCHED bit, indicating
+ * that we were the first to acquire this iop for scheduling. If this iop
+ * is currently disabled, return "failure".
+ */
+static inline int irq_poll_sched_prep(struct irq_poll *iop)
+{
+	if (!test_bit(IRQ_POLL_F_DISABLE, &iop->state))
+		return test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state);
+
+	return 1;
+}
+
+static inline int irq_poll_disable_pending(struct irq_poll *iop)
+{
+	return test_bit(IRQ_POLL_F_DISABLE, &iop->state);
+}
+
+extern void irq_poll_sched(struct irq_poll *);
+extern void irq_poll_init(struct irq_poll *, int, irq_poll_fn *);
+extern void irq_poll_complete(struct irq_poll *);
+extern void __irq_poll_complete(struct irq_poll *);
+extern void irq_poll_enable(struct irq_poll *);
+extern void irq_poll_disable(struct irq_poll *);
+
+#endif
@@ -15,7 +15,7 @@ struct softirq_action;
			softirq_name(NET_TX)		\
			softirq_name(NET_RX)		\
			softirq_name(BLOCK)		\
-			softirq_name(BLOCK_IOPOLL)	\
+			softirq_name(IRQ_POLL)		\
			softirq_name(TASKLET)		\
			softirq_name(SCHED)		\
			softirq_name(HRTIMER)		\
@@ -475,6 +475,11 @@ config DDR
 	  information. This data is useful for drivers handling
 	  DDR SDRAM controllers.

+config IRQ_POLL
+	bool "IRQ polling library"
+	help
+	  Helper library to poll interrupt mitigation using polling.
+
 config MPILIB
 	tristate
 	select CLZ_TAB
@@ -164,6 +164,7 @@ obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o

 obj-$(CONFIG_SG_SPLIT) += sg_split.o
 obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
+obj-$(CONFIG_IRQ_POLL) += irq_poll.o

 libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
	       fdt_empty_tree.o
@@ -6,84 +6,81 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/bio.h>
-#include <linux/blkdev.h>
 #include <linux/interrupt.h>
 #include <linux/cpu.h>
-#include <linux/blk-iopoll.h>
+#include <linux/irq_poll.h>
 #include <linux/delay.h>

-#include "blk.h"
-
-static unsigned int blk_iopoll_budget __read_mostly = 256;
+static unsigned int irq_poll_budget __read_mostly = 256;

 static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);

 /**
- * blk_iopoll_sched - Schedule a run of the iopoll handler
+ * irq_poll_sched - Schedule a run of the iopoll handler
  * @iop:      The parent iopoll structure
  *
  * Description:
- *     Add this blk_iopoll structure to the pending poll list and trigger the
+ *     Add this irq_poll structure to the pending poll list and trigger the
  *     raise of the blk iopoll softirq. The driver must already have gotten a
- *     successful return from blk_iopoll_sched_prep() before calling this.
+ *     successful return from irq_poll_sched_prep() before calling this.
  **/
-void blk_iopoll_sched(struct blk_iopoll *iop)
+void irq_poll_sched(struct irq_poll *iop)
 {
 	unsigned long flags;

 	local_irq_save(flags);
 	list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
-	__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
+	__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
 	local_irq_restore(flags);
 }
-EXPORT_SYMBOL(blk_iopoll_sched);
+EXPORT_SYMBOL(irq_poll_sched);

 /**
- * __blk_iopoll_complete - Mark this @iop as un-polled again
+ * __irq_poll_complete - Mark this @iop as un-polled again
  * @iop:      The parent iopoll structure
  *
  * Description:
- *     See blk_iopoll_complete(). This function must be called with interrupts
+ *     See irq_poll_complete(). This function must be called with interrupts
  *     disabled.
  **/
-void __blk_iopoll_complete(struct blk_iopoll *iop)
+void __irq_poll_complete(struct irq_poll *iop)
 {
 	list_del(&iop->list);
 	smp_mb__before_atomic();
-	clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
+	clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);
 }
-EXPORT_SYMBOL(__blk_iopoll_complete);
+EXPORT_SYMBOL(__irq_poll_complete);

 /**
- * blk_iopoll_complete - Mark this @iop as un-polled again
+ * irq_poll_complete - Mark this @iop as un-polled again
  * @iop:      The parent iopoll structure
  *
  * Description:
  *     If a driver consumes less than the assigned budget in its run of the
  *     iopoll handler, it'll end the polled mode by calling this function. The
- *     iopoll handler will not be invoked again before blk_iopoll_sched_prep()
+ *     iopoll handler will not be invoked again before irq_poll_sched_prep()
  *     is called.
  **/
-void blk_iopoll_complete(struct blk_iopoll *iop)
+void irq_poll_complete(struct irq_poll *iop)
 {
 	unsigned long flags;

 	local_irq_save(flags);
-	__blk_iopoll_complete(iop);
+	__irq_poll_complete(iop);
 	local_irq_restore(flags);
 }
-EXPORT_SYMBOL(blk_iopoll_complete);
+EXPORT_SYMBOL(irq_poll_complete);

-static void blk_iopoll_softirq(struct softirq_action *h)
+static void irq_poll_softirq(struct softirq_action *h)
 {
 	struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
-	int rearm = 0, budget = blk_iopoll_budget;
+	int rearm = 0, budget = irq_poll_budget;
 	unsigned long start_time = jiffies;

 	local_irq_disable();

 	while (!list_empty(list)) {
-		struct blk_iopoll *iop;
+		struct irq_poll *iop;
 		int work, weight;

 		/*
@@ -101,11 +98,11 @@ static void irq_poll_softirq(struct softirq_action *h)
 		 * entries to the tail of this list, and only ->poll()
 		 * calls can remove this head entry from the list.
 		 */
-		iop = list_entry(list->next, struct blk_iopoll, list);
+		iop = list_entry(list->next, struct irq_poll, list);

 		weight = iop->weight;
 		work = 0;
-		if (test_bit(IOPOLL_F_SCHED, &iop->state))
+		if (test_bit(IRQ_POLL_F_SCHED, &iop->state))
 			work = iop->poll(iop, weight);

 		budget -= work;
@@ -121,72 +118,72 @@ static void irq_poll_softirq(struct softirq_action *h)
 		 * move the instance around on the list at-will.
 		 */
 		if (work >= weight) {
-			if (blk_iopoll_disable_pending(iop))
-				__blk_iopoll_complete(iop);
+			if (irq_poll_disable_pending(iop))
+				__irq_poll_complete(iop);
 			else
 				list_move_tail(&iop->list, list);
 		}
 	}

 	if (rearm)
-		__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
+		__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);

 	local_irq_enable();
 }

 /**
- * blk_iopoll_disable - Disable iopoll on this @iop
+ * irq_poll_disable - Disable iopoll on this @iop
  * @iop:      The parent iopoll structure
  *
  * Description:
  *     Disable io polling and wait for any pending callbacks to have completed.
  **/
-void blk_iopoll_disable(struct blk_iopoll *iop)
+void irq_poll_disable(struct irq_poll *iop)
 {
-	set_bit(IOPOLL_F_DISABLE, &iop->state);
-	while (test_and_set_bit(IOPOLL_F_SCHED, &iop->state))
+	set_bit(IRQ_POLL_F_DISABLE, &iop->state);
+	while (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
 		msleep(1);
-	clear_bit(IOPOLL_F_DISABLE, &iop->state);
+	clear_bit(IRQ_POLL_F_DISABLE, &iop->state);
 }
-EXPORT_SYMBOL(blk_iopoll_disable);
+EXPORT_SYMBOL(irq_poll_disable);

 /**
- * blk_iopoll_enable - Enable iopoll on this @iop
+ * irq_poll_enable - Enable iopoll on this @iop
  * @iop:      The parent iopoll structure
  *
  * Description:
  *     Enable iopoll on this @iop. Note that the handler run will not be
  *     scheduled, it will only mark it as active.
  **/
-void blk_iopoll_enable(struct blk_iopoll *iop)
+void irq_poll_enable(struct irq_poll *iop)
 {
-	BUG_ON(!test_bit(IOPOLL_F_SCHED, &iop->state));
+	BUG_ON(!test_bit(IRQ_POLL_F_SCHED, &iop->state));
 	smp_mb__before_atomic();
-	clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
+	clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);
 }
-EXPORT_SYMBOL(blk_iopoll_enable);
+EXPORT_SYMBOL(irq_poll_enable);

 /**
- * blk_iopoll_init - Initialize this @iop
+ * irq_poll_init - Initialize this @iop
  * @iop:      The parent iopoll structure
  * @weight:   The default weight (or command completion budget)
  * @poll_fn:  The handler to invoke
  *
  * Description:
- *     Initialize this blk_iopoll structure. Before being actively used, the
- *     driver must call blk_iopoll_enable().
+ *     Initialize this irq_poll structure. Before being actively used, the
+ *     driver must call irq_poll_enable().
  **/
-void blk_iopoll_init(struct blk_iopoll *iop, int weight, blk_iopoll_fn *poll_fn)
+void irq_poll_init(struct irq_poll *iop, int weight, irq_poll_fn *poll_fn)
 {
 	memset(iop, 0, sizeof(*iop));
 	INIT_LIST_HEAD(&iop->list);
 	iop->weight = weight;
 	iop->poll = poll_fn;
-	set_bit(IOPOLL_F_SCHED, &iop->state);
+	set_bit(IRQ_POLL_F_SCHED, &iop->state);
 }
-EXPORT_SYMBOL(blk_iopoll_init);
+EXPORT_SYMBOL(irq_poll_init);

-static int blk_iopoll_cpu_notify(struct notifier_block *self,
+static int irq_poll_cpu_notify(struct notifier_block *self,
 				 unsigned long action, void *hcpu)
 {
 	/*
@@ -199,26 +196,26 @@ static int irq_poll_cpu_notify(struct notifier_block *self,
 		local_irq_disable();
 		list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
 				 this_cpu_ptr(&blk_cpu_iopoll));
-		__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
+		__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
 		local_irq_enable();
 	}

 	return NOTIFY_OK;
 }

-static struct notifier_block blk_iopoll_cpu_notifier = {
-	.notifier_call	= blk_iopoll_cpu_notify,
+static struct notifier_block irq_poll_cpu_notifier = {
+	.notifier_call	= irq_poll_cpu_notify,
 };

-static __init int blk_iopoll_setup(void)
+static __init int irq_poll_setup(void)
 {
 	int i;

 	for_each_possible_cpu(i)
 		INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));

-	open_softirq(BLOCK_IOPOLL_SOFTIRQ, blk_iopoll_softirq);
-	register_hotcpu_notifier(&blk_iopoll_cpu_notifier);
+	open_softirq(IRQ_POLL_SOFTIRQ, irq_poll_softirq);
+	register_hotcpu_notifier(&irq_poll_cpu_notifier);
 	return 0;
 }
-subsys_initcall(blk_iopoll_setup);
+subsys_initcall(irq_poll_setup);
@@ -3746,7 +3746,7 @@ static const struct flag flags[] = {
 	{ "NET_TX_SOFTIRQ", 2 },
 	{ "NET_RX_SOFTIRQ", 3 },
 	{ "BLOCK_SOFTIRQ", 4 },
-	{ "BLOCK_IOPOLL_SOFTIRQ", 5 },
+	{ "IRQ_POLL_SOFTIRQ", 5 },
 	{ "TASKLET_SOFTIRQ", 6 },
 	{ "SCHED_SOFTIRQ", 7 },
 	{ "HRTIMER_SOFTIRQ", 8 },
@@ -209,7 +209,7 @@ static const struct flag flags[] = {
 	{ "NET_TX_SOFTIRQ", 2 },
 	{ "NET_RX_SOFTIRQ", 3 },
 	{ "BLOCK_SOFTIRQ", 4 },
-	{ "BLOCK_IOPOLL_SOFTIRQ", 5 },
+	{ "IRQ_POLL_SOFTIRQ", 5 },
 	{ "TASKLET_SOFTIRQ", 6 },
 	{ "SCHED_SOFTIRQ", 7 },
 	{ "HRTIMER_SOFTIRQ", 8 },