scsi: hpsa: use pci_alloc_irq_vectors and automatic irq affinity

This patch converts over hpsa to use the pci_alloc_irq_vectors including
the PCI_IRQ_AFFINITY flag that automatically assigns spread out irq
affinity to the I/O queues.

It also cleans up the per-ctrl interrupt state due to the use of the
pci_irq_vector and pci_free_irq_vectors helpers that don't need to know
the exact irq type.  Additionally it changes a little oddity in the
existing code that was using different array indices into the per-vector
arrays depending on whether a controller is using a single INTx or
single MSI irq.

[mkp: fixed typo]

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Don Brace <don.brace@microsemi.com>
Tested-by: Don Brace <don.brace@microsemi.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
This commit is contained in:
Christoph Hellwig 2016-11-09 10:42:22 -08:00 committed by Martin K. Petersen
Parent 4861ee15f2
Commit bc2bb1543e
2 changed files with 52 additions and 97 deletions

View file

@@ -1001,7 +1001,7 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
{ {
if (likely(h->transMethod & CFGTBL_Trans_Performant)) { if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
if (unlikely(!h->msix_vector)) if (unlikely(!h->msix_vectors))
return; return;
if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
c->Header.ReplyQueue = c->Header.ReplyQueue =
@@ -5618,7 +5618,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
sh->sg_tablesize = h->maxsgentries; sh->sg_tablesize = h->maxsgentries;
sh->transportt = hpsa_sas_transport_template; sh->transportt = hpsa_sas_transport_template;
sh->hostdata[0] = (unsigned long) h; sh->hostdata[0] = (unsigned long) h;
sh->irq = h->intr[h->intr_mode]; sh->irq = pci_irq_vector(h->pdev, 0);
sh->unique_id = sh->irq; sh->unique_id = sh->irq;
h->scsi_host = sh; h->scsi_host = sh;
@@ -7651,67 +7651,41 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
static void hpsa_disable_interrupt_mode(struct ctlr_info *h) static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
{ {
if (h->msix_vector) { pci_free_irq_vectors(h->pdev);
if (h->pdev->msix_enabled) h->msix_vectors = 0;
pci_disable_msix(h->pdev);
h->msix_vector = 0;
} else if (h->msi_vector) {
if (h->pdev->msi_enabled)
pci_disable_msi(h->pdev);
h->msi_vector = 0;
}
} }
/* If MSI/MSI-X is supported by the kernel we will try to enable it on /* If MSI/MSI-X is supported by the kernel we will try to enable it on
* controllers that are capable. If not, we use legacy INTx mode. * controllers that are capable. If not, we use legacy INTx mode.
*/ */
static void hpsa_interrupt_mode(struct ctlr_info *h) static int hpsa_interrupt_mode(struct ctlr_info *h)
{ {
#ifdef CONFIG_PCI_MSI unsigned int flags = PCI_IRQ_LEGACY;
int err, i; int ret;
struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
for (i = 0; i < MAX_REPLY_QUEUES; i++) {
hpsa_msix_entries[i].vector = 0;
hpsa_msix_entries[i].entry = i;
}
/* Some boards advertise MSI but don't really support it */ /* Some boards advertise MSI but don't really support it */
if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || switch (h->board_id) {
(h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) case 0x40700E11:
goto default_int_mode; case 0x40800E11:
if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { case 0x40820E11:
dev_info(&h->pdev->dev, "MSI-X capable controller\n"); case 0x40830E11:
h->msix_vector = MAX_REPLY_QUEUES; break;
if (h->msix_vector > num_online_cpus()) default:
h->msix_vector = num_online_cpus(); ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
err = pci_enable_msix_range(h->pdev, hpsa_msix_entries, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
1, h->msix_vector); if (ret > 0) {
if (err < 0) { h->msix_vectors = ret;
dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err); return 0;
h->msix_vector = 0;
goto single_msi_mode;
} else if (err < h->msix_vector) {
dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
"available\n", err);
} }
h->msix_vector = err;
for (i = 0; i < h->msix_vector; i++) flags |= PCI_IRQ_MSI;
h->intr[i] = hpsa_msix_entries[i].vector; break;
return;
} }
single_msi_mode:
if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
dev_info(&h->pdev->dev, "MSI capable controller\n"); if (ret < 0)
if (!pci_enable_msi(h->pdev)) return ret;
h->msi_vector = 1; return 0;
else
dev_warn(&h->pdev->dev, "MSI init failed\n");
}
default_int_mode:
#endif /* CONFIG_PCI_MSI */
/* if we get here we're going to use the default interrupt mode */
h->intr[h->intr_mode] = h->pdev->irq;
} }
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
@@ -8067,7 +8041,9 @@ static int hpsa_pci_init(struct ctlr_info *h)
pci_set_master(h->pdev); pci_set_master(h->pdev);
hpsa_interrupt_mode(h); err = hpsa_interrupt_mode(h);
if (err)
goto clean1;
err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
if (err) if (err)
goto clean2; /* intmode+region, pci */ goto clean2; /* intmode+region, pci */
@@ -8103,6 +8079,7 @@ clean3: /* vaddr, intmode+region, pci */
h->vaddr = NULL; h->vaddr = NULL;
clean2: /* intmode+region, pci */ clean2: /* intmode+region, pci */
hpsa_disable_interrupt_mode(h); hpsa_disable_interrupt_mode(h);
clean1:
/* /*
* call pci_disable_device before pci_release_regions per * call pci_disable_device before pci_release_regions per
* Documentation/PCI/pci.txt * Documentation/PCI/pci.txt
@@ -8236,34 +8213,20 @@ clean_up:
return -ENOMEM; return -ENOMEM;
} }
static void hpsa_irq_affinity_hints(struct ctlr_info *h)
{
int i, cpu;
cpu = cpumask_first(cpu_online_mask);
for (i = 0; i < h->msix_vector; i++) {
irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
cpu = cpumask_next(cpu, cpu_online_mask);
}
}
/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */ /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
static void hpsa_free_irqs(struct ctlr_info *h) static void hpsa_free_irqs(struct ctlr_info *h)
{ {
int i; int i;
if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) { if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
/* Single reply queue, only one irq to free */ /* Single reply queue, only one irq to free */
i = h->intr_mode; free_irq(pci_irq_vector(h->pdev, 0), &h->q[i]);
irq_set_affinity_hint(h->intr[i], NULL); h->q[h->intr_mode] = 0;
free_irq(h->intr[i], &h->q[i]);
h->q[i] = 0;
return; return;
} }
for (i = 0; i < h->msix_vector; i++) { for (i = 0; i < h->msix_vectors; i++) {
irq_set_affinity_hint(h->intr[i], NULL); free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
free_irq(h->intr[i], &h->q[i]);
h->q[i] = 0; h->q[i] = 0;
} }
for (; i < MAX_REPLY_QUEUES; i++) for (; i < MAX_REPLY_QUEUES; i++)
@@ -8284,11 +8247,11 @@ static int hpsa_request_irqs(struct ctlr_info *h,
for (i = 0; i < MAX_REPLY_QUEUES; i++) for (i = 0; i < MAX_REPLY_QUEUES; i++)
h->q[i] = (u8) i; h->q[i] = (u8) i;
if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) { if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
/* If performant mode and MSI-X, use multiple reply queues */ /* If performant mode and MSI-X, use multiple reply queues */
for (i = 0; i < h->msix_vector; i++) { for (i = 0; i < h->msix_vectors; i++) {
sprintf(h->intrname[i], "%s-msix%d", h->devname, i); sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
rc = request_irq(h->intr[i], msixhandler, rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
0, h->intrname[i], 0, h->intrname[i],
&h->q[i]); &h->q[i]);
if (rc) { if (rc) {
@@ -8296,9 +8259,9 @@ static int hpsa_request_irqs(struct ctlr_info *h,
dev_err(&h->pdev->dev, dev_err(&h->pdev->dev,
"failed to get irq %d for %s\n", "failed to get irq %d for %s\n",
h->intr[i], h->devname); pci_irq_vector(h->pdev, i), h->devname);
for (j = 0; j < i; j++) { for (j = 0; j < i; j++) {
free_irq(h->intr[j], &h->q[j]); free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
h->q[j] = 0; h->q[j] = 0;
} }
for (; j < MAX_REPLY_QUEUES; j++) for (; j < MAX_REPLY_QUEUES; j++)
@@ -8306,33 +8269,27 @@ static int hpsa_request_irqs(struct ctlr_info *h,
return rc; return rc;
} }
} }
hpsa_irq_affinity_hints(h);
} else { } else {
/* Use single reply pool */ /* Use single reply pool */
if (h->msix_vector > 0 || h->msi_vector) { if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
if (h->msix_vector) sprintf(h->intrname[0], "%s-msi%s", h->devname,
sprintf(h->intrname[h->intr_mode], h->msix_vectors ? "x" : "");
"%s-msix", h->devname); rc = request_irq(pci_irq_vector(h->pdev, 0),
else
sprintf(h->intrname[h->intr_mode],
"%s-msi", h->devname);
rc = request_irq(h->intr[h->intr_mode],
msixhandler, 0, msixhandler, 0,
h->intrname[h->intr_mode], h->intrname[0],
&h->q[h->intr_mode]); &h->q[h->intr_mode]);
} else { } else {
sprintf(h->intrname[h->intr_mode], sprintf(h->intrname[h->intr_mode],
"%s-intx", h->devname); "%s-intx", h->devname);
rc = request_irq(h->intr[h->intr_mode], rc = request_irq(pci_irq_vector(h->pdev, 0),
intxhandler, IRQF_SHARED, intxhandler, IRQF_SHARED,
h->intrname[h->intr_mode], h->intrname[0],
&h->q[h->intr_mode]); &h->q[h->intr_mode]);
} }
irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
} }
if (rc) { if (rc) {
dev_err(&h->pdev->dev, "failed to get irq %d for %s\n", dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
h->intr[h->intr_mode], h->devname); pci_irq_vector(h->pdev, 0), h->devname);
hpsa_free_irqs(h); hpsa_free_irqs(h);
return -ENODEV; return -ENODEV;
} }
@@ -9518,7 +9475,7 @@ static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
return rc; return rc;
} }
h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1; h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
hpsa_get_max_perf_mode_cmds(h); hpsa_get_max_perf_mode_cmds(h);
/* Performant mode ring buffer and supporting data structures */ /* Performant mode ring buffer and supporting data structures */
h->reply_queue_size = h->max_commands * sizeof(u64); h->reply_queue_size = h->max_commands * sizeof(u64);

View file

@@ -175,9 +175,7 @@ struct ctlr_info {
# define DOORBELL_INT 1 # define DOORBELL_INT 1
# define SIMPLE_MODE_INT 2 # define SIMPLE_MODE_INT 2
# define MEMQ_MODE_INT 3 # define MEMQ_MODE_INT 3
unsigned int intr[MAX_REPLY_QUEUES]; unsigned int msix_vectors;
unsigned int msix_vector;
unsigned int msi_vector;
int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */ int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
struct access_method access; struct access_method access;
@@ -464,7 +462,7 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
unsigned long register_value = FIFO_EMPTY; unsigned long register_value = FIFO_EMPTY;
/* msi auto clears the interrupt pending bit. */ /* msi auto clears the interrupt pending bit. */
if (unlikely(!(h->msi_vector || h->msix_vector))) { if (unlikely(!(h->pdev->msi_enabled || h->msix_vectors))) {
/* flush the controller write of the reply queue by reading /* flush the controller write of the reply queue by reading
* outbound doorbell status register. * outbound doorbell status register.
*/ */