/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#ifndef _IOVA_H_
#define _IOVA_H_

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/dma-mapping.h>

/* iova structure */
struct iova {
	struct rb_node	node;
	unsigned long	pfn_hi; /* Highest allocated pfn */
	unsigned long	pfn_lo; /* Lowest allocated pfn */
};

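/*
 * Example (illustrative numbers only): an iova with pfn_lo == 0x100 and
 * pfn_hi == 0x13f describes a contiguous run of 0x40 granule-sized pages;
 * iova_size() below returns pfn_hi - pfn_lo + 1 == 0x40.
 */
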
struct iova_magazine;
struct iova_cpu_rcache;

#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */
#define MAX_GLOBAL_MAGS 32	/* magazines per bin */

struct iova_rcache {
	spinlock_t lock;
	unsigned long depot_size;
	struct iova_magazine *depot[MAX_GLOBAL_MAGS];
	struct iova_cpu_rcache __percpu *cpu_rcaches;
};

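/*
 * Sizing sketch (assumes bin i caches ranges of 2^i pages, for
 * i < IOVA_RANGE_CACHE_MAX_SIZE): with a 4 KiB granule the largest cached
 * range is 2^5 pages == 128 KiB, so only small, common allocation sizes
 * bypass the rbtree allocator.
 */
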
struct iova_domain;

/* Call-Back from IOVA code into IOMMU drivers */
typedef void (*iova_flush_cb)(struct iova_domain *domain);

/* Destructor for per-entry data */
typedef void (*iova_entry_dtor)(unsigned long data);

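/*
 * Illustrative callback sketch (my_domain_flush() and my_free_cookie() are
 * hypothetical driver helpers, not part of this API):
 *
 *	static void my_flush_cb(struct iova_domain *iovad)
 *	{
 *		my_domain_flush(iovad);		// flush the domain's IOTLB
 *	}
 *
 *	static void my_entry_dtor(unsigned long data)
 *	{
 *		my_free_cookie((void *)data);	// release per-entry data
 *	}
 */
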
/* Number of entries per Flush Queue */
#define IOVA_FQ_SIZE	256

/* Timeout (in ms) after which entries are flushed from the Flush-Queue */
#define IOVA_FQ_TIMEOUT	10

/* Flush Queue entry for deferred flushing */
struct iova_fq_entry {
	unsigned long iova_pfn;
	unsigned long pages;
	unsigned long data;
	u64 counter; /* Flush counter when this entry was added */
};

/* Per-CPU Flush Queue structure */
struct iova_fq {
	struct iova_fq_entry entries[IOVA_FQ_SIZE];
	unsigned head, tail;
	spinlock_t lock;
};

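/*
 * Rough life cycle of a flush-queue entry (a sketch of the intent, not a
 * normative description of the implementation): queue_iova() records the
 * freed range together with the current fq_flush_start_cnt in the per-CPU
 * ring; once fq_flush_finish_cnt has passed that counter (i.e. the IOTLB
 * flush covering the entry has completed), the entry's IOVA range can be
 * returned via free_iova_fast() and entry_dtor(data) invoked.
 */
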
/* holds all the iova translations for a domain */
struct iova_domain {
	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
	struct rb_root	rbroot;		/* iova domain rbtree root */
	struct rb_node	*cached_node;	/* Save last allocated node */
	struct rb_node	*cached32_node; /* Save last 32-bit allocated node */
	unsigned long	granule;	/* pfn granularity for this domain */
	unsigned long	start_pfn;	/* Lower limit for this domain */
	unsigned long	dma_32bit_pfn;
	unsigned long	max32_alloc_size; /* Size of last failed allocation */
	struct iova_fq __percpu *fq;	/* Flush Queue */

	atomic64_t	fq_flush_start_cnt;	/* Number of TLB flushes that
						   have been started */

	atomic64_t	fq_flush_finish_cnt;	/* Number of TLB flushes that
						   have been finished */

	struct iova	anchor;		/* rbtree lookup anchor */
	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */

	iova_flush_cb	flush_cb;	/* Call-Back function to flush IOMMU
					   TLBs */

	iova_entry_dtor entry_dtor;	/* IOMMU driver specific destructor for
					   iova entry */

	struct timer_list fq_timer;	/* Timer to regularly empty the
					   flush-queues */
	atomic_t fq_timer_on;		/* 1 when timer is active, 0
					   when not */
};

static inline unsigned long iova_size(struct iova *iova)
{
	return iova->pfn_hi - iova->pfn_lo + 1;
}

static inline unsigned long iova_shift(struct iova_domain *iovad)
{
	return __ffs(iovad->granule);
}

static inline unsigned long iova_mask(struct iova_domain *iovad)
{
	return iovad->granule - 1;
}

static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova & iova_mask(iovad);
}

static inline size_t iova_align(struct iova_domain *iovad, size_t size)
{
	return ALIGN(size, iovad->granule);
}

static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
{
	return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
}

static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova >> iova_shift(iovad);
}

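/*
 * Worked example for the helpers above (assuming a domain whose granule is
 * 4 KiB; the specific numbers are illustrative only):
 *
 *	iova_shift(iovad)		== 12
 *	iova_mask(iovad)		== 0xfff
 *	iova_offset(iovad, 0x12345)	== 0x345
 *	iova_align(iovad, 5000)		== 8192
 *	iova_pfn(iovad, 0x3000)		== 3
 *	iova_dma_addr() of an iova with pfn_lo == 3 is 0x3000
 */
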
#if IS_ENABLED(CONFIG_IOMMU_IOVA)
int iova_cache_get(void);
void iova_cache_put(void);

struct iova *alloc_iova_mem(void);
void free_iova_mem(struct iova *iova);
void free_iova(struct iova_domain *iovad, unsigned long pfn);
void __free_iova(struct iova_domain *iovad, struct iova *iova);
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned);
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
		    unsigned long size);
void queue_iova(struct iova_domain *iovad,
		unsigned long pfn, unsigned long pages,
		unsigned long data);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
			      unsigned long limit_pfn, bool flush_rcache);

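/*
 * Typical fast-path usage (illustrative sketch; the surrounding driver
 * logic, the DMA_BIT_MASK(32) limit and the error handling are assumptions,
 * not requirements of this API):
 *
 *	unsigned long shift = iova_shift(iovad);
 *	unsigned long npages = iova_align(iovad, size) >> shift;
 *	unsigned long pfn;
 *
 *	pfn = alloc_iova_fast(iovad, npages, DMA_BIT_MASK(32) >> shift, true);
 *	if (!pfn)
 *		return DMA_MAPPING_ERROR;
 *	...
 *	free_iova_fast(iovad, pfn, npages);		// synchronous free
 *	// or: queue_iova(iovad, pfn, npages, 0);	// deferred free
 */
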
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
	unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn);

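/*
 * Example (sketch, with illustrative numbers): carving an address window
 * out of the allocator so it is never handed to a device, e.g. a platform
 * interrupt/MSI range:
 *
 *	reserve_iova(iovad, iova_pfn(iovad, 0xfee00000),
 *		     iova_pfn(iovad, 0xfeefffff));
 */
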
bool has_iova_flush_queue(struct iova_domain *iovad);
int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);

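/*
 * Domain setup sketch (illustrative; my_flush_cb()/my_entry_dtor() are the
 * hypothetical callbacks sketched earlier, and falling back to synchronous
 * freeing when no flush queue exists is an assumption about the caller):
 *
 *	init_iova_domain(iovad, SZ_4K, 1);
 *	if (init_iova_flush_queue(iovad, my_flush_cb, my_entry_dtor))
 *		pr_warn("no flush queue, using synchronous IOVA freeing\n");
 *	...
 *	if (has_iova_flush_queue(iovad))
 *		queue_iova(iovad, pfn, npages, 0);
 *	else
 *		free_iova_fast(iovad, pfn, npages);
 */
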
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
	struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);

#else
static inline int iova_cache_get(void)
{
	return -ENOTSUPP;
}

static inline void iova_cache_put(void)
{
}

static inline struct iova *alloc_iova_mem(void)
{
	return NULL;
}

static inline void free_iova_mem(struct iova *iova)
{
}

static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
{
}

static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
{
}

static inline struct iova *alloc_iova(struct iova_domain *iovad,
				      unsigned long size,
				      unsigned long limit_pfn,
				      bool size_aligned)
{
	return NULL;
}

static inline void free_iova_fast(struct iova_domain *iovad,
				  unsigned long pfn,
				  unsigned long size)
{
}

static inline void queue_iova(struct iova_domain *iovad,
			      unsigned long pfn, unsigned long pages,
			      unsigned long data)
{
}

static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
					    unsigned long size,
					    unsigned long limit_pfn,
					    bool flush_rcache)
{
	return 0;
}

static inline struct iova *reserve_iova(struct iova_domain *iovad,
					unsigned long pfn_lo,
					unsigned long pfn_hi)
{
	return NULL;
}

static inline void copy_reserved_iova(struct iova_domain *from,
				      struct iova_domain *to)
{
}

static inline void init_iova_domain(struct iova_domain *iovad,
				    unsigned long granule,
				    unsigned long start_pfn)
{
}

static inline bool has_iova_flush_queue(struct iova_domain *iovad)
{
	return false;
}

static inline int init_iova_flush_queue(struct iova_domain *iovad,
					iova_flush_cb flush_cb,
					iova_entry_dtor entry_dtor)
{
	return -ENODEV;
}

static inline struct iova *find_iova(struct iova_domain *iovad,
				     unsigned long pfn)
{
	return NULL;
}

static inline void put_iova_domain(struct iova_domain *iovad)
{
}

static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
						 struct iova *iova,
						 unsigned long pfn_lo,
						 unsigned long pfn_hi)
{
	return NULL;
}

static inline void free_cpu_cached_iovas(unsigned int cpu,
					 struct iova_domain *iovad)
{
}

#endif
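
/*
 * When CONFIG_IOMMU_IOVA is disabled, the stubs above let callers compile
 * unconditionally; e.g. (illustrative):
 *
 *	if (iova_cache_get())	// -ENOTSUPP without CONFIG_IOMMU_IOVA
 *		return -ENODEV;
 */
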
#endif