- Remove unused V2 grant table support.
- Note that Konrad is xen-blkback/front maintainer.
- Add 'xen_nopv' option to disable PV extensions for x86 HVM guests.
- Misc. minor cleanups.
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.12 (GNU/Linux)

iQEcBAABAgAGBQJT4N57AAoJEFxbo/MsZsTRtfsH/2GxmloKqMZqusnz5PR/x2hd
M3aXtDw36rxv3hEciIs/NX6obMenRdofDKXVMafnU/gw+EOBQQQ2n/nDqcLOSN+0
hVyrKHgByYQKaeAhAbrGiGIkuoe5JAURsaggx/YlYSx3hkE0za1XmcUjkPFEVP3l
UeXXJ40H9hHgESsDwd1UQ08YNtvwdaWVHJAjio3jSxCBAHnAPhCqPhKVy/6LOr+U
T6HgYsX9HLQRYBy34OOYfKBFnGOJpstnZJd3hMTYtrF4xaTl/Cnf+YxKxv/XJtGD
YHukhQaEyws7RaDAXK1Uty1hlqgzDoVcFz1TixJIrF6YaO2QhhjMa/oYkbBW09s=
=Ojrz
-----END PGP SIGNATURE-----

Merge tag 'stable/for-linus-3.17-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull Xen updates from David Vrabel:

 - remove unused V2 grant table support

 - note that Konrad is xen-blkback/front maintainer

 - add 'xen_nopv' option to disable PV extensions for x86 HVM guests

 - misc minor cleanups

* tag 'stable/for-linus-3.17-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen-pciback: Document the 'quirks' sysfs file
  xen/pciback: Fix error return code in xen_pcibk_attach()
  xen/events: drop negativity check of unsigned parameter
  xen/setup: Remove Identity Map Debug Message
  xen/events/fifo: remove an unnecessary use of BM()
  xen/events/fifo: ensure all bitops are properly aligned even on x86
  xen/events/fifo: reset control block and local HEADs on resume
  xen/arm: use BUG_ON
  xen/grant-table: remove support for V2 tables
  x86/xen: safely map and unmap grant frames when in atomic context
  MAINTAINERS: Make me the Xen block subsystem (front and back) maintainer
  xen: Introduce 'xen_nopv' to disable PV extensions for HVM guests.
@@ -0,0 +1,13 @@
What:		/sys/bus/pci/drivers/pciback/quirks
Date:		Oct 2011
KernelVersion:	3.1
Contact:	xen-devel@lists.xenproject.org
Description:
		If the permissive attribute is set, then writing a string in
		the format of DDDD:BB:DD.F-REG:SIZE:MASK will allow the guest
		to write and read from the PCI device. That is Domain:Bus:
		Device.Function-Register:Size:Mask (Domain is optional).
		For example:
		#echo 00:19.0-E0:2:FF > /sys/bus/pci/drivers/pciback/quirks
		will allow the guest to read and write to the configuration
		register 0xE0.
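
Editorial note: the documented interface above is driven from the shell, but the same quirk string can be written from C. The following is a minimal userspace sketch added for illustration only (it is not part of this series); the path and the BB:DD.F-REG:SIZE:MASK format come from the documentation above.

    /* Minimal sketch: add a pciback quirk from C instead of the shell. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        const char *quirk = "00:19.0-E0:2:FF";
        int fd = open("/sys/bus/pci/drivers/pciback/quirks", O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (write(fd, quirk, strlen(quirk)) < 0) {
            perror("write");
            close(fd);
            return 1;
        }
        close(fd);
        return 0;
    }
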
@@ -3745,6 +3745,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			Disables the ticketlock slowpath using Xen PV
			optimizations.

	xen_nopv	[X86]
			Disables the PV optimizations forcing the HVM guest to
			run as generic HVM guest with no PV drivers.

	xirc2ps_cs=	[NET,PCMCIA]
			Format:
			<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]

@@ -10066,6 +10066,13 @@ S:	Supported
F:	arch/x86/pci/*xen*
F:	drivers/pci/*xen*

XEN BLOCK SUBSYSTEM
M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
S:	Supported
F:	drivers/block/xen-blkback/*
F:	drivers/block/xen*

XEN SWIOTLB SUBSYSTEM
M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)

@@ -181,8 +181,7 @@ static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
struct sched_shutdown r = { .reason = SHUTDOWN_reboot };
int rc;
rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
if (rc)
BUG();
BUG_ON(rc);
}

static void xen_power_off(void)

@@ -190,8 +189,7 @@ static void xen_power_off(void)
struct sched_shutdown r = { .reason = SHUTDOWN_poweroff };
int rc;
rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
if (rc)
BUG();
BUG_ON(rc);
}

static int xen_cpu_notification(struct notifier_block *self,

@@ -45,11 +45,9 @@ void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
return;
}

int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
unsigned long max_nr_gframes,
grant_status_t **__shared)
int arch_gnttab_init(unsigned long nr_shared)
{
return -ENOSYS;
return 0;
}

int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)

@@ -1828,8 +1828,19 @@ static void __init xen_hvm_guest_init(void)
xen_hvm_init_mmu_ops();
}

static bool xen_nopv = false;
static __init int xen_parse_nopv(char *arg)
{
xen_nopv = true;
return 0;
}
early_param("xen_nopv", xen_parse_nopv);

static uint32_t __init xen_hvm_platform(void)
{
if (xen_nopv)
return 0;

if (xen_pv_domain())
return 0;

@@ -1838,6 +1849,8 @@ static uint32_t __init xen_hvm_platform(void)

bool xen_hvm_need_lapic(void)
{
if (xen_nopv)
return false;
if (xen_pv_domain())
return false;
if (!xen_hvm_domain())

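Editorial note: the mechanism in the hunks above is simple: an early_param() handler sets a flag before platform detection runs, and xen_hvm_platform() then returns 0 ("no Xen platform") when the flag is set, so no PV setup is attempted and the guest stays a plain HVM guest. Below is a standalone sketch of that gating logic for illustration only; the names and the returned base leaf are hypothetical and the kernel's early_param machinery is replaced by ordinary argv parsing.

    /* Standalone illustration of the xen_nopv gate: a flag parsed early
     * short-circuits platform detection. Names are illustrative only. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool nopv;

    static void parse_early_args(int argc, char **argv)
    {
        for (int i = 1; i < argc; i++)
            if (strcmp(argv[i], "xen_nopv") == 0)
                nopv = true;            /* mirrors xen_parse_nopv() */
    }

    static unsigned int detect_platform(void)
    {
        if (nopv)
            return 0;                   /* pretend no hypervisor was found */
        return 0x40000000;              /* illustrative "found" base leaf */
    }

    int main(int argc, char **argv)
    {
        parse_early_args(argc, argv);
        printf("platform base: 0x%x\n", detect_platform());
        return 0;
    }
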
@@ -49,7 +49,7 @@
static struct gnttab_vm_area {
struct vm_struct *area;
pte_t **ptes;
} gnttab_shared_vm_area, gnttab_status_vm_area;
} gnttab_shared_vm_area;

int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
unsigned long max_nr_gframes,

@@ -73,43 +73,16 @@ int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
return 0;
}

int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
unsigned long max_nr_gframes,
grant_status_t **__shared)
{
grant_status_t *shared = *__shared;
unsigned long addr;
unsigned long i;

if (shared == NULL)
*__shared = shared = gnttab_status_vm_area.area->addr;

addr = (unsigned long)shared;

for (i = 0; i < nr_gframes; i++) {
set_pte_at(&init_mm, addr, gnttab_status_vm_area.ptes[i],
mfn_pte(frames[i], PAGE_KERNEL));
addr += PAGE_SIZE;
}

return 0;
}

void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
{
pte_t **ptes;
unsigned long addr;
unsigned long i;

if (shared == gnttab_status_vm_area.area->addr)
ptes = gnttab_status_vm_area.ptes;
else
ptes = gnttab_shared_vm_area.ptes;

addr = (unsigned long)shared;

for (i = 0; i < nr_gframes; i++) {
set_pte_at(&init_mm, addr, ptes[i], __pte(0));
set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i],
__pte(0));
addr += PAGE_SIZE;
}
}

@@ -129,35 +102,12 @@ static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames)
return 0;
}

static void arch_gnttab_vfree(struct gnttab_vm_area *area)
int arch_gnttab_init(unsigned long nr_shared)
{
free_vm_area(area->area);
kfree(area->ptes);
}

int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
{
int ret;

if (!xen_pv_domain())
return 0;

ret = arch_gnttab_valloc(&gnttab_shared_vm_area, nr_shared);
if (ret < 0)
return ret;

/*
* Always allocate the space for the status frames in case
* we're migrated to a host with V2 support.
*/
ret = arch_gnttab_valloc(&gnttab_status_vm_area, nr_status);
if (ret < 0)
goto err;

return 0;
err:
arch_gnttab_vfree(&gnttab_shared_vm_area);
return -ENOMEM;
return arch_gnttab_valloc(&gnttab_shared_vm_area, nr_shared);
}

#ifdef CONFIG_XEN_PVH

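Editorial note: the x86 grant-table rework above ties in with the "safely map and unmap grant frames when in atomic context" change in the shortlog: the vm area and the array of PTE pointers are allocated once in arch_gnttab_init() (a context that may sleep), so the later arch_gnttab_map_shared()/arch_gnttab_unmap() calls only walk the preallocated ptes[] with set_pte_at() and never allocate. The following is a rough standalone sketch of that "allocate early, only fill later" pattern; the types and functions are hypothetical stand-ins, not kernel code.

    /* Rough sketch of the pattern: allocate slots up front (may sleep),
     * so the later map step only writes into them (safe in atomic context). */
    #include <stdio.h>
    #include <stdlib.h>

    struct frame_area {
        unsigned long *slots;       /* stands in for the preallocated pte pointers */
        unsigned int nr_slots;
    };

    static int area_init(struct frame_area *a, unsigned int nr)   /* "may sleep" phase */
    {
        a->slots = calloc(nr, sizeof(*a->slots));
        if (!a->slots)
            return -1;
        a->nr_slots = nr;
        return 0;
    }

    static void area_map(struct frame_area *a, const unsigned long *frames,
                         unsigned int nr)                          /* "atomic" phase */
    {
        for (unsigned int i = 0; i < nr && i < a->nr_slots; i++)
            a->slots[i] = frames[i];   /* no allocation here, only writes */
    }

    int main(void)
    {
        struct frame_area area;
        unsigned long frames[] = { 0x1000, 0x2000, 0x3000 };

        if (area_init(&area, 3))
            return 1;
        area_map(&area, frames, 3);
        printf("slot 0 -> %#lx\n", area.slots[0]);
        free(area.slots);
        return 0;
    }
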
@@ -841,10 +841,9 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
pfn = ALIGN(pfn, P2M_PER_PAGE);
}

if (!WARN((pfn - pfn_s) != (pfn_e - pfn_s),
WARN((pfn - pfn_s) != (pfn_e - pfn_s),
"Identity mapping failed. We are %ld short of 1-1 mappings!\n",
(pfn_e - pfn_s) - (pfn - pfn_s)))
printk(KERN_DEBUG "1-1 mapping on %lx->%lx\n", pfn_s, pfn);
(pfn_e - pfn_s) - (pfn - pfn_s));

return pfn - pfn_s;
}

@@ -246,7 +246,7 @@ static void xen_irq_info_cleanup(struct irq_info *info)
*/
unsigned int evtchn_from_irq(unsigned irq)
{
if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)))
return 0;

return info_for_irq(irq)->evtchn;

@@ -67,10 +67,9 @@ static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly;
static unsigned event_array_pages __read_mostly;

/*
* sync_set_bit() and friends must be unsigned long aligned on non-x86
* platforms.
* sync_set_bit() and friends must be unsigned long aligned.
*/
#if !defined(CONFIG_X86) && BITS_PER_LONG > 32
#if BITS_PER_LONG > 32

#define BM(w) (unsigned long *)((unsigned long)w & ~0x7UL)
#define EVTCHN_FIFO_BIT(b, w) \

@@ -100,6 +99,25 @@ static unsigned evtchn_fifo_nr_channels(void)
return event_array_pages * EVENT_WORDS_PER_PAGE;
}

static int init_control_block(int cpu,
struct evtchn_fifo_control_block *control_block)
{
struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
struct evtchn_init_control init_control;
unsigned int i;

/* Reset the control block and the local HEADs. */
clear_page(control_block);
for (i = 0; i < EVTCHN_FIFO_MAX_QUEUES; i++)
q->head[i] = 0;

init_control.control_gfn = virt_to_mfn(control_block);
init_control.offset = 0;
init_control.vcpu = cpu;

return HYPERVISOR_event_channel_op(EVTCHNOP_init_control, &init_control);
}

static void free_unused_array_pages(void)
{
unsigned i;

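Editorial note: init_control_block() above zeroes the control page and the per-queue local HEADs before handing the page to Xen, and, as the later hunks show, the same helper is now used both when a control block is first allocated and on resume. A toy sketch of why the cached heads must be reset together with the ring follows; it uses hypothetical names and is only meant to illustrate the stale-cache problem the commit addresses.

    /* Toy model: a consumer caches a head index for a ring; if the ring is
     * reset (e.g. across suspend/resume) the cached head must be reset too,
     * otherwise the consumer keeps reading from a stale position. */
    #include <stdio.h>
    #include <string.h>

    #define QUEUES 4

    struct ring  { unsigned int head[QUEUES]; };   /* shared, owned by "Xen" */
    struct local { unsigned int head[QUEUES]; };   /* per-cpu cached copies  */

    static void reset_control_block(struct ring *r, struct local *l)
    {
        memset(r, 0, sizeof(*r));          /* clear_page() analogue */
        for (int i = 0; i < QUEUES; i++)
            l->head[i] = 0;                /* reset the local HEADs as well */
    }

    int main(void)
    {
        struct ring r;
        struct local l;

        reset_control_block(&r, &l);       /* boot */
        l.head[0] = 7;                     /* some events were consumed */
        reset_control_block(&r, &l);       /* resume: both sides back to 0 */
        printf("local head after resume: %u\n", l.head[0]);
        return 0;
    }
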
@@ -312,7 +330,7 @@ static void evtchn_fifo_handle_events(unsigned cpu)
ready = xchg(&control_block->ready, 0);

while (ready) {
q = find_first_bit(BM(&ready), EVTCHN_FIFO_MAX_QUEUES);
q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
consume_one_event(cpu, control_block, q, &ready);
ready |= xchg(&control_block->ready, 0);
}

@@ -324,7 +342,6 @@ static void evtchn_fifo_resume(void)

for_each_possible_cpu(cpu) {
void *control_block = per_cpu(cpu_control_block, cpu);
struct evtchn_init_control init_control;
int ret;

if (!control_block)

@@ -341,12 +358,7 @@ static void evtchn_fifo_resume(void)
continue;
}

init_control.control_gfn = virt_to_mfn(control_block);
init_control.offset = 0;
init_control.vcpu = cpu;

ret = HYPERVISOR_event_channel_op(EVTCHNOP_init_control,
&init_control);
ret = init_control_block(cpu, control_block);
if (ret < 0)
BUG();
}

@@ -374,30 +386,25 @@ static const struct evtchn_ops evtchn_ops_fifo = {
.resume = evtchn_fifo_resume,
};

static int evtchn_fifo_init_control_block(unsigned cpu)
static int evtchn_fifo_alloc_control_block(unsigned cpu)
{
struct page *control_block = NULL;
struct evtchn_init_control init_control;
void *control_block = NULL;
int ret = -ENOMEM;

control_block = alloc_page(GFP_KERNEL|__GFP_ZERO);
control_block = (void *)__get_free_page(GFP_KERNEL);
if (control_block == NULL)
goto error;

init_control.control_gfn = virt_to_mfn(page_address(control_block));
init_control.offset = 0;
init_control.vcpu = cpu;

ret = HYPERVISOR_event_channel_op(EVTCHNOP_init_control, &init_control);
ret = init_control_block(cpu, control_block);
if (ret < 0)
goto error;

per_cpu(cpu_control_block, cpu) = page_address(control_block);
per_cpu(cpu_control_block, cpu) = control_block;

return 0;

error:
__free_page(control_block);
free_page((unsigned long)control_block);
return ret;
}

@@ -411,7 +418,7 @@ static int evtchn_fifo_cpu_notification(struct notifier_block *self,
switch (action) {
case CPU_UP_PREPARE:
if (!per_cpu(cpu_control_block, cpu))
ret = evtchn_fifo_init_control_block(cpu);
ret = evtchn_fifo_alloc_control_block(cpu);
break;
default:
break;

@@ -428,7 +435,7 @@ int __init xen_evtchn_fifo_init(void)
int cpu = get_cpu();
int ret;

ret = evtchn_fifo_init_control_block(cpu);
ret = evtchn_fifo_alloc_control_block(cpu);
if (ret < 0)
goto out;

@@ -69,7 +69,6 @@ struct grant_frames xen_auto_xlat_grant_frames;

static union {
struct grant_entry_v1 *v1;
union grant_entry_v2 *v2;
void *addr;
} gnttab_shared;

@@ -120,36 +119,10 @@ struct gnttab_ops {
* by bit operations.
*/
int (*query_foreign_access)(grant_ref_t ref);
/*
* Grant a domain to access a range of bytes within the page referred by
* an available grant entry. Ref parameter is reference of a grant entry
* which will be sub-page accessed, domid is id of grantee domain, frame
* is frame address of subpage grant, flags is grant type and flag
* information, page_off is offset of the range of bytes, and length is
* length of bytes to be accessed.
*/
void (*update_subpage_entry)(grant_ref_t ref, domid_t domid,
unsigned long frame, int flags,
unsigned page_off, unsigned length);
/*
* Redirect an available grant entry on domain A to another grant
* reference of domain B, then allow domain C to use grant reference
* of domain B transitively. Ref parameter is an available grant entry
* reference on domain A, domid is id of domain C which accesses grant
* entry transitively, flags is grant type and flag information,
* trans_domid is id of domain B whose grant entry is finally accessed
* transitively, trans_gref is grant entry transitive reference of
* domain B.
*/
void (*update_trans_entry)(grant_ref_t ref, domid_t domid, int flags,
domid_t trans_domid, grant_ref_t trans_gref);
};

static struct gnttab_ops *gnttab_interface;

/*This reflects status of grant entries, so act as a global value*/
static grant_status_t *grstatus;

static int grant_table_version;
static int grefs_per_grant_frame;

@@ -231,7 +204,7 @@ static void put_free_entry(grant_ref_t ref)
}

/*
* Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
* Following applies to gnttab_update_entry_v1.
* Introducing a valid entry into the grant table:
* 1. Write ent->domid.
* 2. Write ent->frame:

@@ -250,15 +223,6 @@ static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
gnttab_shared.v1[ref].flags = flags;
}

static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
unsigned long frame, unsigned flags)
{
gnttab_shared.v2[ref].hdr.domid = domid;
gnttab_shared.v2[ref].full_page.frame = frame;
wmb();
gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}

/*
* Public grant-issuing interface functions
*/

@@ -285,132 +249,11 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);

static void gnttab_update_subpage_entry_v2(grant_ref_t ref, domid_t domid,
unsigned long frame, int flags,
unsigned page_off, unsigned length)
{
gnttab_shared.v2[ref].sub_page.frame = frame;
gnttab_shared.v2[ref].sub_page.page_off = page_off;
gnttab_shared.v2[ref].sub_page.length = length;
gnttab_shared.v2[ref].hdr.domid = domid;
wmb();
gnttab_shared.v2[ref].hdr.flags =
GTF_permit_access | GTF_sub_page | flags;
}

int gnttab_grant_foreign_access_subpage_ref(grant_ref_t ref, domid_t domid,
unsigned long frame, int flags,
unsigned page_off,
unsigned length)
{
if (flags & (GTF_accept_transfer | GTF_reading |
GTF_writing | GTF_transitive))
return -EPERM;

if (gnttab_interface->update_subpage_entry == NULL)
return -ENOSYS;

gnttab_interface->update_subpage_entry(ref, domid, frame, flags,
page_off, length);

return 0;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage_ref);

int gnttab_grant_foreign_access_subpage(domid_t domid, unsigned long frame,
int flags, unsigned page_off,
unsigned length)
{
int ref, rc;

ref = get_free_entries(1);
if (unlikely(ref < 0))
return -ENOSPC;

rc = gnttab_grant_foreign_access_subpage_ref(ref, domid, frame, flags,
page_off, length);
if (rc < 0) {
put_free_entry(ref);
return rc;
}

return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage);

bool gnttab_subpage_grants_available(void)
{
return gnttab_interface->update_subpage_entry != NULL;
}
EXPORT_SYMBOL_GPL(gnttab_subpage_grants_available);

static void gnttab_update_trans_entry_v2(grant_ref_t ref, domid_t domid,
int flags, domid_t trans_domid,
grant_ref_t trans_gref)
{
gnttab_shared.v2[ref].transitive.trans_domid = trans_domid;
gnttab_shared.v2[ref].transitive.gref = trans_gref;
gnttab_shared.v2[ref].hdr.domid = domid;
wmb();
gnttab_shared.v2[ref].hdr.flags =
GTF_permit_access | GTF_transitive | flags;
}

int gnttab_grant_foreign_access_trans_ref(grant_ref_t ref, domid_t domid,
int flags, domid_t trans_domid,
grant_ref_t trans_gref)
{
if (flags & (GTF_accept_transfer | GTF_reading |
GTF_writing | GTF_sub_page))
return -EPERM;

if (gnttab_interface->update_trans_entry == NULL)
return -ENOSYS;

gnttab_interface->update_trans_entry(ref, domid, flags, trans_domid,
trans_gref);

return 0;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans_ref);

int gnttab_grant_foreign_access_trans(domid_t domid, int flags,
domid_t trans_domid,
grant_ref_t trans_gref)
{
int ref, rc;

ref = get_free_entries(1);
if (unlikely(ref < 0))
return -ENOSPC;

rc = gnttab_grant_foreign_access_trans_ref(ref, domid, flags,
trans_domid, trans_gref);
if (rc < 0) {
put_free_entry(ref);
return rc;
}

return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans);

bool gnttab_trans_grants_available(void)
{
return gnttab_interface->update_trans_entry != NULL;
}
EXPORT_SYMBOL_GPL(gnttab_trans_grants_available);

static int gnttab_query_foreign_access_v1(grant_ref_t ref)
{
return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
}

static int gnttab_query_foreign_access_v2(grant_ref_t ref)
{
return grstatus[ref] & (GTF_reading|GTF_writing);
}

int gnttab_query_foreign_access(grant_ref_t ref)
{
return gnttab_interface->query_foreign_access(ref);

@@ -433,29 +276,6 @@ static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
return 1;
}

static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
{
gnttab_shared.v2[ref].hdr.flags = 0;
mb();
if (grstatus[ref] & (GTF_reading|GTF_writing)) {
return 0;
} else {
/* The read of grstatus needs to have acquire
semantics. On x86, reads already have
that, and we just need to protect against
compiler reorderings. On other
architectures we may need a full
barrier. */
#ifdef CONFIG_X86
barrier();
#else
mb();
#endif
}

return 1;
}

static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
return gnttab_interface->end_foreign_access_ref(ref, readonly);

@@ -616,37 +436,6 @@ static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
return frame;
}

static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
{
unsigned long frame;
u16 flags;
u16 *pflags;

pflags = &gnttab_shared.v2[ref].hdr.flags;

/*
* If a transfer is not even yet started, try to reclaim the grant
* reference and return failure (== 0).
*/
while (!((flags = *pflags) & GTF_transfer_committed)) {
if (sync_cmpxchg(pflags, flags, 0) == flags)
return 0;
cpu_relax();
}

/* If a transfer is in progress then wait until it is completed. */
while (!(flags & GTF_transfer_completed)) {
flags = *pflags;
cpu_relax();
}

rmb(); /* Read the frame number /after/ reading completion status. */
frame = gnttab_shared.v2[ref].full_page.frame;
BUG_ON(frame == 0);

return frame;
}

unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
return gnttab_interface->end_foreign_transfer_ref(ref);

@@ -962,12 +751,6 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);

static unsigned nr_status_frames(unsigned nr_grant_frames)
{
BUG_ON(grefs_per_grant_frame == 0);
return (nr_grant_frames * grefs_per_grant_frame + SPP - 1) / SPP;
}

static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
int rc;

@@ -985,55 +768,6 @@ static void gnttab_unmap_frames_v1(void)
arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}

static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
uint64_t *sframes;
unsigned int nr_sframes;
struct gnttab_get_status_frames getframes;
int rc;

nr_sframes = nr_status_frames(nr_gframes);

/* No need for kzalloc as it is initialized in following hypercall
* GNTTABOP_get_status_frames.
*/
sframes = kmalloc(nr_sframes * sizeof(uint64_t), GFP_ATOMIC);
if (!sframes)
return -ENOMEM;

getframes.dom = DOMID_SELF;
getframes.nr_frames = nr_sframes;
set_xen_guest_handle(getframes.frame_list, sframes);

rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
&getframes, 1);
if (rc == -ENOSYS) {
kfree(sframes);
return -ENOSYS;
}

BUG_ON(rc || getframes.status);

rc = arch_gnttab_map_status(sframes, nr_sframes,
nr_status_frames(gnttab_max_grant_frames()),
&grstatus);
BUG_ON(rc);
kfree(sframes);

rc = arch_gnttab_map_shared(frames, nr_gframes,
gnttab_max_grant_frames(),
&gnttab_shared.addr);
BUG_ON(rc);

return 0;
}

static void gnttab_unmap_frames_v2(void)
{
arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
}

static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
struct gnttab_setup_table setup;

@@ -1101,43 +835,13 @@ static struct gnttab_ops gnttab_v1_ops = {
.query_foreign_access = gnttab_query_foreign_access_v1,
};

static struct gnttab_ops gnttab_v2_ops = {
.map_frames = gnttab_map_frames_v2,
.unmap_frames = gnttab_unmap_frames_v2,
.update_entry = gnttab_update_entry_v2,
.end_foreign_access_ref = gnttab_end_foreign_access_ref_v2,
.end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2,
.query_foreign_access = gnttab_query_foreign_access_v2,
.update_subpage_entry = gnttab_update_subpage_entry_v2,
.update_trans_entry = gnttab_update_trans_entry_v2,
};

static void gnttab_request_version(void)
{
int rc;
struct gnttab_set_version gsv;
/* Only version 1 is used, which will always be available. */
grant_table_version = 1;
grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1);
gnttab_interface = &gnttab_v1_ops;

gsv.version = 1;

rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
if (rc == 0 && gsv.version == 2) {
grant_table_version = 2;
grefs_per_grant_frame = PAGE_SIZE / sizeof(union grant_entry_v2);
gnttab_interface = &gnttab_v2_ops;
} else if (grant_table_version == 2) {
/*
* If we've already used version 2 features,
* but then suddenly discover that they're not
* available (e.g. migrating to an older
* version of Xen), almost unbounded badness
* can happen.
*/
panic("we need grant tables version 2, but only version 1 is available");
} else {
grant_table_version = 1;
grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1);
gnttab_interface = &gnttab_v1_ops;
}
pr_info("Grant tables using version %d layout\n", grant_table_version);
}

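Editorial note: after this change gnttab_request_version() no longer negotiates a version; it pins grant_table_version to 1, derives grefs_per_grant_frame from struct grant_entry_v1, and points gnttab_interface at gnttab_v1_ops. Version-specific behaviour still goes through the small ops table. A minimal standalone sketch of that dispatch pattern follows, with a single "v1" implementation selected unconditionally; all names here are illustrative, not the kernel's.

    /* Minimal sketch of the gnttab_interface dispatch pattern with a single
     * (v1) implementation selected unconditionally. Names are illustrative. */
    #include <stdio.h>

    struct gnttab_ops_sketch {
        void (*update_entry)(unsigned int ref, unsigned int domid,
                             unsigned long frame, unsigned int flags);
        int  (*query_foreign_access)(unsigned int ref);
    };

    static unsigned int v1_flags[16];

    static void update_entry_v1(unsigned int ref, unsigned int domid,
                                unsigned long frame, unsigned int flags)
    {
        (void)domid; (void)frame;
        v1_flags[ref] = flags;          /* stands in for writing the shared entry */
    }

    static int query_foreign_access_v1(unsigned int ref)
    {
        return v1_flags[ref] & 0x3;     /* stands in for GTF_reading|GTF_writing */
    }

    static const struct gnttab_ops_sketch v1_ops = {
        .update_entry         = update_entry_v1,
        .query_foreign_access = query_foreign_access_v1,
    };

    static const struct gnttab_ops_sketch *iface;

    int main(void)
    {
        iface = &v1_ops;                /* "only version 1 is used" */
        iface->update_entry(3, 0, 0x1000, 0x2);
        printf("ref 3 in use: %d\n", iface->query_foreign_access(3) != 0);
        return 0;
    }
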
@@ -1225,8 +929,7 @@ int gnttab_init(void)
}
}

ret = arch_gnttab_init(max_nr_grant_frames,
nr_status_frames(max_nr_grant_frames));
ret = arch_gnttab_init(max_nr_grant_frames);
if (ret < 0)
goto ini_nomem;

@@ -174,6 +174,7 @@ static int xen_pcibk_attach(struct xen_pcibk_device *pdev)
"version mismatch (%s/%s) with pcifront - "
"halting " DRV_NAME,
magic, XEN_PCI_MAGIC);
err = -EFAULT;
goto out;
}

@@ -64,24 +64,6 @@ int gnttab_resume(void);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
int readonly);
int gnttab_grant_foreign_access_subpage(domid_t domid, unsigned long frame,
int flags, unsigned page_off,
unsigned length);
int gnttab_grant_foreign_access_trans(domid_t domid, int flags,
domid_t trans_domid,
grant_ref_t trans_gref);

/*
* Are sub-page grants available on this version of Xen? Returns true if they
* are, and false if they're not.
*/
bool gnttab_subpage_grants_available(void);

/*
* Are transitive grants available on this version of Xen? Returns true if they
* are, and false if they're not.
*/
bool gnttab_trans_grants_available(void);

/*
* End access through the given grant reference, iff the grant entry is no

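Editorial note: with the sub-page and transitive helpers removed from the header above, the interface a frontend typically needs is the plain gnttab_grant_foreign_access() declaration that remains. The following is a hedged kernel-side sketch of that usage, not part of this series: gnttab_end_foreign_access() (which also frees the page once the backend is done), virt_to_mfn(), the backend domid, and the helper name are assumptions of the example.

    /* Hedged sketch: grant one page read-only to a backend domain using the
     * v1-era API kept by this series, then revoke it. Illustrative only. */
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <xen/grant_table.h>
    #include <asm/xen/page.h>

    static int grant_one_page_example(domid_t backend_domid)
    {
        unsigned long page = __get_free_page(GFP_KERNEL);
        int ref;

        if (!page)
            return -ENOMEM;

        /* Read-only grant of this frame to the backend. */
        ref = gnttab_grant_foreign_access(backend_domid, virt_to_mfn(page), 1);
        if (ref < 0) {
            free_page(page);
            return ref;
        }

        /* ... advertise "ref" to the backend, let it map the page ... */

        /* Revoke; the page is freed once the backend no longer uses it. */
        gnttab_end_foreign_access(ref, 1, page);
        return 0;
    }
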
@@ -128,13 +110,6 @@ void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);

void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
unsigned long frame, int readonly);
int gnttab_grant_foreign_access_subpage_ref(grant_ref_t ref, domid_t domid,
unsigned long frame, int flags,
unsigned page_off,
unsigned length);
int gnttab_grant_foreign_access_trans_ref(grant_ref_t ref, domid_t domid,
int flags, domid_t trans_domid,
grant_ref_t trans_gref);

void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
unsigned long pfn);

@@ -170,13 +145,10 @@ gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
unmap->dev_bus_addr = 0;
}

int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status);
int arch_gnttab_init(unsigned long nr_shared);
int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
unsigned long max_nr_gframes,
void **__shared);
int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
unsigned long max_nr_gframes,
grant_status_t **__shared);
void arch_gnttab_unmap(void *shared, unsigned long nr_gframes);

struct grant_frames {