Merge tag 'stable/for-linus-3.13-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull Xen bugfixes from Konrad Rzeszutek Wilk:
 - Fix the balloon driver for auto-translated guests (PVHVM, ARM) so it
   does not use scratch pages.
 - Fix the block API header for ARM32 and ARM64 to have the proper layout.
 - On ARM, when mapping guests, mark the PTEs with PTE_SPECIAL.
 - When using SWIOTLB under ARM, don't call the swiotlb functions twice.
 - When unmapping guest memory and the unmap fails, don't return the
   pages that failed to be unmapped.
 - The grant driver was using the wrong address on ARM.

* tag 'stable/for-linus-3.13-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/balloon: Seperate the auto-translate logic properly (v2)
  xen/block: Correctly define structures in public headers on ARM32 and ARM64
  arm: xen: foreign mapping PTEs are special.
  xen/arm64: do not call the swiotlb functions twice
  xen: privcmd: do not return pages which we have failed to unmap
  XEN: Grant table address, xen_hvm_resume_frames, is a phys_addr not a pfn
Commit 4203d0eb3a
arch/arm/xen/enlighten.c:
@@ -96,7 +96,7 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
 	struct remap_data *info = data;
 	struct page *page = info->pages[info->index++];
 	unsigned long pfn = page_to_pfn(page);
-	pte_t pte = pfn_pte(pfn, info->prot);
+	pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
 
 	if (map_foreign_page(pfn, info->fgmfn, info->domid))
 		return -EFAULT;
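Why PTE_SPECIAL here: frames mapped in from a foreign domain have no backing struct page in this kernel, and a special PTE tells the mm core not to go looking for one. A standalone toy model of that rule (illustrative C only, not kernel code; all names below are made up):

#include <stdio.h>

/* Toy model: a "special" PTE means "no struct page behind this pfn",
 * so vm_normal_page()-style lookups must bail out instead of touching
 * refcounts or the LRU for memory this kernel does not own. */
struct toy_pte { unsigned long pfn; int special; };

static const char *classify(struct toy_pte pte)
{
	return pte.special ? "foreign frame: skip, no struct page"
			   : "normal page: refcount, LRU, rmap, ...";
}

int main(void)
{
	struct toy_pte foreign = { .pfn = 0x90000, .special = 1 };
	struct toy_pte local   = { .pfn = 0x12345, .special = 0 };

	printf("foreign pfn %#lx -> %s\n", foreign.pfn, classify(foreign));
	printf("local   pfn %#lx -> %s\n", local.pfn, classify(local));
	return 0;
}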
@@ -224,10 +224,10 @@ static int __init xen_guest_init(void)
 	}
 	if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
 		return 0;
-	xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
+	xen_hvm_resume_frames = res.start;
 	xen_events_irq = irq_of_parse_and_map(node, 0);
 	pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
-			version, xen_events_irq, xen_hvm_resume_frames);
+			version, xen_events_irq, (xen_hvm_resume_frames >> PAGE_SHIFT));
 	xen_domain_type = XEN_HVM_DOMAIN;
 
 	xen_setup_features();
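The bug this fixes is a unit mismatch: the grant-table code consumes xen_hvm_resume_frames as a physical address, but it was being stored as a pfn. A minimal sketch of the two units (standalone C with an assumed PAGE_SHIFT of 12; not from the patch):

#include <stdio.h>

#define PAGE_SHIFT 12UL

int main(void)
{
	/* e.g. the grant-table region from the device tree (made-up value) */
	unsigned long phys = 0xb0000000UL;
	unsigned long pfn  = phys >> PAGE_SHIFT;   /* frame number */

	printf("phys=%#lx  pfn=%#lx\n", phys, pfn);
	/* Remapping wants the physical address; handing it the pfn maps a
	 * region off by a factor of PAGE_SIZE.  The fix stores the address
	 * and only shifts when printing the frame number. */
	return 0;
}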
arch/arm64/include/asm/xen/page-coherent.h:
@@ -23,25 +23,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
-	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
 }
 
 static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
 }
 
 static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-	__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
 }
 
 static inline void xen_dma_sync_single_for_device(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-	__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
 }
 #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
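On arm64 the generic dma_ops are already swiotlb-backed, so forwarding these hooks to __generic_dma_ops() made every mapping run through swiotlb twice; the fix empties the hooks. A toy model of the double call (standalone C, all names invented):

#include <stdio.h>

static int swiotlb_calls;

static void swiotlb_map(void)      { swiotlb_calls++; }
static void generic_map_page(void) { swiotlb_map(); }  /* arm64 dma_ops */

/* xen-swiotlb does the real mapping, then invokes the arch hook */
static void xen_map_old(void) { swiotlb_map(); generic_map_page(); }
static void xen_map_new(void) { swiotlb_map(); /* hook is now a no-op */ }

int main(void)
{
	swiotlb_calls = 0; xen_map_old();
	printf("before fix: %d swiotlb calls per map\n", swiotlb_calls);
	swiotlb_calls = 0; xen_map_new();
	printf("after fix:  %d swiotlb call per map\n", swiotlb_calls);
	return 0;
}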
drivers/xen/balloon.c:
@@ -350,17 +350,19 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 
 		pfn = page_to_pfn(page);
 
-		set_phys_to_machine(pfn, frame_list[i]);
-
 #ifdef CONFIG_XEN_HAVE_PVMMU
-		/* Link back into the page tables if not highmem. */
-		if (xen_pv_domain() && !PageHighMem(page)) {
-			int ret;
-			ret = HYPERVISOR_update_va_mapping(
-				(unsigned long)__va(pfn << PAGE_SHIFT),
-				mfn_pte(frame_list[i], PAGE_KERNEL),
-				0);
-			BUG_ON(ret);
+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+			set_phys_to_machine(pfn, frame_list[i]);
+
+			/* Link back into the page tables if not highmem. */
+			if (!PageHighMem(page)) {
+				int ret;
+				ret = HYPERVISOR_update_va_mapping(
+					(unsigned long)__va(pfn << PAGE_SHIFT),
+					mfn_pte(frame_list[i], PAGE_KERNEL),
+					0);
+				BUG_ON(ret);
+			}
 		}
 #endif
 
@@ -378,7 +380,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 	enum bp_state state = BP_DONE;
 	unsigned long pfn, i;
 	struct page *page;
-	struct page *scratch_page;
 	int ret;
 	struct xen_memory_reservation reservation = {
 		.address_bits = 0,
@@ -411,27 +412,29 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 
 		scrub_page(page);
 
+#ifdef CONFIG_XEN_HAVE_PVMMU
 		/*
 		 * Ballooned out frames are effectively replaced with
 		 * a scratch frame.  Ensure direct mappings and the
 		 * p2m are consistent.
 		 */
-		scratch_page = get_balloon_scratch_page();
-#ifdef CONFIG_XEN_HAVE_PVMMU
-		if (xen_pv_domain() && !PageHighMem(page)) {
-			ret = HYPERVISOR_update_va_mapping(
-				(unsigned long)__va(pfn << PAGE_SHIFT),
-				pfn_pte(page_to_pfn(scratch_page),
-					PAGE_KERNEL_RO), 0);
-			BUG_ON(ret);
-		}
-#endif
 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
 			unsigned long p;
+			struct page *scratch_page = get_balloon_scratch_page();
+
+			if (!PageHighMem(page)) {
+				ret = HYPERVISOR_update_va_mapping(
+					(unsigned long)__va(pfn << PAGE_SHIFT),
+					pfn_pte(page_to_pfn(scratch_page),
+						PAGE_KERNEL_RO), 0);
+				BUG_ON(ret);
+			}
+
 			p = page_to_pfn(scratch_page);
 			__set_phys_to_machine(pfn, pfn_to_mfn(p));
-		}
 
-		put_balloon_scratch_page();
+			put_balloon_scratch_page();
+		}
+#endif
 
 		balloon_append(pfn_to_page(pfn));
@@ -627,15 +630,17 @@ static int __init balloon_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
-	for_each_online_cpu(cpu)
-	{
-		per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
-		if (per_cpu(balloon_scratch_page, cpu) == NULL) {
-			pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
-			return -ENOMEM;
+	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+		for_each_online_cpu(cpu)
+		{
+			per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
+			if (per_cpu(balloon_scratch_page, cpu) == NULL) {
+				pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
+				return -ENOMEM;
+			}
 		}
+		register_cpu_notifier(&balloon_cpu_notifier);
 	}
-	register_cpu_notifier(&balloon_cpu_notifier);
 
 	pr_info("Initialising balloon driver\n");
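All three balloon hunks apply the same rule: p2m updates, direct-map fixups and scratch pages are PV-MMU bookkeeping, and auto-translated guests (PVHVM, ARM) must skip the lot, since the hypervisor tracks the physical-to-machine mapping for them. The shape of that gate as a standalone sketch (illustrative C, not driver code):

#include <stdbool.h>
#include <stdio.h>

static bool auto_translated;  /* XENFEAT_auto_translated_physmap */

static void balloon_out(unsigned long pfn)
{
	if (!auto_translated) {
		/* PV guest: rewrite the p2m entry and point the direct
		 * mapping at a scratch frame, as the real driver does. */
		printf("pfn %#lx: p2m updated, scratch frame mapped\n", pfn);
	} else {
		/* PVHVM/ARM: the hypervisor owns the translation. */
		printf("pfn %#lx: auto-translated, nothing to do\n", pfn);
	}
}

int main(void)
{
	auto_translated = false; balloon_out(0x1000);
	auto_translated = true;  balloon_out(0x1000);
	return 0;
}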
drivers/xen/grant-table.c:
@@ -1176,7 +1176,8 @@ static int gnttab_setup(void)
 		gnttab_shared.addr = xen_remap(xen_hvm_resume_frames,
 						PAGE_SIZE * max_nr_gframes);
 		if (gnttab_shared.addr == NULL) {
-			pr_warn("Failed to ioremap gnttab share frames!\n");
+			pr_warn("Failed to ioremap gnttab share frames (addr=0x%08lx)!\n",
+					xen_hvm_resume_frames);
 			return -ENOMEM;
 		}
 	}
drivers/xen/privcmd.c:
@@ -533,12 +533,17 @@ static void privcmd_close(struct vm_area_struct *vma)
 {
 	struct page **pages = vma->vm_private_data;
 	int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	int rc;
 
 	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
 		return;
 
-	xen_unmap_domain_mfn_range(vma, numpgs, pages);
-	free_xenballooned_pages(numpgs, pages);
+	rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
+	if (rc == 0)
+		free_xenballooned_pages(numpgs, pages);
+	else
+		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
+			numpgs, rc);
 	kfree(pages);
 }
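The policy change: a page whose unmap failed may still be wired into a foreign mapping, so returning it to the ballooned-page pool would let it be reused while still mapped. Leaking it, loudly via pr_crit, is the safe failure mode. A toy model of that decision (standalone C, invented names):

#include <stdio.h>

/* Pretend unmap: the 'fail' flag lets us show both paths. */
static int unmap_range(int npages, int fail)
{
	(void)npages;
	return fail ? -14 /* EFAULT-style error */ : 0;
}

static void close_mapping(int npages, int fail)
{
	int rc = unmap_range(npages, fail);

	if (rc == 0)
		printf("unmapped %d pages: safe to free them\n", npages);
	else
		printf("unmap failed (rc=%d): leaking %d pages on purpose\n",
		       rc, npages);
}

int main(void)
{
	close_mapping(4, 0);
	close_mapping(4, 1);
	return 0;
}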
include/xen/interface/io/blkif.h:
@@ -146,7 +146,7 @@ struct blkif_request_segment_aligned {
 struct blkif_request_rw {
 	uint8_t        nr_segments;  /* number of segments */
 	blkif_vdev_t   handle;       /* only for read/write requests */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
 	uint32_t       _pad1;        /* offsetof(blkif_request,u.rw.id) == 8 */
 #endif
 	uint64_t       id;           /* private guest value, echoed in resp */
@@ -163,7 +163,7 @@ struct blkif_request_discard {
 	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero. */
 #define BLKIF_DISCARD_SECURE (1<<0)  /* ignored if discard-secure=0 */
 	blkif_vdev_t   _pad1;        /* only for read/write requests */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
 	uint32_t       _pad2;        /* offsetof(blkif_req..,u.discard.id)==8*/
 #endif
 	uint64_t       id;           /* private guest value, echoed in resp */
@@ -175,7 +175,7 @@ struct blkif_request_discard {
 struct blkif_request_other {
 	uint8_t      _pad1;
 	blkif_vdev_t _pad2;          /* only for read/write requests */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
 	uint32_t     _pad3;          /* offsetof(blkif_req..,u.other.id)==8*/
 #endif
 	uint64_t     id;             /* private guest value, echoed in resp */
@@ -184,7 +184,7 @@ struct blkif_request_other {
 struct blkif_request_indirect {
 	uint8_t        indirect_op;
 	uint16_t       nr_segments;
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
 	uint32_t       _pad1;        /* offsetof(blkif_...,u.indirect.id) == 8 */
 #endif
 	uint64_t       id;
@@ -192,7 +192,7 @@ struct blkif_request_indirect {
 	blkif_vdev_t   handle;
 	uint16_t       _pad2;
 	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
 	uint32_t       _pad3;        /* make it 64 byte aligned */
 #else
 	uint64_t       _pad3;        /* make it 64 byte aligned */
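The guard change matters because #ifdef CONFIG_X86_64 is false on ARM32 and ARM64, so those builds silently compiled the 32-bit x86 branch of these guest-visible structures while the backend expected the padded layout. #ifndef CONFIG_X86_32 selects the padded layout everywhere except 32-bit x86. A standalone check of how the two guards evaluate per architecture (illustrative C, not from the header):

#include <stdio.h>

int main(void)
{
	static const char *arch[] = { "x86_32", "x86_64", "ARM32", "ARM64" };
	static const int x86_32[] = { 1, 0, 0, 0 };
	static const int x86_64[] = { 0, 1, 0, 0 };

	for (int i = 0; i < 4; i++)
		printf("%-6s  old guard (#ifdef CONFIG_X86_64): pad=%d   "
		       "new guard (#ifndef CONFIG_X86_32): pad=%d\n",
		       arch[i], x86_64[i], !x86_32[i]);
	/* Only the new guard gives ARM32/ARM64 the explicit padding, so
	 * guest and backend agree on where 'id' and friends live. */
	return 0;
}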