-----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQRTLbB6QfY48x44uB6AXGG7T9hjvgUCYd729AAKCRCAXGG7T9hj
 vmPzAP9MnRSseEV8C1t1naeW6W57DCQ9iTOXwC74q/5OxaINngEA7O2H6+FeRH63
 lo3vOW/S2W8G+iqET5DQ9z3YDQnItww=
 =Zlo8
 -----END PGP SIGNATURE-----

Merge tag 'for-linus-5.17-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

 - a fix for the Xen gntdev driver

 - a fix for running as Xen dom0 booted via EFI and the EFI framebuffer
   being located above 4GB

 - a series adding support for mapping other guests' memory by using the
   zone device functionality when running as a Xen guest on Arm

* tag 'for-linus-5.17-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  dt-bindings: xen: Clarify "reg" purpose
  arm/xen: Read extended regions from DT and init Xen resource
  xen/unpopulated-alloc: Add mechanism to use Xen resource
  xen/balloon: Bring alloc(free)_xenballooned_pages helpers back
  arm/xen: Switch to use gnttab_setup_auto_xlat_frames() for DT
  xen/unpopulated-alloc: Drop check for virt_addr_valid() in fill_list()
  xen/x86: obtain upper 32 bits of video frame buffer address for Dom0
  xen/gntdev: fix unmap notification order
Linus Torvalds 2022-01-12 16:42:00 -08:00
Parent 64ad946152 54bb4a91b2
Commit ce990f1de0
10 changed files with 259 additions and 36 deletions

View file

@@ -7,15 +7,17 @@ the following properties:
compatible = "xen,xen-<version>", "xen,xen";
where <version> is the version of the Xen ABI of the platform.
- reg: specifies the base physical address and size of a region in
memory where the grant table should be mapped to, using an
HYPERVISOR_memory_op hypercall. The memory region is large enough to map
the whole grant table (it is larger or equal to gnttab_max_grant_frames()).
This property is unnecessary when booting Dom0 using ACPI.
- reg: specifies the base physical address and size of the regions in memory
where the special resources should be mapped to, using a HYPERVISOR_memory_op
hypercall.
Region 0 is reserved for mapping the grant table; it must always be present.
The memory region is large enough to map the whole grant table (it is larger
than or equal to gnttab_max_grant_frames()).
Regions 1...N are extended regions (unused address space) for mapping foreign
GFNs and grants; they might be absent if there is nothing to expose.
- interrupts: the interrupt used by Xen to inject event notifications.
A GIC node is also required.
This property is unnecessary when booting Dom0 using ACPI.
To support UEFI on Xen ARM virtual platforms, Xen populates the FDT "uefi" node
under /hypervisor with following parameters:
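
The "reg" layout described above (region 0 = grant table space, regions 1...N =
optional extended regions) is the convention the arch_xen_unpopulated_init()
change below relies on. As a rough illustration only, here is a minimal sketch
of counting the extended regions from the /hypervisor node; count_extended_regions()
is a hypothetical helper, not part of this series:

/* Hypothetical sketch: count the optional extended regions described above. */
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/ioport.h>

static int count_extended_regions(void)
{
	struct device_node *np;
	struct resource res;
	unsigned int nr = 0;
	int ret;

	np = of_find_compatible_node(NULL, NULL, "xen,xen");
	if (!np)
		return -ENODEV;

	/* Region 0 (grant table space) must always be present. */
	ret = of_address_to_resource(np, 0, &res);
	if (ret) {
		of_node_put(np);
		return ret;
	}

	/* Regions 1...N are the optional extended regions. */
	while (!of_address_to_resource(np, nr + 1, &res))
		nr++;

	of_node_put(np);
	return nr;
}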

View file

@@ -59,6 +59,10 @@ unsigned long xen_released_pages;
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
static __read_mostly unsigned int xen_events_irq;
static __read_mostly phys_addr_t xen_grant_frames;
#define GRANT_TABLE_INDEX 0
#define EXT_REGION_INDEX 1
uint32_t xen_start_flags;
EXPORT_SYMBOL(xen_start_flags);
@@ -300,9 +304,115 @@ static void __init xen_acpi_guest_init(void)
#endif
}
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
/*
* A type-less specific Xen resource which contains extended regions
* (unused regions of guest physical address space provided by the hypervisor).
*/
static struct resource xen_resource = {
.name = "Xen unused space",
};
int __init arch_xen_unpopulated_init(struct resource **res)
{
struct device_node *np;
struct resource *regs, *tmp_res;
uint64_t min_gpaddr = -1, max_gpaddr = 0;
unsigned int i, nr_reg = 0;
int rc;
if (!xen_domain())
return -ENODEV;
if (!acpi_disabled)
return -ENODEV;
np = of_find_compatible_node(NULL, NULL, "xen,xen");
if (WARN_ON(!np))
return -ENODEV;
/* Skip region 0 which is reserved for grant table space */
while (of_get_address(np, nr_reg + EXT_REGION_INDEX, NULL, NULL))
nr_reg++;
if (!nr_reg) {
pr_err("No extended regions are found\n");
return -EINVAL;
}
regs = kcalloc(nr_reg, sizeof(*regs), GFP_KERNEL);
if (!regs)
return -ENOMEM;
/*
* Create resource from extended regions provided by the hypervisor to be
* used as unused address space for Xen scratch pages.
*/
for (i = 0; i < nr_reg; i++) {
rc = of_address_to_resource(np, i + EXT_REGION_INDEX, &regs[i]);
if (rc)
goto err;
if (max_gpaddr < regs[i].end)
max_gpaddr = regs[i].end;
if (min_gpaddr > regs[i].start)
min_gpaddr = regs[i].start;
}
xen_resource.start = min_gpaddr;
xen_resource.end = max_gpaddr;
/*
* Mark holes between extended regions as unavailable. The rest of that
* address space will be available for the allocation.
*/
for (i = 1; i < nr_reg; i++) {
resource_size_t start, end;
/* There is an overlap between regions */
if (regs[i - 1].end + 1 > regs[i].start) {
rc = -EINVAL;
goto err;
}
/* There is no hole between regions */
if (regs[i - 1].end + 1 == regs[i].start)
continue;
start = regs[i - 1].end + 1;
end = regs[i].start - 1;
tmp_res = kzalloc(sizeof(*tmp_res), GFP_KERNEL);
if (!tmp_res) {
rc = -ENOMEM;
goto err;
}
tmp_res->name = "Unavailable space";
tmp_res->start = start;
tmp_res->end = end;
rc = insert_resource(&xen_resource, tmp_res);
if (rc) {
pr_err("Cannot insert resource %pR (%d)\n", tmp_res, rc);
kfree(tmp_res);
goto err;
}
}
*res = &xen_resource;
err:
kfree(regs);
return rc;
}
#endif
static void __init xen_dt_guest_init(void)
{
struct device_node *xen_node;
struct resource res;
xen_node = of_find_compatible_node(NULL, NULL, "xen,xen");
if (!xen_node) {
@@ -311,13 +421,19 @@ static void __init xen_dt_guest_init(void)
}
xen_events_irq = irq_of_parse_and_map(xen_node, 0);
if (of_address_to_resource(xen_node, GRANT_TABLE_INDEX, &res)) {
pr_err("Xen grant table region is not found\n");
return;
}
xen_grant_frames = res.start;
}
static int __init xen_guest_init(void)
{
struct xen_add_to_physmap xatp;
struct shared_info *shared_info_page = NULL;
int cpu;
int rc, cpu;
if (!xen_domain())
return 0;
@@ -370,12 +486,16 @@ static int __init xen_guest_init(void)
for_each_possible_cpu(cpu)
per_cpu(xen_vcpu_id, cpu) = cpu;
xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
if (xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
&xen_auto_xlat_grant_frames.vaddr,
xen_auto_xlat_grant_frames.count)) {
if (!xen_grant_frames) {
xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
rc = xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
&xen_auto_xlat_grant_frames.vaddr,
xen_auto_xlat_grant_frames.count);
} else
rc = gnttab_setup_auto_xlat_frames(xen_grant_frames);
if (rc) {
free_percpu(xen_vcpu_info);
return -ENOMEM;
return rc;
}
gnttab_init();

View file

@@ -62,14 +62,18 @@ void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size)
break;
}
if (size >= offsetof(struct dom0_vga_console_info,
u.vesa_lfb.gbl_caps)
+ sizeof(info->u.vesa_lfb.gbl_caps))
screen_info->capabilities = info->u.vesa_lfb.gbl_caps;
if (size >= offsetof(struct dom0_vga_console_info,
u.vesa_lfb.mode_attrs)
+ sizeof(info->u.vesa_lfb.mode_attrs))
screen_info->vesa_attributes = info->u.vesa_lfb.mode_attrs;
if (size >= offsetof(struct dom0_vga_console_info,
u.vesa_lfb.ext_lfb_base)
+ sizeof(info->u.vesa_lfb.ext_lfb_base)
&& info->u.vesa_lfb.ext_lfb_base) {
screen_info->ext_lfb_base = info->u.vesa_lfb.ext_lfb_base;
screen_info->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
}
break;
}
}
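
The hunk above only copies the upper address bits into screen_info when they
are present. For context, here is a minimal sketch (assuming the standard
struct screen_info fields and the VIDEO_CAPABILITY_64BIT_BASE flag used above)
of how a consumer could reassemble the full 64-bit framebuffer address;
lfb_physical_base() is a hypothetical helper, not code from this merge:

/* Hypothetical sketch: reassemble the 64-bit framebuffer base from screen_info. */
#include <linux/screen_info.h>
#include <linux/types.h>

static u64 lfb_physical_base(const struct screen_info *si)
{
	u64 base = si->lfb_base;

	/* ext_lfb_base carries bits 63:32 when the capability flag is set. */
	if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
		base |= (u64)si->ext_lfb_base << 32;

	return base;
}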

View file

@@ -327,7 +327,7 @@ config XEN_FRONT_PGDIR_SHBUF
config XEN_UNPOPULATED_ALLOC
bool "Use unpopulated memory ranges for guest mappings"
depends on X86 && ZONE_DEVICE
depends on ZONE_DEVICE
default XEN_BACKEND || XEN_GNTDEV || XEN_DOM0
help
Use unpopulated memory ranges in order to create mappings for guest

View file

@@ -581,7 +581,6 @@ void balloon_set_new_target(unsigned long target)
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);
#ifndef CONFIG_XEN_UNPOPULATED_ALLOC
static int add_ballooned_pages(unsigned int nr_pages)
{
enum bp_state st;
@@ -610,12 +609,12 @@ static int add_ballooned_pages(unsigned int nr_pages)
}
/**
* xen_alloc_unpopulated_pages - get pages that have been ballooned out
* xen_alloc_ballooned_pages - get pages that have been ballooned out
* @nr_pages: Number of pages to get
* @pages: pages returned
* @return 0 on success, error otherwise
*/
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages)
{
unsigned int pgno = 0;
struct page *page;
@@ -652,23 +651,23 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
return 0;
out_undo:
mutex_unlock(&balloon_mutex);
xen_free_unpopulated_pages(pgno, pages);
xen_free_ballooned_pages(pgno, pages);
/*
* NB: free_xenballooned_pages will only subtract pgno pages, but since
* NB: xen_free_ballooned_pages will only subtract pgno pages, but since
* target_unpopulated is incremented with nr_pages at the start we need
* to remove the remaining ones also, or accounting will be screwed.
*/
balloon_stats.target_unpopulated -= nr_pages - pgno;
return ret;
}
EXPORT_SYMBOL(xen_alloc_unpopulated_pages);
EXPORT_SYMBOL(xen_alloc_ballooned_pages);
/**
* xen_free_unpopulated_pages - return pages retrieved with get_ballooned_pages
* xen_free_ballooned_pages - return pages retrieved with get_ballooned_pages
* @nr_pages: Number of pages
* @pages: pages to return
*/
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages)
{
unsigned int i;
@@ -687,9 +686,9 @@ void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
mutex_unlock(&balloon_mutex);
}
EXPORT_SYMBOL(xen_free_unpopulated_pages);
EXPORT_SYMBOL(xen_free_ballooned_pages);
#if defined(CONFIG_XEN_PV)
#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
static void __init balloon_add_region(unsigned long start_pfn,
unsigned long pages)
{
@@ -712,7 +711,6 @@ static void __init balloon_add_region(unsigned long start_pfn,
balloon_stats.total_pages += extra_pfn_end - start_pfn;
}
#endif
#endif
static int __init balloon_init(void)
{

View file

@@ -250,13 +250,13 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
if (!refcount_dec_and_test(&map->users))
return;
if (map->pages && !use_ptemod)
unmap_grant_pages(map, 0, map->count);
if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
notify_remote_via_evtchn(map->notify.event);
evtchn_put(map->notify.event);
}
if (map->pages && !use_ptemod)
unmap_grant_pages(map, 0, map->count);
gntdev_free_map(map);
}

View file

@@ -8,6 +8,7 @@
#include <asm/page.h>
#include <xen/balloon.h>
#include <xen/page.h>
#include <xen/xen.h>
@@ -15,13 +16,29 @@ static DEFINE_MUTEX(list_lock);
static struct page *page_list;
static unsigned int list_count;
static struct resource *target_resource;
/*
* If the arch is not happy with the system "iomem_resource" being used for
* the region allocation, it can provide its own view by creating a specific
* Xen resource with unused regions of guest physical address space provided
* by the hypervisor.
*/
int __weak __init arch_xen_unpopulated_init(struct resource **res)
{
*res = &iomem_resource;
return 0;
}
static int fill_list(unsigned int nr_pages)
{
struct dev_pagemap *pgmap;
struct resource *res;
struct resource *res, *tmp_res = NULL;
void *vaddr;
unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
int ret = -ENOMEM;
struct range mhp_range;
int ret;
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
@@ -30,14 +47,40 @@ static int fill_list(unsigned int nr_pages)
res->name = "Xen scratch";
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
ret = allocate_resource(&iomem_resource, res,
alloc_pages * PAGE_SIZE, 0, -1,
mhp_range = mhp_get_pluggable_range(true);
ret = allocate_resource(target_resource, res,
alloc_pages * PAGE_SIZE, mhp_range.start, mhp_range.end,
PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
if (ret < 0) {
pr_err("Cannot allocate new IOMEM resource\n");
goto err_resource;
}
/*
* Reserve the region previously allocated from the Xen resource so that
* it cannot be reused by someone else.
*/
if (target_resource != &iomem_resource) {
tmp_res = kzalloc(sizeof(*tmp_res), GFP_KERNEL);
if (!tmp_res) {
ret = -ENOMEM;
goto err_insert;
}
tmp_res->name = res->name;
tmp_res->start = res->start;
tmp_res->end = res->end;
tmp_res->flags = res->flags;
ret = request_resource(&iomem_resource, tmp_res);
if (ret < 0) {
pr_err("Cannot request resource %pR (%d)\n", tmp_res, ret);
kfree(tmp_res);
goto err_insert;
}
}
pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
if (!pgmap) {
ret = -ENOMEM;
@@ -85,7 +128,6 @@ static int fill_list(unsigned int nr_pages)
for (i = 0; i < alloc_pages; i++) {
struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);
BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
pg->zone_device_data = page_list;
page_list = pg;
list_count++;
@@ -96,6 +138,11 @@ static int fill_list(unsigned int nr_pages)
err_memremap:
kfree(pgmap);
err_pgmap:
if (tmp_res) {
release_resource(tmp_res);
kfree(tmp_res);
}
err_insert:
release_resource(res);
err_resource:
kfree(res);
@@ -113,6 +160,14 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
unsigned int i;
int ret = 0;
/*
* Fall back to the default behavior if we do not have any suitable
* resource to allocate the required region from; as a result we won't
* be able to construct pages.
*/
if (!target_resource)
return xen_alloc_ballooned_pages(nr_pages, pages);
mutex_lock(&list_lock);
if (list_count < nr_pages) {
ret = fill_list(nr_pages - list_count);
@@ -160,6 +215,11 @@ void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
unsigned int i;
if (!target_resource) {
xen_free_ballooned_pages(nr_pages, pages);
return;
}
mutex_lock(&list_lock);
for (i = 0; i < nr_pages; i++) {
pages[i]->zone_device_data = page_list;
@@ -202,3 +262,20 @@ static int __init init(void)
}
subsys_initcall(init);
#endif
static int __init unpopulated_init(void)
{
int ret;
if (!xen_domain())
return -ENODEV;
ret = arch_xen_unpopulated_init(&target_resource);
if (ret) {
pr_err("xen:unpopulated: Cannot initialize target resource\n");
target_resource = NULL;
}
return ret;
}
early_initcall(unpopulated_init);

View file

@@ -26,6 +26,9 @@ extern struct balloon_stats balloon_stats;
void balloon_set_new_target(unsigned long target);
int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages);
void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages);
#ifdef CONFIG_XEN_BALLOON
void xen_balloon_init(void);
#else

View file

@@ -722,6 +722,9 @@ struct dom0_vga_console_info {
uint32_t gbl_caps;
/* Mode attributes (offset 0x0, VESA command 0x4f01). */
uint16_t mode_attrs;
uint16_t pad;
/* high 32 bits of lfb_base */
uint32_t ext_lfb_base;
} vesa_lfb;
} u;
};

View file

@@ -52,7 +52,23 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
extern u64 xen_saved_max_mem_size;
#endif
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);
#include <linux/ioport.h>
int arch_xen_unpopulated_init(struct resource **res);
#else
#include <xen/balloon.h>
static inline int xen_alloc_unpopulated_pages(unsigned int nr_pages,
struct page **pages)
{
return xen_alloc_ballooned_pages(nr_pages, pages);
}
static inline void xen_free_unpopulated_pages(unsigned int nr_pages,
struct page **pages)
{
xen_free_ballooned_pages(nr_pages, pages);
}
#endif
#endif /* _XEN_XEN_H */
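
With the declarations above, callers use the same pair of helpers whether or
not CONFIG_XEN_UNPOPULATED_ALLOC is enabled; the inline wrappers fall back to
the ballooned-pages variants otherwise. A minimal, hypothetical usage sketch
(map_scratch_example() is invented for illustration, not part of this merge):

/* Hypothetical usage sketch for the helpers declared above. */
#include <linux/slab.h>
#include <xen/xen.h>

static int map_scratch_example(unsigned int nr)
{
	struct page **pages;
	int ret;

	pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* Unpopulated pages if available, ballooned pages otherwise. */
	ret = xen_alloc_unpopulated_pages(nr, pages);
	if (ret)
		goto out;

	/* ... set up grant or foreign mappings on top of 'pages' here ... */

	xen_free_unpopulated_pages(nr, pages);
out:
	kfree(pages);
	return ret;
}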