Merge branches 'stable/balloon.cleanup' and 'stable/general.cleanup' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen

* 'stable/balloon.cleanup' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen/balloon: Move dec_totalhigh_pages() from __balloon_append() to balloon_append()
  xen/balloon: Clarify credit calculation
  xen/balloon: Simplify HVM integration
  xen/balloon: Use PageHighMem() for high memory page detection

* 'stable/general.cleanup' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  drivers/xen/sys-hypervisor: Cleanup code/data sections definitions
  arch/x86/xen/smp: Cleanup code/data sections definitions
  arch/x86/xen/time: Cleanup code/data sections definitions
  arch/x86/xen/xen-ops: Cleanup code/data sections definitions
  arch/x86/xen/mmu: Cleanup code/data sections definitions
  arch/x86/xen/setup: Cleanup code/data sections definitions
  arch/x86/xen/enlighten: Cleanup code/data sections definitions
  arch/x86/xen/irq: Cleanup code/data sections definitions
  xen: tidy up whitespace in drivers/xen/Makefile
Commit 3bfccb7497
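All of the 'general.cleanup' commits below make the same mechanical change: section annotations such as __init, __cpuinit and __refconst move to their conventional position (after the storage class and return type for functions, after the variable name for data), and init-only ops tables switch from __initdata to __initconst since they are const. The standalone sketch below illustrates that placement convention; the stubbed-out macros and the pv_ops_example structure are illustrative stand-ins, not kernel code.

#include <stdio.h>

/* In the kernel these come from <linux/init.h> and place code/data into
 * discardable init sections; they are stubbed out here so the sketch
 * compiles on its own. */
#define __init      /* kernel: __attribute__((__section__(".init.text"))) */
#define __initconst /* kernel: __attribute__((__section__(".init.rodata"))) */

struct pv_ops_example {		/* hypothetical stand-in for the pv_*_ops tables */
	const char *name;
	void (*setup)(void);
};

/* Preferred form: storage class and return type first, then __init, then the name. */
static void __init xen_example_setup(void)
{
	puts("one-time setup work");
}

/* Preferred form for data: the section annotation follows the variable name. */
static const struct pv_ops_example example_ops __initconst = {
	.name  = "example",
	.setup = xen_example_setup,
};

int main(void)
{
	example_ops.setup();
	printf("registered ops: %s\n", example_ops.name);
	return 0;
}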
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -235,7 +235,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
 	*dx &= maskedx;
 }
 
-static __init void xen_init_cpuid_mask(void)
+static void __init xen_init_cpuid_mask(void)
 {
 	unsigned int ax, bx, cx, dx;
 	unsigned int xsave_mask;
@@ -400,7 +400,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
 /*
  * load_gdt for early boot, when the gdt is only mapped once
  */
-static __init void xen_load_gdt_boot(const struct desc_ptr *dtr)
+static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
 {
 	unsigned long va = dtr->address;
 	unsigned int size = dtr->size + 1;
@@ -662,7 +662,7 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
  * Version of write_gdt_entry for use at early boot-time needed to
  * update an entry as simply as possible.
  */
-static __init void xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
+static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
 					    const void *desc, int type)
 {
 	switch (type) {
@@ -933,18 +933,18 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
 	return ret;
 }
 
-static const struct pv_info xen_info __initdata = {
+static const struct pv_info xen_info __initconst = {
 	.paravirt_enabled = 1,
 	.shared_kernel_pmd = 0,
 
 	.name = "Xen",
 };
 
-static const struct pv_init_ops xen_init_ops __initdata = {
+static const struct pv_init_ops xen_init_ops __initconst = {
 	.patch = xen_patch,
 };
 
-static const struct pv_cpu_ops xen_cpu_ops __initdata = {
+static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 	.cpuid = xen_cpuid,
 
 	.set_debugreg = xen_set_debugreg,
@@ -1004,7 +1004,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
 	.end_context_switch = xen_end_context_switch,
 };
 
-static const struct pv_apic_ops xen_apic_ops __initdata = {
+static const struct pv_apic_ops xen_apic_ops __initconst = {
 #ifdef CONFIG_X86_LOCAL_APIC
 	.startup_ipi_hook = paravirt_nop,
 #endif
@@ -1055,7 +1055,7 @@ int xen_panic_handler_init(void)
 	return 0;
 }
 
-static const struct machine_ops __initdata xen_machine_ops = {
+static const struct machine_ops xen_machine_ops __initconst = {
 	.restart = xen_restart,
 	.halt = xen_machine_halt,
 	.power_off = xen_machine_halt,
@@ -1332,7 +1332,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata xen_hvm_cpu_notifier = {
+static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
 	.notifier_call = xen_hvm_cpu_notify,
 };
 
@@ -1381,7 +1381,7 @@ bool xen_hvm_need_lapic(void)
 }
 EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
 
-const __refconst struct hypervisor_x86 x86_hyper_xen_hvm = {
+const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = {
 	.name = "Xen HVM",
 	.detect = xen_hvm_platform,
 	.init_platform = xen_hvm_guest_init,
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -113,7 +113,7 @@ static void xen_halt(void)
 		xen_safe_halt();
 }
 
-static const struct pv_irq_ops xen_irq_ops __initdata = {
+static const struct pv_irq_ops xen_irq_ops __initconst = {
 	.save_fl = PV_CALLEE_SAVE(xen_save_fl),
 	.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
 	.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1054,7 +1054,7 @@ void xen_mm_pin_all(void)
  * that's before we have page structures to store the bits. So do all
  * the book-keeping now.
  */
-static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
+static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
 				  enum pt_level level)
 {
 	SetPagePinned(page);
@@ -1271,7 +1271,7 @@ void xen_exit_mmap(struct mm_struct *mm)
 	spin_unlock(&mm->page_table_lock);
 }
 
-static __init void xen_pagetable_setup_start(pgd_t *base)
+static void __init xen_pagetable_setup_start(pgd_t *base)
 {
 }
 
@@ -1291,7 +1291,7 @@ static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
 
 static void xen_post_allocator_init(void);
 
-static __init void xen_pagetable_setup_done(pgd_t *base)
+static void __init xen_pagetable_setup_done(pgd_t *base)
 {
 	xen_setup_shared_info();
 	xen_post_allocator_init();
@@ -1488,7 +1488,7 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 }
 
 #ifdef CONFIG_X86_32
-static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
+static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
 {
 	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
 	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
@@ -1498,7 +1498,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 	return pte;
 }
 #else /* CONFIG_X86_64 */
-static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
+static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
 {
 	unsigned long pfn = pte_pfn(pte);
 
@@ -1519,7 +1519,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 
 /* Init-time set_pte while constructing initial pagetables, which
    doesn't allow RO pagetable pages to be remapped RW */
-static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
+static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
 {
 	pte = mask_rw_pte(ptep, pte);
 
@@ -1537,7 +1537,7 @@ static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
 
 /* Early in boot, while setting up the initial pagetable, assume
    everything is pinned. */
-static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
+static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
 {
 #ifdef CONFIG_FLATMEM
 	BUG_ON(mem_map);	/* should only be used early */
@@ -1547,7 +1547,7 @@ static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
 }
 
 /* Used for pmd and pud */
-static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
+static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
 {
 #ifdef CONFIG_FLATMEM
 	BUG_ON(mem_map);	/* should only be used early */
@@ -1557,13 +1557,13 @@ static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
 
 /* Early release_pte assumes that all pts are pinned, since there's
    only init_mm and anything attached to that is pinned. */
-static __init void xen_release_pte_init(unsigned long pfn)
+static void __init xen_release_pte_init(unsigned long pfn)
 {
 	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
 	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
 }
 
-static __init void xen_release_pmd_init(unsigned long pfn)
+static void __init xen_release_pmd_init(unsigned long pfn)
 {
 	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
 }
@@ -1689,7 +1689,7 @@ static void set_page_prot(void *addr, pgprot_t prot)
 		BUG();
 }
 
-static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
+static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 {
 	unsigned pmdidx, pteidx;
 	unsigned ident_pte;
@@ -1772,7 +1772,7 @@ static void convert_pfn_mfn(void *v)
 * of the physical mapping once some sort of allocator has been set
 * up.
 */
-__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
+pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
 					 unsigned long max_pfn)
 {
 	pud_t *l3;
@@ -1843,7 +1843,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
 static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
 
-static __init void xen_write_cr3_init(unsigned long cr3)
+static void __init xen_write_cr3_init(unsigned long cr3)
 {
 	unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
 
@@ -1880,7 +1880,7 @@ static __init void xen_write_cr3_init(unsigned long cr3)
 	pv_mmu_ops.write_cr3 = &xen_write_cr3;
 }
 
-__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
+pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
 					 unsigned long max_pfn)
 {
 	pmd_t *kernel_pmd;
@@ -1986,7 +1986,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #endif
 }
 
-__init void xen_ident_map_ISA(void)
+void __init xen_ident_map_ISA(void)
 {
 	unsigned long pa;
 
@@ -2009,7 +2009,7 @@ __init void xen_ident_map_ISA(void)
 	xen_flush_tlb();
 }
 
-static __init void xen_post_allocator_init(void)
+static void __init xen_post_allocator_init(void)
 {
 #ifdef CONFIG_XEN_DEBUG
 	pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
@@ -2046,7 +2046,7 @@ static void xen_leave_lazy_mmu(void)
 	preempt_enable();
 }
 
-static const struct pv_mmu_ops xen_mmu_ops __initdata = {
+static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 	.read_cr2 = xen_read_cr2,
 	.write_cr2 = xen_write_cr2,
 
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -50,7 +50,7 @@ phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
 */
 #define EXTRA_MEM_RATIO		(10)
 
-static __init void xen_add_extra_mem(unsigned long pages)
+static void __init xen_add_extra_mem(unsigned long pages)
 {
 	unsigned long pfn;
 
@@ -336,7 +336,7 @@ static void __init fiddle_vdso(void)
 #endif
 }
 
-static __cpuinit int register_callback(unsigned type, const void *func)
+static int __cpuinit register_callback(unsigned type, const void *func)
 {
 	struct callback_register callback = {
 		.type = type,
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -57,7 +57,7 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static __cpuinit void cpu_bringup(void)
+static void __cpuinit cpu_bringup(void)
 {
 	int cpu = smp_processor_id();
 
@@ -85,7 +85,7 @@ static __cpuinit void cpu_bringup(void)
 	wmb();			/* make sure everything is out */
 }
 
-static __cpuinit void cpu_bringup_and_idle(void)
+static void __cpuinit cpu_bringup_and_idle(void)
 {
 	cpu_bringup();
 	cpu_idle();
@@ -242,7 +242,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 	}
 }
 
-static __cpuinit int
+static int __cpuinit
 cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 {
 	struct vcpu_guest_context *ctxt;
@@ -486,7 +486,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static const struct smp_ops xen_smp_ops __initdata = {
+static const struct smp_ops xen_smp_ops __initconst = {
 	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
 	.smp_prepare_cpus = xen_smp_prepare_cpus,
 	.smp_cpus_done = xen_smp_cpus_done,
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -439,11 +439,11 @@ void xen_timer_resume(void)
 	}
 }
 
-static const struct pv_time_ops xen_time_ops __initdata = {
+static const struct pv_time_ops xen_time_ops __initconst = {
 	.sched_clock = xen_clocksource_read,
 };
 
-static __init void xen_time_init(void)
+static void __init xen_time_init(void)
 {
 	int cpu = smp_processor_id();
 	struct timespec tp;
@@ -468,7 +468,7 @@ static __init void xen_time_init(void)
 	xen_setup_cpu_clockevents();
 }
 
-__init void xen_init_time_ops(void)
+void __init xen_init_time_ops(void)
 {
 	pv_time_ops = xen_time_ops;
 
@@ -490,7 +490,7 @@ static void xen_hvm_setup_cpu_clockevents(void)
 	xen_setup_cpu_clockevents();
 }
 
-__init void xen_hvm_init_time_ops(void)
+void __init xen_hvm_init_time_ops(void)
 {
 	/* vector callback is needed otherwise we cannot receive interrupts
 	 * on cpu > 0 and at this point we don't know how many cpus are
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -74,7 +74,7 @@ static inline void xen_hvm_smp_init(void) {}
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 void __init xen_init_spinlocks(void);
-__cpuinit void xen_init_lock_cpu(int cpu);
+void __cpuinit xen_init_lock_cpu(int cpu);
 void xen_uninit_lock_cpu(int cpu);
 #else
 static inline void xen_init_spinlocks(void)
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -4,21 +4,21 @@ obj-y	+= xenbus/
 nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_features.o := $(nostackp)
 
-obj-$(CONFIG_BLOCK)		+= biomerge.o
-obj-$(CONFIG_HOTPLUG_CPU)	+= cpu_hotplug.o
-obj-$(CONFIG_XEN_XENCOMM)	+= xencomm.o
-obj-$(CONFIG_XEN_BALLOON)	+= xen-balloon.o
-obj-$(CONFIG_XEN_DEV_EVTCHN)	+= xen-evtchn.o
-obj-$(CONFIG_XEN_GNTDEV)	+= xen-gntdev.o
+obj-$(CONFIG_BLOCK)			+= biomerge.o
+obj-$(CONFIG_HOTPLUG_CPU)		+= cpu_hotplug.o
+obj-$(CONFIG_XEN_XENCOMM)		+= xencomm.o
+obj-$(CONFIG_XEN_BALLOON)		+= xen-balloon.o
+obj-$(CONFIG_XEN_DEV_EVTCHN)		+= xen-evtchn.o
+obj-$(CONFIG_XEN_GNTDEV)		+= xen-gntdev.o
 obj-$(CONFIG_XEN_GRANT_DEV_ALLOC)	+= xen-gntalloc.o
-obj-$(CONFIG_XENFS)		+= xenfs/
+obj-$(CONFIG_XENFS)			+= xenfs/
 obj-$(CONFIG_XEN_SYS_HYPERVISOR)	+= sys-hypervisor.o
-obj-$(CONFIG_XEN_PLATFORM_PCI)	+= xen-platform-pci.o
-obj-$(CONFIG_SWIOTLB_XEN)	+= swiotlb-xen.o
-obj-$(CONFIG_XEN_DOM0)		+= pci.o
+obj-$(CONFIG_XEN_PLATFORM_PCI)		+= xen-platform-pci.o
+obj-$(CONFIG_SWIOTLB_XEN)		+= swiotlb-xen.o
+obj-$(CONFIG_XEN_DOM0)			+= pci.o
 
-xen-evtchn-y			:= evtchn.o
+xen-evtchn-y				:= evtchn.o
 xen-gntdev-y				:= gntdev.o
 xen-gntalloc-y				:= gntalloc.o
 
-xen-platform-pci-y		:= platform-pci.o
+xen-platform-pci-y			:= platform-pci.o
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -114,7 +114,6 @@ static void __balloon_append(struct page *page)
 	if (PageHighMem(page)) {
 		list_add_tail(&page->lru, &ballooned_pages);
 		balloon_stats.balloon_high++;
-		dec_totalhigh_pages();
 	} else {
 		list_add(&page->lru, &ballooned_pages);
 		balloon_stats.balloon_low++;
@@ -124,6 +123,8 @@ static void __balloon_append(struct page *page)
 static void balloon_append(struct page *page)
 {
 	__balloon_append(page);
+	if (PageHighMem(page))
+		dec_totalhigh_pages();
 	totalram_pages--;
 }
 
@@ -193,7 +194,7 @@ static enum bp_state update_schedule(enum bp_state state)
 	return BP_EAGAIN;
 }
 
-static unsigned long current_target(void)
+static long current_credit(void)
 {
 	unsigned long target = balloon_stats.target_pages;
 
@@ -202,7 +203,7 @@ static unsigned long current_target(void)
 		     balloon_stats.balloon_low +
 		     balloon_stats.balloon_high);
 
-	return target;
+	return target - balloon_stats.current_pages;
 }
 
 static enum bp_state increase_reservation(unsigned long nr_pages)
@@ -246,7 +247,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 		set_phys_to_machine(pfn, frame_list[i]);
 
 		/* Link back into the page tables if not highmem. */
-		if (!xen_hvm_domain() && pfn < max_low_pfn) {
+		if (xen_pv_domain() && !PageHighMem(page)) {
 			int ret;
 			ret = HYPERVISOR_update_va_mapping(
 				(unsigned long)__va(pfn << PAGE_SHIFT),
@@ -293,7 +294,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 
 		scrub_page(page);
 
-		if (!xen_hvm_domain() && !PageHighMem(page)) {
+		if (xen_pv_domain() && !PageHighMem(page)) {
 			ret = HYPERVISOR_update_va_mapping(
 				(unsigned long)__va(pfn << PAGE_SHIFT),
 				__pte_ma(0), 0);
@@ -337,7 +338,7 @@ static void balloon_process(struct work_struct *work)
 	mutex_lock(&balloon_mutex);
 
 	do {
-		credit = current_target() - balloon_stats.current_pages;
+		credit = current_credit();
 
 		if (credit > 0)
 			state = increase_reservation(credit);
@@ -420,7 +421,7 @@ void free_xenballooned_pages(int nr_pages, struct page** pages)
 	}
 
 	/* The balloon may be too large now. Shrink it if needed. */
-	if (current_target() != balloon_stats.current_pages)
+	if (current_credit())
 		schedule_delayed_work(&balloon_worker, 0);
 
 	mutex_unlock(&balloon_mutex);
@@ -429,7 +430,7 @@ EXPORT_SYMBOL(free_xenballooned_pages);
 
 static int __init balloon_init(void)
 {
-	unsigned long pfn, nr_pages, extra_pfn_end;
+	unsigned long pfn, extra_pfn_end;
 	struct page *page;
 
 	if (!xen_domain())
@@ -437,11 +438,7 @@ static int __init balloon_init(void)
 
 	pr_info("xen/balloon: Initialising balloon driver.\n");
 
-	if (xen_pv_domain())
-		nr_pages = xen_start_info->nr_pages;
-	else
-		nr_pages = max_pfn;
-	balloon_stats.current_pages = min(nr_pages, max_pfn);
+	balloon_stats.current_pages = xen_pv_domain() ? min(xen_start_info->nr_pages, max_pfn) : max_pfn;
 	balloon_stats.target_pages  = balloon_stats.current_pages;
 	balloon_stats.balloon_low   = 0;
 	balloon_stats.balloon_high  = 0;
@@ -466,7 +463,7 @@ static int __init balloon_init(void)
 	     pfn < extra_pfn_end;
 	     pfn++) {
 		page = pfn_to_page(pfn);
-		/* totalram_pages doesn't include the boot-time
+		/* totalram_pages and totalhigh_pages do not include the boot-time
 		   balloon extension, so don't subtract from it. */
 		__balloon_append(page);
 	}
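For context on the "Clarify credit calculation" hunks above: current_credit() now returns a signed page count, where a positive value means the guest should claim that many pages from the hypervisor (increase_reservation) and a negative value means it should hand pages back (decrease_reservation). The following standalone sketch models that arithmetic with made-up numbers; the balloon_stats instance and main() are illustrative only, not part of the driver.

#include <stdio.h>

/* Minimal model of the driver's bookkeeping; the values are made up. */
struct balloon_stats {
	unsigned long current_pages;	/* pages the guest currently owns */
	unsigned long target_pages;	/* pages the toolstack asked for */
	unsigned long balloon_low;	/* ballooned-out lowmem pages */
	unsigned long balloon_high;	/* ballooned-out highmem pages */
};

static struct balloon_stats balloon_stats = {
	.current_pages	= 100000,
	.target_pages	= 120000,
	.balloon_low	= 15000,
	.balloon_high	= 0,
};

/* Mirrors current_credit(): clamp the target to what is reachable through
 * the balloon, then return the signed distance from the current size. */
static long current_credit(void)
{
	unsigned long target = balloon_stats.target_pages;
	unsigned long reachable = balloon_stats.current_pages +
				  balloon_stats.balloon_low +
				  balloon_stats.balloon_high;

	if (target > reachable)
		target = reachable;

	return (long)target - (long)balloon_stats.current_pages;
}

int main(void)
{
	long credit = current_credit();

	if (credit > 0)
		printf("claim %ld pages from the hypervisor\n", credit);
	else if (credit < 0)
		printf("release %ld pages back to the hypervisor\n", -credit);
	else
		puts("balloon already at target");
	return 0;
}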
--- a/drivers/xen/sys-hypervisor.c
+++ b/drivers/xen/sys-hypervisor.c
@@ -215,7 +215,7 @@ static struct attribute_group xen_compilation_group = {
 	.attrs = xen_compile_attrs,
 };
 
-int __init static xen_compilation_init(void)
+static int __init xen_compilation_init(void)
 {
 	return sysfs_create_group(hypervisor_kobj, &xen_compilation_group);
 }