xen: fixes for 4.16 rc1
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2

iQEcBAABAgAGBQJafa95AAoJELDendYovxMvz8oH+QGgM0ZN0YfJa2JuR/RAlYyn
3OfSlMbGOjlmT74C1Rcx+SHNbbxUunoAx70UnvLbgtzbYyX08yVNsfNFesrarAkr
dkBBsAdLLzZYOoKnSKHWGl8Cf26F5eJbLo1FSSNYzmCaz0oD+geOqIWnOQMHkuUW
Rv6En9SjgzrE6dvzQ/LNtpjqFnSwJ+cD8ZkI21YXyDmZ3/xvZ9h8ID5vrzlP4wVH
gENxAMxn9w0nlbtHLvc2KGbVOUTSsA1LxbjDqzBEIGqgKmZVdt6d1J0KfO3eM6ej
9JuPcRt34HFPifuwI6xgtcKJjEr7QptIiDiSVvifXMvQnqfGc2b+qOFLhY6XwOU=
=+zOt
-----END PGP SIGNATURE-----

Merge tag 'for-linus-4.16-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen fixes from Juergen Gross:
 "Only five small fixes for issues when running under Xen"

* tag 'for-linus-4.16-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen: Fix {set,clear}_foreign_p2m_mapping on autotranslating guests
  pvcalls-back: do not return error on inet_accept EAGAIN
  xen-netfront: Fix race between device setup and open
  xen/grant-table: Use put_page instead of free_page
  x86/xen: init %gs very early to avoid page faults with stack protector
This commit is contained in:
f9f1e41412
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
@@ -694,6 +694,9 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
        int i, ret = 0;
        pte_t *pte;

+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return 0;
+
        if (kmap_ops) {
                ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
                                                kmap_ops, count);
@@ -736,6 +739,9 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 {
        int i, ret = 0;

+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return 0;
+
        for (i = 0; i < count; i++) {
                unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
                unsigned long pfn = page_to_pfn(pages[i]);
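
Both hunks above apply the same guard: bail out of the foreign p2m update when the guest is auto-translated, since HVM/PVH guests have no p2m/m2p bookkeeping to patch. A minimal user-space sketch of that early-exit pattern (the helper below only stands in for xen_feature(XENFEAT_auto_translated_physmap); it is not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for xen_feature(XENFEAT_auto_translated_physmap);
 * assumed here to report an HVM/PVH guest. */
static bool auto_translated(void)
{
        return true;
}

/* Sketch of the patched control flow in {set,clear}_foreign_p2m_mapping(). */
static int foreign_p2m_op(int count)
{
        if (auto_translated())
                return 0;       /* hardware-assisted paging: nothing to fix up */

        printf("PV guest: updating %d p2m entries\n", count);
        return 0;
}

int main(void)
{
        return foreign_p2m_op(4);
}
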
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
@@ -9,7 +9,9 @@

 #include <asm/boot.h>
 #include <asm/asm.h>
+#include <asm/msr.h>
 #include <asm/page_types.h>
+#include <asm/percpu.h>
 #include <asm/unwind_hints.h>

 #include <xen/interface/elfnote.h>
@@ -35,6 +37,20 @@ ENTRY(startup_xen)
        mov %_ASM_SI, xen_start_info
        mov $init_thread_union+THREAD_SIZE, %_ASM_SP

+#ifdef CONFIG_X86_64
+       /* Set up %gs.
+        *
+        * The base of %gs always points to the bottom of the irqstack
+        * union.  If the stack protector canary is enabled, it is
+        * located at %gs:40.  Note that, on SMP, the boot cpu uses
+        * init data section till per cpu areas are set up.
+        */
+       movl    $MSR_GS_BASE,%ecx
+       movq    $INIT_PER_CPU_VAR(irq_stack_union),%rax
+       cdq
+       wrmsr
+#endif
+
        jmp xen_start_kernel
 END(startup_xen)
        __FINIT
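
For context, wrmsr expects the MSR index in %ecx and the 64-bit value split across %edx:%eax; the cdq before it sign-extends %eax into %edx, which is enough here because the per-cpu symbol is a sign-extended kernel address. A small stand-alone C sketch of that split, using a made-up kernel-half address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Illustrative kernel-half address; any sign-extended value works. */
        uint64_t gs_base = 0xffffffff81a00000ULL;

        uint32_t eax = (uint32_t)gs_base;               /* low half for wrmsr */
        uint32_t edx = (uint32_t)(gs_base >> 32);       /* high half for wrmsr */

        /* cdq derives %edx from %eax by sign extension. */
        uint32_t edx_from_cdq = ((int32_t)eax < 0) ? 0xffffffffu : 0;

        printf("eax=%#x edx=%#x cdq-would-give=%#x\n", eax, edx, edx_from_cdq);
        return (edx == edx_from_cdq) ? 0 : 1;
}
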
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
@@ -351,6 +351,9 @@ static int xennet_open(struct net_device *dev)
        unsigned int i = 0;
        struct netfront_queue *queue = NULL;

+       if (!np->queues)
+               return -ENODEV;
+
        for (i = 0; i < num_queues; ++i) {
                queue = &np->queues[i];
                napi_enable(&queue->napi);
@@ -1358,18 +1361,8 @@ static int netfront_probe(struct xenbus_device *dev,
 #ifdef CONFIG_SYSFS
        info->netdev->sysfs_groups[0] = &xennet_dev_group;
 #endif
-       err = register_netdev(info->netdev);
-       if (err) {
-               pr_warn("%s: register_netdev err=%d\n", __func__, err);
-               goto fail;
-       }

        return 0;
-
- fail:
-       xennet_free_netdev(netdev);
-       dev_set_drvdata(&dev->dev, NULL);
-       return err;
 }

 static void xennet_end_access(int ref, void *page)
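
The race fix boils down to ordering: the netdev must not become user-visible before its queues exist. Registration therefore moves out of probe (removed above) and into xennet_connect() (added further down), while xennet_open() now rejects a device that has no queues yet. A rough user-space model of that ordering, with purely illustrative names:

#include <stdio.h>

struct fake_dev {
        int queues_ready;       /* set once queue setup has finished */
};

/* Model of the xennet_open() guard: refuse to open a device whose
 * queues do not exist yet. */
static int dev_open(const struct fake_dev *d)
{
        if (!d->queues_ready)
                return -1;      /* the driver returns -ENODEV here */
        return 0;
}

/* Model of the reordering: the device becomes openable only after its
 * queues are set up, not at probe time. */
static void dev_connect(struct fake_dev *d)
{
        d->queues_ready = 1;
}

int main(void)
{
        struct fake_dev d = { 0 };

        printf("open before connect: %d\n", dev_open(&d));      /* -1 */
        dev_connect(&d);
        printf("open after connect:  %d\n", dev_open(&d));      /* 0 */
        return 0;
}
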
@@ -1737,8 +1730,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
 {
        unsigned int i;

-       rtnl_lock();
-
        for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
                struct netfront_queue *queue = &info->queues[i];

@@ -1747,8 +1738,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
                netif_napi_del(&queue->napi);
        }

-       rtnl_unlock();
-
        kfree(info->queues);
        info->queues = NULL;
 }
@@ -1764,8 +1753,6 @@ static int xennet_create_queues(struct netfront_info *info,
        if (!info->queues)
                return -ENOMEM;

-       rtnl_lock();
-
        for (i = 0; i < *num_queues; i++) {
                struct netfront_queue *queue = &info->queues[i];

@@ -1774,7 +1761,7 @@ static int xennet_create_queues(struct netfront_info *info,

                ret = xennet_init_queue(queue);
                if (ret < 0) {
-                       dev_warn(&info->netdev->dev,
+                       dev_warn(&info->xbdev->dev,
                                 "only created %d queues\n", i);
                        *num_queues = i;
                        break;
@@ -1788,10 +1775,8 @@ static int xennet_create_queues(struct netfront_info *info,

        netif_set_real_num_tx_queues(info->netdev, *num_queues);

-       rtnl_unlock();
-
        if (*num_queues == 0) {
-               dev_err(&info->netdev->dev, "no queues\n");
+               dev_err(&info->xbdev->dev, "no queues\n");
                return -EINVAL;
        }
        return 0;
@@ -1828,6 +1813,7 @@ static int talk_to_netback(struct xenbus_device *dev,
                goto out;
        }

+       rtnl_lock();
        if (info->queues)
                xennet_destroy_queues(info);

@@ -1838,6 +1824,7 @@ static int talk_to_netback(struct xenbus_device *dev,
                info->queues = NULL;
                goto out;
        }
+       rtnl_unlock();

        /* Create shared ring, alloc event channel -- for each queue */
        for (i = 0; i < num_queues; ++i) {
@@ -1934,8 +1921,10 @@ abort_transaction_no_dev_fatal:
        xenbus_transaction_end(xbt, 1);
 destroy_ring:
        xennet_disconnect_backend(info);
+       rtnl_lock();
        xennet_destroy_queues(info);
 out:
+       rtnl_unlock();
        device_unregister(&dev->dev);
        return err;
 }
@@ -1965,6 +1954,15 @@ static int xennet_connect(struct net_device *dev)
        netdev_update_features(dev);
        rtnl_unlock();

+       if (dev->reg_state == NETREG_UNINITIALIZED) {
+               err = register_netdev(dev);
+               if (err) {
+                       pr_warn("%s: register_netdev err=%d\n", __func__, err);
+                       device_unregister(&np->xbdev->dev);
+                       return err;
+               }
+       }
+
        /*
         * All public and private state should now be sane.  Get
         * ready to start sending and receiving packets and give the driver
@@ -2150,10 +2148,14 @@ static int xennet_remove(struct xenbus_device *dev)

        xennet_disconnect_backend(info);

-       unregister_netdev(info->netdev);
+       if (info->netdev->reg_state == NETREG_REGISTERED)
+               unregister_netdev(info->netdev);

-       if (info->queues)
+       if (info->queues) {
+               rtnl_lock();
                xennet_destroy_queues(info);
+               rtnl_unlock();
+       }
        xennet_free_netdev(info->netdev);

        return 0;
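
Because queue setup and teardown can now overlap with the core networking code, the patch also moves rtnl locking out of xennet_create_queues()/xennet_destroy_queues() and into their callers, which hold the lock across the whole sequence. A small pthread sketch of this caller-holds-the-lock convention, with illustrative names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static int nqueues;

/* Helper assumes cfg_lock is already held by the caller, the way the
 * patched xennet_destroy_queues() assumes the rtnl lock is held. */
static void destroy_queues_locked(void)
{
        nqueues = 0;
}

static void create_queues_locked(int n)
{
        nqueues = n;
}

/* The caller takes the lock once around the whole reconfiguration, so
 * nobody can observe a half-torn-down state in between. */
static void reconfigure(int n)
{
        pthread_mutex_lock(&cfg_lock);
        destroy_queues_locked();
        create_queues_locked(n);
        pthread_mutex_unlock(&cfg_lock);
}

int main(void)
{
        reconfigure(4);
        printf("queues: %d\n", nqueues);
        return 0;
}
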
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
@@ -382,7 +382,7 @@ static void gnttab_handle_deferred(struct timer_list *unused)
                if (entry->page) {
                        pr_debug("freeing g.e. %#x (pfn %#lx)\n",
                                 entry->ref, page_to_pfn(entry->page));
-                       __free_page(entry->page);
+                       put_page(entry->page);
                } else
                        pr_info("freeing g.e. %#x\n", entry->ref);
                kfree(entry);
@@ -438,7 +438,7 @@ void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
        if (gnttab_end_foreign_access_ref(ref, readonly)) {
                put_free_entry(ref);
                if (page != 0)
-                       free_page(page);
+                       put_page(virt_to_page(page));
        } else
                gnttab_add_deferred(ref, readonly,
                                    page ? virt_to_page(page) : NULL);
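
The grant-table change replaces an unconditional free with a reference-count drop: put_page() frees the page only when the last reference disappears, which matters once a grant page can also be mapped elsewhere. A generic refcount sketch of why that is the safer operation (plain C, not the kernel's struct page):

#include <stdio.h>
#include <stdlib.h>

struct ref_page {
        int refcount;
        void *data;
};

/* put_page()-style release: free only when the last user drops its ref. */
static void page_put(struct ref_page *p)
{
        if (--p->refcount == 0) {
                free(p->data);
                free(p);
                printf("page freed\n");
        } else {
                printf("page still has %d reference(s)\n", p->refcount);
        }
}

int main(void)
{
        struct ref_page *p = malloc(sizeof(*p));

        p->refcount = 2;                /* e.g. grant table plus another mapping */
        p->data = malloc(4096);

        page_put(p);    /* grant side done: page must survive */
        page_put(p);    /* last reference gone: now it is freed */
        return 0;
}
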
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
@@ -548,7 +548,7 @@ static void __pvcalls_back_accept(struct work_struct *work)
        ret = inet_accept(mappass->sock, sock, O_NONBLOCK, true);
        if (ret == -EAGAIN) {
                sock_release(sock);
-               goto out_error;
+               return;
        }

        map = pvcalls_new_active_socket(fedata,
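
For a non-blocking accept, -EAGAIN only means no connection is pending yet, so the pvcalls worker should back off quietly rather than report an error to the frontend. A user-space illustration of the same distinction, using an ordinary non-blocking socket rather than the pvcalls code:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Distinguish "nothing to accept yet" from a real failure, the way the
 * fix treats inet_accept() returning -EAGAIN. */
static int try_accept(int listen_fd)
{
        int fd = accept(listen_fd, NULL, NULL);

        if (fd < 0) {
                if (errno == EAGAIN || errno == EWOULDBLOCK)
                        return 0;       /* not an error: simply try again later */
                fprintf(stderr, "accept: %s\n", strerror(errno));
                return -1;              /* genuine failure: report it */
        }
        close(fd);
        return 1;                       /* accepted one connection */
}

int main(void)
{
        int s = socket(AF_INET, SOCK_STREAM, 0);

        if (s < 0 || listen(s, 1) < 0)
                return 1;
        fcntl(s, F_SETFL, O_NONBLOCK);  /* no client connects in this sketch */
        printf("try_accept -> %d (0 means no connection pending)\n", try_accept(s));
        close(s);
        return 0;
}
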