[PATCH] uml: make copy_*_user atomic
Make __copy_*_user_inatomic really atomic to avoid "Sleeping function called in atomic context" warnings, especially from futex code. This is done by adding another kmap_atomic slot and making copy_*_user_skas use kmap_atomic; copy_*_user() also becomes atomic, but that is likewise true on i386 and is not a problem (and we can always add might_sleep there, as is done elsewhere). For TT mode kmap is not used, so there's no need for this. I've had to use another slot since both KM_USER0 and KM_USER1 are used elsewhere and could cause conflicts. Until now we reused the kmap_atomic slot list from the subarch, but that's not needed, as that list must contain the common slots (used by generic code) plus the ones used in architecture-specific code (and UML until now used none); so I've taken the i386 list after comparing it with the ones from other archs, and added KM_UML_USERCOPY. Signed-off-by: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it> Cc: Jeff Dike <jdike@addtoit.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
Parent
8ec4d41f88
Commit
47e5243afe
|
@ -8,6 +8,7 @@
|
|||
#include "linux/kernel.h"
|
||||
#include "linux/string.h"
|
||||
#include "linux/fs.h"
|
||||
#include "linux/hardirq.h"
|
||||
#include "linux/highmem.h"
|
||||
#include "asm/page.h"
|
||||
#include "asm/pgtable.h"
|
||||
|
@ -38,7 +39,7 @@ static unsigned long maybe_map(unsigned long virt, int is_write)
|
|||
return((unsigned long) phys);
|
||||
}
|
||||
|
||||
static int do_op(unsigned long addr, int len, int is_write,
|
||||
static int do_op_one_page(unsigned long addr, int len, int is_write,
|
||||
int (*op)(unsigned long addr, int len, void *arg), void *arg)
|
||||
{
|
||||
struct page *page;
|
||||
|
@ -49,9 +50,11 @@ static int do_op(unsigned long addr, int len, int is_write,
|
|||
return(-1);
|
||||
|
||||
page = phys_to_page(addr);
|
||||
addr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
|
||||
addr = (unsigned long) kmap_atomic(page, KM_UML_USERCOPY) + (addr & ~PAGE_MASK);
|
||||
|
||||
n = (*op)(addr, len, arg);
|
||||
kunmap(page);
|
||||
|
||||
kunmap_atomic(page, KM_UML_USERCOPY);
|
||||
|
||||
return(n);
|
||||
}
|
||||
|
@ -77,7 +80,7 @@ static void do_buffer_op(void *jmpbuf, void *arg_ptr)
|
|||
remain = len;
|
||||
|
||||
current->thread.fault_catcher = jmpbuf;
|
||||
n = do_op(addr, size, is_write, op, arg);
|
||||
n = do_op_one_page(addr, size, is_write, op, arg);
|
||||
if(n != 0){
|
||||
*res = (n < 0 ? remain : 0);
|
||||
goto out;
|
||||
|
@ -91,7 +94,7 @@ static void do_buffer_op(void *jmpbuf, void *arg_ptr)
|
|||
}
|
||||
|
||||
while(addr < ((addr + remain) & PAGE_MASK)){
|
||||
n = do_op(addr, PAGE_SIZE, is_write, op, arg);
|
||||
n = do_op_one_page(addr, PAGE_SIZE, is_write, op, arg);
|
||||
if(n != 0){
|
||||
*res = (n < 0 ? remain : 0);
|
||||
goto out;
|
||||
|
@ -105,7 +108,7 @@ static void do_buffer_op(void *jmpbuf, void *arg_ptr)
|
|||
goto out;
|
||||
}
|
||||
|
||||
n = do_op(addr, remain, is_write, op, arg);
|
||||
n = do_op_one_page(addr, remain, is_write, op, arg);
|
||||
if(n != 0)
|
||||
*res = (n < 0 ? remain : 0);
|
||||
else *res = 0;
|
||||
|
|
|
@ -6,6 +6,24 @@
|
|||
#ifndef __UM_KMAP_TYPES_H
|
||||
#define __UM_KMAP_TYPES_H
|
||||
|
||||
#include "asm/arch/kmap_types.h"
|
||||
/* No more #include "asm/arch/kmap_types.h" ! */
|
||||
|
||||
enum km_type {
|
||||
KM_BOUNCE_READ,
|
||||
KM_SKB_SUNRPC_DATA,
|
||||
KM_SKB_DATA_SOFTIRQ,
|
||||
KM_USER0,
|
||||
KM_USER1,
|
||||
KM_UML_USERCOPY, /* UML specific, for copy_*_user - used in do_op_one_page */
|
||||
KM_BIO_SRC_IRQ,
|
||||
KM_BIO_DST_IRQ,
|
||||
KM_PTE0,
|
||||
KM_PTE1,
|
||||
KM_IRQ0,
|
||||
KM_IRQ1,
|
||||
KM_SOFTIRQ0,
|
||||
KM_SOFTIRQ1,
|
||||
KM_TYPE_NR
|
||||
};
|
||||
|
||||
#endif
|
||||
|
|
Loading…
Reference in a new issue