[PATCH] NOMMU: Check that access_process_vm() has a valid target
Check that access_process_vm() is accessing a valid mapping in the target process. This limits ptrace() accesses and accesses through /proc/<pid>/maps to only those regions actually mapped by a program. Signed-off-by: David Howells <dhowells@redhat.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
Родитель
361f6ed1d0
Коммит
0ec76a110f
|
@ -241,60 +241,6 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Access another process' address space.
 * Source/target buffer must be kernel space,
 * Do not walk the page table directly, use get_user_pages
 *
 * Returns the number of bytes actually transferred; 0 if the target has
 * no mm (e.g. a kernel thread or a task that has already exited).
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	struct page *page;
	void *old_buf = buf;	/* remember start so we can compute bytes copied */

	/* take a reference on the target's mm; fails for mm-less tasks */
	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);
	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, ret, offset;
		void *maddr;

		/*
		 * Pin exactly one page of the target at 'addr'; this also
		 * returns the covering VMA, needed below for cache handling.
		 * Force-flag is set so ptrace-style access works on
		 * read-only mappings.
		 */
		ret = get_user_pages(tsk, mm, addr, 1,
				write, 1, &page, &vma);
		if (ret <= 0)
			break;

		/* clamp this chunk so it does not cross a page boundary */
		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		/* map the pinned page into kernel space and copy through it;
		 * copy_{to,from}_user_page() keep caches coherent for the
		 * target VMA on architectures that need it */
		maddr = kmap(page);
		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			/* we dirtied the page via the kernel mapping */
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		kunmap(page);
		page_cache_release(page);	/* drop the get_user_pages reference */
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);

	/* amount copied = how far 'buf' advanced */
	return buf - old_buf;
}
|
||||
|
||||
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
|
||||
{
|
||||
int copied = 0;
|
||||
|
|
53
mm/memory.c
53
mm/memory.c
|
@ -2604,3 +2604,56 @@ int in_gate_area_no_task(unsigned long addr)
|
|||
}
|
||||
|
||||
#endif /* __HAVE_ARCH_GATE_AREA */
|
||||
|
||||
/*
 * Access another process' address space.
 * Source/target buffer must be kernel space,
 * Do not walk the page table directly, use get_user_pages
 *
 * Returns the number of bytes actually transferred; 0 if the target has
 * no mm (e.g. a kernel thread or a task that has already exited).
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	struct page *page;
	void *old_buf = buf;	/* remember start so we can compute bytes copied */

	/* take a reference on the target's mm; fails for mm-less tasks */
	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);
	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, ret, offset;
		void *maddr;

		/*
		 * Pin exactly one page of the target at 'addr'; this also
		 * returns the covering VMA, needed below for cache handling.
		 * Force-flag is set so ptrace-style access works on
		 * read-only mappings.
		 */
		ret = get_user_pages(tsk, mm, addr, 1,
				write, 1, &page, &vma);
		if (ret <= 0)
			break;

		/* clamp this chunk so it does not cross a page boundary */
		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		/* map the pinned page into kernel space and copy through it;
		 * copy_{to,from}_user_page() keep caches coherent for the
		 * target VMA on architectures that need it */
		maddr = kmap(page);
		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			/* we dirtied the page via the kernel mapping */
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		kunmap(page);
		page_cache_release(page);	/* drop the get_user_pages reference */
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);

	/* amount copied = how far 'buf' advanced */
	return buf - old_buf;
}
|
||||
|
|
47
mm/nommu.c
47
mm/nommu.c
|
@ -1206,3 +1206,50 @@ struct page *filemap_nopage(struct vm_area_struct *area,
|
|||
BUG();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
 * Access another process' address space (NOMMU version).
 * - source/target buffer must be kernel space
 *
 * The access is permitted only if it starts inside a region actually
 * mapped by the target process, is truncated to that region's end, and
 * respects the region's VM_READ/VM_WRITE permissions.  Returns the
 * number of bytes transferred (0 on any rejection).
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct vm_list_struct *vml;
	struct vm_area_struct *vma;
	struct mm_struct *mm;

	/* reject address-range wraparound */
	if (addr + len < addr)
		return 0;

	/* take a reference on the target's mm; fails for mm-less tasks */
	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);

	/* the access must start within one of the target process's mappings */
	for (vml = mm->context.vmlist; vml; vml = vml->next)
		if (addr >= vml->vma->vm_start && addr < vml->vma->vm_end)
			break;

	if (vml) {
		vma = vml->vma;

		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted;
		 * NOTE(review): on NOMMU all tasks appear to share one flat
		 * address space, so the target's memory is presumably
		 * directly addressable via copy_{to,from}_user() — confirm */
		if (write && vma->vm_flags & VM_WRITE)
			len -= copy_to_user((void *) addr, buf, len);
		else if (!write && vma->vm_flags & VM_READ)
			len -= copy_from_user(buf, (void *) addr, len);
		else
			len = 0;	/* permission denied for this direction */
	} else {
		len = 0;	/* start address not mapped by the target */
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
	return len;	/* bytes transferred after truncation/permission checks */
}
|
||||
|
|
Загрузка…
Ссылка в новой задаче