[PATCH] execute-in-place fixes

This patch includes feedback from Andrew and Christoph. Thanks for
taking time to review.

Use of empty_zero_page was eliminated to fix compilation for architectures
that don't have it.

This patch removes setting pages up-to-date in ext2_get_xip_page and all
bug checks to verify that the page is indeed up to date.  Setting the page
state on mapping to userland is bogus.  None of the code paths involved
with these pages in mm cares about the page state.

still on my ToDo list: identify a place outside second extended where
__inode_direct_access should reside

Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
Carsten Otte 2005-07-15 03:56:30 -07:00 committed by Linus Torvalds
Parent c5287ba132
Commit afa597ba20
2 changed files with 55 additions and 49 deletions

View file

@ -15,66 +15,79 @@
#include "xip.h"
/*
 * Look up the memory address backing @sector on the block device that
 * hosts @inode, via the driver's direct_access() method (execute-in-place).
 *
 * Returns 0 on success with *@data set to the kernel virtual address of
 * the block, or a negative error code from the driver.
 */
static inline int
__inode_direct_access(struct inode *inode, sector_t sector,
		      unsigned long *data)
{
	/* xip mounts are rejected at mount time unless direct_access exists */
	BUG_ON(!inode->i_sb->s_bdev->bd_disk->fops->direct_access);
	return inode->i_sb->s_bdev->bd_disk->fops
		->direct_access(inode->i_sb->s_bdev, sector, data);
}
/*
 * Map a file-relative sector @offset to an on-device sector number.
 *
 * Uses ext2_get_block() on a throwaway buffer_head to resolve the block
 * containing @offset.  If @create is set, a missing block is allocated.
 *
 * Returns 0 with *@result set on success, -ENODATA when the offset falls
 * in a hole (only legal when !@create), or the ext2_get_block() error.
 */
static inline int
__ext2_get_sector(struct inode *inode, sector_t offset, int create,
		  sector_t *result)
{
	struct buffer_head tmp;
	int rc;

	memset(&tmp, 0, sizeof(struct buffer_head));
	rc = ext2_get_block(inode, offset / (PAGE_SIZE/512), &tmp,
			    create);
	*result = tmp.b_blocknr;

	/* did we get a sparse block (hole in the file)? */
	if (!(*result)) {
		BUG_ON(create);
		rc = -ENODATA;
	}
	return rc;
}
/*
 * Zero-fill a freshly allocated XIP block so stale device contents never
 * become visible through a mapping.
 *
 * @block is a filesystem block number; it is scaled to a 512-byte sector
 * before the direct_access() lookup.  Returns 0 on success or the error
 * from __inode_direct_access().
 */
int
ext2_clear_xip_target(struct inode *inode, int block)
{
	sector_t sector = block * (PAGE_SIZE/512);
	unsigned long data;
	int rc;

	rc = __inode_direct_access(inode, sector, &data);
	if (!rc)
		clear_page((void*)data);
	return rc;
}
/*
 * Verify at mount time that the backing block device supports XIP.
 *
 * If the "xip" mount option is set but the device driver provides no
 * direct_access() method, clear the option and warn instead of failing
 * the mount.
 */
void ext2_xip_verify_sb(struct super_block *sb)
{
	struct ext2_sb_info *sbi = EXT2_SB(sb);

	if ((sbi->s_mount_opt & EXT2_MOUNT_XIP) &&
	    !sb->s_bdev->bd_disk->fops->direct_access) {
		sbi->s_mount_opt &= (~EXT2_MOUNT_XIP);
		ext2_warning(sb, __FUNCTION__,
			     "ignoring xip option - not supported by bdev");
	}
}
/*
 * address_space_operations->get_xip_page() for ext2.
 *
 * @offset is a file-relative 512-byte sector index.  Resolve it to an
 * on-device sector, then translate that to a kernel virtual address via
 * direct_access() and return the corresponding struct page.
 *
 * Returns the page on success, ERR_PTR(-ENODATA) for a hole when
 * !@create, or ERR_PTR() of the underlying error.  Note: the page state
 * is deliberately not touched here (no SetPageUptodate) - none of the mm
 * code paths using XIP pages cares about page state.
 */
struct page *
ext2_get_xip_page(struct address_space *mapping, sector_t offset,
		  int create)
{
	int rc;
	unsigned long data;
	sector_t sector;

	/* first, retrieve the sector number */
	rc = __ext2_get_sector(mapping->host, offset, create, &sector);
	if (rc)
		goto error;

	/* retrieve address of the target data */
	rc = __inode_direct_access
		(mapping->host, sector * (PAGE_SIZE/512), &data);
	if (!rc)
		return virt_to_page(data);

 error:
	return ERR_PTR(rc);
}

View file

@ -68,13 +68,12 @@ do_xip_mapping_read(struct address_space *mapping,
if (unlikely(IS_ERR(page))) {
if (PTR_ERR(page) == -ENODATA) {
/* sparse */
page = virt_to_page(empty_zero_page);
page = ZERO_PAGE(0);
} else {
desc->error = PTR_ERR(page);
goto out;
}
} else
BUG_ON(!PageUptodate(page));
}
/* If users can be writing to this page using arbitrary
* virtual addresses, take care about potential aliasing
@ -84,8 +83,7 @@ do_xip_mapping_read(struct address_space *mapping,
flush_dcache_page(page);
/*
* Ok, we have the page, and it's up-to-date, so
* now we can copy it to user space...
* Ok, we have the page, so now we can copy it to user space...
*
* The actor routine returns how many bytes were actually used..
* NOTE! This may not be the same as how much of a user buffer
@ -164,7 +162,7 @@ EXPORT_SYMBOL_GPL(xip_file_sendfile);
* xip_write
*
* This function walks all vmas of the address_space and unmaps the
* empty_zero_page when found at pgoff. Should it go in rmap.c?
* ZERO_PAGE when found at pgoff. Should it go in rmap.c?
*/
static void
__xip_unmap (struct address_space * mapping,
@ -187,7 +185,7 @@ __xip_unmap (struct address_space * mapping,
* We need the page_table_lock to protect us from page faults,
* munmap, fork, etc...
*/
pte = page_check_address(virt_to_page(empty_zero_page), mm,
pte = page_check_address(ZERO_PAGE(address), mm,
address);
if (!IS_ERR(pte)) {
/* Nuke the page table entry. */
@ -230,7 +228,6 @@ xip_file_nopage(struct vm_area_struct * area,
page = mapping->a_ops->get_xip_page(mapping, pgoff*(PAGE_SIZE/512), 0);
if (!IS_ERR(page)) {
BUG_ON(!PageUptodate(page));
return page;
}
if (PTR_ERR(page) != -ENODATA)
@ -245,12 +242,11 @@ xip_file_nopage(struct vm_area_struct * area,
pgoff*(PAGE_SIZE/512), 1);
if (IS_ERR(page))
return NULL;
BUG_ON(!PageUptodate(page));
/* unmap page at pgoff from all other vmas */
__xip_unmap(mapping, pgoff);
} else {
/* not shared and writable, use empty_zero_page */
page = virt_to_page(empty_zero_page);
/* not shared and writable, use ZERO_PAGE() */
page = ZERO_PAGE(address);
}
return page;
@ -319,8 +315,6 @@ __xip_file_write(struct file *filp, const char __user *buf,
break;
}
BUG_ON(!PageUptodate(page));
copied = filemap_copy_from_user(page, offset, buf, bytes);
flush_dcache_page(page);
if (likely(copied > 0)) {
@ -435,8 +429,7 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
return 0;
else
return PTR_ERR(page);
} else
BUG_ON(!PageUptodate(page));
}
kaddr = kmap_atomic(page, KM_USER0);
memset(kaddr + offset, 0, length);
kunmap_atomic(kaddr, KM_USER0);