mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
The PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced a *long* time
ago with the promise that one day it would be possible to implement the
page cache with bigger chunks than PAGE_SIZE. This promise never
materialized, and it is unlikely it ever will: we have many places where
PAGE_CACHE_SIZE is assumed to be equal to PAGE_SIZE, and the macros are a
constant source of confusion about whether PAGE_CACHE_* or PAGE_*
constants should be used in a particular case, especially on the border
between fs and mm.

A global switch to PAGE_CACHE_SIZE != PAGE_SIZE would cause too much
breakage to be doable.

Let's stop pretending that pages in the page cache are special. They are
not.

The changes are pretty straightforward:

 - <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;

 - <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;

 - PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};

 - page_cache_get() -> get_page();

 - page_cache_release() -> put_page();

This patch contains automated changes generated with coccinelle using the
script below. For some reason, coccinelle doesn't patch header files; I've
called spatch for them manually. The only adjustment after coccinelle is a
revert of the changes to the PAGE_CACHE_ALIGN definition: we are going to
drop it later.

There are a few places in the code that coccinelle didn't reach; I'll fix
them manually in a separate patch. Comments and documentation will also be
addressed in a separate patch.

virtual patch

@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT

@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE

@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK

@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)

@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)

@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
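[Editor's illustration] To make the mechanics concrete, here is a sketch of
the conversion applied to a hypothetical helper; drop_cached_range() is made
up for illustration and is not taken from this patch. Because
PAGE_CACHE_SHIFT == PAGE_SHIFT, every rule is a pure rename, and the
relative shifts fold away to nothing:

    #include <linux/pagemap.h>

    /* Before the conversion: page-cache units everywhere. */
    static void drop_cached_range(struct address_space *mapping,
                                  loff_t pos, size_t len)
    {
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        pgoff_t end = (pos + len - 1) >> PAGE_CACHE_SHIFT;

        for (; index <= end; index++) {
            struct page *page = find_get_page(mapping, index);

            if (!page)
                continue;
            /* ... inspect or write back the page ... */
            page_cache_release(page);   /* drop the extra reference */
        }
    }

    /* After the conversion: plain page units. The generated code is
     * identical, since the constants were already equal. */
    static void drop_cached_range(struct address_space *mapping,
                                  loff_t pos, size_t len)
    {
        pgoff_t index = pos >> PAGE_SHIFT;
        pgoff_t end = (pos + len - 1) >> PAGE_SHIFT;

        for (; index <= end; index++) {
            struct page *page = find_get_page(mapping, index);

            if (!page)
                continue;
            /* ... inspect or write back the page ... */
            put_page(page);             /* same operation, new name */
        }
    }

Replaying the script should amount to something like
spatch --sp-file pagecache.cocci --in-place --dir . (pagecache.cocci being
whatever file holds the semantic patch above), with header files passed to
spatch explicitly, as the message notes.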
Parent: c05c2ec96b
Commit: 09cbfeaf1a
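[Editor's note] The whole rewrite is only sound because the two families of
constants were already identical. A compile-time assertion along these
lines (my own sketch, not part of the patch; BUILD_BUG_ON lives in
linux/bug.h in kernels of this era) captures the assumption the semantic
patch relies on:

    #include <linux/bug.h>          /* BUILD_BUG_ON() */
    #include <linux/pagemap.h>      /* PAGE_CACHE_* (prior to their removal) */

    static inline void page_cache_assumptions(void)
    {
        /* If these ever diverged, E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
         * would no longer reduce to E and the rewrite would be wrong. */
        BUILD_BUG_ON(PAGE_CACHE_SHIFT != PAGE_SHIFT);
        BUILD_BUG_ON(PAGE_CACHE_SIZE != PAGE_SIZE);
    }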
@@ -628,7 +628,7 @@ void flush_dcache_page(struct page *page)
 
         /* kernel reading from page with U-mapping */
         phys_addr_t paddr = (unsigned long)page_address(page);
-        unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
+        unsigned long vaddr = page->index << PAGE_SHIFT;
 
         if (addr_not_cache_congruent(paddr, vaddr))
             __flush_dcache_page(paddr, vaddr);

@@ -235,7 +235,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
     */
    if (mapping && cache_is_vipt_aliasing())
        flush_pfn_alias(page_to_pfn(page),
-               page->index << PAGE_CACHE_SHIFT);
+               page->index << PAGE_SHIFT);
 }
 
 static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)

@@ -250,7 +250,7 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
     * data in the current VM view associated with this page.
     * - aliasing VIPT: we only need to find one mapping of this page.
     */
-   pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+   pgoff = page->index;
 
    flush_dcache_mmap_lock(mapping);
    vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {

@@ -319,7 +319,7 @@ void flush_dcache_page(struct page *page)
    if (!mapping)
        return;
 
-   pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+   pgoff = page->index;
 
    /* We have carefully arranged in arch_get_unmapped_area() that
     * *any* mappings of a file are always congruently mapped (whether

@@ -732,8 +732,8 @@ spufs_fill_super(struct super_block *sb, void *data, int silent)
        return -ENOMEM;
 
    sb->s_maxbytes = MAX_LFS_FILESIZE;
-   sb->s_blocksize = PAGE_CACHE_SIZE;
-   sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+   sb->s_blocksize = PAGE_SIZE;
+   sb->s_blocksize_bits = PAGE_SHIFT;
    sb->s_magic = SPUFS_MAGIC;
    sb->s_op = &s_ops;
    sb->s_fs_info = info;

@@ -278,8 +278,8 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
    sbi->uid = current_uid();
    sbi->gid = current_gid();
    sb->s_fs_info = sbi;
-   sb->s_blocksize = PAGE_CACHE_SIZE;
-   sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+   sb->s_blocksize = PAGE_SIZE;
+   sb->s_blocksize_bits = PAGE_SHIFT;
    sb->s_magic = HYPFS_MAGIC;
    sb->s_op = &hypfs_s_ops;
    if (hypfs_parse_options(data, sb))

@@ -1339,7 +1339,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
         * release the pages we didn't map into the bio, if any
         */
        while (j < page_limit)
-           page_cache_release(pages[j++]);
+           put_page(pages[j++]);
    }
 
    kfree(pages);

@@ -1365,7 +1365,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
    for (j = 0; j < nr_pages; j++) {
        if (!pages[j])
            break;
-       page_cache_release(pages[j]);
+       put_page(pages[j]);
    }
 out:
    kfree(pages);

@@ -1385,7 +1385,7 @@ static void __bio_unmap_user(struct bio *bio)
        if (bio_data_dir(bio) == READ)
            set_page_dirty_lock(bvec->bv_page);
 
-       page_cache_release(bvec->bv_page);
+       put_page(bvec->bv_page);
    }
 
    bio_put(bio);

@@ -1658,7 +1658,7 @@ void bio_check_pages_dirty(struct bio *bio)
        struct page *page = bvec->bv_page;
 
        if (PageDirty(page) || PageCompound(page)) {
-           page_cache_release(page);
+           put_page(page);
            bvec->bv_page = NULL;
        } else {
            nr_clean_pages++;
@@ -706,7 +706,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        goto fail_id;
 
    q->backing_dev_info.ra_pages =
-           (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+           (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
    q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
    q->backing_dev_info.name = "block";
    q->node = node_id;

@@ -239,8 +239,8 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
    struct queue_limits *limits = &q->limits;
    unsigned int max_sectors;
 
-   if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
-       max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
+   if ((max_hw_sectors << 9) < PAGE_SIZE) {
+       max_hw_sectors = 1 << (PAGE_SHIFT - 9);
        printk(KERN_INFO "%s: set to minimum %d\n",
               __func__, max_hw_sectors);
    }

@@ -329,8 +329,8 @@ EXPORT_SYMBOL(blk_queue_max_segments);
 **/
 void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
-   if (max_size < PAGE_CACHE_SIZE) {
-       max_size = PAGE_CACHE_SIZE;
+   if (max_size < PAGE_SIZE) {
+       max_size = PAGE_SIZE;
        printk(KERN_INFO "%s: set to minimum %d\n",
               __func__, max_size);
    }

@@ -760,8 +760,8 @@ EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
 **/
 void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
-   if (mask < PAGE_CACHE_SIZE - 1) {
-       mask = PAGE_CACHE_SIZE - 1;
+   if (mask < PAGE_SIZE - 1) {
+       mask = PAGE_SIZE - 1;
        printk(KERN_INFO "%s: set to minimum %lx\n",
               __func__, mask);
    }

@@ -76,7 +76,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 static ssize_t queue_ra_show(struct request_queue *q, char *page)
 {
    unsigned long ra_kb = q->backing_dev_info.ra_pages <<
-                   (PAGE_CACHE_SHIFT - 10);
+                   (PAGE_SHIFT - 10);
 
    return queue_var_show(ra_kb, (page));
 }

@@ -90,7 +90,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
    if (ret < 0)
        return ret;
 
-   q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+   q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);
 
    return ret;
 }

@@ -117,7 +117,7 @@ static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
    if (blk_queue_cluster(q))
        return queue_var_show(queue_max_segment_size(q), (page));
 
-   return queue_var_show(PAGE_CACHE_SIZE, (page));
+   return queue_var_show(PAGE_SIZE, (page));
 }
 
 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)

@@ -198,7 +198,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 {
    unsigned long max_sectors_kb,
        max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
-           page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
+           page_kb = 1 << (PAGE_SHIFT - 10);
    ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
 
    if (ret < 0)

@@ -4075,7 +4075,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         * idle timer unplug to continue working.
         */
        if (cfq_cfqq_wait_request(cfqq)) {
-           if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
+           if (blk_rq_bytes(rq) > PAGE_SIZE ||
                cfqd->busy_queues > 1) {
                cfq_del_timer(cfqd, cfqq);
                cfq_clear_cfqq_wait_request(cfqq);
@@ -710,7 +710,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
            return -EINVAL;
        bdi = blk_get_backing_dev_info(bdev);
        return compat_put_long(arg,
-                      (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
+                      (bdi->ra_pages * PAGE_SIZE) / 512);
    case BLKROGET: /* compatible */
        return compat_put_int(arg, bdev_read_only(bdev) != 0);
    case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */

@@ -729,7 +729,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        if (!capable(CAP_SYS_ADMIN))
            return -EACCES;
        bdi = blk_get_backing_dev_info(bdev);
-       bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
+       bdi->ra_pages = (arg * 512) / PAGE_SIZE;
        return 0;
    case BLKGETSIZE:
        size = i_size_read(bdev->bd_inode);

@@ -550,7 +550,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
        if (!arg)
            return -EINVAL;
        bdi = blk_get_backing_dev_info(bdev);
-       return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
+       return put_long(arg, (bdi->ra_pages * PAGE_SIZE) / 512);
    case BLKROGET:
        return put_int(arg, bdev_read_only(bdev) != 0);
    case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */

@@ -578,7 +578,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
        if(!capable(CAP_SYS_ADMIN))
            return -EACCES;
        bdi = blk_get_backing_dev_info(bdev);
-       bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
+       bdi->ra_pages = (arg * 512) / PAGE_SIZE;
        return 0;
    case BLKBSZSET:
        return blkdev_bszset(bdev, mode, argp);

@@ -566,8 +566,8 @@ static struct page *read_pagecache_sector(struct block_device *bdev, sector_t n)
 {
    struct address_space *mapping = bdev->bd_inode->i_mapping;
 
-   return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
+   return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)),
                 NULL);
 }
 
 unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)

@@ -584,9 +584,9 @@ unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
        if (PageError(page))
            goto fail;
        p->v = page;
-       return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_CACHE_SHIFT - 9)) - 1)) << 9);
+       return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_SHIFT - 9)) - 1)) << 9);
 fail:
-       page_cache_release(page);
+       put_page(page);
    }
    p->v = NULL;
    return NULL;

@@ -397,7 +397,7 @@ aoeblk_gdalloc(void *vp)
    WARN_ON(d->flags & DEVFL_UP);
    blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
    q->backing_dev_info.name = "aoe";
-   q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
+   q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE;
    d->bufpool = mp;
    d->blkq = gd->queue = q;
    q->queuedata = d;
@@ -374,7 +374,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
               struct page *page, int rw)
 {
    struct brd_device *brd = bdev->bd_disk->private_data;
-   int err = brd_do_bvec(brd, page, PAGE_CACHE_SIZE, 0, rw, sector);
+   int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector);
    page_endio(page, rw & WRITE, err);
    return err;
 }

@@ -1178,7 +1178,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
    blk_queue_max_hw_sectors(q, max_hw_sectors);
    /* This is the workaround for "bio would need to, but cannot, be split" */
    blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
-   blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
+   blk_queue_segment_boundary(q, PAGE_SIZE-1);
 
    if (b) {
        struct drbd_connection *connection = first_peer_device(device)->connection;

@@ -616,7 +616,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
            set_page_dirty(page);
 
        mark_page_accessed(page);
-       page_cache_release(page);
+       put_page(page);
    }
 
    sg_free_table(ttm->sg);

@@ -481,7 +481,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
 
 release:
    for_each_sg(sgt->sgl, sg, num, i)
-       page_cache_release(sg_page(sg));
+       put_page(sg_page(sg));
 free_table:
    sg_free_table(sgt);
 free_sgt:

@@ -502,7 +502,7 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
    if (dobj->obj.filp) {
        struct scatterlist *sg;
        for_each_sg(sgt->sgl, sg, sgt->nents, i)
-           page_cache_release(sg_page(sg));
+           put_page(sg_page(sg));
    }
 
    sg_free_table(sgt);

@@ -534,7 +534,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 
 fail:
    while (i--)
-       page_cache_release(pages[i]);
+       put_page(pages[i]);
 
    drm_free_large(pages);
    return ERR_CAST(p);

@@ -569,7 +569,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
            mark_page_accessed(pages[i]);
 
        /* Undo the reference we took when populating the table */
-       page_cache_release(pages[i]);
+       put_page(pages[i]);
    }
 
    drm_free_large(pages);

@@ -177,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
        drm_clflush_virt_range(vaddr, PAGE_SIZE);
        kunmap_atomic(src);
 
-       page_cache_release(page);
+       put_page(page);
        vaddr += PAGE_SIZE;
    }
 

@@ -243,7 +243,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
        set_page_dirty(page);
        if (obj->madv == I915_MADV_WILLNEED)
            mark_page_accessed(page);
-       page_cache_release(page);
+       put_page(page);
        vaddr += PAGE_SIZE;
    }
    obj->dirty = 0;

@@ -2206,7 +2206,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
        if (obj->madv == I915_MADV_WILLNEED)
            mark_page_accessed(page);
 
-       page_cache_release(page);
+       put_page(page);
    }
    obj->dirty = 0;
 

@@ -2346,7 +2346,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 err_pages:
    sg_mark_end(sg);
    for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-       page_cache_release(sg_page_iter_page(&sg_iter));
+       put_page(sg_page_iter_page(&sg_iter));
    sg_free_table(st);
    kfree(st);
 
@@ -683,7 +683,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
            set_page_dirty(page);
 
        mark_page_accessed(page);
-       page_cache_release(page);
+       put_page(page);
    }
    obj->dirty = 0;
 

@@ -609,7 +609,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
            set_page_dirty(page);
 
        mark_page_accessed(page);
-       page_cache_release(page);
+       put_page(page);
    }
 
    sg_free_table(ttm->sg);

@@ -311,7 +311,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
            goto out_err;
 
        copy_highpage(to_page, from_page);
-       page_cache_release(from_page);
+       put_page(from_page);
    }
 
    if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))

@@ -361,7 +361,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
        copy_highpage(to_page, from_page);
        set_page_dirty(to_page);
        mark_page_accessed(to_page);
-       page_cache_release(to_page);
+       put_page(to_page);
    }
 
    ttm_tt_unpopulate(ttm);

@@ -188,7 +188,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
            if (NULL != (page = vsg->pages[i])) {
                if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
                    SetPageDirty(page);
-               page_cache_release(page);
+               put_page(page);
            }
        }
    case dr_via_pages_alloc:

@@ -322,7 +322,7 @@ __clear_page_buffers(struct page *page)
 {
    ClearPagePrivate(page);
    set_page_private(page, 0);
-   page_cache_release(page);
+   put_page(page);
 }
 static void free_buffers(struct page *page)
 {

@@ -349,7 +349,7 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma)
 
    if (dma->pages) {
        for (i = 0; i < dma->nr_pages; i++)
-           page_cache_release(dma->pages[i]);
+           put_page(dma->pages[i]);
        kfree(dma->pages);
        dma->pages = NULL;
    }

@@ -116,8 +116,8 @@ static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent)
 {
    struct inode *root;
 
-   sb->s_blocksize = PAGE_CACHE_SIZE;
-   sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+   sb->s_blocksize = PAGE_SIZE;
+   sb->s_blocksize_bits = PAGE_SHIFT;
    sb->s_magic = IBMASMFS_MAGIC;
    sb->s_op = &ibmasmfs_s_ops;
    sb->s_time_gran = 1;

@@ -728,7 +728,7 @@ static void qp_release_pages(struct page **pages,
        if (dirty)
            set_page_dirty(pages[i]);
 
-       page_cache_release(pages[i]);
+       put_page(pages[i]);
        pages[i] = NULL;
    }
 }

@@ -356,11 +356,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
     * They have to set these according to their abilities.
     */
    host->max_segs = 1;
-   host->max_seg_size = PAGE_CACHE_SIZE;
+   host->max_seg_size = PAGE_SIZE;
 
-   host->max_req_size = PAGE_CACHE_SIZE;
+   host->max_req_size = PAGE_SIZE;
    host->max_blk_size = 512;
-   host->max_blk_count = PAGE_CACHE_SIZE / 512;
+   host->max_blk_count = PAGE_SIZE / 512;
 
    return host;
 }
@@ -1513,7 +1513,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
    mmc->caps |= pd->caps;
    mmc->max_segs = 32;
    mmc->max_blk_size = 512;
-   mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
+   mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
    mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
    mmc->max_seg_size = mmc->max_req_size;
 

@@ -63,7 +63,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
        }
    }
 
-   if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+   if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
              (align & PAGE_MASK))) || !multiple) {
        ret = -EINVAL;
        goto pio;

@@ -133,7 +133,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
        }
    }
 
-   if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+   if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
              (align & PAGE_MASK))) || !multiple) {
        ret = -EINVAL;
        goto pio;

@@ -1125,7 +1125,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
    mmc->caps2 |= pdata->capabilities2;
    mmc->max_segs = 32;
    mmc->max_blk_size = 512;
-   mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
+   mmc->max_blk_count = (PAGE_SIZE / mmc->max_blk_size) *
        mmc->max_segs;
    mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
    mmc->max_seg_size = mmc->max_req_size;

@@ -1789,7 +1789,7 @@ static int usdhi6_probe(struct platform_device *pdev)
    /* Set .max_segs to some random number. Feel free to adjust. */
    mmc->max_segs = 32;
    mmc->max_blk_size = 512;
-   mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
+   mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
    mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
    /*
     * Setting .max_seg_size to 1 page would simplify our page-mapping code,

@@ -75,7 +75,7 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
                break;
            }
 
-       page_cache_release(page);
+       put_page(page);
        pages--;
        index++;
    }

@@ -124,7 +124,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
            return PTR_ERR(page);
 
        memcpy(buf, page_address(page) + offset, cpylen);
-       page_cache_release(page);
+       put_page(page);
 
        if (retlen)
            *retlen += cpylen;

@@ -164,7 +164,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
            unlock_page(page);
            balance_dirty_pages_ratelimited(mapping);
        }
-       page_cache_release(page);
+       put_page(page);
 
        if (retlen)
            *retlen += cpylen;

@@ -1339,7 +1339,7 @@ static void put_pages(struct nandsim *ns)
    int i;
 
    for (i = 0; i < ns->held_cnt; i++)
-       page_cache_release(ns->held_pages[i]);
+       put_page(ns->held_pages[i]);
 }
 
 /* Get page cache pages in advance to provide NOFS memory allocation */

@@ -1349,8 +1349,8 @@ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t
    struct page *page;
    struct address_space *mapping = file->f_mapping;
 
-   start_index = pos >> PAGE_CACHE_SHIFT;
-   end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT;
+   start_index = pos >> PAGE_SHIFT;
+   end_index = (pos + count - 1) >> PAGE_SHIFT;
    if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
        return -EINVAL;
    ns->held_cnt = 0;
@@ -1204,7 +1204,7 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector,
 {
    struct btt *btt = bdev->bd_disk->private_data;
 
-   btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector);
+   btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, rw, sector);
    page_endio(page, rw & WRITE, 0);
    return 0;
 }

@@ -151,7 +151,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
    struct pmem_device *pmem = bdev->bd_disk->private_data;
    int rc;
 
-   rc = pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
+   rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
    if (rw & WRITE)
        wmb_pmem();
 

@@ -239,8 +239,8 @@ static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
 {
    struct inode *root_inode;
 
-   sb->s_blocksize = PAGE_CACHE_SIZE;
-   sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+   sb->s_blocksize = PAGE_SIZE;
+   sb->s_blocksize_bits = PAGE_SHIFT;
    sb->s_magic = OPROFILEFS_MAGIC;
    sb->s_op = &s_ops;
    sb->s_time_gran = 1;

@@ -2891,7 +2891,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
    if (sdkp->opt_xfer_blocks &&
        sdkp->opt_xfer_blocks <= dev_max &&
        sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
-       sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
+       sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
        rw_max = q->limits.io_opt =
            sdkp->opt_xfer_blocks * sdp->sector_size;
    else

@@ -4941,7 +4941,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
 out_unmap:
    if (res > 0) {
        for (j=0; j < res; j++)
-           page_cache_release(pages[j]);
+           put_page(pages[j]);
        res = 0;
    }
    kfree(pages);

@@ -4963,7 +4963,7 @@ static int sgl_unmap_user_pages(struct st_buffer *STbp,
        /* FIXME: cache flush missing for rw==READ
         * FIXME: call the correct reference counting function
         */
-       page_cache_release(page);
+       put_page(page);
    }
    kfree(STbp->mapped_pages);
    STbp->mapped_pages = NULL;

@@ -88,7 +88,7 @@ do { \
 } while (0)
 
 #ifndef LIBCFS_VMALLOC_SIZE
-#define LIBCFS_VMALLOC_SIZE (2 << PAGE_CACHE_SHIFT) /* 2 pages */
+#define LIBCFS_VMALLOC_SIZE (2 << PAGE_SHIFT) /* 2 pages */
 #endif
 
 #define LIBCFS_ALLOC_PRE(size, mask) \

@@ -57,7 +57,7 @@
 #include "../libcfs_cpu.h"
 #endif
 
-#define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE-1))
+#define CFS_PAGE_MASK (~((__u64)PAGE_SIZE-1))
 #define page_index(p) ((p)->index)
 
 #define memory_pressure_get() (current->flags & PF_MEMALLOC)

@@ -67,7 +67,7 @@
 #if BITS_PER_LONG == 32
 /* limit to lowmem on 32-bit systems */
 #define NUM_CACHEPAGES \
-   min(totalram_pages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4)
+   min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
 #else
 #define NUM_CACHEPAGES totalram_pages
 #endif
@@ -291,7 +291,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
 
    for (nob = i = 0; i < niov; i++) {
        if ((kiov[i].kiov_offset && i > 0) ||
-           (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1))
+           (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1))
            return NULL;
 
        pages[i] = kiov[i].kiov_page;

@@ -517,7 +517,7 @@ int libcfs_debug_init(unsigned long bufsize)
        max = TCD_MAX_PAGES;
    } else {
        max = max / num_possible_cpus();
-       max <<= (20 - PAGE_CACHE_SHIFT);
+       max <<= (20 - PAGE_SHIFT);
    }
    rc = cfs_tracefile_init(max);
 

@@ -182,7 +182,7 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
    if (tcd->tcd_cur_pages > 0) {
        __LASSERT(!list_empty(&tcd->tcd_pages));
        tage = cfs_tage_from_list(tcd->tcd_pages.prev);
-       if (tage->used + len <= PAGE_CACHE_SIZE)
+       if (tage->used + len <= PAGE_SIZE)
            return tage;
    }
 

@@ -260,7 +260,7 @@ static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
     * from here: this will lead to infinite recursion.
     */
 
-   if (len > PAGE_CACHE_SIZE) {
+   if (len > PAGE_SIZE) {
        pr_err("cowardly refusing to write %lu bytes in a page\n", len);
        return NULL;
    }

@@ -349,7 +349,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
    for (i = 0; i < 2; i++) {
        tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
        if (!tage) {
-           if (needed + known_size > PAGE_CACHE_SIZE)
+           if (needed + known_size > PAGE_SIZE)
                mask |= D_ERROR;
 
            cfs_trace_put_tcd(tcd);

@@ -360,7 +360,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
        string_buf = (char *)page_address(tage->page) +
                 tage->used + known_size;
 
-       max_nob = PAGE_CACHE_SIZE - tage->used - known_size;
+       max_nob = PAGE_SIZE - tage->used - known_size;
        if (max_nob <= 0) {
            printk(KERN_EMERG "negative max_nob: %d\n",
                   max_nob);

@@ -424,7 +424,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
    __LASSERT(debug_buf == string_buf);
 
    tage->used += needed;
-   __LASSERT(tage->used <= PAGE_CACHE_SIZE);
+   __LASSERT(tage->used <= PAGE_SIZE);
 
 console:
    if ((mask & libcfs_printk) == 0) {

@@ -835,7 +835,7 @@ EXPORT_SYMBOL(cfs_trace_copyout_string);
 
 int cfs_trace_allocate_string_buffer(char **str, int nob)
 {
-   if (nob > 2 * PAGE_CACHE_SIZE) /* string must be "sensible" */
+   if (nob > 2 * PAGE_SIZE) /* string must be "sensible" */
        return -EINVAL;
 
    *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);

@@ -951,7 +951,7 @@ int cfs_trace_set_debug_mb(int mb)
    }
 
    mb /= num_possible_cpus();
-   pages = mb << (20 - PAGE_CACHE_SHIFT);
+   pages = mb << (20 - PAGE_SHIFT);
 
    cfs_tracefile_write_lock();
 

@@ -977,7 +977,7 @@ int cfs_trace_get_debug_mb(void)
 
    cfs_tracefile_read_unlock();
 
-   return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1;
+   return (total_pages >> (20 - PAGE_SHIFT)) + 1;
 }
 
 static int tracefiled(void *arg)

@@ -87,7 +87,7 @@ void libcfs_unregister_panic_notifier(void);
 extern int libcfs_panic_in_progress;
 int cfs_trace_max_debug_mb(void);
 
-#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
+#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
 #define TCD_STOCK_PAGES (TCD_MAX_PAGES)
 #define CFS_TRACEFILE_SIZE (500 << 20)
 

@@ -96,7 +96,7 @@ int cfs_trace_max_debug_mb(void);
 /*
  * Private declare for tracefile
  */
-#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
+#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
 #define TCD_STOCK_PAGES (TCD_MAX_PAGES)
 
 #define CFS_TRACEFILE_SIZE (500 << 20)
@@ -257,7 +257,7 @@ do { \
 do { \
    __LASSERT(tage); \
    __LASSERT(tage->page); \
-   __LASSERT(tage->used <= PAGE_CACHE_SIZE); \
+   __LASSERT(tage->used <= PAGE_SIZE); \
    __LASSERT(page_count(tage->page) > 0); \
 } while (0)
 

@@ -139,7 +139,7 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
        for (i = 0; i < (int)niov; i++) {
            /* We take the page pointer on trust */
            if (lmd->md_iov.kiov[i].kiov_offset +
-               lmd->md_iov.kiov[i].kiov_len > PAGE_CACHE_SIZE)
+               lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE)
                return -EINVAL; /* invalid length */
 
            total_length += lmd->md_iov.kiov[i].kiov_len;

@@ -549,12 +549,12 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
        if (len <= frag_len) {
            dst->kiov_len = len;
            LASSERT(dst->kiov_offset + dst->kiov_len
-                   <= PAGE_CACHE_SIZE);
+                   <= PAGE_SIZE);
            return niov;
        }
 
        dst->kiov_len = frag_len;
-       LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
+       LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
 
        len -= frag_len;
        dst++;

@@ -887,7 +887,7 @@ lnet_msg2bufpool(lnet_msg_t *msg)
    rbp = &the_lnet.ln_rtrpools[cpt][0];
 
    LASSERT(msg->msg_len <= LNET_MTU);
-   while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) {
+   while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
        rbp++;
        LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
    }

@@ -166,9 +166,9 @@ lnet_ipif_enumerate(char ***namesp)
    nalloc = 16; /* first guess at max interfaces */
    toobig = 0;
    for (;;) {
-       if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) {
+       if (nalloc * sizeof(*ifr) > PAGE_SIZE) {
            toobig = 1;
-           nalloc = PAGE_CACHE_SIZE / sizeof(*ifr);
+           nalloc = PAGE_SIZE / sizeof(*ifr);
            CWARN("Too many interfaces: only enumerating first %d\n",
                  nalloc);
        }

@@ -27,8 +27,8 @@
 #define LNET_NRB_SMALL_PAGES 1
 #define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */
 #define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4)
-#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_CACHE_SIZE - 1) >> \
-                  PAGE_CACHE_SHIFT)
+#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_SIZE - 1) >> \
+                  PAGE_SHIFT)
 
 static char *forwarding = "";
 module_param(forwarding, charp, 0444);

@@ -1338,7 +1338,7 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
            return NULL;
        }
 
-       rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE;
+       rb->rb_kiov[i].kiov_len = PAGE_SIZE;
        rb->rb_kiov[i].kiov_offset = 0;
        rb->rb_kiov[i].kiov_page = page;
    }

@@ -90,7 +90,7 @@ brw_client_init(sfw_test_instance_t *tsi)
         * NB: this is not going to work for variable page size,
         * but we have to keep it for compatibility
         */
-       len = npg * PAGE_CACHE_SIZE;
+       len = npg * PAGE_SIZE;
 
    } else {
        test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;

@@ -104,7 +104,7 @@ brw_client_init(sfw_test_instance_t *tsi)
        opc = breq->blk_opc;
        flags = breq->blk_flags;
        len = breq->blk_len;
-       npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
    }
 
    if (npg > LNET_MAX_IOV || npg <= 0)

@@ -167,13 +167,13 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic)
 
    if (pattern == LST_BRW_CHECK_SIMPLE) {
        memcpy(addr, &magic, BRW_MSIZE);
-       addr += PAGE_CACHE_SIZE - BRW_MSIZE;
+       addr += PAGE_SIZE - BRW_MSIZE;
        memcpy(addr, &magic, BRW_MSIZE);
        return;
    }
 
    if (pattern == LST_BRW_CHECK_FULL) {
-       for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++)
+       for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++)
            memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE);
        return;
    }
@@ -198,7 +198,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
        if (data != magic)
            goto bad_data;
 
-       addr += PAGE_CACHE_SIZE - BRW_MSIZE;
+       addr += PAGE_SIZE - BRW_MSIZE;
        data = *((__u64 *)addr);
        if (data != magic)
            goto bad_data;

@@ -207,7 +207,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
    }
 
    if (pattern == LST_BRW_CHECK_FULL) {
-       for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) {
+       for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) {
            data = *(((__u64 *)addr) + i);
            if (data != magic)
                goto bad_data;

@@ -278,7 +278,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
        opc = breq->blk_opc;
        flags = breq->blk_flags;
        npg = breq->blk_npg;
-       len = npg * PAGE_CACHE_SIZE;
+       len = npg * PAGE_SIZE;
 
    } else {
        test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;

@@ -292,7 +292,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
        opc = breq->blk_opc;
        flags = breq->blk_flags;
        len = breq->blk_len;
-       npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
    }
 
    rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);

@@ -463,10 +463,10 @@ brw_server_handle(struct srpc_server_rpc *rpc)
            reply->brw_status = EINVAL;
            return 0;
        }
-       npg = reqst->brw_len >> PAGE_CACHE_SHIFT;
+       npg = reqst->brw_len >> PAGE_SHIFT;
 
    } else {
-       npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       npg = (reqst->brw_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
    }
 
    replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;

@@ -743,7 +743,7 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
    if (args->lstio_tes_param &&
        (args->lstio_tes_param_len <= 0 ||
         args->lstio_tes_param_len >
-        PAGE_CACHE_SIZE - sizeof(lstcon_test_t)))
+        PAGE_SIZE - sizeof(lstcon_test_t)))
        return -EINVAL;
 
    LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);

@@ -819,7 +819,7 @@ lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
 
    opc = data->ioc_u32[0];
 
-   if (data->ioc_plen1 > PAGE_CACHE_SIZE)
+   if (data->ioc_plen1 > PAGE_SIZE)
        return -EINVAL;
 
    LIBCFS_ALLOC(buf, data->ioc_plen1);

@@ -786,8 +786,8 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
    test_bulk_req_t *brq = &req->tsr_u.bulk_v0;
 
    brq->blk_opc = param->blk_opc;
-   brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) /
-           PAGE_CACHE_SIZE;
+   brq->blk_npg = (param->blk_size + PAGE_SIZE - 1) /
+           PAGE_SIZE;
    brq->blk_flags = param->blk_flags;
 
    return 0;

@@ -822,7 +822,7 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
    if (transop == LST_TRANS_TSBCLIADD) {
        npg = sfw_id_pages(test->tes_span);
        nob = !(feats & LST_FEAT_BULK_LEN) ?
-             npg * PAGE_CACHE_SIZE :
+             npg * PAGE_SIZE :
              sizeof(lnet_process_id_packed_t) * test->tes_span;
    }
 

@@ -851,8 +851,8 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
            LASSERT(nob > 0);
 
            len = !(feats & LST_FEAT_BULK_LEN) ?
-                 PAGE_CACHE_SIZE :
-                 min_t(int, nob, PAGE_CACHE_SIZE);
+                 PAGE_SIZE :
+                 min_t(int, nob, PAGE_SIZE);
            nob -= len;
 
            bulk->bk_iovs[i].kiov_offset = 0;
@ -1161,7 +1161,7 @@ sfw_add_test(struct srpc_server_rpc *rpc)
|
||||||
int len;
|
int len;
|
||||||
|
|
||||||
if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
|
if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
|
||||||
len = npg * PAGE_CACHE_SIZE;
|
len = npg * PAGE_SIZE;
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
len = sizeof(lnet_process_id_packed_t) *
|
len = sizeof(lnet_process_id_packed_t) *
|
||||||
|
|
|
@ -90,7 +90,7 @@ void srpc_set_counters(const srpc_counters_t *cnt)
|
||||||
static int
|
static int
|
||||||
srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
|
srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
|
||||||
{
|
{
|
||||||
nob = min_t(int, nob, PAGE_CACHE_SIZE);
|
nob = min_t(int, nob, PAGE_SIZE);
|
||||||
|
|
||||||
LASSERT(nob > 0);
|
LASSERT(nob > 0);
|
||||||
LASSERT(i >= 0 && i < bk->bk_niov);
|
LASSERT(i >= 0 && i < bk->bk_niov);
|
||||||
|
|
|
@ -393,7 +393,7 @@ typedef struct sfw_test_instance {
|
||||||
/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at
|
/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at
|
||||||
* the end of pages are not used */
|
* the end of pages are not used */
|
||||||
#define SFW_MAX_CONCUR LST_MAX_CONCUR
|
#define SFW_MAX_CONCUR LST_MAX_CONCUR
|
||||||
#define SFW_ID_PER_PAGE (PAGE_CACHE_SIZE / sizeof(lnet_process_id_packed_t))
|
#define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(lnet_process_id_packed_t))
|
||||||
#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
|
#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
|
||||||
#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
|
#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
|
||||||
|
|
||||||
|
|
|
@ -52,7 +52,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (PagePrivate(page))
|
if (PagePrivate(page))
|
||||||
page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
|
page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
|
||||||
|
|
||||||
cancel_dirty_page(page);
|
cancel_dirty_page(page);
|
||||||
ClearPageMappedToDisk(page);
|
ClearPageMappedToDisk(page);
|
||||||
|
|
|
@ -1031,7 +1031,7 @@ static inline int lu_dirent_size(struct lu_dirent *ent)
|
||||||
#define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT)
|
#define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT)
|
||||||
#define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1))
|
#define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1))
|
||||||
|
|
||||||
#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT))
|
#define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT))
|
||||||
|
|
||||||
/** @} lu_dir */
|
/** @} lu_dir */
|
||||||
|
|
||||||
|
|
|
@ -155,12 +155,12 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
|
||||||
if (cli->cl_max_mds_easize < body->max_mdsize) {
|
if (cli->cl_max_mds_easize < body->max_mdsize) {
|
||||||
cli->cl_max_mds_easize = body->max_mdsize;
|
cli->cl_max_mds_easize = body->max_mdsize;
|
||||||
cli->cl_default_mds_easize =
|
cli->cl_default_mds_easize =
|
||||||
min_t(__u32, body->max_mdsize, PAGE_CACHE_SIZE);
|
min_t(__u32, body->max_mdsize, PAGE_SIZE);
|
||||||
}
|
}
|
||||||
if (cli->cl_max_mds_cookiesize < body->max_cookiesize) {
|
if (cli->cl_max_mds_cookiesize < body->max_cookiesize) {
|
||||||
cli->cl_max_mds_cookiesize = body->max_cookiesize;
|
cli->cl_max_mds_cookiesize = body->max_cookiesize;
|
||||||
cli->cl_default_mds_cookiesize =
|
cli->cl_default_mds_cookiesize =
|
||||||
min_t(__u32, body->max_cookiesize, PAGE_CACHE_SIZE);
|
min_t(__u32, body->max_cookiesize, PAGE_SIZE);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@@ -99,13 +99,13 @@
  */
 #define PTLRPC_MAX_BRW_BITS	(LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
 #define PTLRPC_MAX_BRW_SIZE	(1 << PTLRPC_MAX_BRW_BITS)
-#define PTLRPC_MAX_BRW_PAGES	(PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define PTLRPC_MAX_BRW_PAGES	(PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT)
 
 #define ONE_MB_BRW_SIZE		(1 << LNET_MTU_BITS)
 #define MD_MAX_BRW_SIZE		(1 << LNET_MTU_BITS)
-#define MD_MAX_BRW_PAGES	(MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define MD_MAX_BRW_PAGES	(MD_MAX_BRW_SIZE >> PAGE_SHIFT)
 #define DT_MAX_BRW_SIZE		PTLRPC_MAX_BRW_SIZE
-#define DT_MAX_BRW_PAGES	(DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define DT_MAX_BRW_PAGES	(DT_MAX_BRW_SIZE >> PAGE_SHIFT)
 #define OFD_MAX_BRW_SIZE	(1 << LNET_MTU_BITS)
 
 /* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */

@@ -1318,7 +1318,7 @@ bad_format:
 
 static inline int cli_brw_size(struct obd_device *obd)
 {
-	return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+	return obd->u.cli.cl_max_pages_per_rpc << PAGE_SHIFT;
 }
 
 #endif /* __OBD_H */

@@ -500,7 +500,7 @@ extern char obd_jobid_var[];
 
 #ifdef POISON_BULK
 #define POISON_PAGE(page, val) do { \
-	memset(kmap(page), val, PAGE_CACHE_SIZE); \
+	memset(kmap(page), val, PAGE_SIZE); \
 	kunmap(page); \
 } while (0)
 #else

@@ -758,9 +758,9 @@ int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
 			 * --bug 17336
 			 */
 			loff_t size = cl_isize_read(inode);
-			loff_t cur_index = start >> PAGE_CACHE_SHIFT;
+			loff_t cur_index = start >> PAGE_SHIFT;
 			loff_t size_index = (size - 1) >>
-					    PAGE_CACHE_SHIFT;
+					    PAGE_SHIFT;
 
 			if ((size == 0 && cur_index != 0) ||
 			    size_index < cur_index)

@@ -307,8 +307,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
 	cli->cl_avail_grant = 0;
 	/* FIXME: Should limit this for the sum of all cl_dirty_max. */
 	cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
-	if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > totalram_pages / 8)
-		cli->cl_dirty_max = totalram_pages << (PAGE_CACHE_SHIFT - 3);
+	if (cli->cl_dirty_max >> PAGE_SHIFT > totalram_pages / 8)
+		cli->cl_dirty_max = totalram_pages << (PAGE_SHIFT - 3);
 	INIT_LIST_HEAD(&cli->cl_cache_waiters);
 	INIT_LIST_HEAD(&cli->cl_loi_ready_list);
 	INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);

@@ -353,15 +353,15 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
 	 * In the future this should likely be increased. LU-1431
 	 */
 	cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES,
-					  LNET_MTU >> PAGE_CACHE_SHIFT);
+					  LNET_MTU >> PAGE_SHIFT);
 
 	if (!strcmp(name, LUSTRE_MDC_NAME)) {
 		cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
-	} else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) {
+	} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) {
 		cli->cl_max_rpcs_in_flight = 2;
-	} else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) {
+	} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */) {
 		cli->cl_max_rpcs_in_flight = 3;
-	} else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) {
+	} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */) {
 		cli->cl_max_rpcs_in_flight = 4;
 	} else {
 		cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT;

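Note: the recurring `totalram_pages >> (20 - PAGE_SHIFT)` pattern converts a page count into megabytes: with 2^20 bytes per MB and 2^PAGE_SHIFT bytes per page there are 2^(20 - PAGE_SHIFT) pages per MB. An illustrative sketch with an assumed 4 KiB page size and a made-up machine size:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	unsigned long totalram_pages = 65536;	/* hypothetical: 256 MB box */

	/* pages -> MB: divide by 2^(20 - PAGE_SHIFT) = 256 pages per MB */
	unsigned long mb = totalram_pages >> (20 - PAGE_SHIFT);
	printf("%lu pages = %lu MB\n", totalram_pages, mb);	/* prints 256 */
	return 0;
}
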
@@ -107,7 +107,7 @@
 /*
  * 50 ldlm locks for 1MB of RAM.
  */
-#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50)
+#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50)
 
 /*
  * Maximal possible grant step plan in %.

@@ -546,7 +546,7 @@ static inline int ldlm_req_handles_avail(int req_size, int off)
 {
 	int avail;
 
-	avail = min_t(int, LDLM_MAXREQSIZE, PAGE_CACHE_SIZE - 512) - req_size;
+	avail = min_t(int, LDLM_MAXREQSIZE, PAGE_SIZE - 512) - req_size;
 	if (likely(avail >= 0))
 		avail /= (int)sizeof(struct lustre_handle);
 	else

@@ -153,7 +153,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
 	struct page **page_pool;
 	struct page *page;
 	struct lu_dirpage *dp;
-	int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_CACHE_SHIFT;
+	int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_SHIFT;
 	int nrdpgs = 0; /* number of pages read actually */
 	int npages;
 	int i;

@@ -193,8 +193,8 @@ static int ll_dir_filler(void *_hash, struct page *page0)
 		if (body->valid & OBD_MD_FLSIZE)
 			cl_isize_write(inode, body->size);
 
-		nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_CACHE_SIZE-1)
-			 >> PAGE_CACHE_SHIFT;
+		nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_SIZE-1)
+			 >> PAGE_SHIFT;
 		SetPageUptodate(page0);
 	}
 	unlock_page(page0);

@@ -209,7 +209,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
 		page = page_pool[i];
 
 		if (rc < 0 || i >= nrdpgs) {
-			page_cache_release(page);
+			put_page(page);
 			continue;
 		}
 
@@ -230,7 +230,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
 			CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n",
 			       offset, ret);
 		}
-		page_cache_release(page);
+		put_page(page);
 	}
 
 	if (page_pool != &page0)

@@ -247,7 +247,7 @@ void ll_release_page(struct page *page, int remove)
 			truncate_complete_page(page->mapping, page);
 		unlock_page(page);
 	}
-	page_cache_release(page);
+	put_page(page);
 }
 
 /*

@@ -273,7 +273,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
 	if (found > 0 && !radix_tree_exceptional_entry(page)) {
 		struct lu_dirpage *dp;
 
-		page_cache_get(page);
+		get_page(page);
 		spin_unlock_irq(&mapping->tree_lock);
 		/*
 		 * In contrast to find_lock_page() we are sure that directory

@@ -313,7 +313,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
 			page = NULL;
 		}
 	} else {
-		page_cache_release(page);
+		put_page(page);
 		page = ERR_PTR(-EIO);
 	}
 
@@ -1507,7 +1507,7 @@ skip_lmm:
 		st.st_gid = body->gid;
 		st.st_rdev = body->rdev;
 		st.st_size = body->size;
-		st.st_blksize = PAGE_CACHE_SIZE;
+		st.st_blksize = PAGE_SIZE;
 		st.st_blocks = body->blocks;
 		st.st_atime = body->atime;
 		st.st_mtime = body->mtime;

@@ -310,10 +310,10 @@ static inline struct ll_inode_info *ll_i2info(struct inode *inode)
 /* default to about 40meg of readahead on a given system. That much tied
  * up in 512k readahead requests serviced at 40ms each is about 1GB/s.
  */
-#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_CACHE_SHIFT))
+#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_SHIFT))
 
 /* default to read-ahead full files smaller than 2MB on the second read */
-#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT))
+#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_SHIFT))
 
 enum ra_stat {
 	RA_STAT_HIT = 0,

@@ -975,13 +975,13 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
 static inline void ll_invalidate_page(struct page *vmpage)
 {
 	struct address_space *mapping = vmpage->mapping;
-	loff_t offset = vmpage->index << PAGE_CACHE_SHIFT;
+	loff_t offset = vmpage->index << PAGE_SHIFT;
 
 	LASSERT(PageLocked(vmpage));
 	if (!mapping)
 		return;
 
-	ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE);
+	ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE);
 	truncate_complete_page(mapping, vmpage);
 }
 
@@ -85,7 +85,7 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
 
 	si_meminfo(&si);
 	pages = si.totalram - si.totalhigh;
-	if (pages >> (20 - PAGE_CACHE_SHIFT) < 512)
+	if (pages >> (20 - PAGE_SHIFT) < 512)
 		lru_page_max = pages / 2;
 	else
 		lru_page_max = (pages / 4) * 3;

@@ -272,12 +272,12 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
 	    valid != CLIENT_CONNECT_MDT_REQD) {
 		char *buf;
 
-		buf = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+		buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
 		if (!buf) {
 			err = -ENOMEM;
 			goto out_md_fid;
 		}
-		obd_connect_flags2str(buf, PAGE_CACHE_SIZE,
+		obd_connect_flags2str(buf, PAGE_SIZE,
 				      valid ^ CLIENT_CONNECT_MDT_REQD, ",");
 		LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
 				   sbi->ll_md_exp->exp_obd->obd_name, buf);

@@ -335,7 +335,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
 	if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
 		sbi->ll_md_brw_size = data->ocd_brw_size;
 	else
-		sbi->ll_md_brw_size = PAGE_CACHE_SIZE;
+		sbi->ll_md_brw_size = PAGE_SIZE;
 
 	if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) {
 		LCONSOLE_INFO("Layout lock feature supported.\n");

@@ -58,7 +58,7 @@ void policy_from_vma(ldlm_policy_data_t *policy,
 		     size_t count)
 {
 	policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
-				 (vma->vm_pgoff << PAGE_CACHE_SHIFT);
+				 (vma->vm_pgoff << PAGE_SHIFT);
 	policy->l_extent.end = (policy->l_extent.start + count - 1) |
 			       ~CFS_PAGE_MASK;
 }

@@ -321,7 +321,7 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 		vmpage = vio->u.fault.ft_vmpage;
 		if (result != 0 && vmpage) {
-			page_cache_release(vmpage);
+			put_page(vmpage);
 			vmf->page = NULL;
 		}
 	}

@@ -360,7 +360,7 @@ restart:
 		lock_page(vmpage);
 		if (unlikely(!vmpage->mapping)) { /* unlucky */
 			unlock_page(vmpage);
-			page_cache_release(vmpage);
+			put_page(vmpage);
 			vmf->page = NULL;
 
 			if (!printed && ++count > 16) {

@@ -457,7 +457,7 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
 	LASSERTF(last > first, "last %llu first %llu\n", last, first);
 	if (mapping_mapped(mapping)) {
 		rc = 0;
-		unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
+		unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
 				    last - first + 1, 0);
 	}
 
@@ -218,7 +218,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
 	offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
 	bio_for_each_segment(bvec, bio, iter) {
 		BUG_ON(bvec.bv_offset != 0);
-		BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
+		BUG_ON(bvec.bv_len != PAGE_SIZE);
 
 		pages[page_count] = bvec.bv_page;
 		offsets[page_count] = offset;

@@ -232,7 +232,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
 		    (rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ,
 		    page_count);
 
-	pvec->ldp_size = page_count << PAGE_CACHE_SHIFT;
+	pvec->ldp_size = page_count << PAGE_SHIFT;
 	pvec->ldp_nr = page_count;
 
 	/* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to

@@ -507,7 +507,7 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
 
 	set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
 
-	lo->lo_blocksize = PAGE_CACHE_SIZE;
+	lo->lo_blocksize = PAGE_SIZE;
 	lo->lo_device = bdev;
 	lo->lo_flags = lo_flags;
 	lo->lo_backing_file = file;

@@ -525,11 +525,11 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
 	lo->lo_queue->queuedata = lo;
 
 	/* queue parameters */
-	CLASSERT(PAGE_CACHE_SIZE < (1 << (sizeof(unsigned short) * 8)));
+	CLASSERT(PAGE_SIZE < (1 << (sizeof(unsigned short) * 8)));
 	blk_queue_logical_block_size(lo->lo_queue,
-				     (unsigned short)PAGE_CACHE_SIZE);
+				     (unsigned short)PAGE_SIZE);
 	blk_queue_max_hw_sectors(lo->lo_queue,
-				 LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9));
+				 LLOOP_MAX_SEGMENTS << (PAGE_SHIFT - 9));
 	blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
 
 	set_capacity(disks[lo->lo_number], size);

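Note: the `PAGE_SHIFT - 9` in the queue setup above converts pages to 512-byte block-layer sectors (a sector is 1 << 9 bytes). A small sketch, again with an assumed 4 KiB page size and a made-up segment limit:

#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed 4 KiB pages */
#define SECTOR_SHIFT	9	/* 512-byte sectors */

int main(void)
{
	unsigned long max_segments = 16;	/* hypothetical LLOOP_MAX_SEGMENTS */

	/* each page holds 1 << (PAGE_SHIFT - SECTOR_SHIFT) = 8 sectors */
	unsigned long max_hw_sectors = max_segments << (PAGE_SHIFT - SECTOR_SHIFT);
	printf("%lu pages = %lu sectors\n", max_segments, max_hw_sectors);	/* 128 */
	return 0;
}
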
@@ -233,7 +233,7 @@ static ssize_t max_read_ahead_mb_show(struct kobject *kobj,
 	pages_number = sbi->ll_ra_info.ra_max_pages;
 	spin_unlock(&sbi->ll_lock);
 
-	mult = 1 << (20 - PAGE_CACHE_SHIFT);
+	mult = 1 << (20 - PAGE_SHIFT);
 	return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
 }
 
@@ -251,12 +251,12 @@ static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
 	if (rc)
 		return rc;
 
-	pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */
+	pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
 
 	if (pages_number > totalram_pages / 2) {
 
 		CERROR("can't set file readahead more than %lu MB\n",
-		       totalram_pages >> (20 - PAGE_CACHE_SHIFT + 1)); /*1/2 of RAM*/
+		       totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/
 		return -ERANGE;
 	}
 
@@ -281,7 +281,7 @@ static ssize_t max_read_ahead_per_file_mb_show(struct kobject *kobj,
 	pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
 	spin_unlock(&sbi->ll_lock);
 
-	mult = 1 << (20 - PAGE_CACHE_SHIFT);
+	mult = 1 << (20 - PAGE_SHIFT);
 	return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
 }
 
@@ -326,7 +326,7 @@ static ssize_t max_read_ahead_whole_mb_show(struct kobject *kobj,
 	pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
 	spin_unlock(&sbi->ll_lock);
 
-	mult = 1 << (20 - PAGE_CACHE_SHIFT);
+	mult = 1 << (20 - PAGE_SHIFT);
 	return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
 }
 
@@ -349,7 +349,7 @@ static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj,
 	 */
 	if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
 		CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n",
-		       sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_CACHE_SHIFT));
+		       sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_SHIFT));
 		return -ERANGE;
 	}
 
@@ -366,7 +366,7 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
 	struct super_block *sb = m->private;
 	struct ll_sb_info *sbi = ll_s2sbi(sb);
 	struct cl_client_cache *cache = &sbi->ll_cache;
-	int shift = 20 - PAGE_CACHE_SHIFT;
+	int shift = 20 - PAGE_SHIFT;
 	int max_cached_mb;
 	int unused_mb;
 
@@ -405,7 +405,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
 		return -EFAULT;
 	kernbuf[count] = 0;
 
-	mult = 1 << (20 - PAGE_CACHE_SHIFT);
+	mult = 1 << (20 - PAGE_SHIFT);
 	buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
 		  kernbuf;
 	rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);

@@ -415,7 +415,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
 	if (pages_number < 0 || pages_number > totalram_pages) {
 		CERROR("%s: can't set max cache more than %lu MB\n",
 		       ll_get_fsname(sb, NULL, 0),
-		       totalram_pages >> (20 - PAGE_CACHE_SHIFT));
+		       totalram_pages >> (20 - PAGE_SHIFT));
 		return -ERANGE;
 	}
 
@@ -146,10 +146,10 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
 		 */
 		io->ci_lockreq = CILR_NEVER;
 
-		pos = vmpage->index << PAGE_CACHE_SHIFT;
+		pos = vmpage->index << PAGE_SHIFT;
 
 		/* Create a temp IO to serve write. */
-		result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_CACHE_SIZE);
+		result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_SIZE);
 		if (result == 0) {
 			cio->cui_fd = LUSTRE_FPRIVATE(file);
 			cio->cui_iter = NULL;

@@ -498,7 +498,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
 		}
 		if (rc != 1)
 			unlock_page(vmpage);
-		page_cache_release(vmpage);
+		put_page(vmpage);
 	} else {
 		which = RA_STAT_FAILED_GRAB_PAGE;
 		msg = "g_c_p_n failed";

@@ -527,7 +527,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
  * and max_read_ahead_per_file_mb otherwise the readahead budget can be used
  * up quickly which will affect read performance significantly. See LU-2816
  */
-#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_SHIFT)
 
 static inline int stride_io_mode(struct ll_readahead_state *ras)
 {

@@ -739,7 +739,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
 			end = rpc_boundary;
 
 		/* Truncate RA window to end of file */
-		end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT));
+		end = min(end, (unsigned long)((kms - 1) >> PAGE_SHIFT));
 
 		ras->ras_next_readahead = max(end, end + 1);
 		RAS_CDEBUG(ras);

@@ -776,7 +776,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
 	if (reserved != 0)
 		ll_ra_count_put(ll_i2sbi(inode), reserved);
 
-	if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT))
+	if (ra_end == end + 1 && ra_end == (kms >> PAGE_SHIFT))
 		ll_ra_stats_inc(mapping, RA_STAT_EOF);
 
 	/* if we didn't get to the end of the region we reserved from

@@ -985,8 +985,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
 	if (ras->ras_requests == 2 && !ras->ras_request_index) {
 		__u64 kms_pages;
 
-		kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
-			    PAGE_CACHE_SHIFT;
+		kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >>
+			    PAGE_SHIFT;
 
 		CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages,
 		       ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file);

@@ -1173,7 +1173,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
 		 * PageWriteback or clean the page.
 		 */
 		result = cl_sync_file_range(inode, offset,
-					    offset + PAGE_CACHE_SIZE - 1,
+					    offset + PAGE_SIZE - 1,
 					    CL_FSYNC_LOCAL, 1);
 		if (result > 0) {
 			/* actually we may have written more than one page.

@@ -1211,7 +1211,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
 	int ignore_layout = 0;
 
 	if (wbc->range_cyclic) {
-		start = mapping->writeback_index << PAGE_CACHE_SHIFT;
+		start = mapping->writeback_index << PAGE_SHIFT;
 		end = OBD_OBJECT_EOF;
 	} else {
 		start = wbc->range_start;

@@ -1241,7 +1241,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
 		if (end == OBD_OBJECT_EOF)
 			end = i_size_read(inode);
-		mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) + 1;
+		mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
 	}
 	return result;
 }

@@ -87,7 +87,7 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
 	 * below because they are run with page locked and all our io is
 	 * happening with locked page too
 	 */
-	if (offset == 0 && length == PAGE_CACHE_SIZE) {
+	if (offset == 0 && length == PAGE_SIZE) {
 		env = cl_env_get(&refcheck);
 		if (!IS_ERR(env)) {
 			inode = vmpage->mapping->host;

@@ -193,8 +193,8 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr,
 		return -EFBIG;
 	}
 
-	*max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	*max_pages -= user_addr >> PAGE_CACHE_SHIFT;
+	*max_pages = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	*max_pages -= user_addr >> PAGE_SHIFT;
 
 	*pages = libcfs_kvzalloc(*max_pages * sizeof(**pages), GFP_NOFS);
 	if (*pages) {

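Note: the two patched lines above compute how many pages a user buffer touches: round the end address up to a page boundary, then subtract the page index of the start. A self-contained sketch of the same arithmetic (PAGE_SHIFT value assumed):

#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* page count spanned by [user_addr, user_addr + size) */
static unsigned long pages_spanned(unsigned long user_addr, unsigned long size)
{
	unsigned long max_pages;

	max_pages = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	max_pages -= user_addr >> PAGE_SHIFT;
	return max_pages;
}

int main(void)
{
	/* 100 bytes straddling a page boundary touch two pages */
	printf("%lu\n", pages_spanned(PAGE_SIZE - 50, 100));	/* prints 2 */
	return 0;
}
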
@@ -217,7 +217,7 @@ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
 	for (i = 0; i < npages; i++) {
 		if (do_dirty)
 			set_page_dirty_lock(pages[i]);
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 	}
 	kvfree(pages);
 }

@@ -357,7 +357,7 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
  * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc.
  */
 #define MAX_DIO_SIZE ((KMALLOC_MAX_SIZE / sizeof(struct brw_page) * \
-		       PAGE_CACHE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
+		       PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
 static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
 			       loff_t file_offset)
 {

@@ -382,8 +382,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
 	CDEBUG(D_VFSTRACE,
 	       "VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
 	       inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE,
-	       file_offset, file_offset, count >> PAGE_CACHE_SHIFT,
-	       MAX_DIO_SIZE >> PAGE_CACHE_SHIFT);
+	       file_offset, file_offset, count >> PAGE_SHIFT,
+	       MAX_DIO_SIZE >> PAGE_SHIFT);
 
 	/* Check that all user buffers are aligned as well */
 	if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK)

@@ -432,8 +432,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
 			 * page worth of page pointers = 4MB on i386.
 			 */
 			if (result == -ENOMEM &&
-			    size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
-				   PAGE_CACHE_SIZE) {
+			    size > (PAGE_SIZE / sizeof(*pages)) *
+				   PAGE_SIZE) {
 				size = ((((size / 2) - 1) |
 					 ~CFS_PAGE_MASK) + 1) &
 					CFS_PAGE_MASK;

@@ -474,10 +474,10 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
 			  loff_t pos, unsigned len, unsigned flags,
 			  struct page **pagep, void **fsdata)
 {
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	struct page *page;
 	int rc;
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned from = pos & (PAGE_SIZE - 1);
 
 	page = grab_cache_page_write_begin(mapping, index, flags);
 	if (!page)

@@ -488,7 +488,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
 	rc = ll_prepare_write(file, page, from, from + len);
 	if (rc) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	return rc;
 }

@@ -497,12 +497,12 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned copied,
 			struct page *page, void *fsdata)
 {
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned from = pos & (PAGE_SIZE - 1);
 	int rc;
 
 	rc = ll_commit_write(file, page, from, from + copied);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	return rc ?: copied;
 }

@@ -514,7 +514,7 @@ static int vvp_io_read_start(const struct lu_env *env,
 		/*
 		 * XXX: explicit PAGE_CACHE_SIZE
 		 */
-		bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
+		bead->lrr_count = cl_index(obj, tot + PAGE_SIZE - 1);
 		ll_ra_read_in(file, bead);
 	}
 
@@ -959,7 +959,7 @@ static int vvp_io_prepare_write(const struct lu_env *env,
 		 * We're completely overwriting an existing page, so _don't_
 		 * set it up to date until commit_write
 		 */
-		if (from == 0 && to == PAGE_CACHE_SIZE) {
+		if (from == 0 && to == PAGE_SIZE) {
 			CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
 			POISON_PAGE(page, 0x11);
 		} else

@@ -1022,7 +1022,7 @@ static int vvp_io_commit_write(const struct lu_env *env,
 			set_page_dirty(vmpage);
 			vvp_write_pending(cl2ccc(obj), cp);
 		} else if (result == -EDQUOT) {
-			pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+			pgoff_t last_index = i_size_read(inode) >> PAGE_SHIFT;
 			bool need_clip = true;
 
 			/*

@@ -1040,7 +1040,7 @@ static int vvp_io_commit_write(const struct lu_env *env,
 			 * being.
 			 */
 			if (last_index > pg->cp_index) {
-				to = PAGE_CACHE_SIZE;
+				to = PAGE_SIZE;
 				need_clip = false;
 			} else if (last_index == pg->cp_index) {
 				int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;

@@ -57,7 +57,7 @@ static void vvp_page_fini_common(struct ccc_page *cp)
 	struct page *vmpage = cp->cpg_page;
 
 	LASSERT(vmpage);
-	page_cache_release(vmpage);
+	put_page(vmpage);
 }
 
 static void vvp_page_fini(const struct lu_env *env,

@@ -164,12 +164,12 @@ static int vvp_page_unmap(const struct lu_env *env,
 	LASSERT(vmpage);
 	LASSERT(PageLocked(vmpage));
 
-	offset = vmpage->index << PAGE_CACHE_SHIFT;
+	offset = vmpage->index << PAGE_SHIFT;
 
 	/*
 	 * XXX is it safe to call this with the page lock held?
 	 */
-	ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_CACHE_SIZE);
+	ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_SIZE);
 	return 0;
 }
 
@@ -537,7 +537,7 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
 	CLOBINVRNT(env, obj, ccc_object_invariant(obj));
 
 	cpg->cpg_page = vmpage;
-	page_cache_get(vmpage);
+	get_page(vmpage);
 
 	INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
 	if (page->cp_type == CPT_CACHEABLE) {

@@ -2129,8 +2129,8 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
 	if (rc != 0)
 		return rc;
 
-	ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_CACHE_SIZE - 1)
-		  >> PAGE_CACHE_SHIFT;
+	ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_SIZE - 1)
+		  >> PAGE_SHIFT;
 	nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT;
 	LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
 	LASSERT(ncfspgs > 0 && ncfspgs <= op_data->op_npages);

@@ -1002,10 +1002,10 @@ restart_bulk:
 
 	/* NB req now owns desc and will free it when it gets freed */
 	for (i = 0; i < op_data->op_npages; i++)
-		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
+		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
 
 	mdc_readdir_pack(req, op_data->op_offset,
-			 PAGE_CACHE_SIZE * op_data->op_npages,
+			 PAGE_SIZE * op_data->op_npages,
 			 &op_data->op_fid1);
 
 	ptlrpc_request_set_replen(req);

@@ -1037,7 +1037,7 @@ restart_bulk:
 	if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
 		CERROR("Unexpected # bytes transferred: %d (%ld expected)\n",
 		       req->rq_bulk->bd_nob_transferred,
-		       PAGE_CACHE_SIZE * op_data->op_npages);
+		       PAGE_SIZE * op_data->op_npages);
 		ptlrpc_req_finished(req);
 		return -EPROTO;
 	}

@@ -1113,7 +1113,7 @@ static int mgc_import_event(struct obd_device *obd,
 }
 
 enum {
-	CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT),
+	CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_SHIFT),
 	CONFIG_READ_NRPAGES = 4
 };
 
@@ -1137,19 +1137,19 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
 	LASSERT(cfg->cfg_instance);
 	LASSERT(cfg->cfg_sb == cfg->cfg_instance);
 
-	inst = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+	inst = kzalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!inst)
 		return -ENOMEM;
 
-	pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance);
-	if (pos >= PAGE_CACHE_SIZE) {
+	pos = snprintf(inst, PAGE_SIZE, "%p", cfg->cfg_instance);
+	if (pos >= PAGE_SIZE) {
 		kfree(inst);
 		return -E2BIG;
 	}
 
 	++pos;
 	buf = inst + pos;
-	bufsz = PAGE_CACHE_SIZE - pos;
+	bufsz = PAGE_SIZE - pos;
 
 	while (datalen > 0) {
 		int entry_len = sizeof(*entry);

@@ -1181,7 +1181,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
 		/* Keep this swab for normal mixed endian handling. LU-1644 */
 		if (mne_swab)
 			lustre_swab_mgs_nidtbl_entry(entry);
-		if (entry->mne_length > PAGE_CACHE_SIZE) {
+		if (entry->mne_length > PAGE_SIZE) {
 			CERROR("MNE too large (%u)\n", entry->mne_length);
 			break;
 		}

@@ -1371,7 +1371,7 @@ again:
 	}
 	body->mcb_offset = cfg->cfg_last_idx + 1;
 	body->mcb_type = cld->cld_type;
-	body->mcb_bits = PAGE_CACHE_SHIFT;
+	body->mcb_bits = PAGE_SHIFT;
 	body->mcb_units = nrpages;
 
 	/* allocate bulk transfer descriptor */

@@ -1383,7 +1383,7 @@ again:
 	}
 
 	for (i = 0; i < nrpages; i++)
-		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
+		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
 
 	ptlrpc_request_set_replen(req);
 	rc = ptlrpc_queue_wait(req);

@@ -1411,7 +1411,7 @@ again:
 		goto out;
 	}
 
-	if (ealen > nrpages << PAGE_CACHE_SHIFT) {
+	if (ealen > nrpages << PAGE_SHIFT) {
 		rc = -EINVAL;
 		goto out;
 	}

@@ -1439,7 +1439,7 @@ again:
 
 		ptr = kmap(pages[i]);
 		rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr,
-					     min_t(int, ealen, PAGE_CACHE_SIZE),
+					     min_t(int, ealen, PAGE_SIZE),
 					     mne_swab);
 		kunmap(pages[i]);
 		if (rc2 < 0) {

@@ -1448,7 +1448,7 @@ again:
 			break;
 		}
 
-		ealen -= PAGE_CACHE_SIZE;
+		ealen -= PAGE_SIZE;
 	}
 
 out:

@@ -1477,7 +1477,7 @@ loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
 	/*
 	 * XXX for now.
 	 */
-	return (loff_t)idx << PAGE_CACHE_SHIFT;
+	return (loff_t)idx << PAGE_SHIFT;
 }
 EXPORT_SYMBOL(cl_offset);
 
@@ -1489,13 +1489,13 @@ pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
 	/*
 	 * XXX for now.
 	 */
-	return offset >> PAGE_CACHE_SHIFT;
+	return offset >> PAGE_SHIFT;
 }
 EXPORT_SYMBOL(cl_index);
 
 int cl_page_size(const struct cl_object *obj)
 {
-	return 1 << PAGE_CACHE_SHIFT;
+	return 1 << PAGE_SHIFT;
 }
 EXPORT_SYMBOL(cl_page_size);
 
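Note: after the conversion, cl_offset() and cl_index() are a plain shift pair between page indices and byte offsets. A sketch of the round-trip behaviour (stand-alone mirrors under an assumed page size, not the kernel functions themselves):

#include <assert.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

typedef unsigned long pgoff_t;
typedef long long loff_t;

static loff_t my_cl_offset(pgoff_t idx) { return (loff_t)idx << PAGE_SHIFT; }
static pgoff_t my_cl_index(loff_t off) { return off >> PAGE_SHIFT; }

int main(void)
{
	pgoff_t idx = 123;

	/* index -> offset -> index round-trips exactly ... */
	assert(my_cl_index(my_cl_offset(idx)) == idx);
	/* ... and every byte inside the page maps back to the same index */
	assert(my_cl_index(my_cl_offset(idx) + (1 << PAGE_SHIFT) - 1) == idx);
	return 0;
}
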
@@ -461,9 +461,9 @@ static int obd_init_checks(void)
 		CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len);
 		ret = -EINVAL;
 	}
-	if ((u64val & ~CFS_PAGE_MASK) >= PAGE_CACHE_SIZE) {
+	if ((u64val & ~CFS_PAGE_MASK) >= PAGE_SIZE) {
 		CWARN("mask failed: u64val %llu >= %llu\n", u64val,
-		      (__u64)PAGE_CACHE_SIZE);
+		      (__u64)PAGE_SIZE);
 		ret = -EINVAL;
 	}
 
@@ -509,7 +509,7 @@ static int __init obdclass_init(void)
 	 * For clients with less memory, a larger fraction is needed
 	 * for other purposes (mostly for BGL).
 	 */
-	if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT))
+	if (totalram_pages <= 512 << (20 - PAGE_SHIFT))
 		obd_max_dirty_pages = totalram_pages / 4;
 	else
 		obd_max_dirty_pages = totalram_pages / 2;

@@ -71,8 +71,8 @@ void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
 	if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits))
 		dst->i_blkbits = ffs(src->o_blksize) - 1;
 
-	if (dst->i_blkbits < PAGE_CACHE_SHIFT)
-		dst->i_blkbits = PAGE_CACHE_SHIFT;
+	if (dst->i_blkbits < PAGE_SHIFT)
+		dst->i_blkbits = PAGE_SHIFT;
 
 	/* allocation of space */
 	if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks)

@@ -100,7 +100,7 @@ static ssize_t max_dirty_mb_show(struct kobject *kobj, struct attribute *attr,
 			  char *buf)
 {
 	return sprintf(buf, "%ul\n",
-		       obd_max_dirty_pages / (1 << (20 - PAGE_CACHE_SHIFT)));
+		       obd_max_dirty_pages / (1 << (20 - PAGE_SHIFT)));
 }
 
 static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,

@@ -113,14 +113,14 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
 	if (rc)
 		return rc;
 
-	val *= 1 << (20 - PAGE_CACHE_SHIFT); /* convert to pages */
+	val *= 1 << (20 - PAGE_SHIFT); /* convert to pages */
 
 	if (val > ((totalram_pages / 10) * 9)) {
 		/* Somebody wants to assign too much memory to dirty pages */
 		return -EINVAL;
 	}
 
-	if (val < 4 << (20 - PAGE_CACHE_SHIFT)) {
+	if (val < 4 << (20 - PAGE_SHIFT)) {
 		/* Less than 4 Mb for dirty cache is also bad */
 		return -EINVAL;
 	}

@@ -840,8 +840,8 @@ static int lu_htable_order(void)
 
 #if BITS_PER_LONG == 32
 	/* limit hashtable size for lowmem systems to low RAM */
-	if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
-		cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4;
+	if (cache_size > 1 << (30 - PAGE_SHIFT))
+		cache_size = 1 << (30 - PAGE_SHIFT) * 3 / 4;
 #endif
 
 	/* clear off unreasonable cache setting. */

@@ -853,7 +853,7 @@ static int lu_htable_order(void)
 		lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
 	}
 	cache_size = cache_size / 100 * lu_cache_percent *
-		(PAGE_CACHE_SIZE / 1024);
+		(PAGE_SIZE / 1024);
 
 	for (bits = 1; (1 << bits) < cache_size; ++bits) {
 		;

@@ -278,7 +278,7 @@ static void echo_page_fini(const struct lu_env *env,
 	struct page *vmpage = ep->ep_vmpage;
 
 	atomic_dec(&eco->eo_npages);
-	page_cache_release(vmpage);
+	put_page(vmpage);
 }
 
 static int echo_page_prep(const struct lu_env *env,

@@ -373,7 +373,7 @@ static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
 	struct echo_object *eco = cl2echo_obj(obj);
 
 	ep->ep_vmpage = vmpage;
-	page_cache_get(vmpage);
+	get_page(vmpage);
 	mutex_init(&ep->ep_lock);
 	cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
 	atomic_inc(&eco->eo_npages);

@@ -1138,7 +1138,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
 	LASSERT(rc == 0);
 
 	rc = cl_echo_enqueue0(env, eco, offset,
-			      offset + npages * PAGE_CACHE_SIZE - 1,
+			      offset + npages * PAGE_SIZE - 1,
 			      rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
 			      CEF_NEVER);
 	if (rc < 0)

@@ -1311,11 +1311,11 @@ echo_client_page_debug_setup(struct page *page, int rw, u64 id,
 	int delta;
 
 	/* no partial pages on the client */
-	LASSERT(count == PAGE_CACHE_SIZE);
+	LASSERT(count == PAGE_SIZE);
 
 	addr = kmap(page);
 
-	for (delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
+	for (delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
 		if (rw == OBD_BRW_WRITE) {
 			stripe_off = offset + delta;
 			stripe_id = id;

@@ -1341,11 +1341,11 @@ static int echo_client_page_debug_check(struct page *page, u64 id,
 	int rc2;
 
 	/* no partial pages on the client */
-	LASSERT(count == PAGE_CACHE_SIZE);
+	LASSERT(count == PAGE_SIZE);
 
 	addr = kmap(page);
 
-	for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
+	for (rc = delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
 		stripe_off = offset + delta;
 		stripe_id = id;
 
@@ -1391,7 +1391,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
 		return -EINVAL;
 
 	/* XXX think again with misaligned I/O */
-	npages = count >> PAGE_CACHE_SHIFT;
+	npages = count >> PAGE_SHIFT;
 
 	if (rw == OBD_BRW_WRITE)
 		brw_flags = OBD_BRW_ASYNC;

@@ -1408,7 +1408,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
 
 	for (i = 0, pgp = pga, off = offset;
 	     i < npages;
-	     i++, pgp++, off += PAGE_CACHE_SIZE) {
+	     i++, pgp++, off += PAGE_SIZE) {
 
 		LASSERT(!pgp->pg);      /* for cleanup */
 
@@ -1418,7 +1418,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
 			goto out;
 
 		pages[i] = pgp->pg;
-		pgp->count = PAGE_CACHE_SIZE;
+		pgp->count = PAGE_SIZE;
 		pgp->off = off;
 		pgp->flag = brw_flags;
 
@@ -1473,8 +1473,8 @@ static int echo_client_prep_commit(const struct lu_env *env,
 	if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0)
 		return -EINVAL;
 
-	npages = batch >> PAGE_CACHE_SHIFT;
-	tot_pages = count >> PAGE_CACHE_SHIFT;
+	npages = batch >> PAGE_SHIFT;
+	tot_pages = count >> PAGE_SHIFT;
 
 	lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS);
 	rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS);

@@ -1497,9 +1497,9 @@ static int echo_client_prep_commit(const struct lu_env *env,
 		if (tot_pages < npages)
 			npages = tot_pages;
 
-		for (i = 0; i < npages; i++, off += PAGE_CACHE_SIZE) {
+		for (i = 0; i < npages; i++, off += PAGE_SIZE) {
 			rnb[i].offset = off;
-			rnb[i].len = PAGE_CACHE_SIZE;
+			rnb[i].len = PAGE_SIZE;
 			rnb[i].flags = brw_flags;
 		}
 
@@ -1878,7 +1878,7 @@ static int __init obdecho_init(void)
 {
 	LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n");
 
-	LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
+	LASSERT(PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
 
 	return echo_client_init();
 }

@@ -162,15 +162,15 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj,
 	if (rc)
 		return rc;
 
-	pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */
+	pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
 
 	if (pages_number <= 0 ||
-	    pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_CACHE_SHIFT) ||
+	    pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) ||
 	    pages_number > totalram_pages / 4) /* 1/4 of RAM */
 		return -ERANGE;
 
 	client_obd_list_lock(&cli->cl_loi_list_lock);
-	cli->cl_dirty_max = (u32)(pages_number << PAGE_CACHE_SHIFT);
+	cli->cl_dirty_max = (u32)(pages_number << PAGE_SHIFT);
 	osc_wake_cache_waiters(cli);
 	client_obd_list_unlock(&cli->cl_loi_list_lock);
 
@@ -182,7 +182,7 @@ static int osc_cached_mb_seq_show(struct seq_file *m, void *v)
 {
 	struct obd_device *dev = m->private;
 	struct client_obd *cli = &dev->u.cli;
-	int shift = 20 - PAGE_CACHE_SHIFT;
+	int shift = 20 - PAGE_SHIFT;
 
 	seq_printf(m,
 		   "used_mb: %d\n"

@@ -211,7 +211,7 @@ static ssize_t osc_cached_mb_seq_write(struct file *file,
 		return -EFAULT;
 	kernbuf[count] = 0;
 
-	mult = 1 << (20 - PAGE_CACHE_SHIFT);
+	mult = 1 << (20 - PAGE_SHIFT);
 	buffer += lprocfs_find_named_value(kernbuf, "used_mb:", &count) -
 		  kernbuf;
 	rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);

@@ -569,12 +569,12 @@ static ssize_t max_pages_per_rpc_store(struct kobject *kobj,

 	/* if the max_pages is specified in bytes, convert to pages */
 	if (val >= ONE_MB_BRW_SIZE)
-		val >>= PAGE_CACHE_SHIFT;
+		val >>= PAGE_SHIFT;

-	chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_CACHE_SHIFT)) - 1);
+	chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
 	/* max_pages_per_rpc must be chunk aligned */
 	val = (val + ~chunk_mask) & chunk_mask;
-	if (val == 0 || val > ocd->ocd_brw_size >> PAGE_CACHE_SHIFT) {
+	if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) {
 		return -ERANGE;
 	}
 	client_obd_list_lock(&cli->cl_loi_list_lock);
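The alignment pair in the hunk above, chunk_mask = ~((1 << ppc_bits) - 1) followed by val = (val + ~chunk_mask) & chunk_mask, rounds a page count up to a whole number of chunks: ~chunk_mask equals the chunk size in pages minus one, so adding it and then masking truncates to the next chunk boundary. A sketch of the trick with made-up numbers; ppc_bits here is an illustrative assumption, not a value from the code:

#include <stdio.h>

int main(void)
{
	int ppc_bits = 2;                       /* assumption: 4 pages per chunk */
	unsigned long chunk_mask = ~((1UL << ppc_bits) - 1);
	unsigned long val = 9;                  /* pages requested */

	/* round up to a chunk multiple: add (chunk - 1), then clear low bits */
	val = (val + ~chunk_mask) & chunk_mask;
	printf("%lu\n", val);                   /* prints 12 */
	return 0;
}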
@@ -544,7 +544,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
 		return -ERANGE;

 	LASSERT(cur->oe_osclock == victim->oe_osclock);
-	ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT;
+	ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT;
 	chunk_start = cur->oe_start >> ppc_bits;
 	chunk_end = cur->oe_end >> ppc_bits;
 	if (chunk_start != (victim->oe_end >> ppc_bits) + 1 &&
@@ -647,8 +647,8 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
 	lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0);
 	LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);

-	LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT);
-	ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+	LASSERT(cli->cl_chunkbits >= PAGE_SHIFT);
+	ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
 	chunk_mask = ~((1 << ppc_bits) - 1);
 	chunksize = 1 << cli->cl_chunkbits;
 	chunk = index >> ppc_bits;
@@ -871,8 +871,8 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,

 	if (!sent) {
 		lost_grant = ext->oe_grants;
-	} else if (blocksize < PAGE_CACHE_SIZE &&
-		   last_count != PAGE_CACHE_SIZE) {
+	} else if (blocksize < PAGE_SIZE &&
+		   last_count != PAGE_SIZE) {
 		/* For short writes we shouldn't count parts of pages that
 		 * span a whole chunk on the OST side, or our accounting goes
 		 * wrong. Should match the code in filter_grant_check.

@@ -884,7 +884,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
 		if (end)
 			count += blocksize - end;

-		lost_grant = PAGE_CACHE_SIZE - count;
+		lost_grant = PAGE_SIZE - count;
 	}
 	if (ext->oe_grants > 0)
 		osc_free_grant(cli, nr_pages, lost_grant);
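The short-write path above charges grant only for the part of the final page the server will actually back with whole blocks: count accumulates the write rounded out to block boundaries, and the rest of the page's grant is recorded as lost_grant. A userspace sketch of that accounting, assuming the usual shape of this path (the offset/count/end computation mirrors the kernel source; the 4 KiB page size and 1 KiB blocksize are illustrative assumptions):

#include <stdio.h>

#define PAGE_SIZE 4096UL   /* assumption: 4 KiB pages */

int main(void)
{
	unsigned long blocksize = 1024;   /* assumption: server block size */
	unsigned long offset = 0;         /* write starts at page offset 0 */
	unsigned long last_count = 1000;  /* bytes written in the final page */

	/* bytes the server will account: round the write out to whole blocks */
	unsigned long count = last_count + (offset & (blocksize - 1));
	unsigned long end = (offset + last_count) & (blocksize - 1);
	if (end)
		count += blocksize - end;

	printf("lost grant: %lu\n", PAGE_SIZE - count);   /* 4096 - 1024 = 3072 */
	return 0;
}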
@@ -967,7 +967,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
 	struct osc_async_page *oap;
 	struct osc_async_page *tmp;
 	int pages_in_chunk = 0;
-	int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+	int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
 	__u64 trunc_chunk = trunc_index >> ppc_bits;
 	int grants = 0;
 	int nr_pages = 0;
@@ -1125,7 +1125,7 @@ static int osc_extent_make_ready(const struct lu_env *env,
 	if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
 		last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
 		LASSERT(last->oap_count > 0);
-		LASSERT(last->oap_page_off + last->oap_count <= PAGE_CACHE_SIZE);
+		LASSERT(last->oap_page_off + last->oap_count <= PAGE_SIZE);
 		last->oap_async_flags |= ASYNC_COUNT_STABLE;
 	}
@@ -1134,7 +1134,7 @@ static int osc_extent_make_ready(const struct lu_env *env,
 	 */
 	list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
 		if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
-			oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off;
+			oap->oap_count = PAGE_SIZE - oap->oap_page_off;
 			oap->oap_async_flags |= ASYNC_COUNT_STABLE;
 		}
 	}
@@ -1158,7 +1158,7 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
 	struct osc_object *obj = ext->oe_obj;
 	struct client_obd *cli = osc_cli(obj);
 	struct osc_extent *next;
-	int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+	int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
 	pgoff_t chunk = index >> ppc_bits;
 	pgoff_t end_chunk;
 	pgoff_t end_index;
@@ -1293,9 +1293,9 @@ static int osc_refresh_count(const struct lu_env *env,
 		return 0;
 	else if (cl_offset(obj, page->cp_index + 1) > kms)
 		/* catch sub-page write at end of file */
-		return kms % PAGE_CACHE_SIZE;
+		return kms % PAGE_SIZE;
 	else
-		return PAGE_CACHE_SIZE;
+		return PAGE_SIZE;
 }

 static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
@@ -1376,10 +1376,10 @@ static void osc_consume_write_grant(struct client_obd *cli,
 	assert_spin_locked(&cli->cl_loi_list_lock.lock);
 	LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
 	atomic_inc(&obd_dirty_pages);
-	cli->cl_dirty += PAGE_CACHE_SIZE;
+	cli->cl_dirty += PAGE_SIZE;
 	pga->flag |= OBD_BRW_FROM_GRANT;
 	CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
-	       PAGE_CACHE_SIZE, pga, pga->pg);
+	       PAGE_SIZE, pga, pga->pg);
 	osc_update_next_shrink(cli);
 }
@@ -1396,11 +1396,11 @@ static void osc_release_write_grant(struct client_obd *cli,

 	pga->flag &= ~OBD_BRW_FROM_GRANT;
 	atomic_dec(&obd_dirty_pages);
-	cli->cl_dirty -= PAGE_CACHE_SIZE;
+	cli->cl_dirty -= PAGE_SIZE;
 	if (pga->flag & OBD_BRW_NOCACHE) {
 		pga->flag &= ~OBD_BRW_NOCACHE;
 		atomic_dec(&obd_dirty_transit_pages);
-		cli->cl_dirty_transit -= PAGE_CACHE_SIZE;
+		cli->cl_dirty_transit -= PAGE_SIZE;
 	}
 }
@@ -1469,7 +1469,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,

 	client_obd_list_lock(&cli->cl_loi_list_lock);
 	atomic_sub(nr_pages, &obd_dirty_pages);
-	cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT;
+	cli->cl_dirty -= nr_pages << PAGE_SHIFT;
 	cli->cl_lost_grant += lost_grant;
 	if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
 		/* borrow some grant from truncate to avoid the case that
@@ -1512,11 +1512,11 @@ static int osc_enter_cache_try(struct client_obd *cli,
 	if (rc < 0)
 		return 0;

-	if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max &&
+	if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
 	    atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
 		osc_consume_write_grant(cli, &oap->oap_brw_page);
 		if (transient) {
-			cli->cl_dirty_transit += PAGE_CACHE_SIZE;
+			cli->cl_dirty_transit += PAGE_SIZE;
 			atomic_inc(&obd_dirty_transit_pages);
 			oap->oap_brw_flags |= OBD_BRW_NOCACHE;
 		}
@@ -1562,7 +1562,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
 	 * of queued writes and create a discontiguous rpc stream
 	 */
 	if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
-	    cli->cl_dirty_max < PAGE_CACHE_SIZE ||
+	    cli->cl_dirty_max < PAGE_SIZE ||
 	    cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) {
 		rc = -EDQUOT;
 		goto out;
@@ -1632,7 +1632,7 @@ void osc_wake_cache_waiters(struct client_obd *cli)

 		ocw->ocw_rc = -EDQUOT;
 		/* we can't dirty more */
-		if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) ||
+		if ((cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) ||
 		    (atomic_read(&obd_dirty_pages) + 1 >
 		     obd_max_dirty_pages)) {
 			CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n",
@@ -410,7 +410,7 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
 	int result;

 	opg->ops_from = 0;
-	opg->ops_to = PAGE_CACHE_SIZE;
+	opg->ops_to = PAGE_SIZE;

 	result = osc_prep_async_page(osc, opg, vmpage,
 				     cl_offset(obj, page->cp_index));
@@ -487,9 +487,9 @@ static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
 /* LRU pages are freed in batch mode. OSC should at least free this
  * number of pages to avoid running out of LRU budget, and..
  */
-static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */
+static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT); /* 2M */
 /* free this number at most otherwise it will take too long time to finish. */
-static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */
+static const int lru_shrink_max = 32 << (20 - PAGE_SHIFT); /* 32M */

 /* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
  * we should free slots aggressively. In this way, slots are freed in a steady
@@ -826,7 +826,7 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
 		oa->o_undirty = 0;
 	} else {
 		long max_in_flight = (cli->cl_max_pages_per_rpc <<
-				      PAGE_CACHE_SHIFT)*
+				      PAGE_SHIFT)*
 				     (cli->cl_max_rpcs_in_flight + 1);
 		oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
 	}
@@ -909,11 +909,11 @@ static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
 static int osc_shrink_grant(struct client_obd *cli)
 {
 	__u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
-			     (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT);
+			     (cli->cl_max_pages_per_rpc << PAGE_SHIFT);

 	client_obd_list_lock(&cli->cl_loi_list_lock);
 	if (cli->cl_avail_grant <= target_bytes)
-		target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+		target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
 	client_obd_list_unlock(&cli->cl_loi_list_lock);

 	return osc_shrink_grant_to_target(cli, target_bytes);
@@ -929,8 +929,8 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
 	 * We don't want to shrink below a single RPC, as that will negatively
 	 * impact block allocation and long-term performance.
 	 */
-	if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT)
-		target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+	if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
+		target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;

 	if (target_bytes >= cli->cl_avail_grant) {
 		client_obd_list_unlock(&cli->cl_loi_list_lock);
@@ -978,7 +978,7 @@ static int osc_should_shrink_grant(struct client_obd *client)
 	 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
 	 * Keep comment here so that it can be found by searching.
 	 */
-	int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+	int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;

 	if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
 	    client->cl_avail_grant > brw_size)
@@ -1052,7 +1052,7 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
 	}

 	/* determine the appropriate chunk size used by osc_extent. */
-	cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize);
+	cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize);
 	client_obd_list_unlock(&cli->cl_loi_list_lock);

 	CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
@@ -1317,9 +1317,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
 		LASSERT(pg->count > 0);
 		/* make sure there is no gap in the middle of page array */
 		LASSERTF(page_count == 1 ||
-			 (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) &&
+			 (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
 			  ergo(i > 0 && i < page_count - 1,
-			       poff == 0 && pg->count == PAGE_CACHE_SIZE) &&
+			       poff == 0 && pg->count == PAGE_SIZE) &&
 			  ergo(i == page_count - 1, poff == 0)),
 			 "i: %d/%d pg: %p off: %llu, count: %u\n",
 			 i, page_count, pg, pg->off, pg->count);
@@ -1877,7 +1877,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
 				oap->oap_count;
 		else
 			LASSERT(oap->oap_page_off + oap->oap_count ==
-				PAGE_CACHE_SIZE);
+				PAGE_SIZE);
 		}
 	}
@@ -1993,7 +1993,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
 		tmp->oap_request = ptlrpc_request_addref(req);

 	client_obd_list_lock(&cli->cl_loi_list_lock);
-	starting_offset >>= PAGE_CACHE_SHIFT;
+	starting_offset >>= PAGE_SHIFT;
 	if (cmd == OBD_BRW_READ) {
 		cli->cl_r_in_flight++;
 		lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
@@ -2790,12 +2790,12 @@ out:
 		   CFS_PAGE_MASK;

 	if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
-	    fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1)
+	    fm_key->fiemap.fm_start + PAGE_SIZE - 1)
 		policy.l_extent.end = OBD_OBJECT_EOF;
 	else
 		policy.l_extent.end = (fm_key->fiemap.fm_start +
 				       fm_key->fiemap.fm_length +
-				       PAGE_CACHE_SIZE - 1) & CFS_PAGE_MASK;
+				       PAGE_SIZE - 1) & CFS_PAGE_MASK;

 	ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
 	mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
@@ -174,12 +174,12 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
 	LASSERT(page);
 	LASSERT(pageoffset >= 0);
 	LASSERT(len > 0);
-	LASSERT(pageoffset + len <= PAGE_CACHE_SIZE);
+	LASSERT(pageoffset + len <= PAGE_SIZE);

 	desc->bd_nob += len;

 	if (pin)
-		page_cache_get(page);
+		get_page(page);

 	ptlrpc_add_bulk_page(desc, page, pageoffset, len);
 }
@@ -206,7 +206,7 @@ void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)

 	if (unpin) {
 		for (i = 0; i < desc->bd_iov_count; i++)
-			put_page(desc->bd_iov[i].kiov_page);
+			put_page(desc->bd_iov[i].kiov_page);
 	}

 	kfree(desc);
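page_cache_get()/page_cache_release() were always plain aliases for get_page()/put_page(), so the two hunks above change names, not behavior: every page pinned when the bulk descriptor is prepared must be released exactly once when the descriptor is freed. A toy userspace model of that pairing; the struct and helpers below are stand-ins for the kernel's refcounting, written only to show the invariant:

#include <stdio.h>

/* toy stand-in for struct page's refcount; illustration only */
struct page { int refcount; };

static void get_page(struct page *p) { p->refcount++; }   /* pin */
static void put_page(struct page *p) { p->refcount--; }   /* release */

int main(void)
{
	struct page pages[3] = { {1}, {1}, {1} };
	int i, n = 3;

	for (i = 0; i < n; i++)
		get_page(&pages[i]);    /* pin while the bulk I/O is in flight */

	/* ... I/O completes ... */

	for (i = 0; i < n; i++)
		put_page(&pages[i]);    /* matching release on free */

	printf("refcount after: %d\n", pages[0].refcount);  /* back to 1 */
	return 0;
}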
@@ -1092,7 +1092,7 @@ finish:

 	if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
 		cli->cl_max_pages_per_rpc =
-			min(ocd->ocd_brw_size >> PAGE_CACHE_SHIFT,
+			min(ocd->ocd_brw_size >> PAGE_SHIFT,
 			    cli->cl_max_pages_per_rpc);
 	else if (imp->imp_connect_op == MDS_CONNECT ||
 		 imp->imp_connect_op == MGS_CONNECT)
@@ -308,7 +308,7 @@ ptlrpc_lprocfs_req_history_max_seq_write(struct file *file,
 	 * hose a kernel by allowing the request history to grow too
 	 * far.
 	 */
-	bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	bufpages = (svc->srv_buf_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (val > totalram_pages / (2 * bufpages))
 		return -ERANGE;
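The bufpages line above is the standard ceiling division by the page size: adding PAGE_SIZE - 1 before shifting yields the number of pages needed to hold srv_buf_size bytes, rounding any partial page up. A sketch of the idiom; the buffer size and 4 KiB page size are illustrative assumptions:

#include <stdio.h>

#define PAGE_SHIFT 12                     /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long buf_size = 9000;    /* hypothetical buffer size */
	/* ceiling division: pages needed to hold buf_size bytes */
	unsigned long bufpages = (buf_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	printf("%lu bytes -> %lu pages\n", buf_size, bufpages);   /* 3 pages */
	return 0;
}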
@@ -1226,7 +1226,7 @@ int lprocfs_wr_import(struct file *file, const char __user *buffer,
 	const char prefix[] = "connection=";
 	const int prefix_len = sizeof(prefix) - 1;

-	if (count > PAGE_CACHE_SIZE - 1 || count <= prefix_len)
+	if (count > PAGE_SIZE - 1 || count <= prefix_len)
 		return -EINVAL;

 	kbuf = kzalloc(count + 1, GFP_NOFS);
@@ -195,7 +195,7 @@ int ptlrpc_resend(struct obd_import *imp)
 	}

 	list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) {
-		LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON,
+		LASSERTF((long)req > PAGE_SIZE && req != LP_POISON,
 			 "req %p bad\n", req);
 		LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
 		if (!ptlrpc_no_resend(req))
@@ -58,7 +58,7 @@
  * bulk encryption page pools *
  ****************************************/

-#define POINTERS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *))
+#define POINTERS_PER_PAGE (PAGE_SIZE / sizeof(void *))
 #define PAGES_PER_POOL (POINTERS_PER_PAGE)

 #define IDLE_IDX_MAX (100)
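POINTERS_PER_PAGE sizes the pool bookkeeping above: one page of memory holds PAGE_SIZE / sizeof(void *) pointer slots, and each pool tracks that many pages. With 4 KiB pages and 8-byte pointers that is 512 pages, i.e. 2 MiB of pooled memory per pool. A sketch of the arithmetic, assuming an LP64 userspace and 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096UL   /* assumption: 4 KiB pages */

int main(void)
{
	/* one page of void * slots indexes one pool's worth of pages */
	unsigned long pointers_per_page = PAGE_SIZE / sizeof(void *);
	printf("slots per page: %lu\n", pointers_per_page);          /* 512 */
	printf("bytes per pool: %lu\n", pointers_per_page * PAGE_SIZE);
	return 0;
}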
@@ -1147,8 +1147,8 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
 	ffs->sb = sb;
 	data->ffs_data = NULL;
 	sb->s_fs_info = ffs;
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = FUNCTIONFS_MAGIC;
 	sb->s_op = &ffs_sb_operations;
 	sb->s_time_gran = 1;
@@ -1954,8 +1954,8 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
 		return -ENODEV;

 	/* superblock */
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = GADGETFS_MAGIC;
 	sb->s_op = &gadget_fs_operations;
 	sb->s_time_gran = 1;
@@ -123,7 +123,7 @@ static int slave_configure(struct scsi_device *sdev)
 		unsigned int max_sectors = 64;

 		if (us->fflags & US_FL_MAX_SECTORS_MIN)
-			max_sectors = PAGE_CACHE_SIZE >> 9;
+			max_sectors = PAGE_SIZE >> 9;
 		if (queue_max_hw_sectors(sdev->request_queue) > max_sectors)
 			blk_queue_max_hw_sectors(sdev->request_queue,
 						 max_sectors);
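PAGE_SIZE >> 9 in the hunk above converts the page size in bytes into 512-byte disk sectors, so the US_FL_MAX_SECTORS_MIN quirk caps transfers at exactly one page. A sketch, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096U   /* assumption: 4 KiB pages */

int main(void)
{
	/* 512-byte sectors per page: 4096 / 512 = 8 */
	unsigned int max_sectors = PAGE_SIZE >> 9;
	printf("max_sectors = %u\n", max_sectors);
	return 0;
}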
@@ -735,7 +735,7 @@ out:

 out_unmap:
 	for (i = 0; i < nr_pages; i++)
-		page_cache_release(pages[i]);
+		put_page(pages[i]);

 	kfree(pages);
@@ -153,7 +153,7 @@ static void v9fs_invalidate_page(struct page *page, unsigned int offset,
 	 * If called with zero offset, we should release
 	 * the private state assocated with the page
 	 */
-	if (offset == 0 && length == PAGE_CACHE_SIZE)
+	if (offset == 0 && length == PAGE_SIZE)
 		v9fs_fscache_invalidate_page(page);
 }
@@ -166,10 +166,10 @@ static int v9fs_vfs_writepage_locked(struct page *page)
 	struct bio_vec bvec;
 	int err, len;

-	if (page->index == size >> PAGE_CACHE_SHIFT)
-		len = size & ~PAGE_CACHE_MASK;
+	if (page->index == size >> PAGE_SHIFT)
+		len = size & ~PAGE_MASK;
 	else
-		len = PAGE_CACHE_SIZE;
+		len = PAGE_SIZE;

 	bvec.bv_page = page;
 	bvec.bv_offset = 0;
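For the final page of a file, size & ~PAGE_MASK (equivalently size % PAGE_SIZE) gives the number of valid bytes; every other page is written in full. A userspace sketch of the branch above, with a hypothetical file size and 4 KiB pages assumed:

#include <stdio.h>

#define PAGE_SHIFT 12                      /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long size = 10000;        /* hypothetical file size */
	unsigned long index = 2;           /* page being written back */
	unsigned long len;

	/* the last page holds only the tail of the file */
	if (index == size >> PAGE_SHIFT)
		len = size & ~PAGE_MASK;   /* 10000 % 4096 = 1808 */
	else
		len = PAGE_SIZE;
	printf("len = %lu\n", len);
	return 0;
}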
@@ -271,7 +271,7 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
 	int retval = 0;
 	struct page *page;
 	struct v9fs_inode *v9inode;
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	struct inode *inode = mapping->host;
@@ -288,11 +288,11 @@ start:
 	if (PageUptodate(page))
 		goto out;

-	if (len == PAGE_CACHE_SIZE)
+	if (len == PAGE_SIZE)
 		goto out;

 	retval = v9fs_fid_readpage(v9inode->writeback_fid, page);
-	page_cache_release(page);
+	put_page(page);
 	if (!retval)
 		goto start;
 out:
@@ -313,7 +313,7 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
 		/*
 		 * zero out the rest of the area
 		 */
-		unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+		unsigned from = pos & (PAGE_SIZE - 1);

 		zero_user(page, from + copied, len - copied);
 		flush_dcache_page(page);
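write_begin/write_end split a file position into a page index and an in-page offset: pos >> PAGE_SHIFT selects the page, pos & (PAGE_SIZE - 1) the byte within it, and the two recombine as (index << PAGE_SHIFT) + from. A sketch of the decomposition, assuming 4 KiB pages and a made-up position:

#include <stdio.h>

#define PAGE_SHIFT 12                     /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long long pos = 13000;   /* hypothetical file position */
	unsigned long index = pos >> PAGE_SHIFT;        /* page 3 */
	unsigned long from = pos & (PAGE_SIZE - 1);     /* offset 712 */
	printf("index=%lu from=%lu\n", index, from);
	return 0;
}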
@@ -331,7 +331,7 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
 	}
 	set_page_dirty(page);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);

 	return copied;
 }
|
|
@ -421,8 +421,8 @@ v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
||||||
struct inode *inode = file_inode(file);
|
struct inode *inode = file_inode(file);
|
||||||
loff_t i_size;
|
loff_t i_size;
|
||||||
unsigned long pg_start, pg_end;
|
unsigned long pg_start, pg_end;
|
||||||
pg_start = origin >> PAGE_CACHE_SHIFT;
|
pg_start = origin >> PAGE_SHIFT;
|
||||||
pg_end = (origin + retval - 1) >> PAGE_CACHE_SHIFT;
|
pg_end = (origin + retval - 1) >> PAGE_SHIFT;
|
||||||
if (inode->i_mapping && inode->i_mapping->nrpages)
|
if (inode->i_mapping && inode->i_mapping->nrpages)
|
||||||
invalidate_inode_pages2_range(inode->i_mapping,
|
invalidate_inode_pages2_range(inode->i_mapping,
|
||||||
pg_start, pg_end);
|
pg_start, pg_end);
|
||||||
|
|
Some files were not shown because too many files changed in this diff.