readahead: compacting file_ra_state
Use 'unsigned int' instead of 'unsigned long' for readahead sizes.  This
helps reduce memory consumption on 64-bit CPUs when a lot of files are
opened.

CC: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
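To get a rough feel for the saving, here is a standalone userspace sketch, not part of the patch: it models only the fields visible in the struct file_ra_state hunk below, stands pgoff_t in with unsigned long (matching the kernel's definition), and the struct names ra_state_old/ra_state_new are invented for the comparison. On an LP64 build it prints 56 vs. 48 bytes, i.e. about 8 bytes less readahead state embedded in every struct file.

/*
 * Standalone illustration, not kernel code: model the readahead state
 * before and after the patch and compare sizes on a 64-bit (LP64) build.
 * Only the fields shown in the hunk below are modelled; pgoff_t is
 * stood in by unsigned long.
 */
#include <stdio.h>

struct ra_state_old {			/* hypothetical model of the old layout */
	unsigned long start;		/* pgoff_t start */
	unsigned long size;
	unsigned long async_size;
	unsigned long ra_pages;
	unsigned long mmap_hit;
	unsigned long mmap_miss;
	unsigned long prev_index;
};

struct ra_state_new {			/* hypothetical model of the new layout */
	unsigned long start;		/* pgoff_t start */
	unsigned int size;
	unsigned int async_size;
	unsigned int ra_pages;
	unsigned long mmap_hit;
	unsigned long mmap_miss;
	unsigned long prev_index;
};

int main(void)
{
	/* On LP64 this prints 56 vs. 48: three longs become three ints,
	 * and 4 bytes of padding are inserted before mmap_hit. */
	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(struct ra_state_old), sizeof(struct ra_state_new));
	return 0;
}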
This commit is contained in:
Parent: 43fac94dd6
Commit: 937085aa35
include/linux/fs.h
@@ -697,12 +697,12 @@ struct fown_struct {
  * Track a single file's readahead state
  */
 struct file_ra_state {
-	pgoff_t start;                  /* where readahead started */
-	unsigned long size;             /* # of readahead pages */
-	unsigned long async_size;       /* do asynchronous readahead when
+	pgoff_t start;			/* where readahead started */
+	unsigned int size;		/* # of readahead pages */
+	unsigned int async_size;	/* do asynchronous readahead when
 					   there are only # of pages ahead */
 
-	unsigned long ra_pages;		/* Maximum readahead window */
+	unsigned int ra_pages;		/* Maximum readahead window */
 	unsigned long mmap_hit;		/* Cache hit stat for mmap accesses */
 	unsigned long mmap_miss;	/* Cache miss stat for mmap accesses */
 	unsigned long prev_index;	/* Cache last read() position */
mm/readahead.c
@@ -351,7 +351,7 @@ ondemand_readahead(struct address_space *mapping,
 		   bool hit_readahead_marker, pgoff_t offset,
 		   unsigned long req_size)
 {
-	unsigned long max;	/* max readahead pages */
+	int max;		/* max readahead pages */
 	int sequential;
 
 	max = ra->ra_pages;