ide-tape: improve buffer allocation strategy

Instead of allocating pages for the buffer one by one, take advantage of the
buddy alloc system and request them 2^order at a time. This increases the chance
for bigger buffer parts to be contiguous and reduces loop iteration count. While
at it, rename function __idetape_kmalloc_stage() to ide_tape_kmalloc_buffer().

[bart: fold with "ide-tape: fix mem leak" patch to preserve bisectability]

Signed-off-by: Borislav Petkov <petkovbb@gmail.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
This commit is contained in:
Borislav Petkov 2008-04-27 15:38:32 +02:00 committed by Bartlomiej Zolnierkiewicz
Parent 4c3032d8a4
Commit 41aa17069e
1 changed file: 37 additions and 23 deletions

View file

@@ -1290,20 +1290,20 @@ out:
} }
/* /*
* The function below uses __get_free_page to allocate a pipeline stage, along * The function below uses __get_free_pages to allocate a data buffer of size
* with all the necessary small buffers which together make a buffer of size
* tape->stage_size (or a bit more). We attempt to combine sequential pages as * tape->stage_size (or a bit more). We attempt to combine sequential pages as
* much as possible. * much as possible.
* *
* It returns a pointer to the new allocated stage, or NULL if we can't (or * It returns a pointer to the newly allocated buffer, or NULL in case of
* don't want to) allocate a stage. * failure.
*/ */
static idetape_stage_t *__idetape_kmalloc_stage(idetape_tape_t *tape, int full, static idetape_stage_t *ide_tape_kmalloc_buffer(idetape_tape_t *tape, int full,
int clear) int clear)
{ {
idetape_stage_t *stage; idetape_stage_t *stage;
struct idetape_bh *prev_bh, *bh; struct idetape_bh *prev_bh, *bh;
int pages = tape->pages_per_stage; int pages = tape->pages_per_stage;
unsigned int order, b_allocd;
char *b_data = NULL; char *b_data = NULL;
stage = kmalloc(sizeof(idetape_stage_t), GFP_KERNEL); stage = kmalloc(sizeof(idetape_stage_t), GFP_KERNEL);
@@ -1315,46 +1315,60 @@ static idetape_stage_t *__idetape_kmalloc_stage(idetape_tape_t *tape, int full,
bh = stage->bh; bh = stage->bh;
if (bh == NULL) if (bh == NULL)
goto abort; goto abort;
bh->b_reqnext = NULL;
bh->b_data = (char *) __get_free_page(GFP_KERNEL); order = fls(pages) - 1;
bh->b_data = (char *) __get_free_pages(GFP_KERNEL, order);
if (!bh->b_data) if (!bh->b_data)
goto abort; goto abort;
b_allocd = (1 << order) * PAGE_SIZE;
pages &= (order-1);
if (clear) if (clear)
memset(bh->b_data, 0, PAGE_SIZE); memset(bh->b_data, 0, b_allocd);
bh->b_size = PAGE_SIZE; bh->b_reqnext = NULL;
bh->b_size = b_allocd;
atomic_set(&bh->b_count, full ? bh->b_size : 0); atomic_set(&bh->b_count, full ? bh->b_size : 0);
while (--pages) { while (pages) {
b_data = (char *) __get_free_page(GFP_KERNEL); order = fls(pages) - 1;
b_data = (char *) __get_free_pages(GFP_KERNEL, order);
if (!b_data) if (!b_data)
goto abort; goto abort;
b_allocd = (1 << order) * PAGE_SIZE;
if (clear) if (clear)
memset(b_data, 0, PAGE_SIZE); memset(b_data, 0, b_allocd);
if (bh->b_data == b_data + PAGE_SIZE) {
bh->b_size += PAGE_SIZE; /* newly allocated page frames below buffer header or ...*/
bh->b_data -= PAGE_SIZE; if (bh->b_data == b_data + b_allocd) {
bh->b_size += b_allocd;
bh->b_data -= b_allocd;
if (full) if (full)
atomic_add(PAGE_SIZE, &bh->b_count); atomic_add(b_allocd, &bh->b_count);
continue; continue;
} }
/* they are above the header */
if (b_data == bh->b_data + bh->b_size) { if (b_data == bh->b_data + bh->b_size) {
bh->b_size += PAGE_SIZE; bh->b_size += b_allocd;
if (full) if (full)
atomic_add(PAGE_SIZE, &bh->b_count); atomic_add(b_allocd, &bh->b_count);
continue; continue;
} }
prev_bh = bh; prev_bh = bh;
bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL); bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
if (!bh) { if (!bh) {
free_page((unsigned long) b_data); free_pages((unsigned long) b_data, order);
goto abort; goto abort;
} }
bh->b_reqnext = NULL; bh->b_reqnext = NULL;
bh->b_data = b_data; bh->b_data = b_data;
bh->b_size = PAGE_SIZE; bh->b_size = b_allocd;
atomic_set(&bh->b_count, full ? bh->b_size : 0); atomic_set(&bh->b_count, full ? bh->b_size : 0);
prev_bh->b_reqnext = bh; prev_bh->b_reqnext = bh;
pages &= (order-1);
} }
bh->b_size -= tape->excess_bh_size; bh->b_size -= tape->excess_bh_size;
if (full) if (full)
atomic_sub(tape->excess_bh_size, &bh->b_count); atomic_sub(tape->excess_bh_size, &bh->b_count);
@@ -1837,7 +1851,7 @@ static int idetape_init_read(ide_drive_t *drive)
" 0 now\n"); " 0 now\n");
tape->merge_stage_size = 0; tape->merge_stage_size = 0;
} }
tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0); tape->merge_stage = ide_tape_kmalloc_buffer(tape, 0, 0);
if (!tape->merge_stage) if (!tape->merge_stage)
return -ENOMEM; return -ENOMEM;
tape->chrdev_dir = IDETAPE_DIR_READ; tape->chrdev_dir = IDETAPE_DIR_READ;
@@ -2115,7 +2129,7 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
"should be 0 now\n"); "should be 0 now\n");
tape->merge_stage_size = 0; tape->merge_stage_size = 0;
} }
tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0); tape->merge_stage = ide_tape_kmalloc_buffer(tape, 0, 0);
if (!tape->merge_stage) if (!tape->merge_stage)
return -ENOMEM; return -ENOMEM;
tape->chrdev_dir = IDETAPE_DIR_WRITE; tape->chrdev_dir = IDETAPE_DIR_WRITE;
@@ -2495,7 +2509,7 @@ static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
idetape_tape_t *tape = drive->driver_data; idetape_tape_t *tape = drive->driver_data;
idetape_empty_write_pipeline(drive); idetape_empty_write_pipeline(drive);
tape->merge_stage = __idetape_kmalloc_stage(tape, 1, 0); tape->merge_stage = ide_tape_kmalloc_buffer(tape, 1, 0);
if (tape->merge_stage != NULL) { if (tape->merge_stage != NULL) {
idetape_pad_zeros(drive, tape->blk_size * idetape_pad_zeros(drive, tape->blk_size *
(tape->user_bs_factor - 1)); (tape->user_bs_factor - 1));