direct-io: use a slab cache for struct dio

A dedicated slab cache is slightly faster than kmalloc and its objects can be
cached per CPU more effectively. It also avoids rounding the allocation up to
the next kmalloc size class.
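
For illustration, a minimal sketch of this pattern using a hypothetical
struct foo and foo_cache (only the slab APIs themselves — kmem_cache_create(),
kmem_cache_alloc(), kmem_cache_free() — are taken from the patch below):

/* Hypothetical module; not part of this patch. */
#include <linux/module.h>
#include <linux/slab.h>

struct foo {
	int state;
	char buf[200];	/* an "odd" size that kmalloc would round up */
};

static struct kmem_cache *foo_cache __read_mostly;

static struct foo *foo_alloc(void)
{
	/* objects come from a cache sized exactly for struct foo */
	return kmem_cache_alloc(foo_cache, GFP_KERNEL);
}

static void foo_free(struct foo *f)
{
	kmem_cache_free(foo_cache, f);
}

static int __init foo_module_init(void)
{
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0, 0, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static void __exit foo_module_exit(void)
{
	kmem_cache_destroy(foo_cache);
}

module_init(foo_module_init);
module_exit(foo_module_exit);
MODULE_LICENSE("GPL");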

In addition, this enforces cache line alignment for struct dio to avoid
any false sharing.
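
To make the false-sharing point concrete, a small hypothetical illustration
(struct hot_counter is made up; ____cacheline_aligned_in_smp from
<linux/cache.h> is the annotation the patch applies to struct dio):

/* Hypothetical illustration; not part of this patch. */
#include <linux/cache.h>

/*
 * ____cacheline_aligned_in_smp expands to an aligned(SMP_CACHE_BYTES)
 * attribute on SMP builds (and to nothing on UP), so each instance
 * starts on its own cache line.
 */
struct hot_counter {
	unsigned long hits;
} ____cacheline_aligned_in_smp;

/*
 * Without the annotation both counters could share one cache line, and
 * CPUs updating different counters would bounce that line between their
 * caches (false sharing). With it, each array element gets its own line.
 */
static struct hot_counter counters[2];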

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
This commit is contained in:
Andi Kleen 2011-08-01 21:38:06 -07:00, committed by root
Parent 0dc2bc49be
Commit 6e8267f532
1 changed file: 14 additions and 5 deletions

diff --git a/fs/direct-io.c b/fs/direct-io.c
--- a/fs/direct-io.c
+++ b/fs/direct-io.c

@@ -140,7 +140,9 @@ struct dio {
 	 * wish that they not be zeroed.
 	 */
 	struct page *pages[DIO_PAGES];	/* page buffer */
-};
+} ____cacheline_aligned_in_smp;
+
+static struct kmem_cache *dio_cache __read_mostly;
 
 static void __inode_dio_wait(struct inode *inode)
 {
@@ -330,7 +332,7 @@ static void dio_bio_end_aio(struct bio *bio, int error)
 
 	if (remaining == 0) {
 		dio_complete(dio, dio->iocb->ki_pos, 0, true);
-		kfree(dio);
+		kmem_cache_free(dio_cache, dio);
 	}
 }
 
@@ -1180,7 +1182,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
 
 		if (ret2 == 0) {
 			ret = dio_complete(dio, offset, ret, false);
-			kfree(dio);
+			kmem_cache_free(dio_cache, dio);
 		} else
 			BUG_ON(ret != -EIOCBQUEUED);
 
@@ -1256,7 +1258,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	if (rw == READ && end == offset)
 		return 0;
 
-	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
+	dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
 	retval = -ENOMEM;
 	if (!dio)
 		goto out;
@@ -1280,7 +1282,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 					      end - 1);
 		if (retval) {
 			mutex_unlock(&inode->i_mutex);
-			kfree(dio);
+			kmem_cache_free(dio_cache, dio);
 			goto out;
 		}
 	}
@@ -1308,3 +1310,10 @@ out:
 	return retval;
 }
 EXPORT_SYMBOL(__blockdev_direct_IO);
+
+static __init int dio_init(void)
+{
+	dio_cache = KMEM_CACHE(dio, SLAB_PANIC);
+	return 0;
+}
+module_init(dio_init)
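
A usage note on the KMEM_CACHE() convenience macro (my paraphrase of the slab
API, not text from the commit): it derives the cache name, object size and
alignment from the struct itself, so the ____cacheline_aligned_in_smp
annotation added above is what makes every slab-allocated struct dio cache
line aligned. The dio_init() call expands to roughly:

/* Sketch of the expansion, for illustration only. */
dio_cache = kmem_cache_create("dio",			/* cache name from the struct name */
			      sizeof(struct dio),	/* exact object size, no kmalloc rounding */
			      __alignof__(struct dio),	/* cache line size on SMP, via the annotation */
			      SLAB_PANIC,		/* panic at boot instead of returning NULL */
			      NULL);			/* no constructor */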