mm/slab: limit kmalloc() minimum alignment to dma_get_cache_alignment()
Do not create kmalloc() caches which are not aligned to dma_get_cache_alignment(). There is no functional change since for current architectures defining ARCH_DMA_MINALIGN, ARCH_KMALLOC_MINALIGN equals ARCH_DMA_MINALIGN (and dma_get_cache_alignment()). On architectures without a specific ARCH_DMA_MINALIGN, dma_get_cache_alignment() is 1, so no change to the kmalloc() caches. Link: https://lkml.kernel.org/r/20230612153201.554742-5-catalin.marinas@arm.com Signed-off-by: Catalin Marinas <catalin.marinas@arm.com> Reviewed-by: Vlastimil Babka <vbabka@suse.cz> Tested-by: Isaac J. Manjarres <isaacmanjarres@google.com> Cc: Christoph Hellwig <hch@lst.de> Cc: Robin Murphy <robin.murphy@arm.com> Cc: Alasdair Kergon <agk@redhat.com> Cc: Ard Biesheuvel <ardb@kernel.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Daniel Vetter <daniel@ffwll.ch> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Herbert Xu <herbert@gondor.apana.org.au> Cc: Jerry Snitselaar <jsnitsel@redhat.com> Cc: Joerg Roedel <joro@8bytes.org> Cc: Jonathan Cameron <jic23@kernel.org> Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com> Cc: Lars-Peter Clausen <lars@metafoo.de> Cc: Logan Gunthorpe <logang@deltatee.com> Cc: Marc Zyngier <maz@kernel.org> Cc: Mark Brown <broonie@kernel.org> Cc: Mike Snitzer <snitzer@kernel.org> Cc: "Rafael J. Wysocki" <rafael@kernel.org> Cc: Saravana Kannan <saravanak@google.com> Cc: Will Deacon <will@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Parent
0c474d31a6
Commit
963e84b0f2
|
@@ -17,6 +17,7 @@
|
|||
#include <linux/cpu.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/kasan.h>
|
||||
|
@@ -862,9 +863,18 @@ void __init setup_kmalloc_cache_index_table(void)
|
|||
}
|
||||
}
|
||||
|
||||
/*
 * Minimum alignment for kmalloc() caches: the DMA cache alignment.
 * Per the commit description, dma_get_cache_alignment() equals
 * ARCH_DMA_MINALIGN on architectures that define it, and is 1
 * otherwise, so kmalloc() caches are never created with less than
 * DMA-safe alignment.
 */
static unsigned int __kmalloc_minalign(void)
{
	return dma_get_cache_alignment();
}
|
||||
|
||||
void __init
|
||||
new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
|
||||
{
|
||||
unsigned int minalign = __kmalloc_minalign();
|
||||
unsigned int aligned_size = kmalloc_info[idx].size;
|
||||
int aligned_idx = idx;
|
||||
|
||||
if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) {
|
||||
flags |= SLAB_RECLAIM_ACCOUNT;
|
||||
} else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) {
|
||||
|
@@ -877,9 +887,17 @@ new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
|
|||
flags |= SLAB_CACHE_DMA;
|
||||
}
|
||||
|
||||
kmalloc_caches[type][idx] = create_kmalloc_cache(
|
||||
kmalloc_info[idx].name[type],
|
||||
kmalloc_info[idx].size, flags);
|
||||
if (minalign > ARCH_KMALLOC_MINALIGN) {
|
||||
aligned_size = ALIGN(aligned_size, minalign);
|
||||
aligned_idx = __kmalloc_index(aligned_size, false);
|
||||
}
|
||||
|
||||
if (!kmalloc_caches[type][aligned_idx])
|
||||
kmalloc_caches[type][aligned_idx] = create_kmalloc_cache(
|
||||
kmalloc_info[aligned_idx].name[type],
|
||||
aligned_size, flags);
|
||||
if (idx != aligned_idx)
|
||||
kmalloc_caches[type][idx] = kmalloc_caches[type][aligned_idx];
|
||||
|
||||
/*
|
||||
* If CONFIG_MEMCG_KMEM is enabled, disable cache merging for
|
||||
|
|
Loading…
Reference in a new issue