cma: factor out minimum alignment requirement
[ Upstream commit e16faf2678 ]
Patch series "mm: enforce pageblock_order < MAX_ORDER".
Having pageblock_order >= MAX_ORDER can apparently happen in corner
cases, and some parts of the kernel are not prepared for it.
For example, Aneesh has shown [1] that such kernels can be compiled on
ppc64 with 64k base pages by setting FORCE_MAX_ZONEORDER=8, which will
run into a WARN_ON_ONCE(order >= MAX_ORDER) in compaction code right
during boot.
We can get pageblock_order >= MAX_ORDER when the default hugetlb size is
bigger than the maximum allocation granularity of the buddy, in which
case we are no longer talking about huge pages but instead gigantic
pages.
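
To make the arithmetic behind [1] concrete, here is a standalone
userspace sketch (values assumed from the report: 64k base pages, 16 MiB
default hugetlb size, FORCE_MAX_ZONEORDER=8; this is not code from the
series):

#include <stdio.h>

int main(void)
{
	/* Assumed ppc64 setup from the report in [1]: 64k base pages,
	 * 16 MiB default hugetlb size, FORCE_MAX_ZONEORDER=8. */
	const unsigned long page_shift = 16;		/* 64 KiB pages */
	const unsigned long hugetlb_bytes = 16UL << 20;	/* 16 MiB */
	const unsigned long max_order = 8;

	/* pageblock_order follows the default hugetlb order:
	 * log2(16 MiB / 64 KiB) = 8 */
	unsigned long pageblock_order =
		__builtin_ctzl(hugetlb_bytes >> page_shift);

	printf("pageblock_order = %lu vs. MAX_ORDER = %lu\n",
	       pageblock_order, max_order);
	/* The buddy serves at most order MAX_ORDER - 1 = 7, i.e. 8 MiB,
	 * while one pageblock spans 16 MiB: pageblock_order >= MAX_ORDER. */
	printf("largest buddy chunk: %lu MiB, pageblock: %lu MiB\n",
	       (1UL << (max_order - 1 + page_shift)) >> 20,
	       (1UL << (pageblock_order + page_shift)) >> 20);
	return 0;
}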
Having pageblock_order >= MAX_ORDER can only make alloc_contig_range()
of such gigantic pages more likely to succeed.
Reliable use of gigantic pages requires either boot time allocation or
CMA; there is no need to overcomplicate some places in the kernel to
optimize for corner cases that are broken in other areas of the kernel.
This patch (of 2):
Let's enforce pageblock_order < MAX_ORDER and simplify.
Especially, patch #1 can be regarded as a cleanup before:
[PATCH v5 0/6] Use pageblock_order for cma and alloc_contig_range
alignment. [2]
[1] https://lkml.kernel.org/r/87r189a2ks.fsf@linux.ibm.com
[2] https://lkml.kernel.org/r/20220211164135.1803616-1-zi.yan@sent.com
Link: https://lkml.kernel.org/r/20220214174132.219303-2-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Acked-by: Rob Herring <robh@kernel.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Frank Rowand <frowand.list@gmail.com>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: John Garry via iommu <iommu@lists.linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Stable-dep-of: b174f139bdc8 ("mm/cma: drop incorrect alignment check in cma_init_reserved_mem")
Signed-off-by: Sasha Levin <sashal@kernel.org>
diff --git a/arch/powerpc/include/asm/fadump-internal.h b/arch/powerpc/include/asm/fadump-internal.h
@@ -19,11 +19,6 @@
 
 #define memblock_num_regions(memblock_type)	(memblock.memblock_type.cnt)
 
-/* Alignment per CMA requirement. */
-#define FADUMP_CMA_ALIGNMENT	(PAGE_SIZE <<				\
-				 max_t(unsigned long, MAX_ORDER - 1,	\
-				 pageblock_order))
-
 /* FAD commands */
 #define FADUMP_REGISTER			1
 #define FADUMP_UNREGISTER		2
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
@@ -544,7 +544,7 @@ int __init fadump_reserve_mem(void)
 		if (!fw_dump.nocma) {
 			fw_dump.boot_memory_size =
 				ALIGN(fw_dump.boot_memory_size,
-				      FADUMP_CMA_ALIGNMENT);
+				      CMA_MIN_ALIGNMENT_BYTES);
 		}
 #endif
 
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
@@ -22,6 +22,7 @@
 #include <linux/slab.h>
 #include <linux/memblock.h>
 #include <linux/kmemleak.h>
+#include <linux/cma.h>
 
 #include "of_private.h"
 
@@ -117,12 +118,8 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
 	if (IS_ENABLED(CONFIG_CMA)
 	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
 	    && of_get_flat_dt_prop(node, "reusable", NULL)
-	    && !nomap) {
-		unsigned long order =
-			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
-
-		align = max(align, (phys_addr_t)PAGE_SIZE << order);
-	}
+	    && !nomap)
+		align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);
 
 	prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
 	if (prop) {
diff --git a/include/linux/cma.h b/include/linux/cma.h
@@ -20,6 +20,15 @@
 
 #define CMA_MAX_NAME 64
 
+/*
+ * TODO: once the buddy -- especially pageblock merging and alloc_contig_range()
+ * -- can deal with only some pageblocks of a higher-order page being
+ * MIGRATE_CMA, we can use pageblock_nr_pages.
+ */
+#define CMA_MIN_ALIGNMENT_PAGES max_t(phys_addr_t, MAX_ORDER_NR_PAGES, \
+					pageblock_nr_pages)
+#define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)
+
 struct cma;
 
 extern unsigned long totalcma_pages;
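
For a sense of the resulting granularity, the following standalone
sketch mirrors the two new macros with illustrative values for a common
x86-64 configuration (4 KiB pages, MAX_ORDER = 11, pageblock_order = 9;
all assumed here, the real values are config and arch dependent):

#include <stdio.h>

/* Illustrative x86-64 values (assumptions, not from the patch). */
#define PAGE_SIZE		4096UL
#define MAX_ORDER		11
#define MAX_ORDER_NR_PAGES	(1UL << (MAX_ORDER - 1))	/* 1024 pages */
#define pageblock_nr_pages	(1UL << 9)			/*  512 pages */

#define max(a, b)		((a) > (b) ? (a) : (b))
/* Mirrors the new definitions in include/linux/cma.h: */
#define CMA_MIN_ALIGNMENT_PAGES	max(MAX_ORDER_NR_PAGES, pageblock_nr_pages)
#define CMA_MIN_ALIGNMENT_BYTES	(PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)

int main(void)
{
	/* max(1024, 512) = 1024 pages -> 4 MiB minimum CMA alignment here */
	printf("CMA_MIN_ALIGNMENT_PAGES = %lu pages\n",
	       CMA_MIN_ALIGNMENT_PAGES);
	printf("CMA_MIN_ALIGNMENT_BYTES = %lu MiB\n",
	       CMA_MIN_ALIGNMENT_BYTES >> 20);
	return 0;
}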
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
@@ -399,8 +399,6 @@ static const struct reserved_mem_ops rmem_cma_ops = {
 
 static int __init rmem_cma_setup(struct reserved_mem *rmem)
 {
-	phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
-	phys_addr_t mask = align - 1;
 	unsigned long node = rmem->fdt_node;
 	bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
 	struct cma *cma;
@@ -416,7 +414,7 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
 	    of_get_flat_dt_prop(node, "no-map", NULL))
 		return -EINVAL;
 
-	if ((rmem->base & mask) || (rmem->size & mask)) {
+	if (!IS_ALIGNED(rmem->base | rmem->size, CMA_MIN_ALIGNMENT_BYTES)) {
 		pr_err("Reserved memory: incorrect alignment of CMA region\n");
 		return -EINVAL;
 	}
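
The conversion from an explicit mask to IS_ALIGNED(rmem->base |
rmem->size, CMA_MIN_ALIGNMENT_BYTES) works because OR-ing the two values
unions their low bits, so a single check covers both. A minimal
userspace sketch, with IS_ALIGNED expanded to its usual kernel
definition (simplified; the example values are made up):

#include <assert.h>
#include <stdint.h>

/* Simplified form of the kernel's IS_ALIGNED() from include/linux/align.h. */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
	uint64_t align = 4UL << 20;	/* e.g. a 4 MiB CMA_MIN_ALIGNMENT_BYTES */
	uint64_t base = 64UL << 20;	/* 4 MiB aligned */
	uint64_t size = 32UL << 20;	/* 4 MiB aligned */

	/* OR-ing collects the low bits of both values: one check covers both. */
	assert(IS_ALIGNED(base | size, align));
	assert(IS_ALIGNED(base, align) && IS_ALIGNED(size, align));

	size += 4096;	/* mis-align size: the combined check fails too */
	assert(!IS_ALIGNED(base | size, align));
	return 0;
}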
diff --git a/mm/cma.c b/mm/cma.c
@@ -169,7 +169,6 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 				 struct cma **res_cma)
 {
 	struct cma *cma;
-	phys_addr_t alignment;
 
 	/* Sanity checks */
 	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
@@ -180,15 +179,12 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 	if (!size || !memblock_is_region_reserved(base, size))
 		return -EINVAL;
 
-	/* ensure minimal alignment required by mm core */
-	alignment = PAGE_SIZE <<
-			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
-
 	/* alignment should be aligned with order_per_bit */
-	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
+	if (!IS_ALIGNED(CMA_MIN_ALIGNMENT_PAGES, 1 << order_per_bit))
 		return -EINVAL;
 
-	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
+	/* ensure minimal alignment required by mm core */
+	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
 		return -EINVAL;
 
 	/*
@@ -263,14 +259,8 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
 	if (alignment && !is_power_of_2(alignment))
 		return -EINVAL;
 
-	/*
-	 * Sanitise input arguments.
-	 * Pages both ends in CMA area could be merged into adjacent unmovable
-	 * migratetype page by page allocator's buddy algorithm. In the case,
-	 * you couldn't get a contiguous memory, which is not what we want.
-	 */
-	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
-			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
+	/* Sanitise input arguments. */
+	alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
 	if (fixed && base & (alignment - 1)) {
 		ret = -EINVAL;
 		pr_err("Region at %pa must be aligned to %pa bytes\n",
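
For callers, the net effect is that the minimum granularity no longer
has to be open-coded. A hypothetical boot-time caller might look like
this (sketch only; demo_cma_reserve and the "demo" area are made-up
names, not part of this patch):

#include <linux/align.h>
#include <linux/cma.h>
#include <linux/numa.h>

static struct cma *demo_cma;

static int __init demo_cma_reserve(phys_addr_t size)
{
	/* Previously callers open-coded
	 * PAGE_SIZE << max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
	 * now the mm core exports the same minimum granularity: */
	size = ALIGN(size, CMA_MIN_ALIGNMENT_BYTES);

	/* base/limit/alignment of 0: let CMA place the area and apply
	 * the minimum alignment itself, as in cma_declare_contiguous_nid()
	 * above. */
	return cma_declare_contiguous_nid(0, size, 0, 0, 0, false,
					  "demo", &demo_cma, NUMA_NO_NODE);
}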