Linux 5.1-rc7
-----BEGIN PGP SIGNATURE-----

iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAlzGP4QeHHRvcnZhbGRz
QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGU10H/254js04AIRm2V6m
ULzSNiIcSOlRZt/Wv/iKth6OGxhifgJ5u6uazQ8+EjZ+ofSUNDwFE+JYzYekyLoi
g/wm78HwGkI5RnzzPS3zRuC8ld9rRq1LcH8AEx3VYT2VqYqurdmLy+vvdx84vyjW
8DHaLI53ufr46g2qcS1uXWWfetzyPV+iCTyDLUENv4L3sl6jTCmd5M4N1SHM9Kag
MEb9KXwzi95isdOBI8NZHfGuU+eV3S08MVJ0Hp99F3dLrYx4LLFiFej9qMvbIxfp
snuGoiXIzt0kNGxBQ36d0w6FEcvx2GWtfVQDWVA+9h5fDA1O1RkJ8LAo3HLDP8Cg
MOeNpS0=
=mWQq
-----END PGP SIGNATURE-----

Merge tag 'v5.1-rc7' into x86/mm, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Commit: d5963d87bf
@@ -866,14 +866,14 @@ The intent is that compaction has less work to do in the future and to
 increase the success rate of future high-order allocations such as SLUB
 allocations, THP and hugetlbfs pages.
 
-To make it sensible with respect to the watermark_scale_factor parameter,
-the unit is in fractions of 10,000. The default value of 15,000 means
-that up to 150% of the high watermark will be reclaimed in the event of
-a pageblock being mixed due to fragmentation. The level of reclaim is
-determined by the number of fragmentation events that occurred in the
-recent past. If this value is smaller than a pageblock then a pageblocks
-worth of pages will be reclaimed (e.g. 2MB on 64-bit x86). A boost factor
-of 0 will disable the feature.
+To make it sensible with respect to the watermark_scale_factor
+parameter, the unit is in fractions of 10,000. The default value of
+15,000 on !DISCONTIGMEM configurations means that up to 150% of the high
+watermark will be reclaimed in the event of a pageblock being mixed due
+to fragmentation. The level of reclaim is determined by the number of
+fragmentation events that occurred in the recent past. If this value is
+smaller than a pageblock then a pageblocks worth of pages will be reclaimed
+(e.g. 2MB on 64-bit x86). A boost factor of 0 will disable the feature.
 
 =============================================================
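Editor's note: the hunk above documents the vm.watermark_boost_factor sysctl. As a quick illustration (not part of the patch), the tunable can be read and written through procfs; this minimal sketch assumes the usual /proc/sys/vm/watermark_boost_factor path, reports the current boost as a percentage of the high watermark, and then disables boosting by writing 0 (which requires root).

/*
 * Editor's sketch, not from the patch: query and set
 * vm.watermark_boost_factor via procfs. The value is in fractions of
 * 10,000, so 15000 means up to 150% of the high watermark may be
 * reclaimed after a fragmentation event; 0 disables boosting.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/vm/watermark_boost_factor";
	unsigned long val;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%lu", &val) != 1) {
		fclose(f);
		fprintf(stderr, "failed to parse %s\n", path);
		return 1;
	}
	fclose(f);

	printf("watermark_boost_factor = %lu (up to %lu%% of the high watermark)\n",
	       val, val / 100);

	/* Example: disable watermark boosting entirely (needs root). */
	f = fopen(path, "w");
	if (f) {
		fprintf(f, "0\n");
		fclose(f);
	} else {
		perror(path);
	}
	return 0;
}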
Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*

@@ -73,7 +73,7 @@ config ARM
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
 	select HAVE_EXIT_THREAD
 	select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
-	select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL
+	select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
 	select HAVE_FUNCTION_TRACER if !XIP_KERNEL
 	select HAVE_GCC_PLUGINS
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)

@@ -47,8 +47,8 @@ config DEBUG_WX
 
 choice
 	prompt "Choose kernel unwinder"
-	default UNWINDER_ARM if AEABI && !FUNCTION_GRAPH_TRACER
-	default UNWINDER_FRAME_POINTER if !AEABI || FUNCTION_GRAPH_TRACER
+	default UNWINDER_ARM if AEABI
+	default UNWINDER_FRAME_POINTER if !AEABI
 	help
 	  This determines which method will be used for unwinding kernel stack
 	  traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,

@@ -65,7 +65,7 @@ config UNWINDER_FRAME_POINTER
 
 config UNWINDER_ARM
 	bool "ARM EABI stack unwinder"
-	depends on AEABI
+	depends on AEABI && !FUNCTION_GRAPH_TRACER
 	select ARM_UNWIND
 	help
 	  This option enables stack unwinding support in the kernel

@@ -1438,7 +1438,21 @@ ENTRY(efi_stub_entry)
 
 		@ Preserve return value of efi_entry() in r4
 		mov	r4, r0
-		bl	cache_clean_flush
+
+		@ our cache maintenance code relies on CP15 barrier instructions
+		@ but since we arrived here with the MMU and caches configured
+		@ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
+		@ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
+		@ the enable path will be executed on v7+ only.
+		mrc	p15, 0, r1, c1, c0, 0	@ read SCTLR
+		tst	r1, #(1 << 5)		@ CP15BEN bit set?
+		bne	0f
+		orr	r1, r1, #(1 << 5)	@ CP15 barrier instructions
+		mcr	p15, 0, r1, c1, c0, 0	@ write SCTLR
+ ARM(		.inst	0xf57ff06f	@ v7+ isb	)
+ THUMB(		isb						)
+
+0:		bl	cache_clean_flush
 		bl	cache_off
 
 		@ Set parameters for booting zImage according to boot protocol

@@ -133,9 +133,9 @@ __secondary_data:
  */
 	.text
 __after_proc_init:
-#ifdef CONFIG_ARM_MPU
 M_CLASS(movw	r12, #:lower16:BASEADDR_V7M_SCB)
 M_CLASS(movt	r12, #:upper16:BASEADDR_V7M_SCB)
+#ifdef CONFIG_ARM_MPU
 M_CLASS(ldr	r3, [r12, 0x50])
 AR_CLASS(mrc	p15, 0, r3, c0, c1, 4)		@ Read ID_MMFR0
 	and	r3, r3, #(MMFR0_PMSA)		@ PMSA field

@ -103,10 +103,15 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
|
|||
* to be revisited if support for multiple ftrace entry points
|
||||
* is added in the future, but for now, the pr_err() below
|
||||
* deals with a theoretical issue only.
|
||||
*
|
||||
* Note that PLTs are place relative, and plt_entries_equal()
|
||||
* checks whether they point to the same target. Here, we need
|
||||
* to check if the actual opcodes are in fact identical,
|
||||
* regardless of the offset in memory so use memcmp() instead.
|
||||
*/
|
||||
trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
|
||||
if (!plt_entries_equal(mod->arch.ftrace_trampoline,
|
||||
&trampoline)) {
|
||||
if (memcmp(mod->arch.ftrace_trampoline, &trampoline,
|
||||
sizeof(trampoline))) {
|
||||
if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
|
||||
pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
|
||||
return -EINVAL;
|
||||
|
|
|
@ -363,7 +363,7 @@ void __init arm64_memblock_init(void)
|
|||
* Otherwise, this is a no-op
|
||||
*/
|
||||
u64 base = phys_initrd_start & PAGE_MASK;
|
||||
u64 size = PAGE_ALIGN(phys_initrd_size);
|
||||
u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;
|
||||
|
||||
/*
|
||||
* We can only add back the initrd memory if we don't end up
|
||||
|
|
|
@ -266,6 +266,7 @@ CONFIG_UDF_FS=m
|
|||
CONFIG_MSDOS_FS=m
|
||||
CONFIG_VFAT_FS=m
|
||||
CONFIG_PROC_KCORE=y
|
||||
CONFIG_HUGETLBFS=y
|
||||
# CONFIG_MISC_FILESYSTEMS is not set
|
||||
# CONFIG_NETWORK_FILESYSTEMS is not set
|
||||
CONFIG_NLS=y
|
||||
|
|
|
@ -95,28 +95,15 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
|
|||
unsigned long entries, unsigned long dev_hpa,
|
||||
struct mm_iommu_table_group_mem_t **pmem)
|
||||
{
|
||||
struct mm_iommu_table_group_mem_t *mem;
|
||||
long i, ret, locked_entries = 0;
|
||||
struct mm_iommu_table_group_mem_t *mem, *mem2;
|
||||
long i, ret, locked_entries = 0, pinned = 0;
|
||||
unsigned int pageshift;
|
||||
|
||||
mutex_lock(&mem_list_mutex);
|
||||
|
||||
list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
|
||||
next) {
|
||||
/* Overlap? */
|
||||
if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
|
||||
(ua < (mem->ua +
|
||||
(mem->entries << PAGE_SHIFT)))) {
|
||||
ret = -EINVAL;
|
||||
goto unlock_exit;
|
||||
}
|
||||
|
||||
}
|
||||
unsigned long entry, chunk;
|
||||
|
||||
if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
|
||||
ret = mm_iommu_adjust_locked_vm(mm, entries, true);
|
||||
if (ret)
|
||||
goto unlock_exit;
|
||||
return ret;
|
||||
|
||||
locked_entries = entries;
|
||||
}
|
||||
|
@ -148,17 +135,27 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
|
|||
}
|
||||
|
||||
down_read(&mm->mmap_sem);
|
||||
ret = get_user_pages_longterm(ua, entries, FOLL_WRITE, mem->hpages, NULL);
|
||||
up_read(&mm->mmap_sem);
|
||||
if (ret != entries) {
|
||||
/* free the reference taken */
|
||||
for (i = 0; i < ret; i++)
|
||||
put_page(mem->hpages[i]);
|
||||
chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
|
||||
sizeof(struct vm_area_struct *);
|
||||
chunk = min(chunk, entries);
|
||||
for (entry = 0; entry < entries; entry += chunk) {
|
||||
unsigned long n = min(entries - entry, chunk);
|
||||
|
||||
vfree(mem->hpas);
|
||||
kfree(mem);
|
||||
ret = -EFAULT;
|
||||
goto unlock_exit;
|
||||
ret = get_user_pages_longterm(ua + (entry << PAGE_SHIFT), n,
|
||||
FOLL_WRITE, mem->hpages + entry, NULL);
|
||||
if (ret == n) {
|
||||
pinned += n;
|
||||
continue;
|
||||
}
|
||||
if (ret > 0)
|
||||
pinned += ret;
|
||||
break;
|
||||
}
|
||||
up_read(&mm->mmap_sem);
|
||||
if (pinned != entries) {
|
||||
if (!ret)
|
||||
ret = -EFAULT;
|
||||
goto free_exit;
|
||||
}
|
||||
|
||||
pageshift = PAGE_SHIFT;
|
||||
|
@ -183,21 +180,43 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
|
|||
}
|
||||
|
||||
good_exit:
|
||||
ret = 0;
|
||||
atomic64_set(&mem->mapped, 1);
|
||||
mem->used = 1;
|
||||
mem->ua = ua;
|
||||
mem->entries = entries;
|
||||
*pmem = mem;
|
||||
|
||||
mutex_lock(&mem_list_mutex);
|
||||
|
||||
list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next) {
|
||||
/* Overlap? */
|
||||
if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) &&
|
||||
(ua < (mem2->ua +
|
||||
(mem2->entries << PAGE_SHIFT)))) {
|
||||
ret = -EINVAL;
|
||||
mutex_unlock(&mem_list_mutex);
|
||||
goto free_exit;
|
||||
}
|
||||
}
|
||||
|
||||
list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
|
||||
|
||||
unlock_exit:
|
||||
if (locked_entries && ret)
|
||||
mm_iommu_adjust_locked_vm(mm, locked_entries, false);
|
||||
|
||||
mutex_unlock(&mem_list_mutex);
|
||||
|
||||
*pmem = mem;
|
||||
|
||||
return 0;
|
||||
|
||||
free_exit:
|
||||
/* free the reference taken */
|
||||
for (i = 0; i < pinned; i++)
|
||||
put_page(mem->hpages[i]);
|
||||
|
||||
vfree(mem->hpas);
|
||||
kfree(mem);
|
||||
|
||||
unlock_exit:
|
||||
mm_iommu_adjust_locked_vm(mm, locked_entries, false);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -266,7 +285,7 @@ static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
|
|||
long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
|
||||
{
|
||||
long ret = 0;
|
||||
unsigned long entries, dev_hpa;
|
||||
unsigned long unlock_entries = 0;
|
||||
|
||||
mutex_lock(&mem_list_mutex);
|
||||
|
||||
|
@ -287,17 +306,17 @@ long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
|
|||
goto unlock_exit;
|
||||
}
|
||||
|
||||
/* @mapped became 0 so now mappings are disabled, release the region */
|
||||
entries = mem->entries;
|
||||
dev_hpa = mem->dev_hpa;
|
||||
mm_iommu_release(mem);
|
||||
if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
|
||||
unlock_entries = mem->entries;
|
||||
|
||||
if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
|
||||
mm_iommu_adjust_locked_vm(mm, entries, false);
|
||||
/* @mapped became 0 so now mappings are disabled, release the region */
|
||||
mm_iommu_release(mem);
|
||||
|
||||
unlock_exit:
|
||||
mutex_unlock(&mem_list_mutex);
|
||||
|
||||
mm_iommu_adjust_locked_vm(mm, unlock_entries, false);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mm_iommu_put);
|
||||
|
|
|
@ -324,7 +324,7 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
|
|||
|
||||
config PPC_RADIX_MMU
|
||||
bool "Radix MMU Support"
|
||||
depends on PPC_BOOK3S_64
|
||||
depends on PPC_BOOK3S_64 && HUGETLB_PAGE
|
||||
select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
|
||||
default y
|
||||
help
|
||||
|
|
|
@ -352,7 +352,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
|
|||
boot_params->hdr.loadflags &= ~KASLR_FLAG;
|
||||
|
||||
/* Save RSDP address for later use. */
|
||||
boot_params->acpi_rsdp_addr = get_rsdp_addr();
|
||||
/* boot_params->acpi_rsdp_addr = get_rsdp_addr(); */
|
||||
|
||||
sanitize_boot_params(boot_params);
|
||||
|
||||
|
|
|
@ -76,15 +76,15 @@
|
|||
* Scope: Package (physical package)
|
||||
* MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
|
||||
* perf code: 0x04
|
||||
* Available model: HSW ULT,CNL
|
||||
* Available model: HSW ULT,KBL,CNL
|
||||
* Scope: Package (physical package)
|
||||
* MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
|
||||
* perf code: 0x05
|
||||
* Available model: HSW ULT,CNL
|
||||
* Available model: HSW ULT,KBL,CNL
|
||||
* Scope: Package (physical package)
|
||||
* MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
|
||||
* perf code: 0x06
|
||||
* Available model: HSW ULT,GLM,CNL
|
||||
* Available model: HSW ULT,KBL,GLM,CNL
|
||||
* Scope: Package (physical package)
|
||||
*
|
||||
*/
|
||||
|
@ -566,8 +566,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
|
|||
X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
|
||||
X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates),
|
||||
|
||||
X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, snb_cstates),
|
||||
X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates),
|
||||
X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, hswult_cstates),
|
||||
X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, hswult_cstates),
|
||||
|
||||
X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_MOBILE, cnl_cstates),
|
||||
|
||||
|
|
|
@ -5,6 +5,7 @@
|
|||
#include <linux/memblock.h>
|
||||
#include <linux/swapfile.h>
|
||||
#include <linux/swapops.h>
|
||||
#include <linux/kmemleak.h>
|
||||
|
||||
#include <asm/set_memory.h>
|
||||
#include <asm/e820/api.h>
|
||||
|
@ -766,6 +767,11 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
|
|||
if (debug_pagealloc_enabled()) {
|
||||
pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n",
|
||||
begin, end - 1);
|
||||
/*
|
||||
* Inform kmemleak about the hole in the memory since the
|
||||
* corresponding pages will be unmapped.
|
||||
*/
|
||||
kmemleak_free_part((void *)begin, end - begin);
|
||||
set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
|
||||
} else {
|
||||
/*
|
||||
|
|
|
@ -774,18 +774,18 @@ struct zram_work {
|
|||
struct zram *zram;
|
||||
unsigned long entry;
|
||||
struct bio *bio;
|
||||
struct bio_vec bvec;
|
||||
};
|
||||
|
||||
#if PAGE_SIZE != 4096
|
||||
static void zram_sync_read(struct work_struct *work)
|
||||
{
|
||||
struct bio_vec bvec;
|
||||
struct zram_work *zw = container_of(work, struct zram_work, work);
|
||||
struct zram *zram = zw->zram;
|
||||
unsigned long entry = zw->entry;
|
||||
struct bio *bio = zw->bio;
|
||||
|
||||
read_from_bdev_async(zram, &bvec, entry, bio);
|
||||
read_from_bdev_async(zram, &zw->bvec, entry, bio);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -798,6 +798,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
|
|||
{
|
||||
struct zram_work work;
|
||||
|
||||
work.bvec = *bvec;
|
||||
work.zram = zram;
|
||||
work.entry = entry;
|
||||
work.bio = bio;
|
||||
|
|
|
@ -671,7 +671,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg(
|
|||
d = bcm2835_dma_create_cb_chain(chan, direction, false,
|
||||
info, extra,
|
||||
frames, src, dst, 0, 0,
|
||||
GFP_KERNEL);
|
||||
GFP_NOWAIT);
|
||||
if (!d)
|
||||
return NULL;
|
||||
|
||||
|
|
|
@ -253,7 +253,7 @@ static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc,
|
|||
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
|
||||
mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHFIT);
|
||||
#else
|
||||
mtk_dma_set(pc, MTK_CQDMA_SRC2, 0);
|
||||
mtk_dma_set(pc, MTK_CQDMA_DST2, 0);
|
||||
#endif
|
||||
|
||||
/* setup the length */
|
||||
|
|
|
@ -1282,6 +1282,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
|
|||
enum dma_status status;
|
||||
unsigned int residue = 0;
|
||||
unsigned int dptr = 0;
|
||||
unsigned int chcrb;
|
||||
unsigned int tcrb;
|
||||
unsigned int i;
|
||||
|
||||
if (!desc)
|
||||
return 0;
|
||||
|
@ -1329,6 +1332,24 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* We need to read two registers.
|
||||
* Make sure the control register does not skip to next chunk
|
||||
* while reading the counter.
|
||||
* Trying it 3 times should be enough: Initial read, retry, retry
|
||||
* for the paranoid.
|
||||
*/
|
||||
for (i = 0; i < 3; i++) {
|
||||
chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
|
||||
RCAR_DMACHCRB_DPTR_MASK;
|
||||
tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB);
|
||||
/* Still the same? */
|
||||
if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
|
||||
RCAR_DMACHCRB_DPTR_MASK))
|
||||
break;
|
||||
}
|
||||
WARN_ONCE(i >= 3, "residue might be not continuous!");
|
||||
|
||||
/*
|
||||
* In descriptor mode the descriptor running pointer is not maintained
|
||||
* by the interrupt handler, find the running descriptor from the
|
||||
|
@ -1336,8 +1357,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
|
|||
* mode just use the running descriptor pointer.
|
||||
*/
|
||||
if (desc->hwdescs.use) {
|
||||
dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
|
||||
RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
|
||||
dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT;
|
||||
if (dptr == 0)
|
||||
dptr = desc->nchunks;
|
||||
dptr--;
|
||||
|
@ -1355,7 +1375,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
|
|||
}
|
||||
|
||||
/* Add the residue for the current chunk. */
|
||||
residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;
|
||||
residue += tcrb << desc->xfer_shift;
|
||||
|
||||
return residue;
|
||||
}
|
||||
|
@ -1368,6 +1388,7 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
|
|||
enum dma_status status;
|
||||
unsigned long flags;
|
||||
unsigned int residue;
|
||||
bool cyclic;
|
||||
|
||||
status = dma_cookie_status(chan, cookie, txstate);
|
||||
if (status == DMA_COMPLETE || !txstate)
|
||||
|
@ -1375,10 +1396,11 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
|
|||
|
||||
spin_lock_irqsave(&rchan->lock, flags);
|
||||
residue = rcar_dmac_chan_get_residue(rchan, cookie);
|
||||
cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
|
||||
spin_unlock_irqrestore(&rchan->lock, flags);
|
||||
|
||||
/* if there's no residue, the cookie is complete */
|
||||
if (!residue)
|
||||
if (!residue && !cyclic)
|
||||
return DMA_COMPLETE;
|
||||
|
||||
dma_set_residue(txstate, residue);
|
||||
|
|
|
@ -414,6 +414,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
|
|||
irq_set_handler_locked(data, handle_edge_irq);
|
||||
break;
|
||||
case IRQ_TYPE_EDGE_BOTH:
|
||||
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
|
||||
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1);
|
||||
irq_set_handler_locked(data, handle_edge_irq);
|
||||
break;
|
||||
|
|
|
@ -1379,7 +1379,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
|
|||
|
||||
status = gpiochip_add_irqchip(chip, lock_key, request_key);
|
||||
if (status)
|
||||
goto err_remove_chip;
|
||||
goto err_free_gpiochip_mask;
|
||||
|
||||
status = of_gpiochip_add(chip);
|
||||
if (status)
|
||||
|
@ -1387,7 +1387,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
|
|||
|
||||
status = gpiochip_init_valid_mask(chip);
|
||||
if (status)
|
||||
goto err_remove_chip;
|
||||
goto err_remove_of_chip;
|
||||
|
||||
for (i = 0; i < chip->ngpio; i++) {
|
||||
struct gpio_desc *desc = &gdev->descs[i];
|
||||
|
@ -1415,14 +1415,18 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
|
|||
if (gpiolib_initialized) {
|
||||
status = gpiochip_setup_dev(gdev);
|
||||
if (status)
|
||||
goto err_remove_chip;
|
||||
goto err_remove_acpi_chip;
|
||||
}
|
||||
return 0;
|
||||
|
||||
err_remove_chip:
|
||||
err_remove_acpi_chip:
|
||||
acpi_gpiochip_remove(chip);
|
||||
err_remove_of_chip:
|
||||
gpiochip_free_hogs(chip);
|
||||
of_gpiochip_remove(chip);
|
||||
err_remove_chip:
|
||||
gpiochip_irqchip_remove(chip);
|
||||
err_free_gpiochip_mask:
|
||||
gpiochip_free_valid_mask(chip);
|
||||
err_remove_irqchip_mask:
|
||||
gpiochip_irqchip_free_valid_mask(chip);
|
||||
|
|
|
@ -1046,6 +1046,10 @@ static bool dw_hdmi_support_scdc(struct dw_hdmi *hdmi)
|
|||
if (hdmi->version < 0x200a)
|
||||
return false;
|
||||
|
||||
/* Disable if no DDC bus */
|
||||
if (!hdmi->ddc)
|
||||
return false;
|
||||
|
||||
/* Disable if SCDC is not supported, or if an HF-VSDB block is absent */
|
||||
if (!display->hdmi.scdc.supported ||
|
||||
!display->hdmi.scdc.scrambling.supported)
|
||||
|
@ -1684,13 +1688,13 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
|
|||
* Source Devices compliant shall set the
|
||||
* Source Version = 1.
|
||||
*/
|
||||
drm_scdc_readb(&hdmi->i2c->adap, SCDC_SINK_VERSION,
|
||||
drm_scdc_readb(hdmi->ddc, SCDC_SINK_VERSION,
|
||||
&bytes);
|
||||
drm_scdc_writeb(&hdmi->i2c->adap, SCDC_SOURCE_VERSION,
|
||||
drm_scdc_writeb(hdmi->ddc, SCDC_SOURCE_VERSION,
|
||||
min_t(u8, bytes, SCDC_MIN_SOURCE_VERSION));
|
||||
|
||||
/* Enabled Scrambling in the Sink */
|
||||
drm_scdc_set_scrambling(&hdmi->i2c->adap, 1);
|
||||
drm_scdc_set_scrambling(hdmi->ddc, 1);
|
||||
|
||||
/*
|
||||
* To activate the scrambler feature, you must ensure
|
||||
|
@ -1706,7 +1710,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
|
|||
hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL);
|
||||
hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ,
|
||||
HDMI_MC_SWRSTZ);
|
||||
drm_scdc_set_scrambling(&hdmi->i2c->adap, 0);
|
||||
drm_scdc_set_scrambling(hdmi->ddc, 0);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1800,6 +1804,8 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
|
|||
* iteration for others.
|
||||
* The Amlogic Meson GX SoCs (v2.01a) have been identified as needing
|
||||
* the workaround with a single iteration.
|
||||
* The Rockchip RK3288 SoC (v2.00a) and RK3328/RK3399 SoCs (v2.11a) have
|
||||
* been identified as needing the workaround with a single iteration.
|
||||
*/
|
||||
|
||||
switch (hdmi->version) {
|
||||
|
@ -1808,7 +1814,9 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
|
|||
break;
|
||||
case 0x131a:
|
||||
case 0x132a:
|
||||
case 0x200a:
|
||||
case 0x201a:
|
||||
case 0x211a:
|
||||
case 0x212a:
|
||||
count = 1;
|
||||
break;
|
||||
|
|
|
@ -3862,14 +3862,16 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
|
|||
ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
|
||||
else
|
||||
ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (IS_GEN9_LP(dev_priv) && ret)
|
||||
if (IS_GEN9_LP(dev_priv))
|
||||
pipe_config->lane_lat_optim_mask =
|
||||
bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
|
||||
|
||||
intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
|
|
|
@ -1886,6 +1886,9 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
|
|||
int pipe_bpp;
|
||||
int ret;
|
||||
|
||||
pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
|
||||
intel_dp_supports_fec(intel_dp, pipe_config);
|
||||
|
||||
if (!intel_dp_supports_dsc(intel_dp, pipe_config))
|
||||
return -EINVAL;
|
||||
|
||||
|
@ -2116,9 +2119,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
|
|||
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
|
||||
return -EINVAL;
|
||||
|
||||
pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
|
||||
intel_dp_supports_fec(intel_dp, pipe_config);
|
||||
|
||||
ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
|
|
@ -71,7 +71,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
|
|||
if (disable_partial)
|
||||
ipu_plane_disable(ipu_crtc->plane[1], true);
|
||||
if (disable_full)
|
||||
ipu_plane_disable(ipu_crtc->plane[0], false);
|
||||
ipu_plane_disable(ipu_crtc->plane[0], true);
|
||||
}
|
||||
|
||||
static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
|
||||
|
|
|
@ -366,10 +366,9 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
|
|||
EXPORT_SYMBOL(drm_sched_increase_karma);
|
||||
|
||||
/**
|
||||
* drm_sched_hw_job_reset - stop the scheduler if it contains the bad job
|
||||
* drm_sched_stop - stop the scheduler
|
||||
*
|
||||
* @sched: scheduler instance
|
||||
* @bad: bad scheduler job
|
||||
*
|
||||
*/
|
||||
void drm_sched_stop(struct drm_gpu_scheduler *sched)
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
#include <linux/of_reserved_mem.h>
|
||||
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_fb_cma_helper.h>
|
||||
#include <drm/drm_fb_helper.h>
|
||||
#include <drm/drm_gem_cma_helper.h>
|
||||
|
@ -85,6 +86,8 @@ static int sun4i_drv_bind(struct device *dev)
|
|||
ret = -ENOMEM;
|
||||
goto free_drm;
|
||||
}
|
||||
|
||||
dev_set_drvdata(dev, drm);
|
||||
drm->dev_private = drv;
|
||||
INIT_LIST_HEAD(&drv->frontend_list);
|
||||
INIT_LIST_HEAD(&drv->engine_list);
|
||||
|
@ -144,8 +147,12 @@ static void sun4i_drv_unbind(struct device *dev)
|
|||
|
||||
drm_dev_unregister(drm);
|
||||
drm_kms_helper_poll_fini(drm);
|
||||
drm_atomic_helper_shutdown(drm);
|
||||
drm_mode_config_cleanup(drm);
|
||||
|
||||
component_unbind_all(dev, NULL);
|
||||
of_reserved_mem_device_release(dev);
|
||||
|
||||
drm_dev_put(drm);
|
||||
}
|
||||
|
||||
|
@ -395,6 +402,8 @@ static int sun4i_drv_probe(struct platform_device *pdev)
|
|||
|
||||
static int sun4i_drv_remove(struct platform_device *pdev)
|
||||
{
|
||||
component_master_del(&pdev->dev, &sun4i_drv_master_ops);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -49,9 +49,8 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj);
|
|||
* ttm_global_mutex - protecting the global BO state
|
||||
*/
|
||||
DEFINE_MUTEX(ttm_global_mutex);
|
||||
struct ttm_bo_global ttm_bo_glob = {
|
||||
.use_count = 0
|
||||
};
|
||||
unsigned ttm_bo_glob_use_count;
|
||||
struct ttm_bo_global ttm_bo_glob;
|
||||
|
||||
static struct attribute ttm_bo_count = {
|
||||
.name = "bo_count",
|
||||
|
@ -1531,12 +1530,13 @@ static void ttm_bo_global_release(void)
|
|||
struct ttm_bo_global *glob = &ttm_bo_glob;
|
||||
|
||||
mutex_lock(&ttm_global_mutex);
|
||||
if (--glob->use_count > 0)
|
||||
if (--ttm_bo_glob_use_count > 0)
|
||||
goto out;
|
||||
|
||||
kobject_del(&glob->kobj);
|
||||
kobject_put(&glob->kobj);
|
||||
ttm_mem_global_release(&ttm_mem_glob);
|
||||
memset(glob, 0, sizeof(*glob));
|
||||
out:
|
||||
mutex_unlock(&ttm_global_mutex);
|
||||
}
|
||||
|
@ -1548,7 +1548,7 @@ static int ttm_bo_global_init(void)
|
|||
unsigned i;
|
||||
|
||||
mutex_lock(&ttm_global_mutex);
|
||||
if (++glob->use_count > 1)
|
||||
if (++ttm_bo_glob_use_count > 1)
|
||||
goto out;
|
||||
|
||||
ret = ttm_mem_global_init(&ttm_mem_glob);
|
||||
|
|
|
@ -461,8 +461,8 @@ out_no_zone:
|
|||
|
||||
void ttm_mem_global_release(struct ttm_mem_global *glob)
|
||||
{
|
||||
unsigned int i;
|
||||
struct ttm_mem_zone *zone;
|
||||
unsigned int i;
|
||||
|
||||
/* let the page allocator first stop the shrink work. */
|
||||
ttm_page_alloc_fini();
|
||||
|
@ -475,9 +475,10 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
|
|||
zone = glob->zones[i];
|
||||
kobject_del(&zone->kobj);
|
||||
kobject_put(&zone->kobj);
|
||||
}
|
||||
}
|
||||
kobject_del(&glob->kobj);
|
||||
kobject_put(&glob->kobj);
|
||||
memset(glob, 0, sizeof(*glob));
|
||||
}
|
||||
|
||||
static void ttm_check_swapping(struct ttm_mem_global *glob)
|
||||
|
|
|
@ -1042,7 +1042,7 @@ static void
|
|||
vc4_crtc_reset(struct drm_crtc *crtc)
|
||||
{
|
||||
if (crtc->state)
|
||||
__drm_atomic_helper_crtc_destroy_state(crtc->state);
|
||||
vc4_crtc_destroy_state(crtc, crtc->state);
|
||||
|
||||
crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
|
||||
if (crtc->state)
|
||||
|
|
|
@ -545,30 +545,14 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
|
|||
dev_priv->initial_height = height;
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_assume_iommu - Figure out whether coherent dma-remapping might be
|
||||
* taking place.
|
||||
* @dev: Pointer to the struct drm_device.
|
||||
*
|
||||
* Return: true if iommu present, false otherwise.
|
||||
*/
|
||||
static bool vmw_assume_iommu(struct drm_device *dev)
|
||||
{
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev->dev);
|
||||
|
||||
return !dma_is_direct(ops) && ops &&
|
||||
ops->map_page != dma_direct_map_page;
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_dma_select_mode - Determine how DMA mappings should be set up for this
|
||||
* system.
|
||||
*
|
||||
* @dev_priv: Pointer to a struct vmw_private
|
||||
*
|
||||
* This functions tries to determine the IOMMU setup and what actions
|
||||
* need to be taken by the driver to make system pages visible to the
|
||||
* device.
|
||||
* This functions tries to determine what actions need to be taken by the
|
||||
* driver to make system pages visible to the device.
|
||||
* If this function decides that DMA is not possible, it returns -EINVAL.
|
||||
* The driver may then try to disable features of the device that require
|
||||
* DMA.
|
||||
|
@ -578,23 +562,16 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
|
|||
static const char *names[vmw_dma_map_max] = {
|
||||
[vmw_dma_phys] = "Using physical TTM page addresses.",
|
||||
[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
|
||||
[vmw_dma_map_populate] = "Keeping DMA mappings.",
|
||||
[vmw_dma_map_populate] = "Caching DMA mappings.",
|
||||
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
|
||||
|
||||
if (vmw_force_coherent)
|
||||
dev_priv->map_mode = vmw_dma_alloc_coherent;
|
||||
else if (vmw_assume_iommu(dev_priv->dev))
|
||||
dev_priv->map_mode = vmw_dma_map_populate;
|
||||
else if (!vmw_force_iommu)
|
||||
dev_priv->map_mode = vmw_dma_phys;
|
||||
else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl())
|
||||
dev_priv->map_mode = vmw_dma_alloc_coherent;
|
||||
else if (vmw_restrict_iommu)
|
||||
dev_priv->map_mode = vmw_dma_map_bind;
|
||||
else
|
||||
dev_priv->map_mode = vmw_dma_map_populate;
|
||||
|
||||
if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu)
|
||||
dev_priv->map_mode = vmw_dma_map_bind;
|
||||
|
||||
/* No TTM coherent page pool? FIXME: Ask TTM instead! */
|
||||
if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
|
||||
(dev_priv->map_mode == vmw_dma_alloc_coherent))
|
||||
|
|
|
@ -195,7 +195,8 @@ int ipu_dp_setup_channel(struct ipu_dp *dp,
|
|||
ipu_dp_csc_init(flow, flow->foreground.in_cs, flow->out_cs,
|
||||
DP_COM_CONF_CSC_DEF_BOTH);
|
||||
} else {
|
||||
if (flow->foreground.in_cs == flow->out_cs)
|
||||
if (flow->foreground.in_cs == IPUV3_COLORSPACE_UNKNOWN ||
|
||||
flow->foreground.in_cs == flow->out_cs)
|
||||
/*
|
||||
* foreground identical to output, apply color
|
||||
* conversion on background
|
||||
|
@ -261,6 +262,8 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync)
|
|||
struct ipu_dp_priv *priv = flow->priv;
|
||||
u32 reg, csc;
|
||||
|
||||
dp->in_cs = IPUV3_COLORSPACE_UNKNOWN;
|
||||
|
||||
if (!dp->foreground)
|
||||
return;
|
||||
|
||||
|
@ -268,8 +271,9 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync)
|
|||
|
||||
reg = readl(flow->base + DP_COM_CONF);
|
||||
csc = reg & DP_COM_CONF_CSC_DEF_MASK;
|
||||
if (csc == DP_COM_CONF_CSC_DEF_FG)
|
||||
reg &= ~DP_COM_CONF_CSC_DEF_MASK;
|
||||
reg &= ~DP_COM_CONF_CSC_DEF_MASK;
|
||||
if (csc == DP_COM_CONF_CSC_DEF_BOTH || csc == DP_COM_CONF_CSC_DEF_BG)
|
||||
reg |= DP_COM_CONF_CSC_DEF_BG;
|
||||
|
||||
reg &= ~DP_COM_CONF_FG_EN;
|
||||
writel(reg, flow->base + DP_COM_CONF);
|
||||
|
@ -347,6 +351,8 @@ int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
|
|||
mutex_init(&priv->mutex);
|
||||
|
||||
for (i = 0; i < IPUV3_NUM_FLOWS; i++) {
|
||||
priv->flow[i].background.in_cs = IPUV3_COLORSPACE_UNKNOWN;
|
||||
priv->flow[i].foreground.in_cs = IPUV3_COLORSPACE_UNKNOWN;
|
||||
priv->flow[i].foreground.foreground = true;
|
||||
priv->flow[i].base = priv->base + ipu_dp_flow_base[i];
|
||||
priv->flow[i].priv = priv;
|
||||
|
|
|
@ -160,6 +160,7 @@ struct ib_uverbs_file {
|
|||
|
||||
struct mutex umap_lock;
|
||||
struct list_head umaps;
|
||||
struct page *disassociate_page;
|
||||
|
||||
struct idr idr;
|
||||
/* spinlock protects write access to idr */
|
||||
|
|
|
@ -208,6 +208,9 @@ void ib_uverbs_release_file(struct kref *ref)
|
|||
kref_put(&file->async_file->ref,
|
||||
ib_uverbs_release_async_event_file);
|
||||
put_device(&file->device->dev);
|
||||
|
||||
if (file->disassociate_page)
|
||||
__free_pages(file->disassociate_page, 0);
|
||||
kfree(file);
|
||||
}
|
||||
|
||||
|
@ -877,9 +880,50 @@ static void rdma_umap_close(struct vm_area_struct *vma)
|
|||
kfree(priv);
|
||||
}
|
||||
|
||||
/*
|
||||
* Once the zap_vma_ptes has been called touches to the VMA will come here and
|
||||
* we return a dummy writable zero page for all the pfns.
|
||||
*/
|
||||
static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
|
||||
{
|
||||
struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;
|
||||
struct rdma_umap_priv *priv = vmf->vma->vm_private_data;
|
||||
vm_fault_t ret = 0;
|
||||
|
||||
if (!priv)
|
||||
return VM_FAULT_SIGBUS;
|
||||
|
||||
/* Read only pages can just use the system zero page. */
|
||||
if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
|
||||
vmf->page = ZERO_PAGE(vmf->vm_start);
|
||||
get_page(vmf->page);
|
||||
return 0;
|
||||
}
|
||||
|
||||
mutex_lock(&ufile->umap_lock);
|
||||
if (!ufile->disassociate_page)
|
||||
ufile->disassociate_page =
|
||||
alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);
|
||||
|
||||
if (ufile->disassociate_page) {
|
||||
/*
|
||||
* This VMA is forced to always be shared so this doesn't have
|
||||
* to worry about COW.
|
||||
*/
|
||||
vmf->page = ufile->disassociate_page;
|
||||
get_page(vmf->page);
|
||||
} else {
|
||||
ret = VM_FAULT_SIGBUS;
|
||||
}
|
||||
mutex_unlock(&ufile->umap_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct vm_operations_struct rdma_umap_ops = {
|
||||
.open = rdma_umap_open,
|
||||
.close = rdma_umap_close,
|
||||
.fault = rdma_umap_fault,
|
||||
};
|
||||
|
||||
static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
|
||||
|
@ -889,6 +933,9 @@ static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
|
|||
struct ib_uverbs_file *ufile = ucontext->ufile;
|
||||
struct rdma_umap_priv *priv;
|
||||
|
||||
if (!(vma->vm_flags & VM_SHARED))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
if (vma->vm_end - vma->vm_start != size)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
|
@ -992,7 +1039,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
|
|||
* at a time to get the lock ordering right. Typically there
|
||||
* will only be one mm, so no big deal.
|
||||
*/
|
||||
down_write(&mm->mmap_sem);
|
||||
down_read(&mm->mmap_sem);
|
||||
if (!mmget_still_valid(mm))
|
||||
goto skip_mm;
|
||||
mutex_lock(&ufile->umap_lock);
|
||||
|
@ -1006,11 +1053,10 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
|
|||
|
||||
zap_vma_ptes(vma, vma->vm_start,
|
||||
vma->vm_end - vma->vm_start);
|
||||
vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
|
||||
}
|
||||
mutex_unlock(&ufile->umap_lock);
|
||||
skip_mm:
|
||||
up_write(&mm->mmap_sem);
|
||||
up_read(&mm->mmap_sem);
|
||||
mmput(mm);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -533,7 +533,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
|
|||
|
||||
static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
|
||||
{
|
||||
if (attr->qp_type == IB_QPT_XRC_TGT)
|
||||
if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
|
||||
return 0;
|
||||
|
||||
return 1;
|
||||
|
|
|
@ -1119,6 +1119,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
|
|||
if (MLX5_CAP_GEN(mdev, qp_packet_based))
|
||||
resp.flags |=
|
||||
MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
|
||||
|
||||
resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
|
||||
}
|
||||
|
||||
if (field_avail(typeof(resp), sw_parsing_caps,
|
||||
|
@ -2066,6 +2068,7 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
|
|||
|
||||
if (vma->vm_flags & VM_WRITE)
|
||||
return -EPERM;
|
||||
vma->vm_flags &= ~VM_MAYWRITE;
|
||||
|
||||
if (!dev->mdev->clock_info_page)
|
||||
return -EOPNOTSUPP;
|
||||
|
@ -2231,19 +2234,18 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
|
|||
|
||||
if (vma->vm_flags & VM_WRITE)
|
||||
return -EPERM;
|
||||
vma->vm_flags &= ~VM_MAYWRITE;
|
||||
|
||||
/* Don't expose to user-space information it shouldn't have */
|
||||
if (PAGE_SIZE > 4096)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
|
||||
pfn = (dev->mdev->iseg_base +
|
||||
offsetof(struct mlx5_init_seg, internal_timer_h)) >>
|
||||
PAGE_SHIFT;
|
||||
if (io_remap_pfn_range(vma, vma->vm_start, pfn,
|
||||
PAGE_SIZE, vma->vm_page_prot))
|
||||
return -EAGAIN;
|
||||
break;
|
||||
return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
|
||||
PAGE_SIZE,
|
||||
pgprot_noncached(vma->vm_page_prot));
|
||||
case MLX5_IB_MMAP_CLOCK_INFO:
|
||||
return mlx5_ib_mmap_clock_info_page(dev, vma, context);
|
||||
|
||||
|
|
|
@ -1818,13 +1818,16 @@ static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,
|
|||
|
||||
rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
|
||||
|
||||
if (rcqe_sz == 128) {
|
||||
MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
|
||||
if (init_attr->qp_type == MLX5_IB_QPT_DCT) {
|
||||
if (rcqe_sz == 128)
|
||||
MLX5_SET(dctc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
if (init_attr->qp_type != MLX5_IB_QPT_DCT)
|
||||
MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
|
||||
MLX5_SET(qpc, qpc, cs_res,
|
||||
rcqe_sz == 128 ? MLX5_RES_SCAT_DATA64_CQE :
|
||||
MLX5_RES_SCAT_DATA32_CQE);
|
||||
}
|
||||
|
||||
static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
|
||||
|
|
|
@ -608,11 +608,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
|
|||
if (unlikely(mapped_segs == mr->mr.max_segs))
|
||||
return -ENOMEM;
|
||||
|
||||
if (mr->mr.length == 0) {
|
||||
mr->mr.user_base = addr;
|
||||
mr->mr.iova = addr;
|
||||
}
|
||||
|
||||
m = mapped_segs / RVT_SEGSZ;
|
||||
n = mapped_segs % RVT_SEGSZ;
|
||||
mr->mr.map[m]->segs[n].vaddr = (void *)addr;
|
||||
|
@ -630,17 +625,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
|
|||
* @sg_nents: number of entries in sg
|
||||
* @sg_offset: offset in bytes into sg
|
||||
*
|
||||
* Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
|
||||
*
|
||||
* Return: number of sg elements mapped to the memory region
|
||||
*/
|
||||
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
|
||||
int sg_nents, unsigned int *sg_offset)
|
||||
{
|
||||
struct rvt_mr *mr = to_imr(ibmr);
|
||||
int ret;
|
||||
|
||||
mr->mr.length = 0;
|
||||
mr->mr.page_shift = PAGE_SHIFT;
|
||||
return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
|
||||
rvt_set_page);
|
||||
ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
|
||||
mr->mr.user_base = ibmr->iova;
|
||||
mr->mr.iova = ibmr->iova;
|
||||
mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
|
||||
mr->mr.length = (size_t)ibmr->length;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -671,6 +673,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
|
|||
ibmr->rkey = key;
|
||||
mr->mr.lkey = key;
|
||||
mr->mr.access_flags = access;
|
||||
mr->mr.iova = ibmr->iova;
|
||||
atomic_set(&mr->mr.lkey_invalid, 0);
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -420,7 +420,7 @@ config KEYBOARD_MPR121
|
|||
|
||||
config KEYBOARD_SNVS_PWRKEY
|
||||
tristate "IMX SNVS Power Key Driver"
|
||||
depends on SOC_IMX6SX || SOC_IMX7D
|
||||
depends on ARCH_MXC || COMPILE_TEST
|
||||
depends on OF
|
||||
help
|
||||
This is the snvs powerkey driver for the Freescale i.MX application
|
||||
|
|
|
@ -860,7 +860,7 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
|
|||
|
||||
error = rmi_register_function(fn);
|
||||
if (error)
|
||||
goto err_put_fn;
|
||||
return error;
|
||||
|
||||
if (pdt->function_number == 0x01)
|
||||
data->f01_container = fn;
|
||||
|
@ -870,10 +870,6 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
|
|||
list_add_tail(&fn->node, &data->function_list);
|
||||
|
||||
return RMI_SCAN_CONTINUE;
|
||||
|
||||
err_put_fn:
|
||||
put_device(&fn->dev);
|
||||
return error;
|
||||
}
|
||||
|
||||
void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
|
||||
|
|
|
@ -1230,7 +1230,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
|
|||
}
|
||||
|
||||
rc = f11_write_control_regs(fn, &f11->sens_query,
|
||||
&f11->dev_controls, fn->fd.query_base_addr);
|
||||
&f11->dev_controls, fn->fd.control_base_addr);
|
||||
if (rc)
|
||||
dev_warn(&fn->dev, "Failed to write control registers\n");
|
||||
|
||||
|
|
|
@ -153,7 +153,7 @@ out_fail:
|
|||
void
|
||||
slhc_free(struct slcompress *comp)
|
||||
{
|
||||
if ( comp == NULLSLCOMPR )
|
||||
if ( IS_ERR_OR_NULL(comp) )
|
||||
return;
|
||||
|
||||
if ( comp->tstate != NULLSLSTATE )
|
||||
|
|
|
@ -7,6 +7,7 @@
|
|||
#include <linux/slab.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/sched/mm.h>
|
||||
#include "ctree.h"
|
||||
#include "disk-io.h"
|
||||
#include "transaction.h"
|
||||
|
@ -427,9 +428,13 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
|
|||
unsigned long this_sum_bytes = 0;
|
||||
int i;
|
||||
u64 offset;
|
||||
unsigned nofs_flag;
|
||||
|
||||
nofs_flag = memalloc_nofs_save();
|
||||
sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
|
||||
GFP_KERNEL);
|
||||
memalloc_nofs_restore(nofs_flag);
|
||||
|
||||
sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
|
||||
GFP_NOFS);
|
||||
if (!sums)
|
||||
return BLK_STS_RESOURCE;
|
||||
|
||||
|
@ -472,8 +477,10 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
|
|||
|
||||
bytes_left = bio->bi_iter.bi_size - total_bytes;
|
||||
|
||||
sums = kzalloc(btrfs_ordered_sum_size(fs_info, bytes_left),
|
||||
GFP_NOFS);
|
||||
nofs_flag = memalloc_nofs_save();
|
||||
sums = kvzalloc(btrfs_ordered_sum_size(fs_info,
|
||||
bytes_left), GFP_KERNEL);
|
||||
memalloc_nofs_restore(nofs_flag);
|
||||
BUG_ON(!sums); /* -ENOMEM */
|
||||
sums->len = bytes_left;
|
||||
ordered = btrfs_lookup_ordered_extent(inode,
|
||||
|
|
|
@ -6,6 +6,7 @@
|
|||
#include <linux/slab.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/writeback.h>
|
||||
#include <linux/sched/mm.h>
|
||||
#include "ctree.h"
|
||||
#include "transaction.h"
|
||||
#include "btrfs_inode.h"
|
||||
|
@ -442,7 +443,7 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
|
|||
cur = entry->list.next;
|
||||
sum = list_entry(cur, struct btrfs_ordered_sum, list);
|
||||
list_del(&sum->list);
|
||||
kfree(sum);
|
||||
kvfree(sum);
|
||||
}
|
||||
kmem_cache_free(btrfs_ordered_extent_cache, entry);
|
||||
}
|
||||
|
|
|
@ -2877,7 +2877,6 @@ static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
|
|||
struct cifs_tcon *tcon;
|
||||
struct cifs_sb_info *cifs_sb;
|
||||
struct dentry *dentry = ctx->cfile->dentry;
|
||||
unsigned int i;
|
||||
int rc;
|
||||
|
||||
tcon = tlink_tcon(ctx->cfile->tlink);
|
||||
|
@ -2941,10 +2940,6 @@ restart_loop:
|
|||
kref_put(&wdata->refcount, cifs_uncached_writedata_release);
|
||||
}
|
||||
|
||||
if (!ctx->direct_io)
|
||||
for (i = 0; i < ctx->npages; i++)
|
||||
put_page(ctx->bv[i].bv_page);
|
||||
|
||||
cifs_stats_bytes_written(tcon, ctx->total_len);
|
||||
set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
|
||||
|
||||
|
@ -3582,7 +3577,6 @@ collect_uncached_read_data(struct cifs_aio_ctx *ctx)
|
|||
struct iov_iter *to = &ctx->iter;
|
||||
struct cifs_sb_info *cifs_sb;
|
||||
struct cifs_tcon *tcon;
|
||||
unsigned int i;
|
||||
int rc;
|
||||
|
||||
tcon = tlink_tcon(ctx->cfile->tlink);
|
||||
|
@ -3666,15 +3660,8 @@ again:
|
|||
kref_put(&rdata->refcount, cifs_uncached_readdata_release);
|
||||
}
|
||||
|
||||
if (!ctx->direct_io) {
|
||||
for (i = 0; i < ctx->npages; i++) {
|
||||
if (ctx->should_dirty)
|
||||
set_page_dirty(ctx->bv[i].bv_page);
|
||||
put_page(ctx->bv[i].bv_page);
|
||||
}
|
||||
|
||||
if (!ctx->direct_io)
|
||||
ctx->total_len = ctx->len - iov_iter_count(to);
|
||||
}
|
||||
|
||||
/* mask nodata case */
|
||||
if (rc == -ENODATA)
|
||||
|
|
|
@ -1735,6 +1735,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
|
|||
if (rc == 0 || rc != -EBUSY)
|
||||
goto do_rename_exit;
|
||||
|
||||
/* Don't fall back to using SMB on SMB 2+ mount */
|
||||
if (server->vals->protocol_id != 0)
|
||||
goto do_rename_exit;
|
||||
|
||||
/* open-file renames don't work across directories */
|
||||
if (to_dentry->d_parent != from_dentry->d_parent)
|
||||
goto do_rename_exit;
|
||||
|
|
|
@ -789,6 +789,11 @@ cifs_aio_ctx_alloc(void)
|
|||
{
|
||||
struct cifs_aio_ctx *ctx;
|
||||
|
||||
/*
|
||||
* Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
|
||||
* to false so that we know when we have to unreference pages within
|
||||
* cifs_aio_ctx_release()
|
||||
*/
|
||||
ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
|
||||
if (!ctx)
|
||||
return NULL;
|
||||
|
@ -807,7 +812,23 @@ cifs_aio_ctx_release(struct kref *refcount)
|
|||
struct cifs_aio_ctx, refcount);
|
||||
|
||||
cifsFileInfo_put(ctx->cfile);
|
||||
kvfree(ctx->bv);
|
||||
|
||||
/*
|
||||
* ctx->bv is only set if setup_aio_ctx_iter() was call successfuly
|
||||
* which means that iov_iter_get_pages() was a success and thus that
|
||||
* we have taken reference on pages.
|
||||
*/
|
||||
if (ctx->bv) {
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < ctx->npages; i++) {
|
||||
if (ctx->should_dirty)
|
||||
set_page_dirty(ctx->bv[i].bv_page);
|
||||
put_page(ctx->bv[i].bv_page);
|
||||
}
|
||||
kvfree(ctx->bv);
|
||||
}
|
||||
|
||||
kfree(ctx);
|
||||
}
|
||||
|
||||
|
|
|
@ -3466,6 +3466,7 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
|
|||
io_parms->tcon->tid, ses->Suid,
|
||||
io_parms->offset, 0);
|
||||
free_rsp_buf(resp_buftype, rsp_iov.iov_base);
|
||||
cifs_small_buf_release(req);
|
||||
return rc == -ENODATA ? 0 : rc;
|
||||
} else
|
||||
trace_smb3_read_done(xid, req->PersistentFileId,
|
||||
|
|
|
@ -740,7 +740,7 @@ static bool io_file_supports_async(struct file *file)
|
|||
}
|
||||
|
||||
static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
|
||||
bool force_nonblock, struct io_submit_state *state)
|
||||
bool force_nonblock)
|
||||
{
|
||||
const struct io_uring_sqe *sqe = s->sqe;
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
|
@ -938,7 +938,7 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
|
|||
}
|
||||
|
||||
static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
|
||||
bool force_nonblock, struct io_submit_state *state)
|
||||
bool force_nonblock)
|
||||
{
|
||||
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
|
||||
struct kiocb *kiocb = &req->rw;
|
||||
|
@ -947,7 +947,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
|
|||
size_t iov_count;
|
||||
int ret;
|
||||
|
||||
ret = io_prep_rw(req, s, force_nonblock, state);
|
||||
ret = io_prep_rw(req, s, force_nonblock);
|
||||
if (ret)
|
||||
return ret;
|
||||
file = kiocb->ki_filp;
|
||||
|
@ -985,7 +985,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
|
|||
}
|
||||
|
||||
static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
|
||||
bool force_nonblock, struct io_submit_state *state)
|
||||
bool force_nonblock)
|
||||
{
|
||||
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
|
||||
struct kiocb *kiocb = &req->rw;
|
||||
|
@ -994,7 +994,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
|
|||
size_t iov_count;
|
||||
int ret;
|
||||
|
||||
ret = io_prep_rw(req, s, force_nonblock, state);
|
||||
ret = io_prep_rw(req, s, force_nonblock);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -1336,8 +1336,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
|||
}
|
||||
|
||||
static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
|
||||
const struct sqe_submit *s, bool force_nonblock,
|
||||
struct io_submit_state *state)
|
||||
const struct sqe_submit *s, bool force_nonblock)
|
||||
{
|
||||
int ret, opcode;
|
||||
|
||||
|
@ -1353,18 +1352,18 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
|
|||
case IORING_OP_READV:
|
||||
if (unlikely(s->sqe->buf_index))
|
||||
return -EINVAL;
|
||||
ret = io_read(req, s, force_nonblock, state);
|
||||
ret = io_read(req, s, force_nonblock);
|
||||
break;
|
||||
case IORING_OP_WRITEV:
|
||||
if (unlikely(s->sqe->buf_index))
|
||||
return -EINVAL;
|
||||
ret = io_write(req, s, force_nonblock, state);
|
||||
ret = io_write(req, s, force_nonblock);
|
||||
break;
|
||||
case IORING_OP_READ_FIXED:
|
||||
ret = io_read(req, s, force_nonblock, state);
|
||||
ret = io_read(req, s, force_nonblock);
|
||||
break;
|
||||
case IORING_OP_WRITE_FIXED:
|
||||
ret = io_write(req, s, force_nonblock, state);
|
||||
ret = io_write(req, s, force_nonblock);
|
||||
break;
|
||||
case IORING_OP_FSYNC:
|
||||
ret = io_fsync(req, s->sqe, force_nonblock);
|
||||
|
@ -1457,7 +1456,7 @@ restart:
|
|||
s->has_user = cur_mm != NULL;
|
||||
s->needs_lock = true;
|
||||
do {
|
||||
ret = __io_submit_sqe(ctx, req, s, false, NULL);
|
||||
ret = __io_submit_sqe(ctx, req, s, false);
|
||||
/*
|
||||
* We can get EAGAIN for polled IO even though
|
||||
* we're forcing a sync submission from here,
|
||||
|
@ -1623,7 +1622,7 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
|
|||
if (unlikely(ret))
|
||||
goto out;
|
||||
|
||||
ret = __io_submit_sqe(ctx, req, s, true, state);
|
||||
ret = __io_submit_sqe(ctx, req, s, true);
|
||||
if (ret == -EAGAIN) {
|
||||
struct io_uring_sqe *sqe_copy;
|
||||
|
||||
|
@ -1739,7 +1738,8 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
|
|||
head = ctx->cached_sq_head;
|
||||
/* See comment at the top of this file */
|
||||
smp_rmb();
|
||||
if (head == READ_ONCE(ring->r.tail))
|
||||
/* make sure SQ entry isn't read before tail */
|
||||
if (head == smp_load_acquire(&ring->r.tail))
|
||||
return false;
|
||||
|
||||
head = READ_ONCE(ring->array[head & ctx->sq_mask]);
|
||||
|
@ -1864,7 +1864,8 @@ static int io_sq_thread(void *data)
|
|||
|
||||
/* Tell userspace we may need a wakeup call */
|
||||
ctx->sq_ring->flags |= IORING_SQ_NEED_WAKEUP;
|
||||
smp_wmb();
|
||||
/* make sure to read SQ tail after writing flags */
|
||||
smp_mb();
|
||||
|
||||
if (!io_get_sqring(ctx, &sqes[0])) {
|
||||
if (kthread_should_stop()) {
|
||||
|
@ -2574,7 +2575,8 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
|
|||
poll_wait(file, &ctx->cq_wait, wait);
|
||||
/* See comment at the top of this file */
|
||||
smp_rmb();
|
||||
if (READ_ONCE(ctx->sq_ring->r.tail) + 1 != ctx->cached_sq_head)
|
||||
if (READ_ONCE(ctx->sq_ring->r.tail) - ctx->cached_sq_head !=
|
||||
ctx->sq_ring->ring_entries)
|
||||
mask |= EPOLLOUT | EPOLLWRNORM;
|
||||
if (READ_ONCE(ctx->cq_ring->r.head) != ctx->cached_cq_tail)
|
||||
mask |= EPOLLIN | EPOLLRDNORM;
|
||||
|
@ -2934,6 +2936,14 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
|
|||
{
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* We're inside the ring mutex, if the ref is already dying, then
|
||||
* someone else killed the ctx or is already going through
|
||||
* io_uring_register().
|
||||
*/
|
||||
if (percpu_ref_is_dying(&ctx->refs))
|
||||
return -ENXIO;
|
||||
|
||||
percpu_ref_kill(&ctx->refs);
|
||||
|
||||
/*
|
||||
|
|
|
@ -1626,9 +1626,11 @@ static void drop_sysctl_table(struct ctl_table_header *header)
|
|||
if (--header->nreg)
|
||||
return;
|
||||
|
||||
if (parent)
|
||||
if (parent) {
|
||||
put_links(header);
|
||||
start_unregistering(header);
|
||||
start_unregistering(header);
|
||||
}
|
||||
|
||||
if (!--header->count)
|
||||
kfree_rcu(header, rcu);
|
||||
|
||||
|
|
|
@ -330,8 +330,8 @@ const struct pipe_buf_operations default_pipe_buf_ops = {
|
|||
.get = generic_pipe_buf_get,
|
||||
};
|
||||
|
||||
static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
|
||||
struct pipe_buffer *buf)
|
||||
int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
|
||||
struct pipe_buffer *buf)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
|
|
@ -420,7 +420,6 @@ extern struct ttm_bo_global {
|
|||
/**
|
||||
* Protected by ttm_global_mutex.
|
||||
*/
|
||||
unsigned int use_count;
|
||||
struct list_head device_list;
|
||||
|
||||
/**
|
||||
|
|
|
@ -176,6 +176,7 @@ void free_pipe_info(struct pipe_inode_info *);
|
|||
bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
|
||||
int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
|
||||
int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
|
||||
int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *);
|
||||
void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
|
||||
void pipe_buf_mark_unmergeable(struct pipe_buffer *buf);
|
||||
|
||||
|
|
|
@ -238,6 +238,7 @@ enum mlx5_ib_query_dev_resp_flags {
|
|||
MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0,
|
||||
MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD = 1 << 1,
|
||||
MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE = 1 << 2,
|
||||
MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT = 1 << 3,
|
||||
};
|
||||
|
||||
enum mlx5_ib_tunnel_offloads {
|
||||
|
|
|
@ -2007,6 +2007,10 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
|
|||
if (p->last_task_numa_placement) {
|
||||
delta = runtime - p->last_sum_exec_runtime;
|
||||
*period = now - p->last_task_numa_placement;
|
||||
|
||||
/* Avoid time going backwards, prevent potential divide error: */
|
||||
if (unlikely((s64)*period < 0))
|
||||
*period = 0;
|
||||
} else {
|
||||
delta = p->se.avg.load_sum;
|
||||
*period = LOAD_AVG_MAX;
|
||||
|
|
|
@ -762,7 +762,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
|
|||
|
||||
preempt_disable_notrace();
|
||||
time = rb_time_stamp(buffer);
|
||||
preempt_enable_no_resched_notrace();
|
||||
preempt_enable_notrace();
|
||||
|
||||
return time;
|
||||
}
|
||||
|
|
|
@ -496,8 +496,10 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
|
|||
* not modified.
|
||||
*/
|
||||
pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
|
||||
if (!pid_list)
|
||||
if (!pid_list) {
|
||||
trace_parser_put(&parser);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
pid_list->pid_max = READ_ONCE(pid_max);
|
||||
|
||||
|
@ -507,6 +509,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
|
|||
|
||||
pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
|
||||
if (!pid_list->pids) {
|
||||
trace_parser_put(&parser);
|
||||
kfree(pid_list);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
@ -7025,19 +7028,23 @@ struct buffer_ref {
|
|||
struct ring_buffer *buffer;
|
||||
void *page;
|
||||
int cpu;
|
||||
int ref;
|
||||
refcount_t refcount;
|
||||
};
|
||||
|
||||
static void buffer_ref_release(struct buffer_ref *ref)
|
||||
{
|
||||
if (!refcount_dec_and_test(&ref->refcount))
|
||||
return;
|
||||
ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
|
||||
kfree(ref);
|
||||
}
|
||||
|
||||
static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
|
||||
struct pipe_buffer *buf)
|
||||
{
|
||||
struct buffer_ref *ref = (struct buffer_ref *)buf->private;
|
||||
|
||||
if (--ref->ref)
|
||||
return;
|
||||
|
||||
ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
|
||||
kfree(ref);
|
||||
buffer_ref_release(ref);
|
||||
buf->private = 0;
|
||||
}
|
||||
|
||||
|
@ -7046,10 +7053,10 @@ static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
|
|||
{
|
||||
struct buffer_ref *ref = (struct buffer_ref *)buf->private;
|
||||
|
||||
if (ref->ref > INT_MAX/2)
|
||||
if (refcount_read(&ref->refcount) > INT_MAX/2)
|
||||
return false;
|
||||
|
||||
ref->ref++;
|
||||
refcount_inc(&ref->refcount);
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -7057,7 +7064,7 @@ static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
|
|||
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
|
||||
.confirm = generic_pipe_buf_confirm,
|
||||
.release = buffer_pipe_buf_release,
|
||||
.steal = generic_pipe_buf_steal,
|
||||
.steal = generic_pipe_buf_nosteal,
|
||||
.get = buffer_pipe_buf_get,
|
||||
};
|
||||
|
||||
|
@ -7070,11 +7077,7 @@ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
|
|||
struct buffer_ref *ref =
|
||||
(struct buffer_ref *)spd->partial[i].private;
|
||||
|
||||
if (--ref->ref)
|
||||
return;
|
||||
|
||||
ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
|
||||
kfree(ref);
|
||||
buffer_ref_release(ref);
|
||||
spd->partial[i].private = 0;
|
||||
}
|
||||
|
||||
|
@ -7129,7 +7132,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
|
|||
break;
|
||||
}
|
||||
|
||||
ref->ref = 1;
|
||||
refcount_set(&ref->refcount, 1);
|
||||
ref->buffer = iter->trace_buffer->buffer;
|
||||
ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
|
||||
if (IS_ERR(ref->page)) {
|
||||
|
|
|
@ -1929,6 +1929,7 @@ config TEST_KMOD
|
|||
depends on m
|
||||
depends on BLOCK && (64BIT || LBDAF) # for XFS, BTRFS
|
||||
depends on NETDEVICES && NET_CORE && INET # for TUN
|
||||
depends on BLOCK
|
||||
select TEST_LKM
|
||||
select XFS_FS
|
||||
select TUN
|
||||
|
|
|
@ -383,14 +383,14 @@ static void shuffle_array(int *arr, int n)
|
|||
static int test_func(void *private)
|
||||
{
|
||||
struct test_driver *t = private;
|
||||
cpumask_t newmask = CPU_MASK_NONE;
|
||||
int random_array[ARRAY_SIZE(test_case_array)];
|
||||
int index, i, j, ret;
|
||||
ktime_t kt;
|
||||
u64 delta;
|
||||
|
||||
cpumask_set_cpu(t->cpu, &newmask);
|
||||
set_cpus_allowed_ptr(current, &newmask);
|
||||
ret = set_cpus_allowed_ptr(current, cpumask_of(t->cpu));
|
||||
if (ret < 0)
|
||||
pr_err("Failed to set affinity to %d CPU\n", t->cpu);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(test_case_array); i++)
|
||||
random_array[i] = i;
|
||||
|
|
|
@ -874,6 +874,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
|
|||
*/
|
||||
mem = find_memory_block(__pfn_to_section(pfn));
|
||||
nid = mem->nid;
|
||||
put_device(&mem->dev);
|
||||
|
||||
/* associate pfn range with the zone */
|
||||
zone = move_pfn_range(online_type, nid, pfn, nr_pages);
|
||||
|
|
|
@ -266,7 +266,20 @@ compound_page_dtor * const compound_page_dtors[] = {
|
|||
|
||||
int min_free_kbytes = 1024;
|
||||
int user_min_free_kbytes = -1;
|
||||
#ifdef CONFIG_DISCONTIGMEM
|
||||
/*
|
||||
* DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
|
||||
* are not on separate NUMA nodes. Functionally this works but with
|
||||
* watermark_boost_factor, it can reclaim prematurely as the ranges can be
|
||||
* quite small. By default, do not boost watermarks on discontigmem as in
|
||||
* many cases very high-order allocations like THP are likely to be
|
||||
* unsupported and the premature reclaim offsets the advantage of long-term
|
||||
* fragmentation avoidance.
|
||||
*/
|
||||
int watermark_boost_factor __read_mostly;
|
||||
#else
|
||||
int watermark_boost_factor __read_mostly = 15000;
|
||||
#endif
|
||||
int watermark_scale_factor = 10;
|
||||
|
||||
static unsigned long nr_kernel_pages __initdata;
|
||||
|
@ -3419,8 +3432,11 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
|
|||
alloc_flags |= ALLOC_KSWAPD;
|
||||
|
||||
#ifdef CONFIG_ZONE_DMA32
|
||||
if (!zone)
|
||||
return alloc_flags;
|
||||
|
||||
if (zone_idx(zone) != ZONE_NORMAL)
|
||||
goto out;
|
||||
return alloc_flags;
|
||||
|
||||
/*
|
||||
* If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
|
||||
|
@ -3429,9 +3445,9 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
|
|||
*/
|
||||
BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
|
||||
if (nr_online_nodes > 1 && !populated_zone(--zone))
|
||||
goto out;
|
||||
return alloc_flags;
|
||||
|
||||
out:
|
||||
alloc_flags |= ALLOC_NOFRAGMENT;
|
||||
#endif /* CONFIG_ZONE_DMA32 */
|
||||
return alloc_flags;
|
||||
}
|
||||
|
@ -3773,11 +3789,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
|
|||
memalloc_noreclaim_restore(noreclaim_flag);
|
||||
psi_memstall_leave(&pflags);
|
||||
|
||||
if (*compact_result <= COMPACT_INACTIVE) {
|
||||
WARN_ON_ONCE(page);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* At least in one zone compaction wasn't deferred or skipped, so let's
|
||||
* count a compaction stall
|
||||
|
|