Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Minor overlapping changes in the btusb and ixgbe drivers.

Signed-off-by: David S. Miller <davem@davemloft.net>
Commit aa2eaa8c27
@@ -107,10 +107,13 @@ ForEachMacros:
- 'css_for_each_descendant_post'
- 'css_for_each_descendant_pre'
- 'device_for_each_child_node'
- 'dma_fence_chain_for_each'
- 'drm_atomic_crtc_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane_state'
- 'drm_atomic_for_each_plane_damage'
- 'drm_client_for_each_connector_iter'
- 'drm_client_for_each_modeset'
- 'drm_connector_for_each_possible_encoder'
- 'drm_for_each_connector_iter'
- 'drm_for_each_crtc'
@@ -126,6 +129,7 @@ ForEachMacros:
- 'drm_mm_for_each_node_in_range'
- 'drm_mm_for_each_node_safe'
- 'flow_action_for_each'
- 'for_each_active_dev_scope'
- 'for_each_active_drhd_unit'
- 'for_each_active_iommu'
- 'for_each_available_child_of_node'
@@ -153,6 +157,8 @@ ForEachMacros:
- 'for_each_cpu_not'
- 'for_each_cpu_wrap'
- 'for_each_dev_addr'
- 'for_each_dev_scope'
- 'for_each_displayid_db'
- 'for_each_dma_cap_mask'
- 'for_each_dpcm_be'
- 'for_each_dpcm_be_rollback'
@@ -169,6 +175,8 @@ ForEachMacros:
- 'for_each_evictable_lru'
- 'for_each_fib6_node_rt_rcu'
- 'for_each_fib6_walker_rt'
- 'for_each_free_mem_pfn_range_in_zone'
- 'for_each_free_mem_pfn_range_in_zone_from'
- 'for_each_free_mem_range'
- 'for_each_free_mem_range_reverse'
- 'for_each_func_rsrc'
@@ -178,6 +186,7 @@ ForEachMacros:
- 'for_each_ip_tunnel_rcu'
- 'for_each_irq_nr'
- 'for_each_link_codecs'
- 'for_each_link_platforms'
- 'for_each_lru'
- 'for_each_matching_node'
- 'for_each_matching_node_and_match'
@@ -302,7 +311,10 @@ ForEachMacros:
- 'ide_port_for_each_present_dev'
- 'idr_for_each_entry'
- 'idr_for_each_entry_continue'
- 'idr_for_each_entry_continue_ul'
- 'idr_for_each_entry_ul'
- 'in_dev_for_each_ifa_rcu'
- 'in_dev_for_each_ifa_rtnl'
- 'inet_bind_bucket_for_each'
- 'inet_lhash2_for_each_icsk_rcu'
- 'key_for_each'
@@ -343,8 +355,6 @@ ForEachMacros:
- 'media_device_for_each_intf'
- 'media_device_for_each_link'
- 'media_device_for_each_pad'
- 'mp_bvec_for_each_page'
- 'mp_bvec_for_each_segment'
- 'nanddev_io_for_each_page'
- 'netdev_for_each_lower_dev'
- 'netdev_for_each_lower_private'
@@ -381,18 +391,19 @@ ForEachMacros:
- 'radix_tree_for_each_slot'
- 'radix_tree_for_each_tagged'
- 'rbtree_postorder_for_each_entry_safe'
- 'rdma_for_each_block'
- 'rdma_for_each_port'
- 'resource_list_for_each_entry'
- 'resource_list_for_each_entry_safe'
- 'rhl_for_each_entry_rcu'
- 'rhl_for_each_rcu'
- 'rht_for_each'
- 'rht_for_each_from'
- 'rht_for_each_entry'
- 'rht_for_each_entry_from'
- 'rht_for_each_entry_rcu'
- 'rht_for_each_entry_rcu_from'
- 'rht_for_each_entry_safe'
- 'rht_for_each_from'
- 'rht_for_each_rcu'
- 'rht_for_each_rcu_from'
- '__rq_for_each_bio'
@@ -159,7 +159,7 @@ Mitigation development

The initial response team sets up an encrypted mailing-list or repurposes
an existing one if appropriate. The disclosing party should provide a list
of contacts for all other parties who have already been, or should be
of contacts for all other parties who have already been, or should be,
informed about the issue. The response team contacts these parties so they
can name experts who should be subscribed to the mailing-list.
@@ -217,11 +217,11 @@ an involved disclosed party. The current ambassadors list:
AMD
IBM
Intel
Qualcomm
Qualcomm Trilok Soni <tsoni@codeaurora.org>

Microsoft
Microsoft Sasha Levin <sashal@kernel.org>
VMware
XEN
Xen Andrew Cooper <andrew.cooper3@citrix.com>

Canonical Tyler Hicks <tyhicks@canonical.com>
Debian Ben Hutchings <ben@decadent.org.uk>
@@ -230,8 +230,8 @@ an involved disclosed party. The current ambassadors list:
SUSE Jiri Kosina <jkosina@suse.cz>

Amazon
Google
============== ========================================================
Google Kees Cook <keescook@chromium.org>
============= ========================================================

If you want your organization to be added to the ambassadors list, please
contact the hardware security team. The nominated ambassador has to
@@ -18,7 +18,7 @@ The following 64-byte header is present in decompressed Linux kernel image.
u32 res1 = 0; /* Reserved */
u64 res2 = 0; /* Reserved */
u64 magic = 0x5643534952; /* Magic number, little endian, "RISCV" */
u32 res3; /* Reserved for additional RISC-V specific header */
u32 magic2 = 0x56534905; /* Magic number 2, little endian, "RSC\x05" */
u32 res4; /* Reserved for PE COFF offset */

This header format is compliant with PE/COFF header and largely inspired from
@@ -37,13 +37,14 @@ Notes:
Bits 16:31 - Major version

This preserves compatibility across newer and older version of the header.
The current version is defined as 0.1.
The current version is defined as 0.2.

- res3 is reserved for offset to any other additional fields. This makes the
header extendible in future. One example would be to accommodate ISA
extension for RISC-V in future. For current version, it is set to be zero.
- The "magic" field is deprecated as of version 0.2. In a future
release, it may be removed. This originally should have matched up
with the ARM64 header "magic" field, but unfortunately does not.
The "magic2" field replaces it, matching up with the ARM64 header.

- In current header, the flag field has only one field.
- In current header, the flags field has only one field.
Bit 0: Kernel endianness. 1 if BE, 0 if LE.

- Image size is mandatory for boot loader to load kernel image. Booting will
@@ -17732,8 +17732,7 @@ F: include/uapi/linux/dqblk_xfs.h
F: include/uapi/linux/fsmap.h

XILINX AXI ETHERNET DRIVER
M: Anirudha Sarangi <anirudh@xilinx.com>
M: John Linn <John.Linn@xilinx.com>
M: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
S: Maintained
F: drivers/net/ethernet/xilinx/xilinx_axienet*
Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 3
SUBLEVEL = 0
EXTRAVERSION = -rc7
EXTRAVERSION = -rc8
NAME = Bobtail Squid

# *DOCUMENTATION*
@@ -279,6 +279,7 @@
mmc-hs200-1_8v;
non-removable;
fixed-emmc-driver-type = <1>;
status = "okay";
};

&usb_extal_clk {
@@ -97,7 +97,7 @@
reg = <0x0 0x48000000 0x0 0x18000000>;
};

reg_1p8v: regulator0 {
reg_1p8v: regulator-1p8v {
compatible = "regulator-fixed";
regulator-name = "fixed-1.8V";
regulator-min-microvolt = <1800000>;
@@ -106,7 +106,7 @@
regulator-always-on;
};

reg_3p3v: regulator1 {
reg_3p3v: regulator-3p3v {
compatible = "regulator-fixed";
regulator-name = "fixed-3.3V";
regulator-min-microvolt = <3300000>;
@@ -115,7 +115,7 @@
regulator-always-on;
};

reg_12p0v: regulator1 {
reg_12p0v: regulator-12p0v {
compatible = "regulator-fixed";
regulator-name = "D12.0V";
regulator-min-microvolt = <12000000>;
@@ -101,21 +101,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
}
}

static bool tm_active_with_fp(struct task_struct *tsk)
{
return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
(tsk->thread.ckpt_regs.msr & MSR_FP);
}

static bool tm_active_with_altivec(struct task_struct *tsk)
{
return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
(tsk->thread.ckpt_regs.msr & MSR_VEC);
}
#else
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

bool strict_msr_control;
@@ -252,7 +239,7 @@ EXPORT_SYMBOL(enable_kernel_fp);

static int restore_fp(struct task_struct *tsk)
{
if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
if (tsk->thread.load_fp) {
load_fp_state(&current->thread.fp_state);
current->thread.load_fp++;
return 1;
@@ -334,8 +321,7 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);

static int restore_altivec(struct task_struct *tsk)
{
if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
(tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
load_vr_state(&tsk->thread.vr_state);
tsk->thread.used_vr = 1;
tsk->thread.load_vec++;
@@ -497,13 +483,14 @@ void giveup_all(struct task_struct *tsk)
if (!tsk->thread.regs)
return;

check_if_tm_restore_required(tsk);

usermsr = tsk->thread.regs->msr;

if ((usermsr & msr_all_available) == 0)
return;

msr_check_and_set(msr_all_available);
check_if_tm_restore_required(tsk);

WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
@@ -630,7 +630,6 @@ static void early_init_this_mmu(void)
#ifdef CONFIG_PPC_FSL_BOOK3E
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
unsigned int num_cams;
int __maybe_unused cpu = smp_processor_id();
bool map = true;

/* use a quarter of the TLBCAM for bolted linear map */
@@ -3,7 +3,8 @@
#ifndef __ASM_IMAGE_H
#define __ASM_IMAGE_H

#define RISCV_IMAGE_MAGIC "RISCV"
#define RISCV_IMAGE_MAGIC "RISCV\0\0\0"
#define RISCV_IMAGE_MAGIC2 "RSC\x05"

#define RISCV_IMAGE_FLAG_BE_SHIFT 0
#define RISCV_IMAGE_FLAG_BE_MASK 0x1
@@ -23,7 +24,7 @@
#define __HEAD_FLAGS (__HEAD_FLAG(BE))

#define RISCV_HEADER_VERSION_MAJOR 0
#define RISCV_HEADER_VERSION_MINOR 1
#define RISCV_HEADER_VERSION_MINOR 2

#define RISCV_HEADER_VERSION (RISCV_HEADER_VERSION_MAJOR << 16 | \
RISCV_HEADER_VERSION_MINOR)
@@ -39,9 +40,8 @@
* @version: version
* @res1: reserved
* @res2: reserved
* @magic: Magic number
* @res3: reserved (will be used for additional RISC-V specific
* header)
* @magic: Magic number (RISC-V specific; deprecated)
* @magic2: Magic number 2 (to match the ARM64 'magic' field pos)
* @res4: reserved (will be used for PE COFF offset)
*
* The intention is for this header format to be shared between multiple
@@ -58,7 +58,7 @@ struct riscv_image_header {
u32 res1;
u64 res2;
u64 magic;
u32 res3;
u32 magic2;
u32 res4;
};
#endif /* __ASSEMBLY__ */
@@ -39,9 +39,9 @@ ENTRY(_start)
.word RISCV_HEADER_VERSION
.word 0
.dword 0
.asciz RISCV_IMAGE_MAGIC
.word 0
.ascii RISCV_IMAGE_MAGIC
.balign 4
.ascii RISCV_IMAGE_MAGIC2
.word 0

.global _start_kernel
@@ -1961,6 +1961,16 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
case KVM_S390_MCHK:
irq->u.mchk.mcic = s390int->parm64;
break;
case KVM_S390_INT_PFAULT_INIT:
irq->u.ext.ext_params = s390int->parm;
irq->u.ext.ext_params2 = s390int->parm64;
break;
case KVM_S390_RESTART:
case KVM_S390_INT_CLOCK_COMP:
case KVM_S390_INT_CPU_TIMER:
break;
default:
return -EINVAL;
}
return 0;
}
@@ -1018,6 +1018,8 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
/* mark all the pages in active slots as dirty */
for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
ms = slots->memslots + slotnr;
if (!ms->dirty_bitmap)
return -EINVAL;
/*
* The second half of the bitmap is only used on x86,
* and would be wasted otherwise, so we put it to good
@@ -4323,7 +4325,7 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp,
}
case KVM_S390_INTERRUPT: {
struct kvm_s390_interrupt s390int;
struct kvm_s390_irq s390irq;
struct kvm_s390_irq s390irq = {};

if (copy_from_user(&s390int, argp, sizeof(s390int)))
return -EFAULT;
@@ -336,23 +336,26 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
{
long err;

if (!IS_ENABLED(CONFIG_SYSVIPC))
return -ENOSYS;

/* No need for backward compatibility. We can start fresh... */
if (call <= SEMTIMEDOP) {
switch (call) {
case SEMOP:
err = sys_semtimedop(first, ptr,
err = ksys_semtimedop(first, ptr,
(unsigned int)second, NULL);
goto out;
case SEMTIMEDOP:
err = sys_semtimedop(first, ptr, (unsigned int)second,
err = ksys_semtimedop(first, ptr, (unsigned int)second,
(const struct __kernel_timespec __user *)
(unsigned long) fifth);
goto out;
case SEMGET:
err = sys_semget(first, (int)second, (int)third);
err = ksys_semget(first, (int)second, (int)third);
goto out;
case SEMCTL: {
err = sys_semctl(first, second,
err = ksys_old_semctl(first, second,
(int)third | IPC_64,
(unsigned long) ptr);
goto out;
@@ -365,18 +368,18 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
if (call <= MSGCTL) {
switch (call) {
case MSGSND:
err = sys_msgsnd(first, ptr, (size_t)second,
err = ksys_msgsnd(first, ptr, (size_t)second,
(int)third);
goto out;
case MSGRCV:
err = sys_msgrcv(first, ptr, (size_t)second, fifth,
err = ksys_msgrcv(first, ptr, (size_t)second, fifth,
(int)third);
goto out;
case MSGGET:
err = sys_msgget((key_t)first, (int)second);
err = ksys_msgget((key_t)first, (int)second);
goto out;
case MSGCTL:
err = sys_msgctl(first, (int)second | IPC_64, ptr);
err = ksys_old_msgctl(first, (int)second | IPC_64, ptr);
goto out;
default:
err = -ENOSYS;
@@ -396,13 +399,13 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
goto out;
}
case SHMDT:
err = sys_shmdt(ptr);
err = ksys_shmdt(ptr);
goto out;
case SHMGET:
err = sys_shmget(first, (size_t)second, (int)third);
err = ksys_shmget(first, (size_t)second, (int)third);
goto out;
case SHMCTL:
err = sys_shmctl(first, (int)second | IPC_64, ptr);
err = ksys_old_shmctl(first, (int)second | IPC_64, ptr);
goto out;
default:
err = -ENOSYS;
@@ -37,12 +37,14 @@ static inline int fill_gva_list(u64 gva_list[], int offset,
* Lower 12 bits encode the number of additional
* pages to flush (in addition to the 'cur' page).
*/
if (diff >= HV_TLB_FLUSH_UNIT)
if (diff >= HV_TLB_FLUSH_UNIT) {
gva_list[gva_n] |= ~PAGE_MASK;
else if (diff)
gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;

cur += HV_TLB_FLUSH_UNIT;
} else if (diff) {
gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
cur = end;
}

gva_n++;

} while (cur < end);
@@ -70,6 +70,7 @@ static void sanitize_boot_params(struct boot_params *boot_params)
BOOT_PARAM_PRESERVE(eddbuf_entries),
BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
BOOT_PARAM_PRESERVE(secure_boot),
BOOT_PARAM_PRESERVE(hdr),
BOOT_PARAM_PRESERVE(e820_table),
BOOT_PARAM_PRESERVE(eddbuf),
@@ -335,6 +335,7 @@ struct kvm_mmu_page {
int root_count; /* Currently serving as active root */
unsigned int unsync_children;
struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
unsigned long mmu_valid_gen;
DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
@@ -856,6 +857,7 @@ struct kvm_arch {
unsigned long n_requested_mmu_pages;
unsigned long n_max_mmu_pages;
unsigned int indirect_shadow_pages;
unsigned long mmu_valid_gen;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
/*
* Hash table of struct kvm_mmu_page.
@@ -444,8 +444,10 @@ __pu_label: \
({ \
int __gu_err; \
__inttype(*(ptr)) __gu_val; \
__typeof__(ptr) __gu_ptr = (ptr); \
__typeof__(size) __gu_size = (size); \
__uaccess_begin_nospec(); \
__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
__get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \
__uaccess_end(); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__builtin_expect(__gu_err, 0); \
@@ -834,6 +834,10 @@ bool __init apic_needs_pit(void)
if (!boot_cpu_has(X86_FEATURE_APIC))
return true;

/* Virt guests may lack ARAT, but still have DEADLINE */
if (!boot_cpu_has(X86_FEATURE_ARAT))
return true;

/* Deadline timer is based on TSC so no further PIT action required */
if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
return false;
@@ -1179,10 +1183,6 @@ void clear_local_APIC(void)
apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
v = apic_read(APIC_LVT1);
apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
if (!x2apic_enabled()) {
v = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
apic_write(APIC_LDR, v);
}
if (maxlvt >= 4) {
v = apic_read(APIC_LVTPC);
apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
@@ -2095,6 +2095,12 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct
if (!direct)
sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

/*
* active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
* depends on valid pages being added to the head of the list. See
* comments in kvm_zap_obsolete_pages().
*/
list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
kvm_mod_used_mmu_pages(vcpu->kvm, +1);
return sp;
@@ -2244,7 +2250,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
#define for_each_valid_sp(_kvm, _sp, _gfn) \
hlist_for_each_entry(_sp, \
&(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
if ((_sp)->role.invalid) { \
if (is_obsolete_sp((_kvm), (_sp)) || (_sp)->role.invalid) { \
} else

#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn) \
@@ -2301,6 +2307,11 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
static void mmu_audit_disable(void) { }
#endif

static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
}

static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
struct list_head *invalid_list)
{
@@ -2525,6 +2536,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
if (level > PT_PAGE_TABLE_LEVEL && need_sync)
flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
}
sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
clear_page(sp->spt);
trace_kvm_mmu_get_page(sp, true);

@@ -4233,6 +4245,13 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
return false;

if (cached_root_available(vcpu, new_cr3, new_role)) {
/*
* It is possible that the cached previous root page is
* obsolete because of a change in the MMU generation
* number. However, changing the generation number is
* accompanied by KVM_REQ_MMU_RELOAD, which will free
* the root set here and allocate a new one.
*/
kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
if (!skip_tlb_flush) {
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
@@ -5649,11 +5668,89 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
return alloc_mmu_pages(vcpu);
}

static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
struct kvm_mmu_page *sp, *node;
LIST_HEAD(invalid_list);
int ign;

restart:
list_for_each_entry_safe_reverse(sp, node,
&kvm->arch.active_mmu_pages, link) {
/*
* No obsolete valid page exists before a newly created page
* since active_mmu_pages is a FIFO list.
*/
if (!is_obsolete_sp(kvm, sp))
break;

/*
* Do not repeatedly zap a root page to avoid unnecessary
* KVM_REQ_MMU_RELOAD, otherwise we may not be able to
* progress:
* vcpu 0 vcpu 1
* call vcpu_enter_guest():
* 1): handle KVM_REQ_MMU_RELOAD
* and require mmu-lock to
* load mmu
* repeat:
* 1): zap root page and
* send KVM_REQ_MMU_RELOAD
*
* 2): if (cond_resched_lock(mmu-lock))
*
* 2): hold mmu-lock and load mmu
*
* 3): see KVM_REQ_MMU_RELOAD bit
* on vcpu->requests is set
* then return 1 to call
* vcpu_enter_guest() again.
* goto repeat;
*
* Since we are reversely walking the list and the invalid
* list will be moved to the head, skip the invalid page
* can help us to avoid the infinity list walking.
*/
if (sp->role.invalid)
continue;

if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
kvm_mmu_commit_zap_page(kvm, &invalid_list);
cond_resched_lock(&kvm->mmu_lock);
goto restart;
}

if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
goto restart;
}

kvm_mmu_commit_zap_page(kvm, &invalid_list);
}

/*
* Fast invalidate all shadow pages and use lock-break technique
* to zap obsolete pages.
*
* It's required when memslot is being deleted or VM is being
* destroyed, in these cases, we should ensure that KVM MMU does
* not use any resource of the being-deleted slot or all slots
* after calling the function.
*/
static void kvm_mmu_zap_all_fast(struct kvm *kvm)
{
spin_lock(&kvm->mmu_lock);
kvm->arch.mmu_valid_gen++;

kvm_zap_obsolete_pages(kvm);
spin_unlock(&kvm->mmu_lock);
}

static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot,
struct kvm_page_track_notifier_node *node)
{
kvm_mmu_zap_all(kvm);
kvm_mmu_zap_all_fast(kvm);
}

void kvm_mmu_init_vm(struct kvm *kvm)
@@ -4540,6 +4540,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
int len;
gva_t gva = 0;
struct vmcs12 *vmcs12;
struct x86_exception e;
short offset;

if (!nested_vmx_check_permission(vcpu))
@@ -4588,7 +4589,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
vmx_instruction_info, true, len, &gva))
return 1;
/* _system ok, nested_vmx_check_permission has verified cpl=0 */
kvm_write_guest_virt_system(vcpu, gva, &field_value, len, NULL);
if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e))
kvm_inject_page_fault(vcpu, &e);
}

return nested_vmx_succeed(vcpu);
@@ -5312,6 +5312,13 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
/* kvm_write_guest_virt_system can pull in tons of pages. */
vcpu->arch.l1tf_flush_l1d = true;

/*
* FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
* is returned, but our callers are not ready for that and they blindly
* call kvm_inject_page_fault. Ensure that they at least do not leak
* uninitialized kernel stack memory into cr2 and error code.
*/
memset(exception, 0, sizeof(*exception));
return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
PFERR_WRITE_MASK, exception);
}
@@ -18,37 +18,40 @@ targets += purgatory.ro
KASAN_SANITIZE := n
KCOV_INSTRUMENT := n

# These are adjustments to the compiler flags used for objects that
# make up the standalone purgatory.ro

PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss

# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
# in turn leaves some undefined symbols like __fentry__ in purgatory and not
# sure how to relocate those.
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_sha256.o += $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_purgatory.o += $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_string.o += $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_kexec-purgatory.o += $(CC_FLAGS_FTRACE)
PURGATORY_CFLAGS_REMOVE += $(CC_FLAGS_FTRACE)
endif

ifdef CONFIG_STACKPROTECTOR
CFLAGS_REMOVE_sha256.o += -fstack-protector
CFLAGS_REMOVE_purgatory.o += -fstack-protector
CFLAGS_REMOVE_string.o += -fstack-protector
CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector
PURGATORY_CFLAGS_REMOVE += -fstack-protector
endif

ifdef CONFIG_STACKPROTECTOR_STRONG
CFLAGS_REMOVE_sha256.o += -fstack-protector-strong
CFLAGS_REMOVE_purgatory.o += -fstack-protector-strong
CFLAGS_REMOVE_string.o += -fstack-protector-strong
CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector-strong
PURGATORY_CFLAGS_REMOVE += -fstack-protector-strong
endif

ifdef CONFIG_RETPOLINE
CFLAGS_REMOVE_sha256.o += $(RETPOLINE_CFLAGS)
CFLAGS_REMOVE_purgatory.o += $(RETPOLINE_CFLAGS)
CFLAGS_REMOVE_string.o += $(RETPOLINE_CFLAGS)
CFLAGS_REMOVE_kexec-purgatory.o += $(RETPOLINE_CFLAGS)
PURGATORY_CFLAGS_REMOVE += $(RETPOLINE_CFLAGS)
endif

CFLAGS_REMOVE_purgatory.o += $(PURGATORY_CFLAGS_REMOVE)
CFLAGS_purgatory.o += $(PURGATORY_CFLAGS)

CFLAGS_REMOVE_sha256.o += $(PURGATORY_CFLAGS_REMOVE)
CFLAGS_sha256.o += $(PURGATORY_CFLAGS)

CFLAGS_REMOVE_string.o += $(PURGATORY_CFLAGS_REMOVE)
CFLAGS_string.o += $(PURGATORY_CFLAGS)

$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
$(call if_changed,ld)
@@ -337,7 +337,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)

usb_free_urb(urb);

return 0;
return err;
}

static int bpa10x_set_diag(struct hci_dev *hdev, bool enable)
@@ -384,6 +384,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3526), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },

/* Additional Realtek 8822CE Bluetooth devices */
{ USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK },

/* Silicon Wave based devices */
{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },

@@ -1201,10 +1204,6 @@ static int btusb_open(struct hci_dev *hdev)
}

data->intf->needs_remote_wakeup = 1;
/* device specific wakeup source enabled and required for USB
* remote wakeup while host is suspended
*/
device_wakeup_enable(&data->udev->dev);

/* Disable device remote wakeup when host is suspended
* For Realtek chips, global suspend without
@@ -1281,7 +1280,6 @@ static int btusb_close(struct hci_dev *hdev)
if (test_bit(BTUSB_WAKEUP_DISABLE, &data->flags))
data->intf->needs_remote_wakeup = 1;

device_wakeup_disable(&data->udev->dev);
usb_autopm_put_interface(data->intf);

failed:
@@ -309,13 +309,14 @@ static void qca_wq_awake_device(struct work_struct *work)
ws_awake_device);
struct hci_uart *hu = qca->hu;
unsigned long retrans_delay;
unsigned long flags;

BT_DBG("hu %p wq awake device", hu);

/* Vote for serial clock */
serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);

spin_lock(&qca->hci_ibs_lock);
spin_lock_irqsave(&qca->hci_ibs_lock, flags);

/* Send wake indication to device */
if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
@@ -327,7 +328,7 @@ static void qca_wq_awake_device(struct work_struct *work)
retrans_delay = msecs_to_jiffies(qca->wake_retrans);
mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);

spin_unlock(&qca->hci_ibs_lock);
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

/* Actually send the packets */
hci_uart_tx_wakeup(hu);
@@ -338,12 +339,13 @@ static void qca_wq_awake_rx(struct work_struct *work)
struct qca_data *qca = container_of(work, struct qca_data,
ws_awake_rx);
struct hci_uart *hu = qca->hu;
unsigned long flags;

BT_DBG("hu %p wq awake rx", hu);

serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);

spin_lock(&qca->hci_ibs_lock);
spin_lock_irqsave(&qca->hci_ibs_lock, flags);
qca->rx_ibs_state = HCI_IBS_RX_AWAKE;

/* Always acknowledge device wake up,
@@ -354,7 +356,7 @@ static void qca_wq_awake_rx(struct work_struct *work)

qca->ibs_sent_wacks++;

spin_unlock(&qca->hci_ibs_lock);
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

/* Actually send the packets */
hci_uart_tx_wakeup(hu);
@@ -192,6 +192,7 @@ struct rcar_dmac_chan {
* @iomem: remapped I/O memory base
* @n_channels: number of available channels
* @channels: array of DMAC channels
* @channels_mask: bitfield of which DMA channels are managed by this driver
* @modules: bitmask of client modules in use
*/
struct rcar_dmac {
@@ -202,6 +203,7 @@ struct rcar_dmac {

unsigned int n_channels;
struct rcar_dmac_chan *channels;
unsigned int channels_mask;

DECLARE_BITMAP(modules, 256);
};
@@ -438,7 +440,7 @@ static int rcar_dmac_init(struct rcar_dmac *dmac)
u16 dmaor;

/* Clear all channels and enable the DMAC globally. */
rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0));
rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
rcar_dmac_write(dmac, RCAR_DMAOR,
RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);

@@ -814,6 +816,9 @@ static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
for (i = 0; i < dmac->n_channels; ++i) {
struct rcar_dmac_chan *chan = &dmac->channels[i];

if (!(dmac->channels_mask & BIT(i)))
continue;

/* Stop and reinitialize the channel. */
spin_lock_irq(&chan->lock);
rcar_dmac_chan_halt(chan);
@@ -1776,6 +1781,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
return 0;
}

#define RCAR_DMAC_MAX_CHANNELS 32

static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
{
struct device_node *np = dev->of_node;
@@ -1787,12 +1794,16 @@ static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
return ret;
}

if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
/* The hardware and driver don't support more than 32 bits in CHCLR */
if (dmac->n_channels <= 0 ||
dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) {
dev_err(dev, "invalid number of channels %u\n",
dmac->n_channels);
return -EINVAL;
}

dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0);

return 0;
}

@@ -1802,7 +1813,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
unsigned int channels_offset = 0;
struct dma_device *engine;
struct rcar_dmac *dmac;
struct resource *mem;
@@ -1831,10 +1841,8 @@ static int rcar_dmac_probe(struct platform_device *pdev)
* level we can't disable it selectively, so ignore channel 0 for now if
* the device is part of an IOMMU group.
*/
if (device_iommu_mapped(&pdev->dev)) {
dmac->n_channels--;
channels_offset = 1;
}
if (device_iommu_mapped(&pdev->dev))
dmac->channels_mask &= ~BIT(0);

dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
sizeof(*dmac->channels), GFP_KERNEL);
@@ -1892,8 +1900,10 @@ static int rcar_dmac_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&engine->channels);

for (i = 0; i < dmac->n_channels; ++i) {
ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
i + channels_offset);
if (!(dmac->channels_mask & BIT(i)))
continue;

ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i);
if (ret < 0)
goto error;
}
@@ -908,6 +908,7 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
struct dma_slave_config *slave_cfg = &schan->slave_cfg;
dma_addr_t src = 0, dst = 0;
dma_addr_t start_src = 0, start_dst = 0;
struct sprd_dma_desc *sdesc;
struct scatterlist *sg;
u32 len = 0;
@@ -954,6 +955,11 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
dst = sg_dma_address(sg);
}

if (!i) {
start_src = src;
start_dst = dst;
}

/*
* The link-list mode needs at least 2 link-list
* configurations. If there is only one sg, it doesn't
@@ -970,8 +976,8 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
}

ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
dir, flags, slave_cfg);
ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, start_src,
start_dst, len, dir, flags, slave_cfg);
if (ret) {
kfree(sdesc);
return NULL;
@@ -391,8 +391,10 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)

ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
nelm * 2);
if (ret)
if (ret) {
kfree(rsv_events);
return ret;
}

for (i = 0; i < nelm; i++) {
ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
@@ -1540,9 +1540,11 @@ static int omap_dma_probe(struct platform_device *pdev)

rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
IRQF_SHARED, "omap-dma-engine", od);
if (rc)
if (rc) {
omap_dma_free(od);
return rc;
}
}

if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
od->ll123_supported = true;
@@ -309,6 +309,7 @@ static const struct file_operations gpio_mockup_debugfs_ops = {
.read = gpio_mockup_debugfs_read,
.write = gpio_mockup_debugfs_write,
.llseek = no_llseek,
.release = single_release,
};

static void gpio_mockup_debugfs_setup(struct device *dev,
@@ -604,10 +604,9 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
u8 new_irqs;
int level, i;
u8 invert_irq_mask[MAX_BANK];
int reg_direction[MAX_BANK];
u8 reg_direction[MAX_BANK];

regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
NBANK(chip));
pca953x_read_regs(chip, chip->regs->direction, reg_direction);

if (chip->driver_data & PCA_PCAL) {
/* Enable latch on interrupt-enabled inputs */
@@ -679,7 +678,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
bool pending_seen = false;
bool trigger_seen = false;
u8 trigger[MAX_BANK];
int reg_direction[MAX_BANK];
u8 reg_direction[MAX_BANK];
int ret, i;

if (chip->driver_data & PCA_PCAL) {
@@ -710,8 +709,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
return false;

/* Remove output pins from the equation */
regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
NBANK(chip));
pca953x_read_regs(chip, chip->regs->direction, reg_direction);
for (i = 0; i < NBANK(chip); i++)
cur_stat[i] &= reg_direction[i];

@@ -768,7 +766,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
{
struct i2c_client *client = chip->client;
struct irq_chip *irq_chip = &chip->irq_chip;
int reg_direction[MAX_BANK];
u8 reg_direction[MAX_BANK];
int ret, i;

if (!client->irq)
@@ -789,8 +787,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
* interrupt. We have to rely on the previous read for
* this purpose.
*/
regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
NBANK(chip));
pca953x_read_regs(chip, chip->regs->direction, reg_direction);
for (i = 0; i < NBANK(chip); i++)
chip->irq_stat[i] &= reg_direction[i];
mutex_init(&chip->irq_lock);
@@ -7,6 +7,7 @@
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/

#include <linux/dmi.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
@@ -19,6 +20,11 @@

#include "gpiolib.h"

static int run_edge_events_on_boot = -1;
module_param(run_edge_events_on_boot, int, 0444);
MODULE_PARM_DESC(run_edge_events_on_boot,
"Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");

/**
* struct acpi_gpio_event - ACPI GPIO event handler data
*
@@ -170,11 +176,14 @@ static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
event->irq_requested = true;

/* Make sure we trigger the initial state of edge-triggered IRQs */
if (run_edge_events_on_boot &&
(event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) {
value = gpiod_get_raw_value_cansleep(event->desc);
if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
event->handler(event->irq, event);
}
}

static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
{
@@ -1283,3 +1292,28 @@ static int acpi_gpio_handle_deferred_request_irqs(void)
}
/* We must use _sync so that this runs after the first deferred_probe run */
late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);

static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
}
},
{} /* Terminating entry */
};

static int acpi_gpio_setup_params(void)
{
if (run_edge_events_on_boot < 0) {
if (dmi_check_system(run_edge_events_on_boot_blacklist))
run_edge_events_on_boot = 0;
else
run_edge_events_on_boot = 1;
}

return 0;
}

/* Directly after dmi_setup() which runs as core_initcall() */
postcore_initcall(acpi_gpio_setup_params);
@@ -343,36 +343,27 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,

desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
&of_flags);
/*
* -EPROBE_DEFER in our case means that we found a
* valid GPIO property, but no controller has been
* registered so far.
*
* This means we don't need to look any further for
* alternate name conventions, and we should really
* preserve the return code for our user to be able to
* retry probing later.
*/
if (IS_ERR(desc) && PTR_ERR(desc) == -EPROBE_DEFER)
return desc;

if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT))
if (!IS_ERR(desc) || PTR_ERR(desc) != -ENOENT)
break;
}

if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
/* Special handling for SPI GPIOs if used */
if (IS_ERR(desc))
desc = of_find_spi_gpio(dev, con_id, &of_flags);
if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) {
}

if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
/* This quirk looks up flags and all */
desc = of_find_spi_cs_gpio(dev, con_id, idx, flags);
if (!IS_ERR(desc))
return desc;
}

if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
/* Special handling for regulator GPIOs if used */
if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER)
desc = of_find_regulator_gpio(dev, con_id, &of_flags);
}

if (IS_ERR(desc))
return desc;
@@ -535,6 +535,14 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
return -EINVAL;

/*
* Do not allow both INPUT & OUTPUT flags to be set as they are
* contradictory.
*/
if ((lflags & GPIOHANDLE_REQUEST_INPUT) &&
(lflags & GPIOHANDLE_REQUEST_OUTPUT))
return -EINVAL;

/*
* Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
* the hardware actually supports enabling both at the same time the
@@ -926,7 +934,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
}

/* This is just wrong: we don't look for events on output lines */
if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
(lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
(lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) {
ret = -EINVAL;
goto out_free_label;
}
@@ -940,10 +950,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)

if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN)
set_bit(FLAG_OPEN_DRAIN, &desc->flags);
if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
set_bit(FLAG_OPEN_SOURCE, &desc->flags);

ret = gpiod_direction_input(desc);
if (ret)
@@ -1454,6 +1454,7 @@ static int drm_mode_parse_cmdline_refresh(const char *str, char **end_ptr,
}

static int drm_mode_parse_cmdline_extra(const char *str, int length,
bool freestanding,
const struct drm_connector *connector,
struct drm_cmdline_mode *mode)
{
@@ -1462,9 +1463,15 @@ static int drm_mode_parse_cmdline_extra(const char *str, int length,
for (i = 0; i < length; i++) {
switch (str[i]) {
case 'i':
if (freestanding)
return -EINVAL;

mode->interlace = true;
break;
case 'm':
if (freestanding)
return -EINVAL;

mode->margins = true;
break;
case 'D':
@@ -1542,6 +1549,7 @@ static int drm_mode_parse_cmdline_res_mode(const char *str, unsigned int length,
if (extras) {
int ret = drm_mode_parse_cmdline_extra(end_ptr + i,
1,
false,
connector,
mode);
if (ret)
@@ -1669,6 +1677,22 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len,
return 0;
}

static const char * const drm_named_modes_whitelist[] = {
"NTSC",
"PAL",
};

static bool drm_named_mode_is_in_whitelist(const char *mode, unsigned int size)
{
int i;

for (i = 0; i < ARRAY_SIZE(drm_named_modes_whitelist); i++)
if (!strncmp(mode, drm_named_modes_whitelist[i], size))
return true;

return false;
}

/**
* drm_mode_parse_command_line_for_connector - parse command line modeline for connector
* @mode_option: optional per connector mode option
@@ -1725,16 +1749,30 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
* bunch of things:
* - We need to make sure that the first character (which
* would be our resolution in X) is a digit.
* - However, if the X resolution is missing, then we end up
* with something like x<yres>, with our first character
* being an alpha-numerical character, which would be
* considered a named mode.
* - If not, then it's either a named mode or a force on/off.
* To distinguish between the two, we need to run the
* extra parsing function, and if not, then we consider it
* a named mode.
*
* If this isn't enough, we should add more heuristics here,
* and matching unit-tests.
*/
if (!isdigit(name[0]) && name[0] != 'x')
if (!isdigit(name[0]) && name[0] != 'x') {
unsigned int namelen = strlen(name);

/*
* Only the force on/off options can be in that case,
* and they all take a single character.
*/
if (namelen == 1) {
ret = drm_mode_parse_cmdline_extra(name, namelen, true,
connector, mode);
if (!ret)
return true;
}

named_mode = true;
}

/* Try to locate the bpp and refresh specifiers, if any */
bpp_ptr = strchr(name, '-');
@@ -1772,6 +1810,10 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
if (named_mode) {
if (mode_end + 1 > DRM_DISPLAY_MODE_LEN)
return false;

if (!drm_named_mode_is_in_whitelist(name, mode_end))
return false;

strscpy(mode->name, name, mode_end + 1);
} else {
ret = drm_mode_parse_cmdline_res_mode(name, mode_end,
@@ -1811,7 +1853,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
extra_ptr != options_ptr) {
int len = strlen(name) - (extra_ptr - name);

ret = drm_mode_parse_cmdline_extra(extra_ptr, len,
ret = drm_mode_parse_cmdline_extra(extra_ptr, len, false,
connector, mode);
if (ret)
return false;
@@ -128,7 +128,15 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

limits.min_bpp = intel_dp_min_bpp(pipe_config);
limits.max_bpp = pipe_config->pipe_bpp;
/*
* FIXME: If all the streams can't fit into the link with
* their current pipe_bpp we should reduce pipe_bpp across
* the board until things start to fit. Until then we
* limit to <= 8bpc since that's what was hardcoded for all
* MST streams previously. This hack should be removed once
* we have the proper retry logic in place.
*/
limits.max_bpp = min(pipe_config->pipe_bpp, 24);

intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
@@ -664,15 +664,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,

for_each_sgt_page(page, sgt_iter, pages) {
if (obj->mm.dirty)
/*
* As this may not be anonymous memory (e.g. shmem)
* but exist on a real mapping, we have to lock
* the page in order to dirty it -- holding
* the page reference is not sufficient to
* prevent the inode from being truncated.
* Play safe and take the lock.
*/
set_page_dirty_lock(page);
set_page_dirty(page);

mark_page_accessed(page);
put_page(page);
@@ -308,11 +308,6 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
FLOW_CONTROL_ENABLE |
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
if (!IS_COFFEELAKE(i915))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);

/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
@@ -656,10 +656,9 @@ static int ingenic_drm_probe(struct platform_device *pdev)
return ret;
}

if (panel) {
if (panel)
bridge = devm_drm_panel_bridge_add(dev, panel,
DRM_MODE_CONNECTOR_Unknown);
}
DRM_MODE_CONNECTOR_DPI);

priv->dma_hwdesc = dma_alloc_coherent(dev, sizeof(*priv->dma_hwdesc),
&priv->dma_hwdesc_phys,
@@ -342,7 +342,7 @@ int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
timeout = drm_timeout_abs_to_jiffies(timeout_ns);

ret = drm_gem_reservation_object_wait(file, handle, write, timeout);
if (ret == 0)
if (ret == -ETIME)
ret = timeout ? -ETIMEDOUT : -EBUSY;

return ret;
@@ -190,6 +190,9 @@ MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin");
MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin");
@@ -210,6 +213,9 @@ MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin");
MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin");
@@ -230,6 +236,9 @@ MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin");
MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin");
@@ -250,3 +259,6 @@ MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");
@@ -9,6 +9,13 @@

#define cmdline_test(test) selftest(test, test)

cmdline_test(drm_cmdline_test_force_d_only)
cmdline_test(drm_cmdline_test_force_D_only_dvi)
cmdline_test(drm_cmdline_test_force_D_only_hdmi)
cmdline_test(drm_cmdline_test_force_D_only_not_digital)
cmdline_test(drm_cmdline_test_force_e_only)
cmdline_test(drm_cmdline_test_margin_only)
cmdline_test(drm_cmdline_test_interlace_only)
cmdline_test(drm_cmdline_test_res)
cmdline_test(drm_cmdline_test_res_missing_x)
cmdline_test(drm_cmdline_test_res_missing_y)
@@ -17,6 +17,136 @@

static const struct drm_connector no_connector = {};

static int drm_cmdline_test_force_e_only(void *ignored)
{
struct drm_cmdline_mode mode = { };

FAIL_ON(!drm_mode_parse_command_line_for_connector("e",
&no_connector,
&mode));
FAIL_ON(mode.specified);
FAIL_ON(mode.refresh_specified);
FAIL_ON(mode.bpp_specified);

FAIL_ON(mode.rb);
FAIL_ON(mode.cvt);
FAIL_ON(mode.interlace);
FAIL_ON(mode.margins);
FAIL_ON(mode.force != DRM_FORCE_ON);

return 0;
}

static int drm_cmdline_test_force_D_only_not_digital(void *ignored)
{
struct drm_cmdline_mode mode = { };

FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
&no_connector,
&mode));
FAIL_ON(mode.specified);
FAIL_ON(mode.refresh_specified);
FAIL_ON(mode.bpp_specified);

FAIL_ON(mode.rb);
FAIL_ON(mode.cvt);
FAIL_ON(mode.interlace);
FAIL_ON(mode.margins);
FAIL_ON(mode.force != DRM_FORCE_ON);

return 0;
}

static const struct drm_connector connector_hdmi = {
.connector_type = DRM_MODE_CONNECTOR_HDMIB,
};

static int drm_cmdline_test_force_D_only_hdmi(void *ignored)
{
struct drm_cmdline_mode mode = { };

FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
&connector_hdmi,
&mode));
FAIL_ON(mode.specified);
FAIL_ON(mode.refresh_specified);
FAIL_ON(mode.bpp_specified);

FAIL_ON(mode.rb);
FAIL_ON(mode.cvt);
FAIL_ON(mode.interlace);
FAIL_ON(mode.margins);
FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL);

return 0;
}

static const struct drm_connector connector_dvi = {
.connector_type = DRM_MODE_CONNECTOR_DVII,
};

static int drm_cmdline_test_force_D_only_dvi(void *ignored)
{
struct drm_cmdline_mode mode = { };

FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
&connector_dvi,
&mode));
FAIL_ON(mode.specified);
FAIL_ON(mode.refresh_specified);
FAIL_ON(mode.bpp_specified);

FAIL_ON(mode.rb);
FAIL_ON(mode.cvt);
FAIL_ON(mode.interlace);
FAIL_ON(mode.margins);
FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL);

return 0;
}

static int drm_cmdline_test_force_d_only(void *ignored)
{
struct drm_cmdline_mode mode = { };

FAIL_ON(!drm_mode_parse_command_line_for_connector("d",
&no_connector,
&mode));
FAIL_ON(mode.specified);
FAIL_ON(mode.refresh_specified);
FAIL_ON(mode.bpp_specified);

FAIL_ON(mode.rb);
FAIL_ON(mode.cvt);
FAIL_ON(mode.interlace);
FAIL_ON(mode.margins);
FAIL_ON(mode.force != DRM_FORCE_OFF);

return 0;
}

static int drm_cmdline_test_margin_only(void *ignored)
{
struct drm_cmdline_mode mode = { };

FAIL_ON(drm_mode_parse_command_line_for_connector("m",
&no_connector,
&mode));

return 0;
}

static int drm_cmdline_test_interlace_only(void *ignored)
{
struct drm_cmdline_mode mode = { };

FAIL_ON(drm_mode_parse_command_line_for_connector("i",
&no_connector,
&mode));

return 0;
}

static int drm_cmdline_test_res(void *ignored)
{
struct drm_cmdline_mode mode = { };
@@ -353,7 +353,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
!!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
kfree(reply);
reply = NULL;
if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
/* A checkpoint occurred. Retry. */
continue;

@@ -377,7 +377,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,

if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
kfree(reply);
reply = NULL;
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
/* A checkpoint occurred. Retry. */
continue;

@@ -389,10 +389,8 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
break;
}

if (retries == RETRIES) {
kfree(reply);
if (!reply)
return -EINVAL;
}

*msg_len = reply_len;
*msg = reply;
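A minimal stand-alone sketch of the pattern these hunks apply -- clearing the pointer right after kfree() so a retry can never double-free and the exhausted-retries path can simply test the pointer. This is illustrative C, not the vmwgfx code; the receive helper is a stand-in:

#include <stdlib.h>

#define RETRIES 3

/* Stand-in for one receive attempt: fails until the last try. */
static char *try_receive(int attempt)
{
	return attempt == RETRIES - 1 ? malloc(16) : NULL;
}

static int receive_retrying(char **out)
{
	char *reply = NULL;
	int i;

	for (i = 0; i < RETRIES; i++) {
		free(reply);
		reply = NULL;		/* never keep a freed pointer */
		reply = try_receive(i);
		if (reply)
			break;		/* got a usable reply */
	}

	if (!reply)			/* retries exhausted */
		return -1;

	*out = reply;
	return 0;
}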
@@ -1143,6 +1143,17 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
iommu_completion_wait(iommu);
}

static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
{
struct iommu_cmd cmd;

build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
dom_id, 1);
iommu_queue_command(iommu, &cmd);

iommu_completion_wait(iommu);
}

static void amd_iommu_flush_all(struct amd_iommu *iommu)
{
struct iommu_cmd cmd;
|
@ -1424,18 +1435,21 @@ static void free_pagetable(struct protection_domain *domain)
|
|||
* another level increases the size of the address space by 9 bits to a size up
|
||||
* to 64 bits.
|
||||
*/
|
||||
static bool increase_address_space(struct protection_domain *domain,
|
||||
static void increase_address_space(struct protection_domain *domain,
|
||||
gfp_t gfp)
|
||||
{
|
||||
unsigned long flags;
|
||||
u64 *pte;
|
||||
|
||||
if (domain->mode == PAGE_MODE_6_LEVEL)
|
||||
spin_lock_irqsave(&domain->lock, flags);
|
||||
|
||||
if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
|
||||
/* address space already 64 bit large */
|
||||
return false;
|
||||
goto out;
|
||||
|
||||
pte = (void *)get_zeroed_page(gfp);
|
||||
if (!pte)
|
||||
return false;
|
||||
goto out;
|
||||
|
||||
*pte = PM_LEVEL_PDE(domain->mode,
|
||||
iommu_virt_to_phys(domain->pt_root));
|
||||
|
@ -1443,7 +1457,10 @@ static bool increase_address_space(struct protection_domain *domain,
|
|||
domain->mode += 1;
|
||||
domain->updated = true;
|
||||
|
||||
return true;
|
||||
out:
|
||||
spin_unlock_irqrestore(&domain->lock, flags);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static u64 *alloc_pte(struct protection_domain *domain,
|
||||
|
@@ -1873,6 +1890,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
{
u64 pte_root = 0;
u64 flags = 0;
u32 old_domid;

if (domain->mode != PAGE_MODE_NONE)
pte_root = iommu_virt_to_phys(domain->pt_root);

@@ -1922,8 +1940,20 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
flags &= ~DEV_DOMID_MASK;
flags |= domain->id;

old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
amd_iommu_dev_table[devid].data[1] = flags;
amd_iommu_dev_table[devid].data[0] = pte_root;

/*
 * A kdump kernel might be replacing a domain ID that was copied from
 * the previous kernel--if so, it needs to flush the translation cache
 * entries for the old domain ID that is being overwritten
 */
if (old_domid) {
struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

amd_iommu_flush_tlb_domid(iommu, old_domid);
}
}

static void clear_dte_entry(u16 devid)
|
|
|
@ -339,6 +339,8 @@ static void domain_exit(struct dmar_domain *domain);
|
|||
static void domain_remove_dev_info(struct dmar_domain *domain);
|
||||
static void dmar_remove_one_dev_info(struct device *dev);
|
||||
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
|
||||
static void domain_context_clear(struct intel_iommu *iommu,
|
||||
struct device *dev);
|
||||
static int domain_detach_iommu(struct dmar_domain *domain,
|
||||
struct intel_iommu *iommu);
|
||||
static bool device_is_rmrr_locked(struct device *dev);
|
||||
|
@ -2105,9 +2107,26 @@ out_unlock:
|
|||
return ret;
|
||||
}
|
||||
|
||||
struct domain_context_mapping_data {
|
||||
struct dmar_domain *domain;
|
||||
struct intel_iommu *iommu;
|
||||
struct pasid_table *table;
|
||||
};
|
||||
|
||||
static int domain_context_mapping_cb(struct pci_dev *pdev,
|
||||
u16 alias, void *opaque)
|
||||
{
|
||||
struct domain_context_mapping_data *data = opaque;
|
||||
|
||||
return domain_context_mapping_one(data->domain, data->iommu,
|
||||
data->table, PCI_BUS_NUM(alias),
|
||||
alias & 0xff);
|
||||
}
|
||||
|
||||
static int
|
||||
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
|
||||
{
|
||||
struct domain_context_mapping_data data;
|
||||
struct pasid_table *table;
|
||||
struct intel_iommu *iommu;
|
||||
u8 bus, devfn;
|
||||
|
@ -2117,7 +2136,17 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
|
|||
return -ENODEV;
|
||||
|
||||
table = intel_pasid_get_table(dev);
|
||||
return domain_context_mapping_one(domain, iommu, table, bus, devfn);
|
||||
|
||||
if (!dev_is_pci(dev))
|
||||
return domain_context_mapping_one(domain, iommu, table,
|
||||
bus, devfn);
|
||||
|
||||
data.domain = domain;
|
||||
data.iommu = iommu;
|
||||
data.table = table;
|
||||
|
||||
return pci_for_each_dma_alias(to_pci_dev(dev),
|
||||
&domain_context_mapping_cb, &data);
|
||||
}
|
||||
|
||||
static int domain_context_mapped_cb(struct pci_dev *pdev,
|
||||
|
@ -4759,6 +4788,28 @@ out_free_dmar:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
|
||||
{
|
||||
struct intel_iommu *iommu = opaque;
|
||||
|
||||
domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* NB - intel-iommu lacks any sort of reference counting for the users of
|
||||
* dependent devices. If multiple endpoints have intersecting dependent
|
||||
* devices, unbinding the driver from any one of them will possibly leave
|
||||
* the others unable to operate.
|
||||
*/
|
||||
static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
|
||||
{
|
||||
if (!iommu || !dev || !dev_is_pci(dev))
|
||||
return;
|
||||
|
||||
pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
|
||||
}
|
||||
|
||||
static void __dmar_remove_one_dev_info(struct device_domain_info *info)
|
||||
{
|
||||
struct dmar_domain *domain;
|
||||
|
@ -4779,7 +4830,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
|
|||
PASID_RID2PASID);
|
||||
|
||||
iommu_disable_dev_iotlb(info);
|
||||
domain_context_clear_one(iommu, info->bus, info->devfn);
|
||||
domain_context_clear(iommu, info->dev);
|
||||
intel_pasid_free_table(info->dev);
|
||||
}
|
||||
|
||||
|
|
|
@ -100,20 +100,15 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
|
|||
}
|
||||
|
||||
static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
|
||||
unsigned long address, unsigned long pages, int ih, int gl)
|
||||
unsigned long address, unsigned long pages, int ih)
|
||||
{
|
||||
struct qi_desc desc;
|
||||
|
||||
if (pages == -1) {
|
||||
/* For global kernel pages we have to flush them in *all* PASIDs
|
||||
* because that's the only option the hardware gives us. Despite
|
||||
* the fact that they are actually only accessible through one. */
|
||||
if (gl)
|
||||
desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
|
||||
QI_EIOTLB_DID(sdev->did) |
|
||||
QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) |
|
||||
QI_EIOTLB_TYPE;
|
||||
else
|
||||
/*
|
||||
* Do PASID granu IOTLB invalidation if page selective capability is
|
||||
* not available.
|
||||
*/
|
||||
if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) {
|
||||
desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
|
||||
QI_EIOTLB_DID(sdev->did) |
|
||||
QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
|
||||
|
@ -127,7 +122,6 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
|
|||
QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
|
||||
QI_EIOTLB_TYPE;
|
||||
desc.qw1 = QI_EIOTLB_ADDR(address) |
|
||||
QI_EIOTLB_GL(gl) |
|
||||
QI_EIOTLB_IH(ih) |
|
||||
QI_EIOTLB_AM(mask);
|
||||
}
|
||||
|
@ -162,13 +156,13 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
|
|||
}
|
||||
|
||||
static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
|
||||
unsigned long pages, int ih, int gl)
|
||||
unsigned long pages, int ih)
|
||||
{
|
||||
struct intel_svm_dev *sdev;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(sdev, &svm->devs, list)
|
||||
intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl);
|
||||
intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
|
@ -180,7 +174,7 @@ static void intel_invalidate_range(struct mmu_notifier *mn,
|
|||
struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
|
||||
|
||||
intel_flush_svm_range(svm, start,
|
||||
(end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0);
|
||||
(end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
|
||||
}
|
||||
|
||||
static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
|
||||
|
@ -203,7 +197,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
|
|||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(sdev, &svm->devs, list) {
|
||||
intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
|
||||
intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
|
||||
intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
|
@ -425,7 +419,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
|
|||
* large and has to be physically contiguous. So it's
|
||||
* hard to be as defensive as we might like. */
|
||||
intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
|
||||
intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
|
||||
intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
|
||||
kfree_rcu(sdev, rcu);
|
||||
|
||||
if (list_empty(&svm->devs)) {
|
||||
|
|
|
@@ -688,6 +688,9 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
if (!cdev->ap.applid)
return -ENODEV;

if (count < CAPIMSG_BASELEN)
return -EINVAL;

skb = alloc_skb(count, GFP_USER);
if (!skb)
return -ENOMEM;

@@ -698,7 +701,8 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
}
mlen = CAPIMSG_LEN(skb->data);
if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
if ((size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
if (count < CAPI_DATA_B3_REQ_LEN ||
(size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
kfree_skb(skb);
return -EINVAL;
}

@@ -711,6 +715,10 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
CAPIMSG_SETAPPID(skb->data, cdev->ap.applid);

if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) {
if (count < CAPI_DISCONNECT_B3_RESP_LEN) {
kfree_skb(skb);
return -EINVAL;
}
mutex_lock(&cdev->lock);
capincci_free(cdev, CAPIMSG_NCCI(skb->data));
mutex_unlock(&cdev->lock);
|
|
|
@@ -564,7 +564,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
if (index == EXT_CSD_SANITIZE_START)
cmd.sanitize_busy = true;

err = mmc_wait_for_cmd(host, &cmd, 0);
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
if (err)
goto out;
|
||||
|
|
|
@@ -597,7 +597,7 @@ static void bcm2835_finish_request(struct bcm2835_host *host)
struct dma_chan *terminate_chan = NULL;
struct mmc_request *mrq;

cancel_delayed_work_sync(&host->timeout_work);
cancel_delayed_work(&host->timeout_work);

mrq = host->mrq;
|
||||
|
|
|
@ -774,8 +774,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
|
|||
/* All SDHI have SDIO status bits which must be 1 */
|
||||
mmc_data->flags |= TMIO_MMC_SDIO_STATUS_SETBITS;
|
||||
|
||||
pm_runtime_enable(&pdev->dev);
|
||||
|
||||
ret = renesas_sdhi_clk_enable(host);
|
||||
if (ret)
|
||||
goto efree;
|
||||
|
@ -856,8 +854,6 @@ edisclk:
|
|||
efree:
|
||||
tmio_mmc_host_free(host);
|
||||
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(renesas_sdhi_probe);
|
||||
|
@ -869,8 +865,6 @@ int renesas_sdhi_remove(struct platform_device *pdev)
|
|||
tmio_mmc_host_remove(host);
|
||||
renesas_sdhi_clk_disable(host);
|
||||
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(renesas_sdhi_remove);
|
||||
|
|
|
@ -432,7 +432,6 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
|
|||
mmc_hostname(host->mmc));
|
||||
host->flags &= ~SDHCI_SIGNALING_330;
|
||||
host->flags |= SDHCI_SIGNALING_180;
|
||||
host->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
|
||||
host->mmc->caps2 |= MMC_CAP2_NO_SD;
|
||||
host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
|
||||
pci_write_config_dword(chip->pdev,
|
||||
|
@ -682,6 +681,7 @@ static const struct sdhci_ops sdhci_pci_o2_ops = {
|
|||
const struct sdhci_pci_fixes sdhci_o2 = {
|
||||
.probe = sdhci_pci_o2_probe,
|
||||
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
|
||||
.quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD,
|
||||
.probe_slot = sdhci_pci_o2_probe_slot,
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
.resume = sdhci_pci_o2_resume,
|
||||
|
|
|
@ -172,8 +172,6 @@ static int tmio_mmc_probe(struct platform_device *pdev)
|
|||
host->mmc->f_max = pdata->hclk;
|
||||
host->mmc->f_min = pdata->hclk / 512;
|
||||
|
||||
pm_runtime_enable(&pdev->dev);
|
||||
|
||||
ret = tmio_mmc_host_probe(host);
|
||||
if (ret)
|
||||
goto host_free;
|
||||
|
@ -193,7 +191,6 @@ host_remove:
|
|||
tmio_mmc_host_remove(host);
|
||||
host_free:
|
||||
tmio_mmc_host_free(host);
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
cell_disable:
|
||||
if (cell->disable)
|
||||
cell->disable(pdev);
|
||||
|
@ -210,8 +207,6 @@ static int tmio_mmc_remove(struct platform_device *pdev)
|
|||
if (cell->disable)
|
||||
cell->disable(pdev);
|
||||
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -163,6 +163,7 @@ struct tmio_mmc_host {
|
|||
unsigned long last_req_ts;
|
||||
struct mutex ios_lock; /* protect set_ios() context */
|
||||
bool native_hotplug;
|
||||
bool runtime_synced;
|
||||
bool sdio_irq_enabled;
|
||||
|
||||
/* Mandatory callback */
|
||||
|
|
|
@ -1153,15 +1153,6 @@ void tmio_mmc_host_free(struct tmio_mmc_host *host)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(tmio_mmc_host_free);
|
||||
|
||||
/**
|
||||
* tmio_mmc_host_probe() - Common probe for all implementations
|
||||
* @_host: Host to probe
|
||||
*
|
||||
* Perform tasks common to all implementations probe functions.
|
||||
*
|
||||
* The caller should have called pm_runtime_enable() prior to calling
|
||||
* the common probe function.
|
||||
*/
|
||||
int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
|
||||
{
|
||||
struct platform_device *pdev = _host->pdev;
|
||||
|
@ -1257,19 +1248,22 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
|
|||
/* See if we also get DMA */
|
||||
tmio_mmc_request_dma(_host, pdata);
|
||||
|
||||
pm_runtime_set_active(&pdev->dev);
|
||||
pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
|
||||
pm_runtime_use_autosuspend(&pdev->dev);
|
||||
pm_runtime_enable(&pdev->dev);
|
||||
pm_runtime_get_sync(&pdev->dev);
|
||||
|
||||
ret = mmc_add_host(mmc);
|
||||
if (ret)
|
||||
goto remove_host;
|
||||
|
||||
dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
|
||||
pm_runtime_put(&pdev->dev);
|
||||
|
||||
return 0;
|
||||
|
||||
remove_host:
|
||||
pm_runtime_put_noidle(&pdev->dev);
|
||||
tmio_mmc_host_remove(_host);
|
||||
return ret;
|
||||
}
|
||||
|
@ -1280,12 +1274,11 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
|
|||
struct platform_device *pdev = host->pdev;
|
||||
struct mmc_host *mmc = host->mmc;
|
||||
|
||||
pm_runtime_get_sync(&pdev->dev);
|
||||
|
||||
if (host->pdata->flags & TMIO_MMC_SDIO_IRQ)
|
||||
sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
|
||||
|
||||
if (!host->native_hotplug)
|
||||
pm_runtime_get_sync(&pdev->dev);
|
||||
|
||||
dev_pm_qos_hide_latency_limit(&pdev->dev);
|
||||
|
||||
mmc_remove_host(mmc);
|
||||
|
@ -1294,7 +1287,10 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
|
|||
tmio_mmc_release_dma(host);
|
||||
|
||||
pm_runtime_dont_use_autosuspend(&pdev->dev);
|
||||
if (host->native_hotplug)
|
||||
pm_runtime_put_noidle(&pdev->dev);
|
||||
pm_runtime_put_sync(&pdev->dev);
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);
|
||||
|
||||
|
@ -1337,6 +1333,11 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
|
|||
{
|
||||
struct tmio_mmc_host *host = dev_get_drvdata(dev);
|
||||
|
||||
if (!host->runtime_synced) {
|
||||
host->runtime_synced = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
tmio_mmc_clk_enable(host);
|
||||
tmio_mmc_hw_reset(host->mmc);
|
||||
|
||||
|
|
|
@ -631,7 +631,6 @@ static int uniphier_sd_probe(struct platform_device *pdev)
|
|||
host->clk_disable = uniphier_sd_clk_disable;
|
||||
host->set_clock = uniphier_sd_set_clock;
|
||||
|
||||
pm_runtime_enable(&pdev->dev);
|
||||
ret = uniphier_sd_clk_enable(host);
|
||||
if (ret)
|
||||
goto free_host;
|
||||
|
@ -653,7 +652,6 @@ static int uniphier_sd_probe(struct platform_device *pdev)
|
|||
|
||||
free_host:
|
||||
tmio_mmc_host_free(host);
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -664,7 +662,6 @@ static int uniphier_sd_remove(struct platform_device *pdev)
|
|||
|
||||
tmio_mmc_host_remove(host);
|
||||
uniphier_sd_clk_disable(host);
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@@ -98,7 +98,7 @@ static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow",
{ .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(3), .msg = "tx_buf_overflow",
.reset_level = HNAE3_GLOBAL_RESET },
|
|
|
@@ -1984,8 +1984,11 @@ static void __ibmvnic_reset(struct work_struct *work)
rwi = get_next_rwi(adapter);
while (rwi) {
if (adapter->state == VNIC_REMOVING ||
adapter->state == VNIC_REMOVED)
goto out;
adapter->state == VNIC_REMOVED) {
kfree(rwi);
rc = EBUSY;
break;
}

if (adapter->force_reset_recovery) {
adapter->force_reset_recovery = false;

@@ -2011,7 +2014,7 @@ static void __ibmvnic_reset(struct work_struct *work)
netdev_dbg(adapter->netdev, "Reset failed\n");
free_all_rwi(adapter);
}
out:

adapter->resetting = false;
if (we_lock_rtnl)
rtnl_unlock();
|
|
|
@ -36,6 +36,7 @@
|
|||
#include <net/vxlan.h>
|
||||
#include <net/mpls.h>
|
||||
#include <net/xdp_sock.h>
|
||||
#include <net/xfrm.h>
|
||||
|
||||
#include "ixgbe.h"
|
||||
#include "ixgbe_common.h"
|
||||
|
@ -2623,7 +2624,7 @@ adjust_by_size:
|
|||
/* 16K ints/sec to 9.2K ints/sec */
|
||||
avg_wire_size *= 15;
|
||||
avg_wire_size += 11452;
|
||||
} else if (avg_wire_size <= 1980) {
|
||||
} else if (avg_wire_size < 1968) {
|
||||
/* 9.2K ints/sec to 8K ints/sec */
|
||||
avg_wire_size *= 5;
|
||||
avg_wire_size += 22420;
|
||||
|
@ -2656,6 +2657,8 @@ adjust_by_size:
|
|||
case IXGBE_LINK_SPEED_2_5GB_FULL:
|
||||
case IXGBE_LINK_SPEED_1GB_FULL:
|
||||
case IXGBE_LINK_SPEED_10_FULL:
|
||||
if (avg_wire_size > 8064)
|
||||
avg_wire_size = 8064;
|
||||
itr += DIV_ROUND_UP(avg_wire_size,
|
||||
IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
|
||||
IXGBE_ITR_ADAPTIVE_MIN_INC;
|
||||
|
@ -8698,7 +8701,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
|
|||
#endif /* IXGBE_FCOE */
|
||||
|
||||
#ifdef CONFIG_IXGBE_IPSEC
|
||||
if (secpath_exists(skb) &&
|
||||
if (xfrm_offload(skb) &&
|
||||
!ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
|
||||
goto out_drop;
|
||||
#endif
|
||||
|
|
|
@ -642,19 +642,17 @@ static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
|
|||
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
|
||||
struct ixgbe_ring *tx_ring, int napi_budget)
|
||||
{
|
||||
u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
|
||||
unsigned int total_packets = 0, total_bytes = 0;
|
||||
u32 i = tx_ring->next_to_clean, xsk_frames = 0;
|
||||
unsigned int budget = q_vector->tx.work_limit;
|
||||
struct xdp_umem *umem = tx_ring->xsk_umem;
|
||||
union ixgbe_adv_tx_desc *tx_desc;
|
||||
struct ixgbe_tx_buffer *tx_bi;
|
||||
bool xmit_done;
|
||||
u32 xsk_frames = 0;
|
||||
|
||||
tx_bi = &tx_ring->tx_buffer_info[i];
|
||||
tx_desc = IXGBE_TX_DESC(tx_ring, i);
|
||||
i -= tx_ring->count;
|
||||
tx_bi = &tx_ring->tx_buffer_info[ntc];
|
||||
tx_desc = IXGBE_TX_DESC(tx_ring, ntc);
|
||||
|
||||
do {
|
||||
while (ntc != ntu) {
|
||||
if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
|
||||
break;
|
||||
|
||||
|
@ -670,22 +668,18 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
|
|||
|
||||
tx_bi++;
|
||||
tx_desc++;
|
||||
i++;
|
||||
if (unlikely(!i)) {
|
||||
i -= tx_ring->count;
|
||||
ntc++;
|
||||
if (unlikely(ntc == tx_ring->count)) {
|
||||
ntc = 0;
|
||||
tx_bi = tx_ring->tx_buffer_info;
|
||||
tx_desc = IXGBE_TX_DESC(tx_ring, 0);
|
||||
}
|
||||
|
||||
/* issue prefetch for next Tx descriptor */
|
||||
prefetch(tx_desc);
|
||||
}
|
||||
|
||||
/* update budget accounting */
|
||||
budget--;
|
||||
} while (likely(budget));
|
||||
|
||||
i += tx_ring->count;
|
||||
tx_ring->next_to_clean = i;
|
||||
tx_ring->next_to_clean = ntc;
|
||||
|
||||
u64_stats_update_begin(&tx_ring->syncp);
|
||||
tx_ring->stats.bytes += total_bytes;
|
||||
|
@ -704,9 +698,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
|
|||
xsk_clear_tx_need_wakeup(tx_ring->xsk_umem);
|
||||
}
|
||||
|
||||
xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
|
||||
|
||||
return budget > 0 && xmit_done;
|
||||
return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
|
||||
}
|
||||
|
||||
int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
|
||||
|
|
|
@@ -30,6 +30,7 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>
#include <net/xfrm.h>

#include "ixgbevf.h"

@@ -4167,7 +4168,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
first->protocol = vlan_get_protocol(skb);

#ifdef CONFIG_IXGBEVF_IPSEC
if (secpath_exists(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
goto out_drop;
#endif
tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
|
|
|
@@ -2240,7 +2240,7 @@ static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
for (i = 1; i <= dev->caps.num_ports; i++) {
if (mlx4_dev_port(dev, i, &port_cap)) {
mlx4_err(dev,
"QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n");
"QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n");
} else if ((dev->caps.dmfs_high_steer_mode !=
MLX4_STEERING_DMFS_A0_DEFAULT) &&
(port_cap.dmfs_optimized_state ==
|
|
|
@@ -232,9 +232,9 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)

laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
if (!laddr) {
printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name);
dev_kfree_skb(skb);
return NETDEV_TX_BUSY;
pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}

sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
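The change above follows the usual ndo_start_xmit handling of an unrecoverable per-packet error: drop the skb and return NETDEV_TX_OK rather than NETDEV_TX_BUSY, since BUSY would make the core requeue the same un-mappable packet. A simplified, self-contained sketch of that rule (types and helpers are stand-ins, not the sonic driver's):

#include <stdbool.h>

enum tx_rc { TX_OK, TX_BUSY };

struct pkt { int len; };
struct dev_stats { unsigned long tx_dropped; unsigned long tx_packets; };

static bool map_for_dma(struct pkt *p) { return p->len > 0; } /* stand-in */
static void free_pkt(struct pkt *p) { (void)p; }              /* stand-in */

static enum tx_rc start_xmit(struct pkt *p, struct dev_stats *st)
{
	if (!map_for_dma(p)) {
		st->tx_dropped++;	/* count the drop; real code logs rate-limited */
		free_pkt(p);		/* drop: do not ask the stack to retry */
		return TX_OK;
	}
	st->tx_packets++;
	return TX_OK;
}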
@ -260,9 +260,6 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
|
|||
|
||||
type = cmsg_hdr->type;
|
||||
switch (type) {
|
||||
case NFP_FLOWER_CMSG_TYPE_PORT_REIFY:
|
||||
nfp_flower_cmsg_portreify_rx(app, skb);
|
||||
break;
|
||||
case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
|
||||
nfp_flower_cmsg_portmod_rx(app, skb);
|
||||
break;
|
||||
|
@ -328,8 +325,7 @@ nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
|
|||
struct nfp_flower_priv *priv = app->priv;
|
||||
struct sk_buff_head *skb_head;
|
||||
|
||||
if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY ||
|
||||
type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
|
||||
if (type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
|
||||
skb_head = &priv->cmsg_skbs_high;
|
||||
else
|
||||
skb_head = &priv->cmsg_skbs_low;
|
||||
|
@ -368,6 +364,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
|
|||
} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
|
||||
/* Acks from the NFP that the route is added - ignore. */
|
||||
dev_consume_skb_any(skb);
|
||||
} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY) {
|
||||
/* Handle REIFY acks outside wq to prevent RTNL conflict. */
|
||||
nfp_flower_cmsg_portreify_rx(app, skb);
|
||||
dev_consume_skb_any(skb);
|
||||
} else {
|
||||
nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
|
||||
}
|
||||
|
|
|
@ -713,6 +713,21 @@ struct nv_skb_map {
|
|||
struct nv_skb_map *next_tx_ctx;
|
||||
};
|
||||
|
||||
struct nv_txrx_stats {
|
||||
u64 stat_rx_packets;
|
||||
u64 stat_rx_bytes; /* not always available in HW */
|
||||
u64 stat_rx_missed_errors;
|
||||
u64 stat_rx_dropped;
|
||||
u64 stat_tx_packets; /* not always available in HW */
|
||||
u64 stat_tx_bytes;
|
||||
u64 stat_tx_dropped;
|
||||
};
|
||||
|
||||
#define nv_txrx_stats_inc(member) \
|
||||
__this_cpu_inc(np->txrx_stats->member)
|
||||
#define nv_txrx_stats_add(member, count) \
|
||||
__this_cpu_add(np->txrx_stats->member, (count))
|
||||
|
||||
/*
|
||||
* SMP locking:
|
||||
* All hardware access under netdev_priv(dev)->lock, except the performance
|
||||
|
@ -797,10 +812,7 @@ struct fe_priv {
|
|||
|
||||
/* RX software stats */
|
||||
struct u64_stats_sync swstats_rx_syncp;
|
||||
u64 stat_rx_packets;
|
||||
u64 stat_rx_bytes; /* not always available in HW */
|
||||
u64 stat_rx_missed_errors;
|
||||
u64 stat_rx_dropped;
|
||||
struct nv_txrx_stats __percpu *txrx_stats;
|
||||
|
||||
/* media detection workaround.
|
||||
* Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
|
||||
|
@ -826,9 +838,6 @@ struct fe_priv {
|
|||
|
||||
/* TX software stats */
|
||||
struct u64_stats_sync swstats_tx_syncp;
|
||||
u64 stat_tx_packets; /* not always available in HW */
|
||||
u64 stat_tx_bytes;
|
||||
u64 stat_tx_dropped;
|
||||
|
||||
/* msi/msi-x fields */
|
||||
u32 msi_flags;
|
||||
|
@ -1721,6 +1730,39 @@ static void nv_update_stats(struct net_device *dev)
|
|||
}
|
||||
}
|
||||
|
||||
static void nv_get_stats(int cpu, struct fe_priv *np,
|
||||
struct rtnl_link_stats64 *storage)
|
||||
{
|
||||
struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu);
|
||||
unsigned int syncp_start;
|
||||
u64 rx_packets, rx_bytes, rx_dropped, rx_missed_errors;
|
||||
u64 tx_packets, tx_bytes, tx_dropped;
|
||||
|
||||
do {
|
||||
syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
|
||||
rx_packets = src->stat_rx_packets;
|
||||
rx_bytes = src->stat_rx_bytes;
|
||||
rx_dropped = src->stat_rx_dropped;
|
||||
rx_missed_errors = src->stat_rx_missed_errors;
|
||||
} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
|
||||
|
||||
storage->rx_packets += rx_packets;
|
||||
storage->rx_bytes += rx_bytes;
|
||||
storage->rx_dropped += rx_dropped;
|
||||
storage->rx_missed_errors += rx_missed_errors;
|
||||
|
||||
do {
|
||||
syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
|
||||
tx_packets = src->stat_tx_packets;
|
||||
tx_bytes = src->stat_tx_bytes;
|
||||
tx_dropped = src->stat_tx_dropped;
|
||||
} while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
|
||||
|
||||
storage->tx_packets += tx_packets;
|
||||
storage->tx_bytes += tx_bytes;
|
||||
storage->tx_dropped += tx_dropped;
|
||||
}
|
||||
|
||||
/*
|
||||
* nv_get_stats64: dev->ndo_get_stats64 function
|
||||
* Get latest stats value from the nic.
|
||||
|
@ -1733,7 +1775,7 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
|
|||
__releases(&netdev_priv(dev)->hwstats_lock)
|
||||
{
|
||||
struct fe_priv *np = netdev_priv(dev);
|
||||
unsigned int syncp_start;
|
||||
int cpu;
|
||||
|
||||
/*
|
||||
* Note: because HW stats are not always available and for
|
||||
|
@ -1746,20 +1788,8 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
|
|||
*/
|
||||
|
||||
/* software stats */
|
||||
do {
|
||||
syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
|
||||
storage->rx_packets = np->stat_rx_packets;
|
||||
storage->rx_bytes = np->stat_rx_bytes;
|
||||
storage->rx_dropped = np->stat_rx_dropped;
|
||||
storage->rx_missed_errors = np->stat_rx_missed_errors;
|
||||
} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
|
||||
|
||||
do {
|
||||
syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
|
||||
storage->tx_packets = np->stat_tx_packets;
|
||||
storage->tx_bytes = np->stat_tx_bytes;
|
||||
storage->tx_dropped = np->stat_tx_dropped;
|
||||
} while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
|
||||
for_each_online_cpu(cpu)
|
||||
nv_get_stats(cpu, np, storage);
|
||||
|
||||
/* If the nic supports hw counters then retrieve latest values */
|
||||
if (np->driver_data & DEV_HAS_STATISTICS_V123) {
|
||||
|
@ -1827,7 +1857,7 @@ static int nv_alloc_rx(struct net_device *dev)
|
|||
} else {
|
||||
packet_dropped:
|
||||
u64_stats_update_begin(&np->swstats_rx_syncp);
|
||||
np->stat_rx_dropped++;
|
||||
nv_txrx_stats_inc(stat_rx_dropped);
|
||||
u64_stats_update_end(&np->swstats_rx_syncp);
|
||||
return 1;
|
||||
}
|
||||
|
@ -1869,7 +1899,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
|
|||
} else {
|
||||
packet_dropped:
|
||||
u64_stats_update_begin(&np->swstats_rx_syncp);
|
||||
np->stat_rx_dropped++;
|
||||
nv_txrx_stats_inc(stat_rx_dropped);
|
||||
u64_stats_update_end(&np->swstats_rx_syncp);
|
||||
return 1;
|
||||
}
|
||||
|
@ -2013,7 +2043,7 @@ static void nv_drain_tx(struct net_device *dev)
|
|||
}
|
||||
if (nv_release_txskb(np, &np->tx_skb[i])) {
|
||||
u64_stats_update_begin(&np->swstats_tx_syncp);
|
||||
np->stat_tx_dropped++;
|
||||
nv_txrx_stats_inc(stat_tx_dropped);
|
||||
u64_stats_update_end(&np->swstats_tx_syncp);
|
||||
}
|
||||
np->tx_skb[i].dma = 0;
|
||||
|
@ -2227,7 +2257,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
/* on DMA mapping error - drop the packet */
|
||||
dev_kfree_skb_any(skb);
|
||||
u64_stats_update_begin(&np->swstats_tx_syncp);
|
||||
np->stat_tx_dropped++;
|
||||
nv_txrx_stats_inc(stat_tx_dropped);
|
||||
u64_stats_update_end(&np->swstats_tx_syncp);
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
@ -2273,7 +2303,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
dev_kfree_skb_any(skb);
|
||||
np->put_tx_ctx = start_tx_ctx;
|
||||
u64_stats_update_begin(&np->swstats_tx_syncp);
|
||||
np->stat_tx_dropped++;
|
||||
nv_txrx_stats_inc(stat_tx_dropped);
|
||||
u64_stats_update_end(&np->swstats_tx_syncp);
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
@ -2384,7 +2414,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
|
|||
/* on DMA mapping error - drop the packet */
|
||||
dev_kfree_skb_any(skb);
|
||||
u64_stats_update_begin(&np->swstats_tx_syncp);
|
||||
np->stat_tx_dropped++;
|
||||
nv_txrx_stats_inc(stat_tx_dropped);
|
||||
u64_stats_update_end(&np->swstats_tx_syncp);
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
@ -2431,7 +2461,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
|
|||
dev_kfree_skb_any(skb);
|
||||
np->put_tx_ctx = start_tx_ctx;
|
||||
u64_stats_update_begin(&np->swstats_tx_syncp);
|
||||
np->stat_tx_dropped++;
|
||||
nv_txrx_stats_inc(stat_tx_dropped);
|
||||
u64_stats_update_end(&np->swstats_tx_syncp);
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
@ -2560,9 +2590,12 @@ static int nv_tx_done(struct net_device *dev, int limit)
|
|||
&& !(flags & NV_TX_RETRYCOUNT_MASK))
|
||||
nv_legacybackoff_reseed(dev);
|
||||
} else {
|
||||
unsigned int len;
|
||||
|
||||
u64_stats_update_begin(&np->swstats_tx_syncp);
|
||||
np->stat_tx_packets++;
|
||||
np->stat_tx_bytes += np->get_tx_ctx->skb->len;
|
||||
nv_txrx_stats_inc(stat_tx_packets);
|
||||
len = np->get_tx_ctx->skb->len;
|
||||
nv_txrx_stats_add(stat_tx_bytes, len);
|
||||
u64_stats_update_end(&np->swstats_tx_syncp);
|
||||
}
|
||||
bytes_compl += np->get_tx_ctx->skb->len;
|
||||
|
@ -2577,9 +2610,12 @@ static int nv_tx_done(struct net_device *dev, int limit)
|
|||
&& !(flags & NV_TX2_RETRYCOUNT_MASK))
|
||||
nv_legacybackoff_reseed(dev);
|
||||
} else {
|
||||
unsigned int len;
|
||||
|
||||
u64_stats_update_begin(&np->swstats_tx_syncp);
|
||||
np->stat_tx_packets++;
|
||||
np->stat_tx_bytes += np->get_tx_ctx->skb->len;
|
||||
nv_txrx_stats_inc(stat_tx_packets);
|
||||
len = np->get_tx_ctx->skb->len;
|
||||
nv_txrx_stats_add(stat_tx_bytes, len);
|
||||
u64_stats_update_end(&np->swstats_tx_syncp);
|
||||
}
|
||||
bytes_compl += np->get_tx_ctx->skb->len;
|
||||
|
@ -2627,9 +2663,12 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
|
|||
nv_legacybackoff_reseed(dev);
|
||||
}
|
||||
} else {
|
||||
unsigned int len;
|
||||
|
||||
u64_stats_update_begin(&np->swstats_tx_syncp);
|
||||
np->stat_tx_packets++;
|
||||
np->stat_tx_bytes += np->get_tx_ctx->skb->len;
|
||||
nv_txrx_stats_inc(stat_tx_packets);
|
||||
len = np->get_tx_ctx->skb->len;
|
||||
nv_txrx_stats_add(stat_tx_bytes, len);
|
||||
u64_stats_update_end(&np->swstats_tx_syncp);
|
||||
}
|
||||
|
||||
|
@ -2806,6 +2845,15 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
|
|||
}
|
||||
}
|
||||
|
||||
static void rx_missing_handler(u32 flags, struct fe_priv *np)
|
||||
{
|
||||
if (flags & NV_RX_MISSEDFRAME) {
|
||||
u64_stats_update_begin(&np->swstats_rx_syncp);
|
||||
nv_txrx_stats_inc(stat_rx_missed_errors);
|
||||
u64_stats_update_end(&np->swstats_rx_syncp);
|
||||
}
|
||||
}
|
||||
|
||||
static int nv_rx_process(struct net_device *dev, int limit)
|
||||
{
|
||||
struct fe_priv *np = netdev_priv(dev);
|
||||
|
@ -2848,11 +2896,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
|
|||
}
|
||||
/* the rest are hard errors */
|
||||
else {
|
||||
if (flags & NV_RX_MISSEDFRAME) {
|
||||
u64_stats_update_begin(&np->swstats_rx_syncp);
|
||||
np->stat_rx_missed_errors++;
|
||||
u64_stats_update_end(&np->swstats_rx_syncp);
|
||||
}
|
||||
rx_missing_handler(flags, np);
|
||||
dev_kfree_skb(skb);
|
||||
goto next_pkt;
|
||||
}
|
||||
|
@ -2896,8 +2940,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
|
|||
skb->protocol = eth_type_trans(skb, dev);
|
||||
napi_gro_receive(&np->napi, skb);
|
||||
u64_stats_update_begin(&np->swstats_rx_syncp);
|
||||
np->stat_rx_packets++;
|
||||
np->stat_rx_bytes += len;
|
||||
nv_txrx_stats_inc(stat_rx_packets);
|
||||
nv_txrx_stats_add(stat_rx_bytes, len);
|
||||
u64_stats_update_end(&np->swstats_rx_syncp);
|
||||
next_pkt:
|
||||
if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
|
||||
|
@ -2982,8 +3026,8 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
|
|||
}
|
||||
napi_gro_receive(&np->napi, skb);
|
||||
u64_stats_update_begin(&np->swstats_rx_syncp);
|
||||
np->stat_rx_packets++;
|
||||
np->stat_rx_bytes += len;
|
||||
nv_txrx_stats_inc(stat_rx_packets);
|
||||
nv_txrx_stats_add(stat_rx_bytes, len);
|
||||
u64_stats_update_end(&np->swstats_rx_syncp);
|
||||
} else {
|
||||
dev_kfree_skb(skb);
|
||||
|
@ -5651,6 +5695,12 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
|
|||
SET_NETDEV_DEV(dev, &pci_dev->dev);
|
||||
u64_stats_init(&np->swstats_rx_syncp);
|
||||
u64_stats_init(&np->swstats_tx_syncp);
|
||||
np->txrx_stats = alloc_percpu(struct nv_txrx_stats);
|
||||
if (!np->txrx_stats) {
|
||||
pr_err("np->txrx_stats, alloc memory error.\n");
|
||||
err = -ENOMEM;
|
||||
goto out_alloc_percpu;
|
||||
}
|
||||
|
||||
timer_setup(&np->oom_kick, nv_do_rx_refill, 0);
|
||||
timer_setup(&np->nic_poll, nv_do_nic_poll, 0);
|
||||
|
@ -6060,6 +6110,8 @@ out_relreg:
|
|||
out_disable:
|
||||
pci_disable_device(pci_dev);
|
||||
out_free:
|
||||
free_percpu(np->txrx_stats);
|
||||
out_alloc_percpu:
|
||||
free_netdev(dev);
|
||||
out:
|
||||
return err;
|
||||
|
@ -6105,6 +6157,9 @@ static void nv_restore_mac_addr(struct pci_dev *pci_dev)
|
|||
static void nv_remove(struct pci_dev *pci_dev)
|
||||
{
|
||||
struct net_device *dev = pci_get_drvdata(pci_dev);
|
||||
struct fe_priv *np = netdev_priv(dev);
|
||||
|
||||
free_percpu(np->txrx_stats);
|
||||
|
||||
unregister_netdev(dev);
|
||||
|
||||
|
|
|
@@ -873,7 +873,12 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
int ret;
u32 reg, val;

regmap_field_read(gmac->regmap_field, &val);
ret = regmap_field_read(gmac->regmap_field, &val);
if (ret) {
dev_err(priv->device, "Fail to read from regmap field.\n");
return ret;
}

reg = gmac->variant->default_syscon_value;
if (reg != val)
dev_warn(priv->device,
|
|
|
@@ -344,10 +344,10 @@ static void sp_bump(struct sixpack *sp, char cmd)

sp->dev->stats.rx_bytes += count;

if ((skb = dev_alloc_skb(count)) == NULL)
if ((skb = dev_alloc_skb(count + 1)) == NULL)
goto out_mem;

ptr = skb_put(skb, count);
ptr = skb_put(skb, count + 1);
*ptr++ = cmd; /* KISS command */

memcpy(ptr, sp->cooked_buf + 1, count);
|
|
|
@@ -376,8 +376,8 @@ static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_stat
 * Local device    Link partner
 * Pause AsymDir   Pause AsymDir   Result
 *   1     X         1     X       TX+RX
 *   0     1         1     1       RX
 *   1     1         0     1       TX
 *   0     1         1     1       TX
 *   1     1         0     1       RX
 */
static void phylink_resolve_flow(struct phylink *pl,
struct phylink_link_state *state)

@@ -398,7 +398,7 @@ static void phylink_resolve_flow(struct phylink *pl,
new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
else if (pause & MLO_PAUSE_ASYM)
new_pause = state->pause & MLO_PAUSE_SYM ?
MLO_PAUSE_RX : MLO_PAUSE_TX;
MLO_PAUSE_TX : MLO_PAUSE_RX;
} else {
new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK;
}
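For reference, the corrected pause-resolution table above restated as stand-alone C. The result says which pause directions the local end enables given the advertised Pause/AsymDir bits on each side; this is an illustration, not phylink's code:

#include <stdbool.h>

struct pause_adv { bool pause; bool asym; };

#define RES_TX 0x1	/* local end sends pause frames      */
#define RES_RX 0x2	/* local end honours received pause  */

static unsigned int resolve_pause(struct pause_adv lcl, struct pause_adv prt)
{
	if (lcl.pause && prt.pause)
		return RES_TX | RES_RX;			/* 1 X 1 X -> TX+RX */
	if (!lcl.pause && lcl.asym && prt.pause && prt.asym)
		return RES_TX;				/* 0 1 1 1 -> TX    */
	if (lcl.pause && lcl.asym && !prt.pause && prt.asym)
		return RES_RX;				/* 1 1 0 1 -> RX    */
	return 0;					/* no pause resolved */
}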
@ -787,7 +787,8 @@ static void tun_detach_all(struct net_device *dev)
|
|||
}
|
||||
|
||||
static int tun_attach(struct tun_struct *tun, struct file *file,
|
||||
bool skip_filter, bool napi, bool napi_frags)
|
||||
bool skip_filter, bool napi, bool napi_frags,
|
||||
bool publish_tun)
|
||||
{
|
||||
struct tun_file *tfile = file->private_data;
|
||||
struct net_device *dev = tun->dev;
|
||||
|
@ -870,6 +871,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
|
|||
* initialized tfile; otherwise we risk using half-initialized
|
||||
* object.
|
||||
*/
|
||||
if (publish_tun)
|
||||
rcu_assign_pointer(tfile->tun, tun);
|
||||
rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
|
||||
tun->numqueues++;
|
||||
|
@ -2730,7 +2732,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
|
|||
|
||||
err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
|
||||
ifr->ifr_flags & IFF_NAPI,
|
||||
ifr->ifr_flags & IFF_NAPI_FRAGS);
|
||||
ifr->ifr_flags & IFF_NAPI_FRAGS, true);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
|
@ -2829,13 +2831,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
|
|||
|
||||
INIT_LIST_HEAD(&tun->disabled);
|
||||
err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
|
||||
ifr->ifr_flags & IFF_NAPI_FRAGS);
|
||||
ifr->ifr_flags & IFF_NAPI_FRAGS, false);
|
||||
if (err < 0)
|
||||
goto err_free_flow;
|
||||
|
||||
err = register_netdevice(tun->dev);
|
||||
if (err < 0)
|
||||
goto err_detach;
|
||||
/* free_netdev() won't check refcnt, to aovid race
|
||||
* with dev_put() we need publish tun after registration.
|
||||
*/
|
||||
rcu_assign_pointer(tfile->tun, tun);
|
||||
}
|
||||
|
||||
netif_carrier_on(tun->dev);
|
||||
|
@ -2978,7 +2984,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
|
|||
if (ret < 0)
|
||||
goto unlock;
|
||||
ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
|
||||
tun->flags & IFF_NAPI_FRAGS);
|
||||
tun->flags & IFF_NAPI_FRAGS, true);
|
||||
} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
|
||||
tun = rtnl_dereference(tfile->tun);
|
||||
if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
|
||||
|
|
|
@@ -206,7 +206,15 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
goto bad_desc;
}
skip:
if (rndis && header.usb_cdc_acm_descriptor &&
/* Communcation class functions with bmCapabilities are not
 * RNDIS. But some Wireless class RNDIS functions use
 * bmCapabilities for their own purpose. The failsafe is
 * therefore applied only to Communication class RNDIS
 * functions. The rndis test is redundant, but a cheap
 * optimization.
 */
if (rndis && is_rndis(&intf->cur_altsetting->desc) &&
header.usb_cdc_acm_descriptor &&
header.usb_cdc_acm_descriptor->bmCapabilities) {
dev_dbg(&intf->dev,
"ACM capabilities %02x, not really RNDIS?\n",
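The hunk relies on an is_rndis()-style predicate that is not shown here. A hedged sketch of what such a check typically examines, using the well-known USB interface-descriptor codes written as plain numbers so the example stands alone (an assumption for illustration, not a quote of the driver):

#include <stdbool.h>
#include <stdint.h>

struct if_desc { uint8_t if_class, subclass, protocol; };

/* Communications-class RNDIS: class 0x02, ACM subclass 0x02, vendor protocol. */
static bool looks_like_comm_rndis(const struct if_desc *d)
{
	return d->if_class == 0x02 && d->subclass == 0x02 && d->protocol == 0xff;
}

/* Wireless-controller-class RNDIS: class 0xE0, subclass 0x01, protocol 0x03. */
static bool looks_like_wireless_rndis(const struct if_desc *d)
{
	return d->if_class == 0xe0 && d->subclass == 0x01 && d->protocol == 0x03;
}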
@@ -1331,7 +1331,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
}
}

if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
if (!try_fill_recv(vi, rq, GFP_ATOMIC))
schedule_delayed_work(&vi->refill, 0);
}
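The changed condition above, restated as a stand-alone predicate: the refill threshold now scales with the smaller of the NAPI budget and the ring size rather than the ring size alone (illustrative C, not the virtio-net code):

#include <stdbool.h>

static unsigned int umin(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Schedule a refill once free slots exceed min(budget, ring_size) / 2. */
static bool needs_refill(unsigned int num_free, unsigned int budget,
			 unsigned int ring_size)
{
	return num_free > umin(budget, ring_size) / 2;
}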
@@ -1115,7 +1115,7 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);

lmc_trace(dev, "lmc_runnin_reset_out");
lmc_trace(dev, "lmc_running_reset_out");
}
|
||||
|
||||
|
|
|
@@ -127,6 +127,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
"%d\n", result);
result = 0;
error_cmd:
kfree(cmd);
kfree_skb(ack_skb);
error_msg_to_dev:
error_alloc:
|
|
|
@ -1114,18 +1114,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
|
||||
/* same thing for QuZ... */
|
||||
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
|
||||
if (cfg == &iwl_ax101_cfg_qu_hr)
|
||||
cfg = &iwl_ax101_cfg_quz_hr;
|
||||
else if (cfg == &iwl_ax201_cfg_qu_hr)
|
||||
cfg = &iwl_ax201_cfg_quz_hr;
|
||||
else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
|
||||
cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
|
||||
else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
|
||||
cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
|
||||
else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
|
||||
cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
|
||||
else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
|
||||
cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
|
||||
if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
|
||||
iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
|
||||
else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
|
||||
iwl_trans->cfg = &iwl_ax201_cfg_quz_hr;
|
||||
else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
|
||||
iwl_trans->cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
|
||||
else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
|
||||
iwl_trans->cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
|
||||
else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
|
||||
iwl_trans->cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
|
||||
else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
|
||||
iwl_trans->cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@@ -241,6 +241,9 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
}

vs_ie = (struct ieee_types_header *)vendor_ie;
if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 >
IEEE_MAX_IE_SIZE)
return -EINVAL;
memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
vs_ie, vs_ie->len + 2);
le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2);
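The added check above is a bounds test before appending a vendor IE to a fixed-size buffer. A generic, self-contained illustration of the same guard (not mwifiex's exact code; sizes and names are hypothetical):

#include <stdint.h>
#include <string.h>

#define MAX_IE_SIZE 256

struct ie_buf {
	uint16_t len;
	uint8_t data[MAX_IE_SIZE];
};

/* Append one element (2-byte header plus body of elem_len bytes);
 * return -1 instead of overflowing when it would not fit. */
static int ie_append(struct ie_buf *buf, const uint8_t *elem, uint8_t elem_len)
{
	if ((size_t)buf->len + elem_len + 2 > MAX_IE_SIZE)
		return -1;			/* would overflow the buffer */
	memcpy(buf->data + buf->len, elem, (size_t)elem_len + 2);
	buf->len += elem_len + 2;
	return 0;
}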
@ -265,6 +265,8 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
|
|||
|
||||
rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
|
||||
if (rate_ie) {
|
||||
if (rate_ie->len > MWIFIEX_SUPPORTED_RATES)
|
||||
return;
|
||||
memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
|
||||
rate_len = rate_ie->len;
|
||||
}
|
||||
|
@ -272,8 +274,11 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
|
|||
rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
|
||||
params->beacon.tail,
|
||||
params->beacon.tail_len);
|
||||
if (rate_ie)
|
||||
if (rate_ie) {
|
||||
if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len)
|
||||
return;
|
||||
memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
@ -391,6 +396,8 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
|
|||
params->beacon.tail_len);
|
||||
if (vendor_ie) {
|
||||
wmm_ie = vendor_ie;
|
||||
if (*(wmm_ie + 1) > sizeof(struct mwifiex_types_wmm_info))
|
||||
return;
|
||||
memcpy(&bss_cfg->wmm_info, wmm_ie +
|
||||
sizeof(struct ieee_types_header), *(wmm_ie + 1));
|
||||
priv->wmm_enabled = 1;
|
||||
|
|
|
@@ -59,6 +59,11 @@ static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
dev_dbg(dev->mt76.dev, "mask out 2GHz support\n");
}

if (is_mt7630(dev)) {
dev->mt76.cap.has_5ghz = false;
dev_dbg(dev->mt76.dev, "mask out 5GHz support\n");
}

if (!mt76x02_field_valid(nic_conf1 & 0xff))
nic_conf1 &= 0xff00;
|
||||
|
|
|
@ -51,6 +51,19 @@ static void mt76x0e_stop(struct ieee80211_hw *hw)
|
|||
mt76x0e_stop_hw(dev);
|
||||
}
|
||||
|
||||
static int
|
||||
mt76x0e_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
||||
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
|
||||
struct ieee80211_key_conf *key)
|
||||
{
|
||||
struct mt76x02_dev *dev = hw->priv;
|
||||
|
||||
if (is_mt7630(dev))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
return mt76x02_set_key(hw, cmd, vif, sta, key);
|
||||
}
|
||||
|
||||
static void
|
||||
mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
u32 queues, bool drop)
|
||||
|
@ -67,7 +80,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
|
|||
.configure_filter = mt76x02_configure_filter,
|
||||
.bss_info_changed = mt76x02_bss_info_changed,
|
||||
.sta_state = mt76_sta_state,
|
||||
.set_key = mt76x02_set_key,
|
||||
.set_key = mt76x0e_set_key,
|
||||
.conf_tx = mt76x02_conf_tx,
|
||||
.sw_scan_start = mt76_sw_scan,
|
||||
.sw_scan_complete = mt76x02_sw_scan_complete,
|
||||
|
|
|
@ -1654,6 +1654,7 @@ static void rt2800_config_wcid_attr_cipher(struct rt2x00_dev *rt2x00dev,
|
|||
|
||||
offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
|
||||
|
||||
if (crypto->cmd == SET_KEY) {
|
||||
rt2800_register_multiread(rt2x00dev, offset,
|
||||
&iveiv_entry, sizeof(iveiv_entry));
|
||||
if ((crypto->cipher == CIPHER_TKIP) ||
|
||||
|
@ -1661,6 +1662,10 @@ static void rt2800_config_wcid_attr_cipher(struct rt2x00_dev *rt2x00dev,
|
|||
(crypto->cipher == CIPHER_AES))
|
||||
iveiv_entry.iv[3] |= 0x20;
|
||||
iveiv_entry.iv[3] |= key->keyidx << 6;
|
||||
} else {
|
||||
memset(&iveiv_entry, 0, sizeof(iveiv_entry));
|
||||
}
|
||||
|
||||
rt2800_register_multiwrite(rt2x00dev, offset,
|
||||
&iveiv_entry, sizeof(iveiv_entry));
|
||||
}
|
||||
|
@ -4237,24 +4242,18 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
|
|||
switch (rt2x00dev->default_ant.rx_chain_num) {
|
||||
case 3:
|
||||
/* Turn on tertiary LNAs */
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN,
|
||||
rf->channel > 14);
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN,
|
||||
rf->channel <= 14);
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, 1);
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, 1);
|
||||
/* fall-through */
|
||||
case 2:
|
||||
/* Turn on secondary LNAs */
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN,
|
||||
rf->channel > 14);
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN,
|
||||
rf->channel <= 14);
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
|
||||
/* fall-through */
|
||||
case 1:
|
||||
/* Turn on primary LNAs */
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN,
|
||||
rf->channel > 14);
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN,
|
||||
rf->channel <= 14);
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
|
||||
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
|
@@ -645,7 +645,6 @@ fail_rx:
kfree(rsi_dev->tx_buffer);

fail_eps:
kfree(rsi_dev);

return status;
}
|
|
|
@@ -316,7 +316,7 @@ static int st95hf_echo_command(struct st95hf_context *st95context)
&echo_response);
if (result) {
dev_err(&st95context->spicontext.spidev->dev,
"err: echo response receieve error = 0x%x\n", result);
"err: echo response receive error = 0x%x\n", result);
return result;
}
|
||||
|
|
|
@ -655,6 +655,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
|
|||
resource_size_t start, size;
|
||||
struct nd_region *nd_region;
|
||||
unsigned long npfns, align;
|
||||
u32 end_trunc;
|
||||
struct nd_pfn_sb *pfn_sb;
|
||||
phys_addr_t offset;
|
||||
const char *sig;
|
||||
|
@ -696,6 +697,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
|
|||
size = resource_size(&nsio->res);
|
||||
npfns = PHYS_PFN(size - SZ_8K);
|
||||
align = max(nd_pfn->align, (1UL << SUBSECTION_SHIFT));
|
||||
end_trunc = start + size - ALIGN_DOWN(start + size, align);
|
||||
if (nd_pfn->mode == PFN_MODE_PMEM) {
|
||||
/*
|
||||
* The altmap should be padded out to the block size used
|
||||
|
@ -714,7 +716,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
|
|||
return -ENXIO;
|
||||
}
|
||||
|
||||
npfns = PHYS_PFN(size - offset);
|
||||
npfns = PHYS_PFN(size - offset - end_trunc);
|
||||
pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
|
||||
pfn_sb->dataoff = cpu_to_le64(offset);
|
||||
pfn_sb->npfns = cpu_to_le64(npfns);
|
||||
|
@ -723,6 +725,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
|
|||
memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
|
||||
pfn_sb->version_major = cpu_to_le16(1);
|
||||
pfn_sb->version_minor = cpu_to_le16(3);
|
||||
pfn_sb->end_trunc = cpu_to_le32(end_trunc);
|
||||
pfn_sb->align = cpu_to_le32(nd_pfn->align);
|
||||
checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
|
||||
pfn_sb->checksum = cpu_to_le64(checksum);
|
||||
|
|
|
@ -2552,7 +2552,7 @@ static struct regmap *aspeed_g5_acquire_regmap(struct aspeed_pinmux_data *ctx,
|
|||
if (IS_ERR(map))
|
||||
return map;
|
||||
} else
|
||||
map = ERR_PTR(-ENODEV);
|
||||
return ERR_PTR(-ENODEV);
|
||||
|
||||
ctx->maps[ASPEED_IP_LPC] = map;
|
||||
dev_dbg(ctx->dev, "Acquired LPC regmap");
|
||||
|
@ -2562,6 +2562,33 @@ static struct regmap *aspeed_g5_acquire_regmap(struct aspeed_pinmux_data *ctx,
|
|||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
static int aspeed_g5_sig_expr_eval(struct aspeed_pinmux_data *ctx,
|
||||
const struct aspeed_sig_expr *expr,
|
||||
bool enabled)
|
||||
{
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < expr->ndescs; i++) {
|
||||
const struct aspeed_sig_desc *desc = &expr->descs[i];
|
||||
struct regmap *map;
|
||||
|
||||
map = aspeed_g5_acquire_regmap(ctx, desc->ip);
|
||||
if (IS_ERR(map)) {
|
||||
dev_err(ctx->dev,
|
||||
"Failed to acquire regmap for IP block %d\n",
|
||||
desc->ip);
|
||||
return PTR_ERR(map);
|
||||
}
|
||||
|
||||
ret = aspeed_sig_desc_eval(desc, enabled, ctx->maps[desc->ip]);
|
||||
if (ret <= 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Configure a pin's signal by applying an expression's descriptor state for
|
||||
* all descriptors in the expression.
|
||||
|
@ -2647,6 +2674,7 @@ static int aspeed_g5_sig_expr_set(struct aspeed_pinmux_data *ctx,
|
|||
}
|
||||
|
||||
static const struct aspeed_pinmux_ops aspeed_g5_ops = {
|
||||
.eval = aspeed_g5_sig_expr_eval,
|
||||
.set = aspeed_g5_sig_expr_set,
|
||||
};
|
||||
|
||||
|
|
|
@ -78,11 +78,14 @@ int aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc,
|
|||
* neither the enabled nor disabled state. Thus we must explicitly test for
|
||||
* either condition as required.
|
||||
*/
|
||||
int aspeed_sig_expr_eval(const struct aspeed_pinmux_data *ctx,
|
||||
int aspeed_sig_expr_eval(struct aspeed_pinmux_data *ctx,
|
||||
const struct aspeed_sig_expr *expr, bool enabled)
|
||||
{
|
||||
int i;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
if (ctx->ops->eval)
|
||||
return ctx->ops->eval(ctx, expr, enabled);
|
||||
|
||||
for (i = 0; i < expr->ndescs; i++) {
|
||||
const struct aspeed_sig_desc *desc = &expr->descs[i];
|
||||
|
|
|
@ -702,6 +702,8 @@ struct aspeed_pin_function {
|
|||
struct aspeed_pinmux_data;
|
||||
|
||||
struct aspeed_pinmux_ops {
|
||||
int (*eval)(struct aspeed_pinmux_data *ctx,
|
||||
const struct aspeed_sig_expr *expr, bool enabled);
|
||||
int (*set)(struct aspeed_pinmux_data *ctx,
|
||||
const struct aspeed_sig_expr *expr, bool enabled);
|
||||
};
|
||||
|
@ -722,9 +724,8 @@ struct aspeed_pinmux_data {
|
|||
int aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc, bool enabled,
|
||||
struct regmap *map);
|
||||
|
||||
int aspeed_sig_expr_eval(const struct aspeed_pinmux_data *ctx,
|
||||
const struct aspeed_sig_expr *expr,
|
||||
bool enabled);
|
||||
int aspeed_sig_expr_eval(struct aspeed_pinmux_data *ctx,
|
||||
const struct aspeed_sig_expr *expr, bool enabled);
|
||||
|
||||
static inline int aspeed_sig_expr_set(struct aspeed_pinmux_data *ctx,
|
||||
const struct aspeed_sig_expr *expr,
|
||||
|
|
|
@@ -169,16 +169,16 @@ static int act8945a_set_mode(struct regulator_dev *rdev, unsigned int mode)
reg = ACT8945A_DCDC3_CTRL;
break;
case ACT8945A_ID_LDO1:
reg = ACT8945A_LDO1_SUS;
reg = ACT8945A_LDO1_CTRL;
break;
case ACT8945A_ID_LDO2:
reg = ACT8945A_LDO2_SUS;
reg = ACT8945A_LDO2_CTRL;
break;
case ACT8945A_ID_LDO3:
reg = ACT8945A_LDO3_SUS;
reg = ACT8945A_LDO3_CTRL;
break;
case ACT8945A_ID_LDO4:
reg = ACT8945A_LDO4_SUS;
reg = ACT8945A_LDO4_CTRL;
break;
default:
return -EINVAL;
|
|
|
@@ -205,7 +205,7 @@ static int slg51000_of_parse_cb(struct device_node *np,
    ena_gpiod = devm_gpiod_get_from_of_node(chip->dev, np,
                                            "enable-gpios", 0,
                                            gflags, "gpio-en-ldo");
    if (ena_gpiod) {
    if (!IS_ERR(ena_gpiod)) {
        config->ena_gpiod = ena_gpiod;
        devm_gpiod_unhinge(chip->dev, config->ena_gpiod);
    }

@@ -459,7 +459,7 @@ static int slg51000_i2c_probe(struct i2c_client *client,
                                   GPIOD_OUT_HIGH
                                   | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
                                   "slg51000-cs");
    if (cs_gpiod) {
    if (!IS_ERR(cs_gpiod)) {
        dev_info(dev, "Found chip selector property\n");
        chip->cs_gpiod = cs_gpiod;
    }

@@ -359,6 +359,17 @@ static const u16 VINTANA2_VSEL_table[] = {
    2500, 2750,
};

/* 600mV to 1450mV in 12.5 mV steps */
static const struct regulator_linear_range VDD1_ranges[] = {
    REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500)
};

/* 600mV to 1450mV in 12.5 mV steps, everything above = 1500mV */
static const struct regulator_linear_range VDD2_ranges[] = {
    REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500),
    REGULATOR_LINEAR_RANGE(1500000, 69, 69, 12500)
};

static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
{
    struct twlreg_info *info = rdev_get_drvdata(rdev);

@@ -427,6 +438,8 @@ static int twl4030smps_get_voltage(struct regulator_dev *rdev)
}

static const struct regulator_ops twl4030smps_ops = {
    .list_voltage = regulator_list_voltage_linear_range,

    .set_voltage = twl4030smps_set_voltage,
    .get_voltage = twl4030smps_get_voltage,
};

@@ -466,7 +479,8 @@ static const struct twlreg_info TWL4030_INFO_##label = { \
    }, \
}

#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \
#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf, \
                                n_volt) \
static const struct twlreg_info TWL4030_INFO_##label = { \
    .base = offset, \
    .id = num, \

@@ -479,6 +493,9 @@ static const struct twlreg_info TWL4030_INFO_##label = { \
        .owner = THIS_MODULE, \
        .enable_time = turnon_delay, \
        .of_map_mode = twl4030reg_map_mode, \
        .n_voltages = n_volt, \
        .n_linear_ranges = ARRAY_SIZE(label ## _ranges), \
        .linear_ranges = label ## _ranges, \
    }, \
}

@@ -518,8 +535,8 @@ TWL4030_ADJUSTABLE_LDO(VSIM, 0x37, 9, 100, 0x00);
TWL4030_ADJUSTABLE_LDO(VDAC, 0x3b, 10, 100, 0x08);
TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08);
TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08);
TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08);
TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08);
TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08, 68);
TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08, 69);
/* VUSBCP is managed *only* by the USB subchip */
TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08);
TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08);

@@ -5715,7 +5715,7 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
 * 0 = Set nr_hw_queues by the number of CPUs or HW queues.
 * 1,128 = Manually specify the maximum nr_hw_queue value to be set,
 *
 * Value range is [0,128]. Default value is 8.
 * Value range is [0,256]. Default value is 8.
 */
LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
            LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,

@@ -46,7 +46,7 @@

/* FCP MQ queue count limiting */
#define LPFC_FCP_MQ_THRESHOLD_MIN 0
#define LPFC_FCP_MQ_THRESHOLD_MAX 128
#define LPFC_FCP_MQ_THRESHOLD_MAX 256
#define LPFC_FCP_MQ_THRESHOLD_DEF 8

/* Common buffer size to accomidate SCSI and NVME IO buffers */

@@ -630,6 +630,9 @@ int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len,
    struct geni_wrapper *wrapper = se->wrapper;
    u32 val;

    if (!wrapper)
        return -EINVAL;

    *iova = dma_map_single(wrapper->dev, buf, len, DMA_TO_DEVICE);
    if (dma_mapping_error(wrapper->dev, *iova))
        return -EIO;

@@ -663,6 +666,9 @@ int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
    struct geni_wrapper *wrapper = se->wrapper;
    u32 val;

    if (!wrapper)
        return -EINVAL;

    *iova = dma_map_single(wrapper->dev, buf, len, DMA_FROM_DEVICE);
    if (dma_mapping_error(wrapper->dev, *iova))
        return -EIO;

@@ -22,6 +22,12 @@
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000

/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * pkts.
 */
#define VHOST_TEST_PKT_WEIGHT 256

enum {
    VHOST_TEST_VQ = 0,
    VHOST_TEST_VQ_MAX = 1,

@@ -80,11 +86,9 @@ static void handle_vq(struct vhost_test *n)
        }
        vhost_add_used_and_signal(&n->dev, vq, head, 0);
        total_len += len;
        if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
            vhost_poll_queue(&vq->poll);
        if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
            break;
        }
    }

    mutex_unlock(&vq->mutex);
}

@@ -115,7 +119,8 @@ static int vhost_test_open(struct inode *inode, struct file *f)
    dev = &n->dev;
    vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
    n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
    vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
    vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
                   VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT);

    f->private_data = n;

@ -203,7 +203,6 @@ EXPORT_SYMBOL_GPL(vhost_poll_init);
|
|||
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
|
||||
{
|
||||
__poll_t mask;
|
||||
int ret = 0;
|
||||
|
||||
if (poll->wqh)
|
||||
return 0;
|
||||
|
@ -213,10 +212,10 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
|
|||
vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
|
||||
if (mask & EPOLLERR) {
|
||||
vhost_poll_stop(poll);
|
||||
ret = -EINVAL;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(vhost_poll_start);
|
||||
|
||||
|
@ -298,160 +297,6 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
|
|||
__vhost_vq_meta_reset(d->vqs[i]);
|
||||
}
|
||||
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
static void vhost_map_unprefetch(struct vhost_map *map)
|
||||
{
|
||||
kfree(map->pages);
|
||||
map->pages = NULL;
|
||||
map->npages = 0;
|
||||
map->addr = NULL;
|
||||
}
|
||||
|
||||
static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq)
|
||||
{
|
||||
struct vhost_map *map[VHOST_NUM_ADDRS];
|
||||
int i;
|
||||
|
||||
spin_lock(&vq->mmu_lock);
|
||||
for (i = 0; i < VHOST_NUM_ADDRS; i++) {
|
||||
map[i] = rcu_dereference_protected(vq->maps[i],
|
||||
lockdep_is_held(&vq->mmu_lock));
|
||||
if (map[i])
|
||||
rcu_assign_pointer(vq->maps[i], NULL);
|
||||
}
|
||||
spin_unlock(&vq->mmu_lock);
|
||||
|
||||
synchronize_rcu();
|
||||
|
||||
for (i = 0; i < VHOST_NUM_ADDRS; i++)
|
||||
if (map[i])
|
||||
vhost_map_unprefetch(map[i]);
|
||||
|
||||
}
|
||||
|
||||
static void vhost_reset_vq_maps(struct vhost_virtqueue *vq)
|
||||
{
|
||||
int i;
|
||||
|
||||
vhost_uninit_vq_maps(vq);
|
||||
for (i = 0; i < VHOST_NUM_ADDRS; i++)
|
||||
vq->uaddrs[i].size = 0;
|
||||
}
|
||||
|
||||
static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr,
|
||||
unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
if (unlikely(!uaddr->size))
|
||||
return false;
|
||||
|
||||
return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size);
|
||||
}
|
||||
|
||||
static void vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
|
||||
int index,
|
||||
unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
struct vhost_uaddr *uaddr = &vq->uaddrs[index];
|
||||
struct vhost_map *map;
|
||||
int i;
|
||||
|
||||
if (!vhost_map_range_overlap(uaddr, start, end))
|
||||
return;
|
||||
|
||||
spin_lock(&vq->mmu_lock);
|
||||
++vq->invalidate_count;
|
||||
|
||||
map = rcu_dereference_protected(vq->maps[index],
|
||||
lockdep_is_held(&vq->mmu_lock));
|
||||
if (map) {
|
||||
if (uaddr->write) {
|
||||
for (i = 0; i < map->npages; i++)
|
||||
set_page_dirty(map->pages[i]);
|
||||
}
|
||||
rcu_assign_pointer(vq->maps[index], NULL);
|
||||
}
|
||||
spin_unlock(&vq->mmu_lock);
|
||||
|
||||
if (map) {
|
||||
synchronize_rcu();
|
||||
vhost_map_unprefetch(map);
|
||||
}
|
||||
}
|
||||
|
||||
static void vhost_invalidate_vq_end(struct vhost_virtqueue *vq,
|
||||
int index,
|
||||
unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
if (!vhost_map_range_overlap(&vq->uaddrs[index], start, end))
|
||||
return;
|
||||
|
||||
spin_lock(&vq->mmu_lock);
|
||||
--vq->invalidate_count;
|
||||
spin_unlock(&vq->mmu_lock);
|
||||
}
|
||||
|
||||
static int vhost_invalidate_range_start(struct mmu_notifier *mn,
|
||||
const struct mmu_notifier_range *range)
|
||||
{
|
||||
struct vhost_dev *dev = container_of(mn, struct vhost_dev,
|
||||
mmu_notifier);
|
||||
int i, j;
|
||||
|
||||
if (!mmu_notifier_range_blockable(range))
|
||||
return -EAGAIN;
|
||||
|
||||
for (i = 0; i < dev->nvqs; i++) {
|
||||
struct vhost_virtqueue *vq = dev->vqs[i];
|
||||
|
||||
for (j = 0; j < VHOST_NUM_ADDRS; j++)
|
||||
vhost_invalidate_vq_start(vq, j,
|
||||
range->start,
|
||||
range->end);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vhost_invalidate_range_end(struct mmu_notifier *mn,
|
||||
const struct mmu_notifier_range *range)
|
||||
{
|
||||
struct vhost_dev *dev = container_of(mn, struct vhost_dev,
|
||||
mmu_notifier);
|
||||
int i, j;
|
||||
|
||||
for (i = 0; i < dev->nvqs; i++) {
|
||||
struct vhost_virtqueue *vq = dev->vqs[i];
|
||||
|
||||
for (j = 0; j < VHOST_NUM_ADDRS; j++)
|
||||
vhost_invalidate_vq_end(vq, j,
|
||||
range->start,
|
||||
range->end);
|
||||
}
|
||||
}
|
||||
|
||||
static const struct mmu_notifier_ops vhost_mmu_notifier_ops = {
|
||||
.invalidate_range_start = vhost_invalidate_range_start,
|
||||
.invalidate_range_end = vhost_invalidate_range_end,
|
||||
};
|
||||
|
||||
static void vhost_init_maps(struct vhost_dev *dev)
|
||||
{
|
||||
struct vhost_virtqueue *vq;
|
||||
int i, j;
|
||||
|
||||
dev->mmu_notifier.ops = &vhost_mmu_notifier_ops;
|
||||
|
||||
for (i = 0; i < dev->nvqs; ++i) {
|
||||
vq = dev->vqs[i];
|
||||
for (j = 0; j < VHOST_NUM_ADDRS; j++)
|
||||
RCU_INIT_POINTER(vq->maps[j], NULL);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
static void vhost_vq_reset(struct vhost_dev *dev,
|
||||
struct vhost_virtqueue *vq)
|
||||
{
|
||||
|
@ -480,11 +325,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
|
|||
vq->busyloop_timeout = 0;
|
||||
vq->umem = NULL;
|
||||
vq->iotlb = NULL;
|
||||
vq->invalidate_count = 0;
|
||||
__vhost_vq_meta_reset(vq);
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
vhost_reset_vq_maps(vq);
|
||||
#endif
|
||||
}
|
||||
|
||||
static int vhost_worker(void *data)
|
||||
|
@ -634,9 +475,7 @@ void vhost_dev_init(struct vhost_dev *dev,
|
|||
INIT_LIST_HEAD(&dev->read_list);
|
||||
INIT_LIST_HEAD(&dev->pending_list);
|
||||
spin_lock_init(&dev->iotlb_lock);
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
vhost_init_maps(dev);
|
||||
#endif
|
||||
|
||||
|
||||
for (i = 0; i < dev->nvqs; ++i) {
|
||||
vq = dev->vqs[i];
|
||||
|
@ -645,7 +484,6 @@ void vhost_dev_init(struct vhost_dev *dev,
|
|||
vq->heads = NULL;
|
||||
vq->dev = dev;
|
||||
mutex_init(&vq->mutex);
|
||||
spin_lock_init(&vq->mmu_lock);
|
||||
vhost_vq_reset(dev, vq);
|
||||
if (vq->handle_kick)
|
||||
vhost_poll_init(&vq->poll, vq->handle_kick,
|
||||
|
@ -725,18 +563,7 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
|
|||
if (err)
|
||||
goto err_cgroup;
|
||||
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
err = mmu_notifier_register(&dev->mmu_notifier, dev->mm);
|
||||
if (err)
|
||||
goto err_mmu_notifier;
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
err_mmu_notifier:
|
||||
vhost_dev_free_iovecs(dev);
|
||||
#endif
|
||||
err_cgroup:
|
||||
kthread_stop(worker);
|
||||
dev->worker = NULL;
|
||||
|
@ -827,107 +654,6 @@ static void vhost_clear_msg(struct vhost_dev *dev)
|
|||
spin_unlock(&dev->iotlb_lock);
|
||||
}
|
||||
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
static void vhost_setup_uaddr(struct vhost_virtqueue *vq,
|
||||
int index, unsigned long uaddr,
|
||||
size_t size, bool write)
|
||||
{
|
||||
struct vhost_uaddr *addr = &vq->uaddrs[index];
|
||||
|
||||
addr->uaddr = uaddr;
|
||||
addr->size = size;
|
||||
addr->write = write;
|
||||
}
|
||||
|
||||
static void vhost_setup_vq_uaddr(struct vhost_virtqueue *vq)
|
||||
{
|
||||
vhost_setup_uaddr(vq, VHOST_ADDR_DESC,
|
||||
(unsigned long)vq->desc,
|
||||
vhost_get_desc_size(vq, vq->num),
|
||||
false);
|
||||
vhost_setup_uaddr(vq, VHOST_ADDR_AVAIL,
|
||||
(unsigned long)vq->avail,
|
||||
vhost_get_avail_size(vq, vq->num),
|
||||
false);
|
||||
vhost_setup_uaddr(vq, VHOST_ADDR_USED,
|
||||
(unsigned long)vq->used,
|
||||
vhost_get_used_size(vq, vq->num),
|
||||
true);
|
||||
}
|
||||
|
||||
static int vhost_map_prefetch(struct vhost_virtqueue *vq,
|
||||
int index)
|
||||
{
|
||||
struct vhost_map *map;
|
||||
struct vhost_uaddr *uaddr = &vq->uaddrs[index];
|
||||
struct page **pages;
|
||||
int npages = DIV_ROUND_UP(uaddr->size, PAGE_SIZE);
|
||||
int npinned;
|
||||
void *vaddr, *v;
|
||||
int err;
|
||||
int i;
|
||||
|
||||
spin_lock(&vq->mmu_lock);
|
||||
|
||||
err = -EFAULT;
|
||||
if (vq->invalidate_count)
|
||||
goto err;
|
||||
|
||||
err = -ENOMEM;
|
||||
map = kmalloc(sizeof(*map), GFP_ATOMIC);
|
||||
if (!map)
|
||||
goto err;
|
||||
|
||||
pages = kmalloc_array(npages, sizeof(struct page *), GFP_ATOMIC);
|
||||
if (!pages)
|
||||
goto err_pages;
|
||||
|
||||
err = EFAULT;
|
||||
npinned = __get_user_pages_fast(uaddr->uaddr, npages,
|
||||
uaddr->write, pages);
|
||||
if (npinned > 0)
|
||||
release_pages(pages, npinned);
|
||||
if (npinned != npages)
|
||||
goto err_gup;
|
||||
|
||||
for (i = 0; i < npinned; i++)
|
||||
if (PageHighMem(pages[i]))
|
||||
goto err_gup;
|
||||
|
||||
vaddr = v = page_address(pages[0]);
|
||||
|
||||
/* For simplicity, fallback to userspace address if VA is not
|
||||
* contigious.
|
||||
*/
|
||||
for (i = 1; i < npinned; i++) {
|
||||
v += PAGE_SIZE;
|
||||
if (v != page_address(pages[i]))
|
||||
goto err_gup;
|
||||
}
|
||||
|
||||
map->addr = vaddr + (uaddr->uaddr & (PAGE_SIZE - 1));
|
||||
map->npages = npages;
|
||||
map->pages = pages;
|
||||
|
||||
rcu_assign_pointer(vq->maps[index], map);
|
||||
/* No need for a synchronize_rcu(). This function should be
|
||||
* called by dev->worker so we are serialized with all
|
||||
* readers.
|
||||
*/
|
||||
spin_unlock(&vq->mmu_lock);
|
||||
|
||||
return 0;
|
||||
|
||||
err_gup:
|
||||
kfree(pages);
|
||||
err_pages:
|
||||
kfree(map);
|
||||
err:
|
||||
spin_unlock(&vq->mmu_lock);
|
||||
return err;
|
||||
}
|
||||
#endif
|
||||
|
||||
void vhost_dev_cleanup(struct vhost_dev *dev)
|
||||
{
|
||||
int i;
|
||||
|
@ -957,16 +683,8 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
|
|||
kthread_stop(dev->worker);
|
||||
dev->worker = NULL;
|
||||
}
|
||||
if (dev->mm) {
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
mmu_notifier_unregister(&dev->mmu_notifier, dev->mm);
|
||||
#endif
|
||||
if (dev->mm)
|
||||
mmput(dev->mm);
|
||||
}
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
for (i = 0; i < dev->nvqs; i++)
|
||||
vhost_uninit_vq_maps(dev->vqs[i]);
|
||||
#endif
|
||||
dev->mm = NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
|
||||
|
@ -1195,26 +913,6 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
|
|||
|
||||
static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
|
||||
{
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
struct vhost_map *map;
|
||||
struct vring_used *used;
|
||||
|
||||
if (!vq->iotlb) {
|
||||
rcu_read_lock();
|
||||
|
||||
map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
|
||||
if (likely(map)) {
|
||||
used = map->addr;
|
||||
*((__virtio16 *)&used->ring[vq->num]) =
|
||||
cpu_to_vhost16(vq, vq->avail_idx);
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
}
|
||||
#endif
|
||||
|
||||
return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
|
||||
vhost_avail_event(vq));
|
||||
}
|
||||
|
@ -1223,27 +921,6 @@ static inline int vhost_put_used(struct vhost_virtqueue *vq,
|
|||
struct vring_used_elem *head, int idx,
|
||||
int count)
|
||||
{
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
struct vhost_map *map;
|
||||
struct vring_used *used;
|
||||
size_t size;
|
||||
|
||||
if (!vq->iotlb) {
|
||||
rcu_read_lock();
|
||||
|
||||
map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
|
||||
if (likely(map)) {
|
||||
used = map->addr;
|
||||
size = count * sizeof(*head);
|
||||
memcpy(used->ring + idx, head, size);
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
}
|
||||
#endif
|
||||
|
||||
return vhost_copy_to_user(vq, vq->used->ring + idx, head,
|
||||
count * sizeof(*head));
|
||||
}
|
||||
|
@ -1251,25 +928,6 @@ static inline int vhost_put_used(struct vhost_virtqueue *vq,
|
|||
static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
|
||||
|
||||
{
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
struct vhost_map *map;
|
||||
struct vring_used *used;
|
||||
|
||||
if (!vq->iotlb) {
|
||||
rcu_read_lock();
|
||||
|
||||
map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
|
||||
if (likely(map)) {
|
||||
used = map->addr;
|
||||
used->flags = cpu_to_vhost16(vq, vq->used_flags);
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
}
|
||||
#endif
|
||||
|
||||
return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
|
||||
&vq->used->flags);
|
||||
}
|
||||
|
@ -1277,25 +935,6 @@ static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
|
|||
static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
|
||||
|
||||
{
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
struct vhost_map *map;
|
||||
struct vring_used *used;
|
||||
|
||||
if (!vq->iotlb) {
|
||||
rcu_read_lock();
|
||||
|
||||
map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
|
||||
if (likely(map)) {
|
||||
used = map->addr;
|
||||
used->idx = cpu_to_vhost16(vq, vq->last_used_idx);
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
}
|
||||
#endif
|
||||
|
||||
return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
|
||||
&vq->used->idx);
|
||||
}
|
||||
|
@ -1341,50 +980,12 @@ static void vhost_dev_unlock_vqs(struct vhost_dev *d)
|
|||
static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
|
||||
__virtio16 *idx)
|
||||
{
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
struct vhost_map *map;
|
||||
struct vring_avail *avail;
|
||||
|
||||
if (!vq->iotlb) {
|
||||
rcu_read_lock();
|
||||
|
||||
map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
|
||||
if (likely(map)) {
|
||||
avail = map->addr;
|
||||
*idx = avail->idx;
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
}
|
||||
#endif
|
||||
|
||||
return vhost_get_avail(vq, *idx, &vq->avail->idx);
|
||||
}
|
||||
|
||||
static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
|
||||
__virtio16 *head, int idx)
|
||||
{
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
struct vhost_map *map;
|
||||
struct vring_avail *avail;
|
||||
|
||||
if (!vq->iotlb) {
|
||||
rcu_read_lock();
|
||||
|
||||
map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
|
||||
if (likely(map)) {
|
||||
avail = map->addr;
|
||||
*head = avail->ring[idx & (vq->num - 1)];
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
}
|
||||
#endif
|
||||
|
||||
return vhost_get_avail(vq, *head,
|
||||
&vq->avail->ring[idx & (vq->num - 1)]);
|
||||
}
|
||||
|
@ -1392,98 +993,24 @@ static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
|
|||
static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
|
||||
__virtio16 *flags)
|
||||
{
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
struct vhost_map *map;
|
||||
struct vring_avail *avail;
|
||||
|
||||
if (!vq->iotlb) {
|
||||
rcu_read_lock();
|
||||
|
||||
map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
|
||||
if (likely(map)) {
|
||||
avail = map->addr;
|
||||
*flags = avail->flags;
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
}
|
||||
#endif
|
||||
|
||||
return vhost_get_avail(vq, *flags, &vq->avail->flags);
|
||||
}
|
||||
|
||||
static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
|
||||
__virtio16 *event)
|
||||
{
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
struct vhost_map *map;
|
||||
struct vring_avail *avail;
|
||||
|
||||
if (!vq->iotlb) {
|
||||
rcu_read_lock();
|
||||
map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
|
||||
if (likely(map)) {
|
||||
avail = map->addr;
|
||||
*event = (__virtio16)avail->ring[vq->num];
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
#endif
|
||||
|
||||
return vhost_get_avail(vq, *event, vhost_used_event(vq));
|
||||
}
|
||||
|
||||
static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
|
||||
__virtio16 *idx)
|
||||
{
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
struct vhost_map *map;
|
||||
struct vring_used *used;
|
||||
|
||||
if (!vq->iotlb) {
|
||||
rcu_read_lock();
|
||||
|
||||
map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
|
||||
if (likely(map)) {
|
||||
used = map->addr;
|
||||
*idx = used->idx;
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
}
|
||||
#endif
|
||||
|
||||
return vhost_get_used(vq, *idx, &vq->used->idx);
|
||||
}
|
||||
|
||||
static inline int vhost_get_desc(struct vhost_virtqueue *vq,
|
||||
struct vring_desc *desc, int idx)
|
||||
{
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
struct vhost_map *map;
|
||||
struct vring_desc *d;
|
||||
|
||||
if (!vq->iotlb) {
|
||||
rcu_read_lock();
|
||||
|
||||
map = rcu_dereference(vq->maps[VHOST_ADDR_DESC]);
|
||||
if (likely(map)) {
|
||||
d = map->addr;
|
||||
*desc = *(d + idx);
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
}
|
||||
#endif
|
||||
|
||||
return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
|
||||
}
|
||||
|
||||
|
@ -1824,32 +1351,12 @@ static bool iotlb_access_ok(struct vhost_virtqueue *vq,
|
|||
return true;
|
||||
}
|
||||
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
static void vhost_vq_map_prefetch(struct vhost_virtqueue *vq)
|
||||
{
|
||||
struct vhost_map __rcu *map;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < VHOST_NUM_ADDRS; i++) {
|
||||
rcu_read_lock();
|
||||
map = rcu_dereference(vq->maps[i]);
|
||||
rcu_read_unlock();
|
||||
if (unlikely(!map))
|
||||
vhost_map_prefetch(vq, i);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
int vq_meta_prefetch(struct vhost_virtqueue *vq)
|
||||
{
|
||||
unsigned int num = vq->num;
|
||||
|
||||
if (!vq->iotlb) {
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
vhost_vq_map_prefetch(vq);
|
||||
#endif
|
||||
if (!vq->iotlb)
|
||||
return 1;
|
||||
}
|
||||
|
||||
return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
|
||||
vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
|
||||
|
@ -2060,16 +1567,6 @@ static long vhost_vring_set_num_addr(struct vhost_dev *d,
|
|||
|
||||
mutex_lock(&vq->mutex);
|
||||
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
/* Unregister MMU notifer to allow invalidation callback
|
||||
* can access vq->uaddrs[] without holding a lock.
|
||||
*/
|
||||
if (d->mm)
|
||||
mmu_notifier_unregister(&d->mmu_notifier, d->mm);
|
||||
|
||||
vhost_uninit_vq_maps(vq);
|
||||
#endif
|
||||
|
||||
switch (ioctl) {
|
||||
case VHOST_SET_VRING_NUM:
|
||||
r = vhost_vring_set_num(d, vq, argp);
|
||||
|
@ -2081,13 +1578,6 @@ static long vhost_vring_set_num_addr(struct vhost_dev *d,
|
|||
BUG();
|
||||
}
|
||||
|
||||
#if VHOST_ARCH_CAN_ACCEL_UACCESS
|
||||
vhost_setup_vq_uaddr(vq);
|
||||
|
||||
if (d->mm)
|
||||
mmu_notifier_register(&d->mmu_notifier, d->mm);
|
||||
#endif
|
||||
|
||||
mutex_unlock(&vq->mutex);
|
||||
|
||||
return r;
|
||||
|
@ -2688,7 +2178,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
|
|||
/* If this is an input descriptor, increment that count. */
|
||||
if (access == VHOST_ACCESS_WO) {
|
||||
*in_num += ret;
|
||||
if (unlikely(log)) {
|
||||
if (unlikely(log && ret)) {
|
||||
log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
|
||||
log[*log_num].len = vhost32_to_cpu(vq, desc.len);
|
||||
++*log_num;
|
||||
|
@ -2829,7 +2319,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
|
|||
/* If this is an input descriptor,
|
||||
* increment that count. */
|
||||
*in_num += ret;
|
||||
if (unlikely(log)) {
|
||||
if (unlikely(log && ret)) {
|
||||
log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
|
||||
log[*log_num].len = vhost32_to_cpu(vq, desc.len);
|
||||
++*log_num;
|
||||
|
|
|
@@ -12,9 +12,6 @@
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
#include <linux/pagemap.h>
#include <linux/mmu_notifier.h>
#include <asm/cacheflush.h>

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

@@ -83,24 +80,6 @@ enum vhost_uaddr_type {
    VHOST_NUM_ADDRS = 3,
};

struct vhost_map {
    int npages;
    void *addr;
    struct page **pages;
};

struct vhost_uaddr {
    unsigned long uaddr;
    size_t size;
    bool write;
};

#if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0
#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
#else
#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
#endif

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
    struct vhost_dev *dev;

@@ -111,22 +90,7 @@ struct vhost_virtqueue {
    struct vring_desc __user *desc;
    struct vring_avail __user *avail;
    struct vring_used __user *used;

#if VHOST_ARCH_CAN_ACCEL_UACCESS
    /* Read by memory accessors, modified by meta data
     * prefetching, MMU notifier and vring ioctl().
     * Synchonrized through mmu_lock (writers) and RCU (writers
     * and readers).
     */
    struct vhost_map __rcu *maps[VHOST_NUM_ADDRS];
    /* Read by MMU notifier, modified by vring ioctl(),
     * synchronized through MMU notifier
     * registering/unregistering.
     */
    struct vhost_uaddr uaddrs[VHOST_NUM_ADDRS];
#endif
    const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];

    struct file *kick;
    struct eventfd_ctx *call_ctx;
    struct eventfd_ctx *error_ctx;

@@ -181,8 +145,6 @@ struct vhost_virtqueue {
    bool user_be;
#endif
    u32 busyloop_timeout;
    spinlock_t mmu_lock;
    int invalidate_count;
};

struct vhost_msg_node {

@@ -196,9 +158,6 @@ struct vhost_msg_node {

struct vhost_dev {
    struct mm_struct *mm;
#ifdef CONFIG_MMU_NOTIFIER
    struct mmu_notifier mmu_notifier;
#endif
    struct mutex mutex;
    struct vhost_virtqueue **vqs;
    int nvqs;

@@ -566,13 +566,17 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,

unmap_release:
    err_idx = i;

    if (indirect)
        i = 0;
    else
        i = head;

    for (n = 0; n < total_sg; n++) {
        if (i == err_idx)
            break;
        vring_unmap_one_split(vq, &desc[i]);
        i = virtio16_to_cpu(_vq->vdev, vq->split.vring.desc[i].next);
        i = virtio16_to_cpu(_vq->vdev, desc[i].next);
    }

    if (indirect)

@@ -3628,6 +3628,13 @@ void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
                   TASK_UNINTERRUPTIBLE);
}

static void end_extent_buffer_writeback(struct extent_buffer *eb)
{
    clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
    smp_mb__after_atomic();
    wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
}

/*
 * Lock eb pages and flush the bio if we can't the locks
 *

@@ -3699,8 +3706,11 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb

        if (!trylock_page(p)) {
            if (!flush) {
                ret = flush_write_bio(epd);
                if (ret < 0) {
                int err;

                err = flush_write_bio(epd);
                if (err < 0) {
                    ret = err;
                    failed_page_nr = i;
                    goto err_unlock;
                }

@@ -3715,16 +3725,23 @@ err_unlock:
    /* Unlock already locked pages */
    for (i = 0; i < failed_page_nr; i++)
        unlock_page(eb->pages[i]);
    /*
     * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
     * Also set back EXTENT_BUFFER_DIRTY so future attempts to this eb can
     * be made and undo everything done before.
     */
    btrfs_tree_lock(eb);
    spin_lock(&eb->refs_lock);
    set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
    end_extent_buffer_writeback(eb);
    spin_unlock(&eb->refs_lock);
    percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
                             fs_info->dirty_metadata_batch);
    btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
    btrfs_tree_unlock(eb);
    return ret;
}

static void end_extent_buffer_writeback(struct extent_buffer *eb)
{
    clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
    smp_mb__after_atomic();
    wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
}

static void set_btree_ioerr(struct page *page)
{
    struct extent_buffer *eb = (struct extent_buffer *)page->private;

@@ -4985,7 +4985,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
                                  BTRFS_I(inode),
                                  LOG_OTHER_INODE_ALL,
                                  0, LLONG_MAX, ctx);
                iput(inode);
                btrfs_add_delayed_iput(inode);
            }
        }
        continue;

@@ -5000,7 +5000,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
        ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
                              LOG_OTHER_INODE, 0, LLONG_MAX, ctx);
        if (ret) {
            iput(inode);
            btrfs_add_delayed_iput(inode);
            continue;
        }

@@ -5009,7 +5009,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
        key.offset = 0;
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0) {
            iput(inode);
            btrfs_add_delayed_iput(inode);
            continue;
        }

@@ -5056,7 +5056,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
            }
            path->slots[0]++;
        }
        iput(inode);
        btrfs_add_delayed_iput(inode);
    }

    return ret;

@@ -5689,7 +5689,7 @@ process_leaf:
            }

            if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
                iput(di_inode);
                btrfs_add_delayed_iput(di_inode);
                break;
            }

@@ -5701,7 +5701,7 @@ process_leaf:
            if (!ret &&
                btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
                ret = 1;
            iput(di_inode);
            btrfs_add_delayed_iput(di_inode);
            if (ret)
                goto next_dir_inode;
            if (ctx->log_new_dentries) {

@@ -5848,7 +5848,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
        if (!ret && ctx && ctx->log_new_dentries)
            ret = log_new_dir_dentries(trans, root,
                                       BTRFS_I(dir_inode), ctx);
        iput(dir_inode);
        btrfs_add_delayed_iput(dir_inode);
        if (ret)
            goto out;
    }

@@ -5891,7 +5891,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
        ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
                              LOG_INODE_EXISTS,
                              0, LLONG_MAX, ctx);
        iput(inode);
        btrfs_add_delayed_iput(inode);
        if (ret)
            return ret;

@@ -20,6 +20,15 @@
#include <linux/list.h>
#include <linux/spinlock.h>

struct configfs_fragment {
    atomic_t frag_count;
    struct rw_semaphore frag_sem;
    bool frag_dead;
};

void put_fragment(struct configfs_fragment *);
struct configfs_fragment *get_fragment(struct configfs_fragment *);

struct configfs_dirent {
    atomic_t s_count;
    int s_dependent_count;

@@ -34,6 +43,7 @@ struct configfs_dirent {
#ifdef CONFIG_LOCKDEP
    int s_depth;
#endif
    struct configfs_fragment *s_frag;
};

#define CONFIGFS_ROOT 0x0001

@@ -61,8 +71,8 @@ extern int configfs_create(struct dentry *, umode_t mode, void (*init)(struct in
extern int configfs_create_file(struct config_item *, const struct configfs_attribute *);
extern int configfs_create_bin_file(struct config_item *,
                                    const struct configfs_bin_attribute *);
extern int configfs_make_dirent(struct configfs_dirent *,
                                struct dentry *, void *, umode_t, int);
extern int configfs_make_dirent(struct configfs_dirent *, struct dentry *,
                                void *, umode_t, int, struct configfs_fragment *);
extern int configfs_dirent_is_ready(struct configfs_dirent *);

extern void configfs_hash_and_remove(struct dentry * dir, const char * name);

@@ -137,6 +147,7 @@ static inline void release_configfs_dirent(struct configfs_dirent * sd)
{
    if (!(sd->s_type & CONFIGFS_ROOT)) {
        kfree(sd->s_iattr);
        put_fragment(sd->s_frag);
        kmem_cache_free(configfs_dir_cachep, sd);
    }
}

@ -151,11 +151,38 @@ configfs_adjust_dir_dirent_depth_after_populate(struct configfs_dirent *sd)
|
|||
|
||||
#endif /* CONFIG_LOCKDEP */
|
||||
|
||||
static struct configfs_fragment *new_fragment(void)
|
||||
{
|
||||
struct configfs_fragment *p;
|
||||
|
||||
p = kmalloc(sizeof(struct configfs_fragment), GFP_KERNEL);
|
||||
if (p) {
|
||||
atomic_set(&p->frag_count, 1);
|
||||
init_rwsem(&p->frag_sem);
|
||||
p->frag_dead = false;
|
||||
}
|
||||
return p;
|
||||
}
|
||||
|
||||
void put_fragment(struct configfs_fragment *frag)
|
||||
{
|
||||
if (frag && atomic_dec_and_test(&frag->frag_count))
|
||||
kfree(frag);
|
||||
}
|
||||
|
||||
struct configfs_fragment *get_fragment(struct configfs_fragment *frag)
|
||||
{
|
||||
if (likely(frag))
|
||||
atomic_inc(&frag->frag_count);
|
||||
return frag;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocates a new configfs_dirent and links it to the parent configfs_dirent
|
||||
*/
|
||||
static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *parent_sd,
|
||||
void *element, int type)
|
||||
void *element, int type,
|
||||
struct configfs_fragment *frag)
|
||||
{
|
||||
struct configfs_dirent * sd;
|
||||
|
||||
|
@ -175,6 +202,7 @@ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *paren
|
|||
kmem_cache_free(configfs_dir_cachep, sd);
|
||||
return ERR_PTR(-ENOENT);
|
||||
}
|
||||
sd->s_frag = get_fragment(frag);
|
||||
list_add(&sd->s_sibling, &parent_sd->s_children);
|
||||
spin_unlock(&configfs_dirent_lock);
|
||||
|
||||
|
@ -209,11 +237,11 @@ static int configfs_dirent_exists(struct configfs_dirent *parent_sd,
|
|||
|
||||
int configfs_make_dirent(struct configfs_dirent * parent_sd,
|
||||
struct dentry * dentry, void * element,
|
||||
umode_t mode, int type)
|
||||
umode_t mode, int type, struct configfs_fragment *frag)
|
||||
{
|
||||
struct configfs_dirent * sd;
|
||||
|
||||
sd = configfs_new_dirent(parent_sd, element, type);
|
||||
sd = configfs_new_dirent(parent_sd, element, type, frag);
|
||||
if (IS_ERR(sd))
|
||||
return PTR_ERR(sd);
|
||||
|
||||
|
@ -260,7 +288,8 @@ static void init_symlink(struct inode * inode)
|
|||
* until it is validated by configfs_dir_set_ready()
|
||||
*/
|
||||
|
||||
static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
|
||||
static int configfs_create_dir(struct config_item *item, struct dentry *dentry,
|
||||
struct configfs_fragment *frag)
|
||||
{
|
||||
int error;
|
||||
umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
|
||||
|
@ -273,7 +302,8 @@ static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
|
|||
return error;
|
||||
|
||||
error = configfs_make_dirent(p->d_fsdata, dentry, item, mode,
|
||||
CONFIGFS_DIR | CONFIGFS_USET_CREATING);
|
||||
CONFIGFS_DIR | CONFIGFS_USET_CREATING,
|
||||
frag);
|
||||
if (unlikely(error))
|
||||
return error;
|
||||
|
||||
|
@ -338,9 +368,10 @@ int configfs_create_link(struct configfs_symlink *sl,
|
|||
{
|
||||
int err = 0;
|
||||
umode_t mode = S_IFLNK | S_IRWXUGO;
|
||||
struct configfs_dirent *p = parent->d_fsdata;
|
||||
|
||||
err = configfs_make_dirent(parent->d_fsdata, dentry, sl, mode,
|
||||
CONFIGFS_ITEM_LINK);
|
||||
err = configfs_make_dirent(p, dentry, sl, mode,
|
||||
CONFIGFS_ITEM_LINK, p->s_frag);
|
||||
if (!err) {
|
||||
err = configfs_create(dentry, mode, init_symlink);
|
||||
if (err) {
|
||||
|
@ -599,7 +630,8 @@ static int populate_attrs(struct config_item *item)
|
|||
|
||||
static int configfs_attach_group(struct config_item *parent_item,
|
||||
struct config_item *item,
|
||||
struct dentry *dentry);
|
||||
struct dentry *dentry,
|
||||
struct configfs_fragment *frag);
|
||||
static void configfs_detach_group(struct config_item *item);
|
||||
|
||||
static void detach_groups(struct config_group *group)
|
||||
|
@ -647,7 +679,8 @@ static void detach_groups(struct config_group *group)
|
|||
* try using vfs_mkdir. Just a thought.
|
||||
*/
|
||||
static int create_default_group(struct config_group *parent_group,
|
||||
struct config_group *group)
|
||||
struct config_group *group,
|
||||
struct configfs_fragment *frag)
|
||||
{
|
||||
int ret;
|
||||
struct configfs_dirent *sd;
|
||||
|
@ -663,7 +696,7 @@ static int create_default_group(struct config_group *parent_group,
|
|||
d_add(child, NULL);
|
||||
|
||||
ret = configfs_attach_group(&parent_group->cg_item,
|
||||
&group->cg_item, child);
|
||||
&group->cg_item, child, frag);
|
||||
if (!ret) {
|
||||
sd = child->d_fsdata;
|
||||
sd->s_type |= CONFIGFS_USET_DEFAULT;
|
||||
|
@ -677,13 +710,14 @@ static int create_default_group(struct config_group *parent_group,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int populate_groups(struct config_group *group)
|
||||
static int populate_groups(struct config_group *group,
|
||||
struct configfs_fragment *frag)
|
||||
{
|
||||
struct config_group *new_group;
|
||||
int ret = 0;
|
||||
|
||||
list_for_each_entry(new_group, &group->default_groups, group_entry) {
|
||||
ret = create_default_group(group, new_group);
|
||||
ret = create_default_group(group, new_group, frag);
|
||||
if (ret) {
|
||||
detach_groups(group);
|
||||
break;
|
||||
|
@ -797,11 +831,12 @@ static void link_group(struct config_group *parent_group, struct config_group *g
|
|||
*/
|
||||
static int configfs_attach_item(struct config_item *parent_item,
|
||||
struct config_item *item,
|
||||
struct dentry *dentry)
|
||||
struct dentry *dentry,
|
||||
struct configfs_fragment *frag)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = configfs_create_dir(item, dentry);
|
||||
ret = configfs_create_dir(item, dentry, frag);
|
||||
if (!ret) {
|
||||
ret = populate_attrs(item);
|
||||
if (ret) {
|
||||
|
@ -831,12 +866,13 @@ static void configfs_detach_item(struct config_item *item)
|
|||
|
||||
static int configfs_attach_group(struct config_item *parent_item,
|
||||
struct config_item *item,
|
||||
struct dentry *dentry)
|
||||
struct dentry *dentry,
|
||||
struct configfs_fragment *frag)
|
||||
{
|
||||
int ret;
|
||||
struct configfs_dirent *sd;
|
||||
|
||||
ret = configfs_attach_item(parent_item, item, dentry);
|
||||
ret = configfs_attach_item(parent_item, item, dentry, frag);
|
||||
if (!ret) {
|
||||
sd = dentry->d_fsdata;
|
||||
sd->s_type |= CONFIGFS_USET_DIR;
|
||||
|
@ -852,7 +888,7 @@ static int configfs_attach_group(struct config_item *parent_item,
|
|||
*/
|
||||
inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
|
||||
configfs_adjust_dir_dirent_depth_before_populate(sd);
|
||||
ret = populate_groups(to_config_group(item));
|
||||
ret = populate_groups(to_config_group(item), frag);
|
||||
if (ret) {
|
||||
configfs_detach_item(item);
|
||||
d_inode(dentry)->i_flags |= S_DEAD;
|
||||
|
@ -1247,6 +1283,7 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
|
|||
struct configfs_dirent *sd;
|
||||
const struct config_item_type *type;
|
||||
struct module *subsys_owner = NULL, *new_item_owner = NULL;
|
||||
struct configfs_fragment *frag;
|
||||
char *name;
|
||||
|
||||
sd = dentry->d_parent->d_fsdata;
|
||||
|
@ -1265,6 +1302,12 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
|
|||
goto out;
|
||||
}
|
||||
|
||||
frag = new_fragment();
|
||||
if (!frag) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Get a working ref for the duration of this function */
|
||||
parent_item = configfs_get_config_item(dentry->d_parent);
|
||||
type = parent_item->ci_type;
|
||||
|
@ -1367,9 +1410,9 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
|
|||
spin_unlock(&configfs_dirent_lock);
|
||||
|
||||
if (group)
|
||||
ret = configfs_attach_group(parent_item, item, dentry);
|
||||
ret = configfs_attach_group(parent_item, item, dentry, frag);
|
||||
else
|
||||
ret = configfs_attach_item(parent_item, item, dentry);
|
||||
ret = configfs_attach_item(parent_item, item, dentry, frag);
|
||||
|
||||
spin_lock(&configfs_dirent_lock);
|
||||
sd->s_type &= ~CONFIGFS_USET_IN_MKDIR;
|
||||
|
@ -1406,6 +1449,7 @@ out_put:
|
|||
* reference.
|
||||
*/
|
||||
config_item_put(parent_item);
|
||||
put_fragment(frag);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
|
@ -1417,6 +1461,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
|
|||
struct config_item *item;
|
||||
struct configfs_subsystem *subsys;
|
||||
struct configfs_dirent *sd;
|
||||
struct configfs_fragment *frag;
|
||||
struct module *subsys_owner = NULL, *dead_item_owner = NULL;
|
||||
int ret;
|
||||
|
||||
|
@ -1474,6 +1519,16 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
|
|||
}
|
||||
} while (ret == -EAGAIN);
|
||||
|
||||
frag = sd->s_frag;
|
||||
if (down_write_killable(&frag->frag_sem)) {
|
||||
spin_lock(&configfs_dirent_lock);
|
||||
configfs_detach_rollback(dentry);
|
||||
spin_unlock(&configfs_dirent_lock);
|
||||
return -EINTR;
|
||||
}
|
||||
frag->frag_dead = true;
|
||||
up_write(&frag->frag_sem);
|
||||
|
||||
/* Get a working ref for the duration of this function */
|
||||
item = configfs_get_config_item(dentry);
|
||||
|
||||
|
@ -1574,7 +1629,7 @@ static int configfs_dir_open(struct inode *inode, struct file *file)
|
|||
*/
|
||||
err = -ENOENT;
|
||||
if (configfs_dirent_is_ready(parent_sd)) {
|
||||
file->private_data = configfs_new_dirent(parent_sd, NULL, 0);
|
||||
file->private_data = configfs_new_dirent(parent_sd, NULL, 0, NULL);
|
||||
if (IS_ERR(file->private_data))
|
||||
err = PTR_ERR(file->private_data);
|
||||
else
|
||||
|
@ -1732,8 +1787,13 @@ int configfs_register_group(struct config_group *parent_group,
|
|||
{
|
||||
struct configfs_subsystem *subsys = parent_group->cg_subsys;
|
||||
struct dentry *parent;
|
||||
struct configfs_fragment *frag;
|
||||
int ret;
|
||||
|
||||
frag = new_fragment();
|
||||
if (!frag)
|
||||
return -ENOMEM;
|
||||
|
||||
mutex_lock(&subsys->su_mutex);
|
||||
link_group(parent_group, group);
|
||||
mutex_unlock(&subsys->su_mutex);
|
||||
|
@ -1741,7 +1801,7 @@ int configfs_register_group(struct config_group *parent_group,
|
|||
parent = parent_group->cg_item.ci_dentry;
|
||||
|
||||
inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
|
||||
ret = create_default_group(parent_group, group);
|
||||
ret = create_default_group(parent_group, group, frag);
|
||||
if (ret)
|
||||
goto err_out;
|
||||
|
||||
|
@ -1749,12 +1809,14 @@ int configfs_register_group(struct config_group *parent_group,
|
|||
configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
|
||||
spin_unlock(&configfs_dirent_lock);
|
||||
inode_unlock(d_inode(parent));
|
||||
put_fragment(frag);
|
||||
return 0;
|
||||
err_out:
|
||||
inode_unlock(d_inode(parent));
|
||||
mutex_lock(&subsys->su_mutex);
|
||||
unlink_group(group);
|
||||
mutex_unlock(&subsys->su_mutex);
|
||||
put_fragment(frag);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(configfs_register_group);
|
||||
|
@ -1770,16 +1832,12 @@ void configfs_unregister_group(struct config_group *group)
|
|||
struct configfs_subsystem *subsys = group->cg_subsys;
|
||||
struct dentry *dentry = group->cg_item.ci_dentry;
|
||||
struct dentry *parent = group->cg_item.ci_parent->ci_dentry;
|
||||
struct configfs_dirent *sd = dentry->d_fsdata;
|
||||
struct configfs_fragment *frag = sd->s_frag;
|
||||
|
||||
mutex_lock(&subsys->su_mutex);
|
||||
if (!group->cg_item.ci_parent->ci_group) {
|
||||
/*
|
||||
* The parent has already been unlinked and detached
|
||||
* due to a rmdir.
|
||||
*/
|
||||
goto unlink_group;
|
||||
}
|
||||
mutex_unlock(&subsys->su_mutex);
|
||||
down_write(&frag->frag_sem);
|
||||
frag->frag_dead = true;
|
||||
up_write(&frag->frag_sem);
|
||||
|
||||
inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
|
||||
spin_lock(&configfs_dirent_lock);
|
||||
|
@ -1796,7 +1854,6 @@ void configfs_unregister_group(struct config_group *group)
|
|||
dput(dentry);
|
||||
|
||||
mutex_lock(&subsys->su_mutex);
|
||||
unlink_group:
|
||||
unlink_group(group);
|
||||
mutex_unlock(&subsys->su_mutex);
|
||||
}
|
||||
|
@ -1853,10 +1910,17 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
|
|||
struct dentry *dentry;
|
||||
struct dentry *root;
|
||||
struct configfs_dirent *sd;
|
||||
struct configfs_fragment *frag;
|
||||
|
||||
frag = new_fragment();
|
||||
if (!frag)
|
||||
return -ENOMEM;
|
||||
|
||||
root = configfs_pin_fs();
|
||||
if (IS_ERR(root))
|
||||
if (IS_ERR(root)) {
|
||||
put_fragment(frag);
|
||||
return PTR_ERR(root);
|
||||
}
|
||||
|
||||
if (!group->cg_item.ci_name)
|
||||
group->cg_item.ci_name = group->cg_item.ci_namebuf;
|
||||
|
@ -1872,7 +1936,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
|
|||
d_add(dentry, NULL);
|
||||
|
||||
err = configfs_attach_group(sd->s_element, &group->cg_item,
|
||||
dentry);
|
||||
dentry, frag);
|
||||
if (err) {
|
||||
BUG_ON(d_inode(dentry));
|
||||
d_drop(dentry);
|
||||
|
@ -1890,6 +1954,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
|
|||
unlink_group(group);
|
||||
configfs_release_fs();
|
||||
}
|
||||
put_fragment(frag);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -1899,12 +1964,18 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
|
|||
struct config_group *group = &subsys->su_group;
|
||||
struct dentry *dentry = group->cg_item.ci_dentry;
|
||||
struct dentry *root = dentry->d_sb->s_root;
|
||||
struct configfs_dirent *sd = dentry->d_fsdata;
|
||||
struct configfs_fragment *frag = sd->s_frag;
|
||||
|
||||
if (dentry->d_parent != root) {
|
||||
pr_err("Tried to unregister non-subsystem!\n");
|
||||
return;
|
||||
}
|
||||
|
||||
down_write(&frag->frag_sem);
|
||||
frag->frag_dead = true;
|
||||
up_write(&frag->frag_sem);
|
||||
|
||||
inode_lock_nested(d_inode(root),
|
||||
I_MUTEX_PARENT);
|
||||
inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
|
||||
|
|
Some files were not shown because too many files changed in this diff.