Merge branch '4.7-fixes' into mips-for-linux-next
This commit is contained in:
commit 4a89cf8101
@@ -45,7 +45,7 @@ is how we expect the compiler, application and kernel to work together.
 MPX-instrumented.
 3) The kernel detects that the CPU has MPX, allows the new prctl() to
 succeed, and notes the location of the bounds directory. Userspace is
-expected to keep the bounds directory at that locationWe note it
+expected to keep the bounds directory at that location. We note it
 instead of reading it each time because the 'xsave' operation needed
 to access the bounds directory register is an expensive operation.
 4) If the application needs to spill bounds out of the 4 registers, it
@@ -167,7 +167,7 @@ If a #BR is generated due to a bounds violation caused by MPX.
 We need to decode MPX instructions to get violation address and
 set this address into extended struct siginfo.
 
-The _sigfault feild of struct siginfo is extended as follow:
+The _sigfault field of struct siginfo is extended as follow:
 
 87 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
 88 struct {
@@ -240,5 +240,5 @@ them at the same bounds table.
 This is allowed architecturally. See more information "Intel(R) Architecture
 Instruction Set Extensions Programming Reference" (9.3.4).
 
-However, if users did this, the kernel might be fooled in to unmaping an
+However, if users did this, the kernel might be fooled in to unmapping an
 in-use bounds table since it does not recognize sharing.
@@ -5,7 +5,7 @@ memory, it has two choices:
 from areas other than the one we are trying to flush will be
 destroyed and must be refilled later, at some cost.
 2. Use the invlpg instruction to invalidate a single page at a
-time. This could potentialy cost many more instructions, but
+time. This could potentially cost many more instructions, but
 it is a much more precise operation, causing no collateral
 damage to other TLB entries.
 
@@ -19,7 +19,7 @@ Which method to do depends on a few things:
 work.
 3. The size of the TLB. The larger the TLB, the more collateral
 damage we do with a full flush. So, the larger the TLB, the
-more attrative an individual flush looks. Data and
+more attractive an individual flush looks. Data and
 instructions have separate TLBs, as do different page sizes.
 4. The microarchitecture. The TLB has become a multi-level
 cache on modern CPUs, and the global flushes have become more
@@ -36,7 +36,7 @@ between all CPUs.
 
 check_interval
 How often to poll for corrected machine check errors, in seconds
-(Note output is hexademical). Default 5 minutes. When the poller
+(Note output is hexadecimal). Default 5 minutes. When the poller
 finds MCEs it triggers an exponential speedup (poll more often) on
 the polling interval. When the poller stops finding MCEs, it
 triggers an exponential backoff (poll less often) on the polling
Makefile

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
@@ -80,12 +80,14 @@
 #define APM_CPU_PART_POTENZA 0x000
 
 #define CAVIUM_CPU_PART_THUNDERX 0x0A1
+#define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2
 
 #define BRCM_CPU_PART_VULCAN 0x516
 
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
 #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 
 #ifndef __ASSEMBLY__
 
@@ -117,6 +117,8 @@ struct pt_regs {
 };
 u64 orig_x0;
 u64 syscallno;
+u64 orig_addr_limit;
+u64 unused; // maintain 16 byte alignment
 };
 
 #define arch_has_single_step() (1)
@@ -60,6 +60,7 @@ int main(void)
 DEFINE(S_PC, offsetof(struct pt_regs, pc));
 DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0));
 DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
+DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
 DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
 BLANK();
 DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
@@ -98,6 +98,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 MIDR_RANGE(MIDR_THUNDERX, 0x00,
 (1 << MIDR_VARIANT_SHIFT) | 1),
 },
+{
+/* Cavium ThunderX, T81 pass 1.0 */
+.desc = "Cavium erratum 27456",
+.capability = ARM64_WORKAROUND_CAVIUM_27456,
+MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
+},
 #endif
 {
 }
@@ -28,6 +28,7 @@
 #include <asm/errno.h>
 #include <asm/esr.h>
+#include <asm/irq.h>
 #include <asm/memory.h>
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
 
@@ -97,7 +98,14 @@
 mov x29, xzr // fp pointed to user-space
 .else
 add x21, sp, #S_FRAME_SIZE
-.endif
+get_thread_info tsk
+/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+ldr x20, [tsk, #TI_ADDR_LIMIT]
+str x20, [sp, #S_ORIG_ADDR_LIMIT]
+mov x20, #TASK_SIZE_64
+str x20, [tsk, #TI_ADDR_LIMIT]
+ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
+.endif /* \el == 0 */
 mrs x22, elr_el1
 mrs x23, spsr_el1
 stp lr, x21, [sp, #S_LR]
@@ -128,6 +136,14 @@
 .endm
 
 .macro kernel_exit, el
+.if \el != 0
+/* Restore the task's original addr_limit. */
+ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
+str x20, [tsk, #TI_ADDR_LIMIT]
+
+/* No need to restore UAO, it will be restored from SPSR_EL1 */
+.endif
+
 ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
 .if \el == 0
 ct_user_enter
@@ -406,7 +422,6 @@ el1_irq:
 bl trace_hardirqs_off
 #endif
 
-get_thread_info tsk
 irq_handler
 
 #ifdef CONFIG_PREEMPT
@@ -280,7 +280,8 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 }
 
 if (permission_fault(esr) && (addr < USER_DS)) {
-if (get_fs() == KERNEL_DS)
+/* regs->orig_addr_limit may be 0 if we entered from EL0 */
+if (regs->orig_addr_limit == KERNEL_DS)
 die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
 
 if (!search_exception_tables(regs->pc))
@@ -1260,7 +1260,7 @@ static int octeon_irq_gpio_map(struct irq_domain *d,
 
 line = (hw + gpiod->base_hwirq) >> 6;
 bit = (hw + gpiod->base_hwirq) & 63;
-if (line > ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
+if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
 octeon_irq_ciu_to_irq[line][bit] != 0)
 return -EINVAL;
 
@@ -168,6 +168,7 @@ static inline unsigned int read_msa_##name(void) \
 unsigned int reg; \
 __asm__ __volatile__( \
 " .set push\n" \
+" .set fp=64\n" \
 " .set msa\n" \
 " cfcmsa %0, $" #cs "\n" \
 " .set pop\n" \
@@ -179,6 +180,7 @@ static inline void write_msa_##name(unsigned int val) \
 { \
 __asm__ __volatile__( \
 " .set push\n" \
+" .set fp=64\n" \
 " .set msa\n" \
 " ctcmsa $" #cs ", %0\n" \
 " .set pop\n" \
@@ -276,12 +276,7 @@ int r4k_clockevent_init(void)
 CLOCK_EVT_FEAT_C3STOP |
 CLOCK_EVT_FEAT_PERCPU;
 
-clockevent_set_clock(cd, mips_hpt_frequency);
-
-/* Calculate the min / max delta */
-cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
 min_delta = calculate_min_delta();
-cd->min_delta_ns = clockevent_delta2ns(min_delta, cd);
 
 cd->rating = 300;
 cd->irq = irq;
@@ -289,7 +284,7 @@ int r4k_clockevent_init(void)
 cd->set_next_event = mips_next_event;
 cd->event_handler = mips_event_handler;
 
-clockevents_register_device(cd);
+clockevents_config_and_register(cd, mips_hpt_frequency, min_delta, 0x7fffffff);
 
 if (cp0_timer_irq_installed)
 return 0;
@@ -23,7 +23,7 @@ static struct clocksource clocksource_mips = {
 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static u64 notrace r4k_read_sched_clock(void)
+static u64 __maybe_unused notrace r4k_read_sched_clock(void)
 {
 return read_c0_count();
 }
@@ -82,7 +82,9 @@ int __init init_r4k_clocksource(void)
 
 clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
 
+#ifndef CONFIG_CPU_FREQ
 sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency);
+#endif
 
 return 0;
 }
@@ -704,6 +704,7 @@ asmlinkage void do_ov(struct pt_regs *regs)
 int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
 {
 struct siginfo si = { 0 };
+struct vm_area_struct *vma;
 
 switch (sig) {
 case 0:
@@ -744,7 +745,8 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
 si.si_addr = fault_addr;
 si.si_signo = sig;
 down_read(&current->mm->mmap_sem);
-if (find_vma(current->mm, (unsigned long)fault_addr))
+vma = find_vma(current->mm, (unsigned long)fault_addr);
+if (vma && (vma->vm_start <= (unsigned long)fault_addr))
 si.si_code = SEGV_ACCERR;
 else
 si.si_code = SEGV_MAPERR;
@@ -344,7 +344,7 @@ static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
 if (hw == ltq_eiu_irq[i])
 chip = &ltq_eiu_type;
 
-irq_set_chip_and_handler(hw, chip, handle_level_irq);
+irq_set_chip_and_handler(irq, chip, handle_level_irq);
 
 return 0;
 }
@@ -13,8 +13,8 @@
 #define SMBUS_PCI_REG64 0x64
 #define SMBUS_PCI_REGB4 0xb4
 
-#define HPET_MIN_CYCLES 64
-#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
+#define HPET_MIN_CYCLES 16
+#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES * 12)
 
 static DEFINE_SPINLOCK(hpet_lock);
 DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
@@ -157,14 +157,14 @@ static int hpet_tick_resume(struct clock_event_device *evt)
 static int hpet_next_event(unsigned long delta,
 struct clock_event_device *evt)
 {
-unsigned int cnt;
-int res;
+u32 cnt;
+s32 res;
 
 cnt = hpet_read(HPET_COUNTER);
-cnt += delta;
+cnt += (u32) delta;
 hpet_write(HPET_T0_CMP, cnt);
 
-res = (int)(cnt - hpet_read(HPET_COUNTER));
+res = (s32)(cnt - hpet_read(HPET_COUNTER));
 
 return res < HPET_MIN_CYCLES ? -ETIME : 0;
 }
@@ -230,7 +230,7 @@ void __init setup_hpet_timer(void)
 
 cd = &per_cpu(hpet_clockevent_device, cpu);
 cd->name = "hpet";
-cd->rating = 320;
+cd->rating = 100;
 cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
 cd->set_state_shutdown = hpet_set_state_shutdown;
 cd->set_state_periodic = hpet_set_state_periodic;
@@ -161,7 +161,7 @@ static void rm7k_tc_disable(void)
 local_irq_save(flags);
 blast_rm7k_tcache();
 clear_c0_config(RM7K_CONF_TE);
-local_irq_save(flags);
+local_irq_restore(flags);
 }
 
 static void rm7k_sc_disable(void)
@@ -1199,7 +1199,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
 
 memset(&ctx, 0, sizeof(ctx));
 
-ctx.offsets = kcalloc(fp->len, sizeof(*ctx.offsets), GFP_KERNEL);
+ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
 if (ctx.offsets == NULL)
 return;
 
@@ -2319,7 +2319,7 @@ void
 perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 struct stack_frame frame;
-const void __user *fp;
+const unsigned long __user *fp;
 
 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
 /* TODO: We don't support guest os callchain now */
@@ -2332,7 +2332,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
 return;
 
-fp = (void __user *)regs->bp;
+fp = (unsigned long __user *)regs->bp;
 
 perf_callchain_store(entry, regs->ip);
 
@@ -2345,16 +2345,17 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 pagefault_disable();
 while (entry->nr < entry->max_stack) {
 unsigned long bytes;
+
 frame.next_frame = NULL;
 frame.return_address = 0;
 
-if (!access_ok(VERIFY_READ, fp, 16))
+if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
 break;
 
-bytes = __copy_from_user_nmi(&frame.next_frame, fp, 8);
+bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
 if (bytes != 0)
 break;
-bytes = __copy_from_user_nmi(&frame.return_address, fp+8, 8);
+bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
 if (bytes != 0)
 break;
 
@@ -1,8 +1,8 @@
 obj-$(CONFIG_CPU_SUP_INTEL) += core.o bts.o cqm.o
 obj-$(CONFIG_CPU_SUP_INTEL) += ds.o knc.o
 obj-$(CONFIG_CPU_SUP_INTEL) += lbr.o p4.o p6.o pt.o
-obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl.o
-intel-rapl-objs := rapl.o
+obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl-perf.o
+intel-rapl-perf-objs := rapl.o
 obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += intel-uncore.o
 intel-uncore-objs := uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o
 obj-$(CONFIG_PERF_EVENTS_INTEL_CSTATE) += intel-cstate.o
@@ -115,6 +115,10 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
 
+/*
+ * When HT is off these events can only run on the bottom 4 counters
+ * When HT is on, they are impacted by the HT bug and require EXCL access
+ */
 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -139,6 +143,10 @@ static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 
+/*
+ * When HT is off these events can only run on the bottom 4 counters
+ * When HT is on, they are impacted by the HT bug and require EXCL access
+ */
 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -182,6 +190,16 @@ struct event_constraint intel_skl_event_constraints[] = {
 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
+
+/*
+ * when HT is off, these can only run on the bottom 4 counters
+ */
+INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
+INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
+INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
+INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
+INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */
+
 EVENT_CONSTRAINT_END
 };
 
@@ -250,6 +268,10 @@ static struct event_constraint intel_hsw_event_constraints[] = {
 /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
 
+/*
+ * When HT is off these events can only run on the bottom 4 counters
+ * When HT is on, they are impacted by the HT bug and require EXCL access
+ */
 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -264,6 +286,13 @@ struct event_constraint intel_bdw_event_constraints[] = {
 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
 INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
+/*
+ * when HT is off, these can only run on the bottom 4 counters
+ */
+INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
+INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
+INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
+INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
 EVENT_CONSTRAINT_END
 };
 
@@ -71,8 +71,8 @@ int amd_cache_northbridges(void)
 while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
 i++;
 
-if (i == 0)
-return 0;
+if (!i)
+return -ENODEV;
 
 nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
 if (!nb)
@@ -396,6 +396,7 @@ int __init pci_acpi_init(void)
 return -ENODEV;
 
 printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
+acpi_irq_penalty_init();
 pcibios_enable_irq = acpi_pci_irq_enable;
 pcibios_disable_irq = acpi_pci_irq_disable;
 x86_init.pci.init_irq = x86_init_noop;
@@ -19,6 +19,7 @@
 #include <asm/mtrr.h>
+#include <asm/sections.h>
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
 
 /* Defined in hibernate_asm_64.S */
 extern asmlinkage __visible int restore_image(void);
@@ -28,6 +29,7 @@ extern asmlinkage __visible int restore_image(void);
 * kernel's text (this value is passed in the image header).
 */
 unsigned long restore_jump_address __visible;
+unsigned long jump_address_phys;
 
 /*
 * Value of the cr3 register from before the hibernation (this value is passed
@@ -37,7 +39,43 @@ unsigned long restore_cr3 __visible;
 
 pgd_t *temp_level4_pgt __visible;
 
-void *relocated_restore_code __visible;
+unsigned long relocated_restore_code __visible;
+
+static int set_up_temporary_text_mapping(void)
+{
+pmd_t *pmd;
+pud_t *pud;
+
+/*
+ * The new mapping only has to cover the page containing the image
+ * kernel's entry point (jump_address_phys), because the switch over to
+ * it is carried out by relocated code running from a page allocated
+ * specifically for this purpose and covered by the identity mapping, so
+ * the temporary kernel text mapping is only needed for the final jump.
+ * Moreover, in that mapping the virtual address of the image kernel's
+ * entry point must be the same as its virtual address in the image
+ * kernel (restore_jump_address), so the image kernel's
+ * restore_registers() code doesn't find itself in a different area of
+ * the virtual address space after switching over to the original page
+ * tables used by the image kernel.
+ */
+pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+if (!pud)
+return -ENOMEM;
+
+pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
+if (!pmd)
+return -ENOMEM;
+
+set_pmd(pmd + pmd_index(restore_jump_address),
+__pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
+set_pud(pud + pud_index(restore_jump_address),
+__pud(__pa(pmd) | _KERNPG_TABLE));
+set_pgd(temp_level4_pgt + pgd_index(restore_jump_address),
+__pgd(__pa(pud) | _KERNPG_TABLE));
+
+return 0;
+}
 
 static void *alloc_pgt_page(void *context)
 {
@@ -59,9 +97,10 @@ static int set_up_temporary_mappings(void)
 if (!temp_level4_pgt)
 return -ENOMEM;
 
-/* It is safe to reuse the original kernel mapping */
-set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
-init_level4_pgt[pgd_index(__START_KERNEL_map)]);
+/* Prepare a temporary mapping for the kernel text */
+result = set_up_temporary_text_mapping();
+if (result)
+return result;
 
 /* Set up the direct mapping from scratch */
 for (i = 0; i < nr_pfn_mapped; i++) {
@@ -78,19 +117,50 @@ static int set_up_temporary_mappings(void)
 return 0;
 }
 
+static int relocate_restore_code(void)
+{
+pgd_t *pgd;
+pud_t *pud;
+
+relocated_restore_code = get_safe_page(GFP_ATOMIC);
+if (!relocated_restore_code)
+return -ENOMEM;
+
+memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);
+
+/* Make the page containing the relocated code executable */
+pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
+pud = pud_offset(pgd, relocated_restore_code);
+if (pud_large(*pud)) {
+set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
+} else {
+pmd_t *pmd = pmd_offset(pud, relocated_restore_code);
+
+if (pmd_large(*pmd)) {
+set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
+} else {
+pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);
+
+set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
+}
+}
+__flush_tlb_all();
+
+return 0;
+}
+
 int swsusp_arch_resume(void)
 {
 int error;
 
 /* We have got enough memory and from now on we cannot recover */
-if ((error = set_up_temporary_mappings()))
+error = set_up_temporary_mappings();
+if (error)
 return error;
 
-relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
-if (!relocated_restore_code)
-return -ENOMEM;
-memcpy(relocated_restore_code, &core_restore_code,
-&restore_registers - &core_restore_code);
+error = relocate_restore_code();
+if (error)
+return error;
 
 restore_image();
 return 0;
@@ -109,11 +179,12 @@ int pfn_is_nosave(unsigned long pfn)
 
 struct restore_data_record {
 unsigned long jump_address;
+unsigned long jump_address_phys;
 unsigned long cr3;
 unsigned long magic;
 };
 
-#define RESTORE_MAGIC 0x0123456789ABCDEFUL
+#define RESTORE_MAGIC 0x123456789ABCDEF0UL
 
 /**
 * arch_hibernation_header_save - populate the architecture specific part
@@ -126,7 +197,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
 
 if (max_size < sizeof(struct restore_data_record))
 return -EOVERFLOW;
-rdr->jump_address = restore_jump_address;
+rdr->jump_address = (unsigned long)&restore_registers;
+rdr->jump_address_phys = __pa_symbol(&restore_registers);
 rdr->cr3 = restore_cr3;
 rdr->magic = RESTORE_MAGIC;
 return 0;
@@ -142,6 +214,7 @@ int arch_hibernation_header_restore(void *addr)
 struct restore_data_record *rdr = addr;
 
 restore_jump_address = rdr->jump_address;
+jump_address_phys = rdr->jump_address_phys;
 restore_cr3 = rdr->cr3;
 return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
 }
@@ -44,9 +44,6 @@ ENTRY(swsusp_arch_suspend)
 pushfq
 popq pt_regs_flags(%rax)
 
-/* save the address of restore_registers */
-movq $restore_registers, %rax
-movq %rax, restore_jump_address(%rip)
 /* save cr3 */
 movq %cr3, %rax
 movq %rax, restore_cr3(%rip)
@@ -57,31 +54,34 @@
 ENDPROC(swsusp_arch_suspend)
 
 ENTRY(restore_image)
-/* switch to temporary page tables */
-movq $__PAGE_OFFSET, %rdx
-movq temp_level4_pgt(%rip), %rax
-subq %rdx, %rax
-movq %rax, %cr3
-/* Flush TLB */
-movq mmu_cr4_features(%rip), %rax
-movq %rax, %rdx
-andq $~(X86_CR4_PGE), %rdx
-movq %rdx, %cr4; # turn off PGE
-movq %cr3, %rcx; # flush TLB
-movq %rcx, %cr3;
-movq %rax, %cr4; # turn PGE back on
-
 /* prepare to jump to the image kernel */
-movq restore_jump_address(%rip), %rax
-movq restore_cr3(%rip), %rbx
+movq restore_jump_address(%rip), %r8
+movq restore_cr3(%rip), %r9
+
+/* prepare to switch to temporary page tables */
+movq temp_level4_pgt(%rip), %rax
+movq mmu_cr4_features(%rip), %rbx
 
 /* prepare to copy image data to their original locations */
 movq restore_pblist(%rip), %rdx
+
+/* jump to relocated restore code */
 movq relocated_restore_code(%rip), %rcx
 jmpq *%rcx
 
 /* code below has been relocated to a safe page */
 ENTRY(core_restore_code)
+/* switch to temporary page tables */
+movq $__PAGE_OFFSET, %rcx
+subq %rcx, %rax
+movq %rax, %cr3
+/* flush TLB */
+movq %rbx, %rcx
+andq $~(X86_CR4_PGE), %rcx
+movq %rcx, %cr4; # turn off PGE
+movq %cr3, %rcx; # flush TLB
+movq %rcx, %cr3;
+movq %rbx, %cr4; # turn PGE back on
 .Lloop:
 testq %rdx, %rdx
 jz .Ldone
@@ -96,24 +96,17 @@ ENTRY(core_restore_code)
 /* progress to the next pbe */
 movq pbe_next(%rdx), %rdx
 jmp .Lloop
+
 .Ldone:
 /* jump to the restore_registers address from the image header */
-jmpq *%rax
-/*
- * NOTE: This assumes that the boot kernel's text mapping covers the
- * image kernel's page containing restore_registers and the address of
- * this page is the same as in the image kernel's text mapping (it
- * should always be true, because the text mapping is linear, starting
- * from 0, and is supposed to cover the entire kernel text for every
- * kernel).
- *
- * code below belongs to the image kernel
- */
+jmpq *%r8
+
+/* code below belongs to the image kernel */
 .align PAGE_SIZE
 ENTRY(restore_registers)
 FRAME_BEGIN
 /* go back to the original page tables */
-movq %rbx, %cr3
+movq %r9, %cr3
 
 /* Flush TLB, including "global" things (vmalloc) */
 movq mmu_cr4_features(%rip), %rax
@@ -150,8 +150,10 @@ static int get_task_ioprio(struct task_struct *p)
 if (ret)
 goto out;
 ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
+task_lock(p);
 if (p->io_context)
 ret = p->io_context->ioprio;
+task_unlock(p);
 out:
 return ret;
 }
@@ -602,7 +602,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
 crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
 ret = n;
 out:
-acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !ret);
+acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, ret >= 0);
 return ret;
 }
 
@@ -672,7 +672,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
 crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
 ret = n;
 out:
-acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !ret);
+acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
 return n;
 }
 
@@ -46,6 +46,7 @@
 #include "acnamesp.h"
 #include "acdispat.h"
 #include "actables.h"
+#include "acinterp.h"
 
 #define _COMPONENT ACPI_NAMESPACE
 ACPI_MODULE_NAME("nsload")
@@ -78,6 +79,8 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
 
 ACPI_FUNCTION_TRACE(ns_load_table);
 
+acpi_ex_enter_interpreter();
+
 /*
 * Parse the table and load the namespace with all named
 * objects found within. Control methods are NOT parsed
@@ -89,7 +92,7 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
 */
 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
 if (ACPI_FAILURE(status)) {
-return_ACPI_STATUS(status);
+goto unlock_interp;
 }
 
 /* If table already loaded into namespace, just return */
@@ -130,6 +133,8 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
 
 unlock:
 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+unlock_interp:
+(void)acpi_ex_exit_interpreter();
 
 if (ACPI_FAILURE(status)) {
 return_ACPI_STATUS(status);
@@ -47,7 +47,6 @@
 #include "acparser.h"
 #include "acdispat.h"
 #include "actables.h"
-#include "acinterp.h"
 
 #define _COMPONENT ACPI_NAMESPACE
 ACPI_MODULE_NAME("nsparse")
@@ -171,8 +170,6 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
 
 ACPI_FUNCTION_TRACE(ns_parse_table);
 
-acpi_ex_enter_interpreter();
-
 /*
 * AML Parse, pass 1
 *
@@ -188,7 +185,7 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
 status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1,
 table_index, start_node);
 if (ACPI_FAILURE(status)) {
-goto error_exit;
+return_ACPI_STATUS(status);
 }
 
 /*
@@ -204,10 +201,8 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
 status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2,
 table_index, start_node);
 if (ACPI_FAILURE(status)) {
-goto error_exit;
+return_ACPI_STATUS(status);
 }
 
-error_exit:
-acpi_ex_exit_interpreter();
 return_ACPI_STATUS(status);
 }
@@ -470,6 +470,7 @@ static int acpi_irq_pci_sharing_penalty(int irq)
 {
 struct acpi_pci_link *link;
 int penalty = 0;
+int i;
 
 list_for_each_entry(link, &acpi_link_list, list) {
 /*
@@ -478,19 +479,15 @@ static int acpi_irq_pci_sharing_penalty(int irq)
 */
 if (link->irq.active && link->irq.active == irq)
 penalty += PIRQ_PENALTY_PCI_USING;
-else {
-int i;
-
+
 /*
- * If a link is inactive, penalize the IRQs it
- * might use, but not as severely.
+ * penalize the IRQs PCI might use, but not as severely.
 */
 for (i = 0; i < link->irq.possible_count; i++)
 if (link->irq.possible[i] == irq)
 penalty += PIRQ_PENALTY_PCI_POSSIBLE /
 link->irq.possible_count;
-}
 }
 
 return penalty;
 }
@@ -499,9 +496,6 @@ static int acpi_irq_get_penalty(int irq)
 {
 int penalty = 0;
 
-if (irq < ACPI_MAX_ISA_IRQS)
-penalty += acpi_isa_irq_penalty[irq];
-
 /*
 * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict
 * with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be
@@ -516,10 +510,49 @@ static int acpi_irq_get_penalty(int irq)
 penalty += PIRQ_PENALTY_PCI_USING;
 }
 
+if (irq < ACPI_MAX_ISA_IRQS)
+return penalty + acpi_isa_irq_penalty[irq];
+
 penalty += acpi_irq_pci_sharing_penalty(irq);
 return penalty;
 }
 
+int __init acpi_irq_penalty_init(void)
+{
+struct acpi_pci_link *link;
+int i;
+
+/*
+ * Update penalties to facilitate IRQ balancing.
+ */
+list_for_each_entry(link, &acpi_link_list, list) {
+
+/*
+ * reflect the possible and active irqs in the penalty table --
+ * useful for breaking ties.
+ */
+if (link->irq.possible_count) {
+int penalty =
+PIRQ_PENALTY_PCI_POSSIBLE /
+link->irq.possible_count;
+
+for (i = 0; i < link->irq.possible_count; i++) {
+if (link->irq.possible[i] < ACPI_MAX_ISA_IRQS)
+acpi_isa_irq_penalty[link->irq.
+possible[i]] +=
+penalty;
+}
+
+} else if (link->irq.active &&
+(link->irq.active < ACPI_MAX_ISA_IRQS)) {
+acpi_isa_irq_penalty[link->irq.active] +=
+PIRQ_PENALTY_PCI_POSSIBLE;
+}
+}
+
+return 0;
+}
+
 static int acpi_irq_balance = -1; /* 0: static, 1: balance */
 
 static int acpi_pci_link_allocate(struct acpi_pci_link *link)
@@ -207,6 +207,9 @@ struct blkfront_info
 struct blk_mq_tag_set tag_set;
 struct blkfront_ring_info *rinfo;
 unsigned int nr_rings;
+/* Save uncomplete reqs and bios for migration. */
+struct list_head requests;
+struct bio_list bio_list;
 };
 
 static unsigned int nr_minors;
@@ -2002,70 +2005,23 @@ static int blkif_recover(struct blkfront_info *info)
 {
 unsigned int i, r_index;
 struct request *req, *n;
-struct blk_shadow *copy;
 int rc;
 struct bio *bio, *cloned_bio;
-struct bio_list bio_list, merge_bio;
 unsigned int segs, offset;
 int pending, size;
 struct split_bio *split_bio;
-struct list_head requests;
 
 blkfront_gather_backend_features(info);
 segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
 blk_queue_max_segments(info->rq, segs);
-bio_list_init(&bio_list);
-INIT_LIST_HEAD(&requests);
 
 for (r_index = 0; r_index < info->nr_rings; r_index++) {
-struct blkfront_ring_info *rinfo;
-
-rinfo = &info->rinfo[r_index];
-/* Stage 1: Make a safe copy of the shadow state. */
-copy = kmemdup(rinfo->shadow, sizeof(rinfo->shadow),
-GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
-if (!copy)
-return -ENOMEM;
-
-/* Stage 2: Set up free list. */
-memset(&rinfo->shadow, 0, sizeof(rinfo->shadow));
-for (i = 0; i < BLK_RING_SIZE(info); i++)
-rinfo->shadow[i].req.u.rw.id = i+1;
-rinfo->shadow_free = rinfo->ring.req_prod_pvt;
-rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
+struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
 
 rc = blkfront_setup_indirect(rinfo);
-if (rc) {
-kfree(copy);
+if (rc)
 return rc;
-}
-
-for (i = 0; i < BLK_RING_SIZE(info); i++) {
-/* Not in use? */
-if (!copy[i].request)
-continue;
-
-/*
- * Get the bios in the request so we can re-queue them.
- */
-if (copy[i].request->cmd_flags &
-(REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
-/*
- * Flush operations don't contain bios, so
- * we need to requeue the whole request
- */
-list_add(&copy[i].request->queuelist, &requests);
-continue;
-}
-merge_bio.head = copy[i].request->bio;
-merge_bio.tail = copy[i].request->biotail;
-bio_list_merge(&bio_list, &merge_bio);
-copy[i].request->bio = NULL;
-blk_end_request_all(copy[i].request, 0);
-}
-
-kfree(copy);
 }
 xenbus_switch_state(info->xbdev, XenbusStateConnected);
 
 /* Now safe for us to use the shared ring */
@@ -2079,7 +2035,7 @@ static int blkif_recover(struct blkfront_info *info)
 kick_pending_request_queues(rinfo);
 }
 
-list_for_each_entry_safe(req, n, &requests, queuelist) {
+list_for_each_entry_safe(req, n, &info->requests, queuelist) {
 /* Requeue pending requests (flush or discard) */
 list_del_init(&req->queuelist);
 BUG_ON(req->nr_phys_segments > segs);
@@ -2087,7 +2043,7 @@ static int blkif_recover(struct blkfront_info *info)
 }
 blk_mq_kick_requeue_list(info->rq);
 
-while ((bio = bio_list_pop(&bio_list)) != NULL) {
+while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
 /* Traverse the list of pending bios and re-queue them */
 if (bio_segments(bio) > segs) {
 /*
@@ -2133,9 +2089,42 @@ static int blkfront_resume(struct xenbus_device *dev)
 {
 struct blkfront_info *info = dev_get_drvdata(&dev->dev);
 int err = 0;
+unsigned int i, j;
 
 dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
 
+bio_list_init(&info->bio_list);
+INIT_LIST_HEAD(&info->requests);
+for (i = 0; i < info->nr_rings; i++) {
+struct blkfront_ring_info *rinfo = &info->rinfo[i];
+struct bio_list merge_bio;
+struct blk_shadow *shadow = rinfo->shadow;
+
+for (j = 0; j < BLK_RING_SIZE(info); j++) {
+/* Not in use? */
+if (!shadow[j].request)
+continue;
+
+/*
+ * Get the bios in the request so we can re-queue them.
+ */
+if (shadow[j].request->cmd_flags &
+(REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
+/*
+ * Flush operations don't contain bios, so
+ * we need to requeue the whole request
+ */
+list_add(&shadow[j].request->queuelist, &info->requests);
+continue;
+}
+merge_bio.head = shadow[j].request->bio;
+merge_bio.tail = shadow[j].request->biotail;
+bio_list_merge(&info->bio_list, &merge_bio);
+shadow[j].request->bio = NULL;
+blk_mq_end_request(shadow[j].request, 0);
+}
+}
+
 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
 
 err = negotiate_mq(info);
@@ -173,7 +173,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 
 struct cpuidle_state *target_state = &drv->states[index];
 bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
-u64 time_start, time_end;
+ktime_t time_start, time_end;
 s64 diff;
 
 /*
@@ -195,13 +195,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 sched_idle_set_state(target_state);
 
 trace_cpu_idle_rcuidle(index, dev->cpu);
-time_start = local_clock();
+time_start = ns_to_ktime(local_clock());
 
 stop_critical_timings();
 entered_state = target_state->enter(dev, drv, index);
 start_critical_timings();
 
-time_end = local_clock();
+time_end = ns_to_ktime(local_clock());
 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
 /* The cpu is no longer idle or about to enter idle. */
@@ -217,11 +217,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 if (!cpuidle_state_is_coupled(drv, index))
 local_irq_enable();
 
-/*
- * local_clock() returns the time in nanosecond, let's shift
- * by 10 (divide by 1024) to have microsecond based time.
- */
-diff = (time_end - time_start) >> 10;
+diff = ktime_us_delta(time_end, time_start);
 if (diff > INT_MAX)
 diff = INT_MAX;
 
@@ -49,7 +49,7 @@ config GPIO_DEVRES
 
 config OF_GPIO
 def_bool y
-depends on OF || COMPILE_TEST
+depends on OF
 
 config GPIO_ACPI
 def_bool y
@@ -61,9 +61,8 @@ static unsigned sch_gpio_bit(struct sch_gpio *sch, unsigned gpio)
 return gpio % 8;
 }
 
-static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
+static int sch_gpio_reg_get(struct sch_gpio *sch, unsigned gpio, unsigned reg)
 {
-struct sch_gpio *sch = gpiochip_get_data(gc);
 unsigned short offset, bit;
 u8 reg_val;
 
@@ -75,10 +74,9 @@ static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
 return reg_val;
 }
 
-static void sch_gpio_reg_set(struct gpio_chip *gc, unsigned gpio, unsigned reg,
+static void sch_gpio_reg_set(struct sch_gpio *sch, unsigned gpio, unsigned reg,
 int val)
 {
-struct sch_gpio *sch = gpiochip_get_data(gc);
 unsigned short offset, bit;
 u8 reg_val;
 
@@ -98,14 +96,15 @@ static int sch_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num)
 struct sch_gpio *sch = gpiochip_get_data(gc);
 
 spin_lock(&sch->lock);
-sch_gpio_reg_set(gc, gpio_num, GIO, 1);
+sch_gpio_reg_set(sch, gpio_num, GIO, 1);
 spin_unlock(&sch->lock);
 return 0;
 }
 
 static int sch_gpio_get(struct gpio_chip *gc, unsigned gpio_num)
 {
-return sch_gpio_reg_get(gc, gpio_num, GLV);
+struct sch_gpio *sch = gpiochip_get_data(gc);
+return sch_gpio_reg_get(sch, gpio_num, GLV);
 }
 
 static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
@@ -113,7 +112,7 @@ static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
 struct sch_gpio *sch = gpiochip_get_data(gc);
 
 spin_lock(&sch->lock);
-sch_gpio_reg_set(gc, gpio_num, GLV, val);
+sch_gpio_reg_set(sch, gpio_num, GLV, val);
 spin_unlock(&sch->lock);
 }
 
@@ -123,7 +122,7 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num,
 struct sch_gpio *sch = gpiochip_get_data(gc);
 
 spin_lock(&sch->lock);
-sch_gpio_reg_set(gc, gpio_num, GIO, 0);
+sch_gpio_reg_set(sch, gpio_num, GIO, 0);
 spin_unlock(&sch->lock);
 
 /*
@@ -182,13 +181,13 @@ static int sch_gpio_probe(struct platform_device *pdev)
 * GPIO7 is configured by the CMC as SLPIOVR
 * Enable GPIO[9:8] core powered gpios explicitly
 */
-sch_gpio_reg_set(&sch->chip, 8, GEN, 1);
-sch_gpio_reg_set(&sch->chip, 9, GEN, 1);
+sch_gpio_reg_set(sch, 8, GEN, 1);
+sch_gpio_reg_set(sch, 9, GEN, 1);
 /*
 * SUS_GPIO[2:0] enabled by default
 * Enable SUS_GPIO3 resume powered gpio explicitly
 */
-sch_gpio_reg_set(&sch->chip, 13, GEN, 1);
+sch_gpio_reg_set(sch, 13, GEN, 1);
 break;
 
 case PCI_DEVICE_ID_INTEL_ITC_LPC:
@@ -28,6 +28,10 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
 if (!desc && gpio_is_valid(gpio))
 return -EPROBE_DEFER;
 
+err = gpiod_request(desc, label);
+if (err)
+return err;
+
 if (flags & GPIOF_OPEN_DRAIN)
 set_bit(FLAG_OPEN_DRAIN, &desc->flags);
 
@@ -37,10 +41,6 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
 if (flags & GPIOF_ACTIVE_LOW)
 set_bit(FLAG_ACTIVE_LOW, &desc->flags);
 
-err = gpiod_request(desc, label);
-if (err)
-return err;
-
 if (flags & GPIOF_DIR_IN)
 err = gpiod_direction_input(desc);
 else
@@ -1352,14 +1352,6 @@ static int __gpiod_request(struct gpio_desc *desc, const char *label)
 spin_lock_irqsave(&gpio_lock, flags);
 }
 done:
-if (status < 0) {
-/* Clear flags that might have been set by the caller before
- * requesting the GPIO.
- */
-clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
-clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
-clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
-}
 spin_unlock_irqrestore(&gpio_lock, flags);
 return status;
 }
@@ -2587,28 +2579,13 @@ struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(gpiod_get_optional);
 
-/**
- * gpiod_parse_flags - helper function to parse GPIO lookup flags
- * @desc: gpio to be setup
- * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
- * of_get_gpio_hog()
- *
- * Set the GPIO descriptor flags based on the given GPIO lookup flags.
- */
-static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
-{
-if (lflags & GPIO_ACTIVE_LOW)
-set_bit(FLAG_ACTIVE_LOW, &desc->flags);
-if (lflags & GPIO_OPEN_DRAIN)
-set_bit(FLAG_OPEN_DRAIN, &desc->flags);
-if (lflags & GPIO_OPEN_SOURCE)
-set_bit(FLAG_OPEN_SOURCE, &desc->flags);
-}
-
 /**
 * gpiod_configure_flags - helper function to configure a given GPIO
 * @desc: gpio whose value will be assigned
 * @con_id: function within the GPIO consumer
+* @lflags: gpio_lookup_flags - returned from of_find_gpio() or
+* of_get_gpio_hog()
 * @dflags: gpiod_flags - optional GPIO initialization flags
 *
 * Return 0 on success, -ENOENT if no GPIO has been assigned to the
@@ -2616,10 +2593,17 @@ static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
 * occurred while trying to acquire the GPIO.
 */
 static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
-enum gpiod_flags dflags)
+unsigned long lflags, enum gpiod_flags dflags)
 {
 int status;
 
+if (lflags & GPIO_ACTIVE_LOW)
+set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+if (lflags & GPIO_OPEN_DRAIN)
+set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+if (lflags & GPIO_OPEN_SOURCE)
+set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+
 /* No particular flag request, return here... */
 if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
 pr_debug("no flags found for %s\n", con_id);
@@ -2686,13 +2670,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
 return desc;
 }
 
-gpiod_parse_flags(desc, lookupflags);
-
 status = gpiod_request(desc, con_id);
 if (status < 0)
 return ERR_PTR(status);
 
-status = gpiod_configure_flags(desc, con_id, flags);
+status = gpiod_configure_flags(desc, con_id, lookupflags, flags);
 if (status < 0) {
 dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
 gpiod_put(desc);
@@ -2748,6 +2730,10 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
 if (IS_ERR(desc))
 return desc;
 
+ret = gpiod_request(desc, NULL);
+if (ret)
+return ERR_PTR(ret);
+
 if (active_low)
 set_bit(FLAG_ACTIVE_LOW, &desc->flags);
 
@@ -2758,10 +2744,6 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
 set_bit(FLAG_OPEN_SOURCE, &desc->flags);
 }
 
-ret = gpiod_request(desc, NULL);
-if (ret)
-return ERR_PTR(ret);
-
 return desc;
 }
 EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
@@ -2814,8 +2796,6 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
 chip = gpiod_to_chip(desc);
 hwnum = gpio_chip_hwgpio(desc);
 
-gpiod_parse_flags(desc, lflags);
-
 local_desc = gpiochip_request_own_desc(chip, hwnum, name);
 if (IS_ERR(local_desc)) {
 status = PTR_ERR(local_desc);
@@ -2824,7 +2804,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
 return status;
 }
 
-status = gpiod_configure_flags(desc, name, dflags);
+status = gpiod_configure_flags(desc, name, lflags, dflags);
 if (status < 0) {
 pr_err("setup of hog GPIO %s (chip %s, offset %d) failed, %d\n",
 name, chip->label, hwnum, status);
@@ -98,7 +98,6 @@
 #define PCIE_BUS_CLK 10000
 #define TCLK (PCIE_BUS_CLK / 10)
 
-#define CEILING_UCHAR(double) ((double-(uint8_t)(double)) > 0 ? (uint8_t)(double+1) : (uint8_t)(double))
 
 static const uint16_t polaris10_clock_stretcher_lookup_table[2][4] =
 { {600, 1050, 3, 0}, {600, 1050, 6, 1} };
@@ -733,7 +732,7 @@ static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
 table->Smio[level] |=
 data->mvdd_voltage_table.entries[level].smio_low;
 }
-table->SmioMask2 = data->vddci_voltage_table.mask_low;
+table->SmioMask2 = data->mvdd_voltage_table.mask_low;
 
 table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
 }
@@ -1807,27 +1806,25 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 
 ro = efuse * (max -min)/255 + min;
 
-/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset
- * there is a little difference in calculating
- * volt_with_cks with windows */
+/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
 for (i = 0; i < sclk_table->count; i++) {
 data->smc_state_table.Sclk_CKS_masterEn0_7 |=
 sclk_table->entries[i].cks_enable << i;
 if (hwmgr->chip_id == CHIP_POLARIS10) {
-volt_without_cks = (uint32_t)((2753594000 + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \
+volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \
 (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
-volt_with_cks = (uint32_t)((279720200 + sclk_table->entries[i].clk * 3232 - (ro - 65) * 100000000) / \
-(252248000 - sclk_table->entries[i].clk/100 * 115764));
+volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
+(2522480 - sclk_table->entries[i].clk/100 * 115764/100));
 } else {
-volt_without_cks = (uint32_t)((2416794800 + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \
-(2625416 - (sclk_table->entries[i].clk/100) * 12586807/10000));
-volt_with_cks = (uint32_t)((2999656000 + sclk_table->entries[i].clk * 392803/100 - (ro - 44) * 1000000) / \
-(3422454 - sclk_table->entries[i].clk/100 * 18886376/10000));
+volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \
+(2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
+volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
+(3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
 }
 
 if (volt_without_cks >= volt_with_cks)
-volt_offset = (uint8_t)CEILING_UCHAR((volt_without_cks - volt_with_cks +
-sclk_table->entries[i].cks_voffset) * 100 / 625);
+volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
 
 data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
 }
@@ -2685,7 +2682,7 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
 {
 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
 uint16_t vv_id;
-uint16_t vddc = 0;
+uint32_t vddc = 0;
 uint16_t i, j;
 uint32_t sclk = 0;
 struct phm_ppt_v1_information *table_info =
@@ -2716,8 +2713,9 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
 continue);
 
 
-/* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
-PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
+/* need to make sure vddc is less than 2v or else, it could burn the ASIC.
+ * real voltage level in unit of 0.01mv */
+PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0),
 "Invalid VDDC value", result = -EINVAL;);
 
 /* the voltage should not be zero nor equal to leakage ID */
@@ -1256,7 +1256,7 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
 }
 
 int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
-uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage)
+uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage)
 {
 
 int result;
@@ -1274,7 +1274,7 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
 if (0 != result)
 return result;
 
-*voltage = get_voltage_info_param_space.usVoltageLevel;
+*voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel;
 
 return result;
 }
@@ -305,7 +305,7 @@ extern int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, uint32_t
 extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
 uint8_t level);
 extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
-uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
+uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage);
 extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table);
 
 extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param);
@@ -1302,7 +1302,7 @@ static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
 table->Smio[count] |=
 data->mvdd_voltage_table.entries[count].smio_low;
 }
-table->SmioMask2 = data->vddci_voltage_table.mask_low;
+table->SmioMask2 = data->mvdd_voltage_table.mask_low;
 
 CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
 }
@@ -302,7 +302,7 @@ static int init_dpm_2_parameters(
 (((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usPPMTableOffset));
 
 if (0 != powerplay_table->usPPMTableOffset) {
-if (1 == get_platform_power_management_table(hwmgr, atom_ppm_table)) {
+if (get_platform_power_management_table(hwmgr, atom_ppm_table) == 0) {
 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
 PHM_PlatformCaps_EnablePlatformPowerManagement);
 }
@@ -40,7 +40,8 @@ static int
 gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
 {
 struct nvkm_device *device = outp->base.disp->engine.subdev.device;
-nvkm_mask(device, 0x61c110, 0x0f0f0f0f, 0x01010101 * pattern);
+const u32 soff = gf119_sor_soff(outp);
+nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, 0x01010101 * pattern);
 return 0;
 }
 
@@ -65,6 +65,14 @@ static void sun4i_crtc_disable(struct drm_crtc *crtc)
 DRM_DEBUG_DRIVER("Disabling the CRTC\n")
 
 sun4i_tcon_disable(drv->tcon);
+
+if (crtc->state->event && !crtc->state->active) {
+spin_lock_irq(&crtc->dev->event_lock);
+drm_crtc_send_vblank_event(crtc, crtc->state->event);
+spin_unlock_irq(&crtc->dev->event_lock);
+
+crtc->state->event = NULL;
+}
 }
 
 static void sun4i_crtc_enable(struct drm_crtc *crtc)
@@ -92,7 +92,7 @@ static struct drm_driver sun4i_drv_driver = {
 /* Frame Buffer Operations */
 
 /* VBlank Operations */
-.get_vblank_counter = drm_vblank_count,
+.get_vblank_counter = drm_vblank_no_hw_counter,
 .enable_vblank = sun4i_drv_enable_vblank,
 .disable_vblank = sun4i_drv_disable_vblank,
 };
@@ -310,6 +310,7 @@ static int sun4i_drv_probe(struct platform_device *pdev)
 
 count += sun4i_drv_add_endpoints(&pdev->dev, &match,
 pipeline);
+of_node_put(pipeline);
 
 DRM_DEBUG_DRIVER("Queued %d outputs on pipeline %d\n",
 count, i);
@@ -1568,13 +1568,23 @@ static int __init amd_iommu_init_pci(void)
 break;
 }
 
+/*
+ * Order is important here to make sure any unity map requirements are
+ * fulfilled. The unity mappings are created and written to the device
+ * table during the amd_iommu_init_api() call.
+ *
+ * After that we call init_device_table_dma() to make sure any
+ * uninitialized DTE will block DMA, and in the end we flush the caches
+ * of all IOMMUs to make sure the changes to the device table are
+ * active.
+ */
+ret = amd_iommu_init_api();
+
 init_device_table_dma();
 
 for_each_iommu(iommu)
 iommu_flush_all_caches(iommu);
 
-ret = amd_iommu_init_api();
-
 if (!ret)
 print_iommu_info();
 
@@ -4602,13 +4602,13 @@ static void free_all_cpu_cached_iovas(unsigned int cpu)
for (i = 0; i < g_num_of_iommus; i++) {
struct intel_iommu *iommu = g_iommus[i];
struct dmar_domain *domain;
u16 did;
int did;

if (!iommu)
continue;

for (did = 0; did < cap_ndoms(iommu->cap); did++) {
domain = get_iommu_domain(iommu, did);
domain = get_iommu_domain(iommu, (u16)did);

if (!domain)
continue;

@@ -718,7 +718,7 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,

spin_lock_irqsave(&gic_lock, flags);
gic_map_to_pin(intr, gic_cpu_pin);
gic_map_to_vpe(intr, vpe);
gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
clear_bit(intr, pcpu_masks[i].pcpu_mask);
set_bit(intr, pcpu_masks[vpe].pcpu_mask);
@@ -959,7 +959,7 @@ int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
switch (bus_token) {
case DOMAIN_BUS_IPI:
is_ipi = d->bus_token == bus_token;
return to_of_node(d->fwnode) == node && is_ipi;
return (!node || to_of_node(d->fwnode) == node) && is_ipi;
break;
default:
return 0;

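The gic_ipi_domain_match() change makes a NULL device node act as a wildcard: callers that do not care which node provides the IPI domain still get a match. A hedged sketch of the same guard in plain C, with simplified stand-in types:

    #include <stdbool.h>
    #include <stddef.h>

    struct node;

    /* Match when the caller passed no node (wildcard) or the exact node. */
    static bool ipi_domain_matches(const struct node *domain_node,
                                   const struct node *wanted, bool is_ipi)
    {
            return (!wanted || domain_node == wanted) && is_ipi;
    }
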
@@ -101,11 +101,14 @@ enum ad_link_speed_type {
#define MAC_ADDRESS_EQUAL(A, B) \
ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)

static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } };
static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
0, 0, 0, 0, 0, 0
};
static u16 ad_ticks_per_sec;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;

static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
MULTICAST_LACPDU_ADDR;

/* ================= main 802.3ad protocol functions ================== */
static int ad_lacpdu_send(struct port *port);
@@ -1739,7 +1742,7 @@ static void ad_clear_agg(struct aggregator *aggregator)
aggregator->is_individual = false;
aggregator->actor_admin_aggregator_key = 0;
aggregator->actor_oper_aggregator_key = 0;
aggregator->partner_system = null_mac_addr;
eth_zero_addr(aggregator->partner_system.mac_addr_value);
aggregator->partner_system_priority = 0;
aggregator->partner_oper_aggregator_key = 0;
aggregator->receive_state = 0;
@@ -1761,7 +1764,7 @@ static void ad_initialize_agg(struct aggregator *aggregator)
if (aggregator) {
ad_clear_agg(aggregator);

aggregator->aggregator_mac_address = null_mac_addr;
eth_zero_addr(aggregator->aggregator_mac_address.mac_addr_value);
aggregator->aggregator_identifier = 0;
aggregator->slave = NULL;
}

@@ -42,13 +42,10 @@

#ifndef __long_aligned
#define __long_aligned __attribute__((aligned((sizeof(long)))))
#endif
static const u8 mac_bcast[ETH_ALEN] __long_aligned = {
static const u8 mac_bcast[ETH_ALEN + 2] __long_aligned = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
static const u8 mac_v6_allmcast[ETH_ALEN] __long_aligned = {
static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = {
0x33, 0x33, 0x00, 0x00, 0x00, 0x01
};
static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
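Both bonding files pad the static MAC arrays to ETH_ALEN + 2 bytes and align them to sizeof(long). The reason is that helpers in the style of ether_addr_equal_64bits() read a full 8-byte word, so a bare 6-byte array could be read past its end. A user-space sketch of the layout requirement; the compare helper below is an illustrative little-endian stand-in, not the kernel's implementation (which handles big-endian separately):

    #include <stdint.h>
    #include <string.h>

    #define ETH_ALEN 6

    /* Two bytes of tail padding keep an 8-byte load inside the object. */
    static const uint8_t mac_bcast[ETH_ALEN + 2]
            __attribute__((aligned(sizeof(long)))) =
            { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

    static int mac_equal_64bits(const uint8_t *a, const uint8_t *b)
    {
            uint64_t wa, wb;

            memcpy(&wa, a, sizeof(wa)); /* reads 8 bytes, incl. padding */
            memcpy(&wb, b, sizeof(wb));
            /* on little-endian, shifting out the top 16 bits drops the
             * two padding bytes from the comparison */
            return ((wa ^ wb) << 16) == 0;
    }
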
@@ -1584,6 +1584,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}

/* check for initial state */
new_slave->link = BOND_LINK_NOCHANGE;
if (bond->params.miimon) {
if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
if (bond->params.updelay) {

@@ -392,7 +392,7 @@ static void bcm_sysport_get_stats(struct net_device *dev,
else
p = (char *)priv;
p += s->stat_offset;
data[i] = *(u32 *)p;
data[i] = *(unsigned long *)p;
}
}

@@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__

#define T4FW_VERSION_MAJOR 0x01
#define T4FW_VERSION_MINOR 0x0E
#define T4FW_VERSION_MICRO 0x04
#define T4FW_VERSION_MINOR 0x0F
#define T4FW_VERSION_MICRO 0x25
#define T4FW_VERSION_BUILD 0x00

#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00

#define T5FW_VERSION_MAJOR 0x01
#define T5FW_VERSION_MINOR 0x0E
#define T5FW_VERSION_MICRO 0x04
#define T5FW_VERSION_MINOR 0x0F
#define T5FW_VERSION_MICRO 0x25
#define T5FW_VERSION_BUILD 0x00

#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00

#define T6FW_VERSION_MAJOR 0x01
#define T6FW_VERSION_MINOR 0x0E
#define T6FW_VERSION_MICRO 0x04
#define T6FW_VERSION_MINOR 0x0F
#define T6FW_VERSION_MICRO 0x25
#define T6FW_VERSION_BUILD 0x00

#define T6FW_MIN_VERSION_MAJOR 0x00

@@ -154,16 +154,6 @@ void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
writel(val, hw->hw_addr + reg);
}

static bool e1000e_vlan_used(struct e1000_adapter *adapter)
{
u16 vid;

for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
return true;

return false;
}

/**
* e1000_regdump - register printout routine
* @hw: pointer to the HW structure
@@ -3453,8 +3443,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev)

ew32(RCTL, rctl);

if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX ||
e1000e_vlan_used(adapter))
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
e1000e_vlan_strip_enable(adapter);
else
e1000e_vlan_strip_disable(adapter);
@@ -6926,6 +6915,14 @@ static netdev_features_t e1000_fix_features(struct net_device *netdev,
if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
features &= ~NETIF_F_RXFCS;

/* Since there is no support for separate Rx/Tx vlan accel
* enable/disable make sure Tx flag is always in same state as Rx.
*/
if (features & NETIF_F_HW_VLAN_CTAG_RX)
features |= NETIF_F_HW_VLAN_CTAG_TX;
else
features &= ~NETIF_F_HW_VLAN_CTAG_TX;

return features;
}

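The e1000_fix_features() hunk shows the usual way to couple two netdev feature bits when the hardware cannot toggle them independently: normalize one flag from the other inside the fix_features hook, before the stack commits the feature set. A minimal stand-alone sketch of that normalization (the flag values are illustrative, not the kernel's):

    #include <stdint.h>

    #define F_VLAN_RX (1u << 0)
    #define F_VLAN_TX (1u << 1)

    /* Rx VLAN stripping state dictates the Tx flag. */
    static uint32_t fix_vlan_features(uint32_t features)
    {
            if (features & F_VLAN_RX)
                    features |= F_VLAN_TX;
            else
                    features &= ~F_VLAN_TX;
            return features;
    }
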
@@ -85,7 +85,7 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = -IXGBE_ERR_MBX;
s32 ret_val = IXGBE_ERR_MBX;

if (!mbx->ops.read)
goto out;
@@ -111,7 +111,7 @@ out:
static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = -IXGBE_ERR_MBX;
s32 ret_val = IXGBE_ERR_MBX;

/* exit if either we can't write or there isn't a defined timeout */
if (!mbx->ops.write || !mbx->timeout)

@@ -3458,6 +3458,8 @@ static int mvneta_open(struct net_device *dev)
return 0;

err_free_irq:
unregister_cpu_notifier(&pp->cpu_notifier);
on_each_cpu(mvneta_percpu_disable, pp, true);
free_percpu_irq(pp->dev->irq, pp->ports);
err_cleanup_txqs:
mvneta_cleanup_txqs(pp);

@@ -295,6 +295,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
case MLX5_CMD_OP_2ERR_QP:
case MLX5_CMD_OP_2RST_QP:
case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
return MLX5_CMD_STAT_OK;

case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -321,8 +327,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_RTR2RTS_QP:
case MLX5_CMD_OP_RTS2RTS_QP:
case MLX5_CMD_OP_SQERR2RTS_QP:
case MLX5_CMD_OP_2ERR_QP:
case MLX5_CMD_OP_2RST_QP:
case MLX5_CMD_OP_QUERY_QP:
case MLX5_CMD_OP_SQD_RTS_QP:
case MLX5_CMD_OP_INIT2INIT_QP:
@@ -342,7 +346,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
case MLX5_CMD_OP_SET_ROCE_ADDRESS:
case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
@@ -390,11 +393,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_CREATE_RQT:
case MLX5_CMD_OP_MODIFY_RQT:
case MLX5_CMD_OP_QUERY_RQT:

case MLX5_CMD_OP_CREATE_FLOW_TABLE:
case MLX5_CMD_OP_QUERY_FLOW_TABLE:
case MLX5_CMD_OP_CREATE_FLOW_GROUP:
case MLX5_CMD_OP_QUERY_FLOW_GROUP:
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:

case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
@@ -602,11 +606,36 @@ static void dump_command(struct mlx5_core_dev *dev,
pr_debug("\n");
}

static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);

return be16_to_cpu(hdr->opcode);
}

static void cb_timeout_handler(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work, struct delayed_work,
work);
struct mlx5_cmd_work_ent *ent = container_of(dwork,
struct mlx5_cmd_work_ent,
cb_timeout_work);
struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
cmd);

ent->ret = -ETIMEDOUT;
mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
mlx5_command_str(msg_to_opcode(ent->in)),
msg_to_opcode(ent->in));
mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
}

static void cmd_work_handler(struct work_struct *work)
{
struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
struct mlx5_cmd *cmd = ent->cmd;
struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
struct mlx5_cmd_layout *lay;
struct semaphore *sem;
unsigned long flags;
@@ -647,6 +676,9 @@ static void cmd_work_handler(struct work_struct *work)
dump_command(dev, ent, 1);
ent->ts1 = ktime_get_ns();

if (ent->callback)
schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);

/* ring doorbell after the descriptor is valid */
mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
wmb();
@@ -691,13 +723,6 @@ static const char *deliv_status_to_str(u8 status)
}
}

static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);

return be16_to_cpu(hdr->opcode);
}

static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
@@ -706,13 +731,13 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)

if (cmd->mode == CMD_MODE_POLLING) {
wait_for_completion(&ent->done);
err = ent->ret;
} else {
if (!wait_for_completion_timeout(&ent->done, timeout))
err = -ETIMEDOUT;
else
err = 0;
} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
ent->ret = -ETIMEDOUT;
mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
}

err = ent->ret;

if (err == -ETIMEDOUT) {
mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
mlx5_command_str(msg_to_opcode(ent->in)),
@@ -761,6 +786,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
if (!callback)
init_completion(&ent->done);

INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
INIT_WORK(&ent->work, cmd_work_handler);
if (page_queue) {
cmd_work_handler(&ent->work);
@@ -770,10 +796,12 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
goto out_free;
}

if (!callback) {
if (callback)
goto out;

err = wait_func(dev, ent);
if (err == -ETIMEDOUT)
goto out;
goto out_free;

ds = ent->ts2 - ent->ts1;
op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
@@ -788,10 +816,6 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
"fw exec time for %s is %lld nsec\n",
mlx5_command_str(op), ds);
*status = ent->status;
free_cmd(ent);
}

return err;

out_free:
free_cmd(ent);
@@ -1181,41 +1205,30 @@ err_dbg:
return err;
}

void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
struct mlx5_cmd *cmd = &dev->cmd;
int i;

for (i = 0; i < cmd->max_reg_cmds; i++)
down(&cmd->sem);

down(&cmd->pages_sem);

flush_workqueue(cmd->wq);

cmd->mode = CMD_MODE_EVENTS;
cmd->mode = mode;

up(&cmd->pages_sem);
for (i = 0; i < cmd->max_reg_cmds; i++)
up(&cmd->sem);
}

void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
struct mlx5_cmd *cmd = &dev->cmd;
int i;

for (i = 0; i < cmd->max_reg_cmds; i++)
down(&cmd->sem);

down(&cmd->pages_sem);

flush_workqueue(cmd->wq);
cmd->mode = CMD_MODE_POLLING;

up(&cmd->pages_sem);
for (i = 0; i < cmd->max_reg_cmds; i++)
up(&cmd->sem);
mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
@@ -1251,6 +1264,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
struct semaphore *sem;

ent = cmd->ent_arr[i];
if (ent->callback)
cancel_delayed_work(&ent->cb_timeout_work);
if (ent->page_queue)
sem = &cmd->pages_sem;
else

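The mlx5_cmd_change_mod() hunk above is a plain deduplication: the events and polling mode switchers differed only in the mode constant they wrote, so both now call a single helper that quiesces the command interface and flips the mode. A sketch of the shape of that refactor, with locking elided and illustrative names:

    enum cmd_mode { CMD_MODE_POLLING, CMD_MODE_EVENTS };

    struct cmd_iface { enum cmd_mode mode; };

    static void cmd_change_mode(struct cmd_iface *cmd, enum cmd_mode mode)
    {
            /* quiesce outstanding commands here, then flip the mode */
            cmd->mode = mode;
    }

    static void cmd_use_events(struct cmd_iface *cmd)
    {
            cmd_change_mode(cmd, CMD_MODE_EVENTS);
    }

    static void cmd_use_polling(struct cmd_iface *cmd)
    {
            cmd_change_mode(cmd, CMD_MODE_POLLING);
    }
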
@@ -145,7 +145,6 @@ struct mlx5e_umr_wqe {

#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */
#endif

struct mlx5e_params {
@@ -191,6 +190,7 @@ struct mlx5e_tstamp {
enum {
MLX5E_RQ_STATE_POST_WQES_ENABLE,
MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
MLX5E_RQ_STATE_FLUSH_TIMEOUT,
};

struct mlx5e_cq {
@@ -220,6 +220,8 @@ typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
u16 ix);

typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);

struct mlx5e_dma_info {
struct page *page;
dma_addr_t addr;
@@ -241,6 +243,7 @@ struct mlx5e_rq {
struct mlx5e_cq cq;
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_alloc_wqe alloc_wqe;
mlx5e_fp_dealloc_wqe dealloc_wqe;

unsigned long state;
int ix;
@@ -305,6 +308,7 @@ struct mlx5e_sq_dma {
enum {
MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
MLX5E_SQ_STATE_BF_ENABLE,
MLX5E_SQ_STATE_TX_TIMEOUT,
};

struct mlx5e_ico_wqe_info {
@@ -538,6 +542,7 @@ struct mlx5e_priv {
struct workqueue_struct *wq;
struct work_struct update_carrier_work;
struct work_struct set_rx_mode_work;
struct work_struct tx_timeout_work;
struct delayed_work update_stats_work;

struct mlx5_core_dev *mdev;
@@ -589,12 +594,16 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);

void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,

@@ -96,7 +96,7 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
break;
case IEEE_8021QAZ_TSA_ETS:
tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX5E_MIN_BW_ALLOC;
tc_tx_bw[i] = ets->tc_tx_bw[i];
break;
}
}
@@ -140,9 +140,13 @@ static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets)

/* Validate Bandwidth Sum */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
if (!ets->tc_tx_bw[i])
return -EINVAL;

bw_sum += ets->tc_tx_bw[i];
}
}

if (bw_sum != 0 && bw_sum != 100)
return -EINVAL;

@@ -39,6 +39,13 @@
#include "eswitch.h"
#include "vxlan.h"

enum {
MLX5_EN_QP_FLUSH_TIMEOUT_MS = 5000,
MLX5_EN_QP_FLUSH_MSLEEP_QUANT = 20,
MLX5_EN_QP_FLUSH_MAX_ITER = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
};

struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
struct mlx5_wq_param wq;
@@ -74,10 +81,13 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv)
port_state = mlx5_query_vport_state(mdev,
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

if (port_state == VPORT_STATE_UP)
if (port_state == VPORT_STATE_UP) {
netdev_info(priv->netdev, "Link up\n");
netif_carrier_on(priv->netdev);
else
} else {
netdev_info(priv->netdev, "Link down\n");
netif_carrier_off(priv->netdev);
}
}

static void mlx5e_update_carrier_work(struct work_struct *work)
@@ -91,6 +101,26 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}

static void mlx5e_tx_timeout_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
tx_timeout_work);
int err;

rtnl_lock();
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
mlx5e_close_locked(priv->netdev);
err = mlx5e_open_locked(priv->netdev);
if (err)
netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
err);
unlock:
mutex_unlock(&priv->state_lock);
rtnl_unlock();
}

static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -305,6 +335,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
}
rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
@@ -320,6 +351,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
}
rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
rq->alloc_wqe = mlx5e_alloc_rx_wqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

rq->wqe_sz = (priv->params.lro_en) ?
priv->params.lro_wqe_sz :
@@ -525,17 +557,25 @@ err_destroy_rq:

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
int tout = 0;
int err;

clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */

mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
while (!mlx5_wq_ll_is_empty(&rq->wq))
msleep(20);
err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);

if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);

/* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
napi_synchronize(&rq->channel->napi);

mlx5e_disable_rq(rq);
mlx5e_free_rx_descs(rq);
mlx5e_destroy_rq(rq);
}

@@ -782,6 +822,9 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)

static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
int tout = 0;
int err;

if (sq->txq) {
clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
/* prevent netif_tx_wake_queue */
@@ -792,15 +835,24 @@ static void mlx5e_close_sq(struct mlx5e_sq *sq)
if (mlx5e_sq_has_room_for(sq, 1))
mlx5e_send_nop(sq, true);

mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
MLX5_SQC_STATE_ERR);
if (err)
set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
}

while (sq->cc != sq->pc) /* wait till sq is empty */
msleep(20);
/* wait till sq is empty, unless a TX timeout occurred on this SQ */
while (sq->cc != sq->pc &&
!test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
}

/* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
napi_synchronize(&sq->channel->napi);

mlx5e_free_tx_descs(sq);
mlx5e_disable_sq(sq);
mlx5e_destroy_sq(sq);
}

@@ -1658,8 +1710,11 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)

netdev_set_num_tc(netdev, ntc);

/* Map netdev TCs to offset 0
* We have our own UP to TXQ mapping for QoS
*/
for (tc = 0; tc < ntc; tc++)
netdev_set_tc_queue(netdev, tc, nch, tc * nch);
netdev_set_tc_queue(netdev, tc, nch, 0);
}

int mlx5e_open_locked(struct net_device *netdev)
@@ -2590,6 +2645,29 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
return features;
}

static void mlx5e_tx_timeout(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
bool sched_work = false;
int i;

netdev_err(dev, "TX timeout detected\n");

for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
struct mlx5e_sq *sq = priv->txq_to_sq_map[i];

if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
continue;
sched_work = true;
set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
}

if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
schedule_work(&priv->tx_timeout_work);
}

static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
@@ -2607,6 +2685,7 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
.ndo_tx_timeout = mlx5e_tx_timeout,
};

static const struct net_device_ops mlx5e_netdev_ops_sriov = {
@@ -2636,6 +2715,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
.ndo_get_vf_config = mlx5e_get_vf_config,
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
.ndo_tx_timeout = mlx5e_tx_timeout,
};

static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -2838,6 +2918,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,

INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}

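The MLX5_EN_QP_FLUSH_* constants introduced above encode a bounded polling loop: total timeout divided by sleep quantum gives an iteration budget, so the close path can never spin forever on a wedged queue. The shape of that loop in isolation, as a user-space sketch (the emptiness predicate is a stand-in):

    #include <stdbool.h>
    #include <unistd.h>

    #define FLUSH_TIMEOUT_MS 5000
    #define FLUSH_QUANT_MS   20
    #define FLUSH_MAX_ITER   (FLUSH_TIMEOUT_MS / FLUSH_QUANT_MS)

    /* Returns true if the queue drained, false on timeout. */
    static bool wait_drained(bool (*empty)(void *), void *q)
    {
            int tout = 0;

            while (!empty(q) && tout++ < FLUSH_MAX_ITER)
                    usleep(FLUSH_QUANT_MS * 1000);
            return empty(q);
    }
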
@@ -212,6 +212,20 @@ err_free_skb:
return -ENOMEM;
}

void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
struct sk_buff *skb = rq->skb[ix];

if (skb) {
rq->skb[ix] = NULL;
dma_unmap_single(rq->pdev,
*((dma_addr_t *)skb->cb),
rq->wqe_sz,
DMA_FROM_DEVICE);
dev_kfree_skb(skb);
}
}

static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
{
return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
@@ -574,6 +588,30 @@ int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
return 0;
}

void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];

wi->free_wqe(rq, wi);
}

void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
struct mlx5_wq_ll *wq = &rq->wq;
struct mlx5e_rx_wqe *wqe;
__be16 wqe_ix_be;
u16 wqe_ix;

while (!mlx5_wq_ll_is_empty(wq)) {
wqe_ix_be = *wq->tail_next;
wqe_ix = be16_to_cpu(wqe_ix_be);
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
rq->dealloc_wqe(rq, wqe_ix);
mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
&wqe->next.next_wqe_index);
}
}

#define RQ_CANNOT_POST(rq) \
(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
@@ -878,6 +916,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
int work_done = 0;

if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state)))
return 0;

if (cq->decmprs_left)
work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);

@@ -110,8 +110,20 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
{
struct mlx5e_priv *priv = netdev_priv(dev);
int channel_ix = fallback(dev, skb);
int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ?
skb->vlan_tci >> VLAN_PRIO_SHIFT : 0;
int up = 0;

if (!netdev_get_num_tc(dev))
return channel_ix;

if (skb_vlan_tag_present(skb))
up = skb->vlan_tci >> VLAN_PRIO_SHIFT;

/* channel_ix can be larger than num_channels since
* dev->num_real_tx_queues = num_channels * num_tc
*/
if (channel_ix >= priv->params.num_channels)
channel_ix = reciprocal_scale(channel_ix,
priv->params.num_channels);

return priv->channeltc_to_txq_map[channel_ix][up];
}
@@ -123,7 +135,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
* headers and occur before the data gather.
* Therefore these headers must be copied into the WQE
*/
#define MLX5E_MIN_INLINE ETH_HLEN
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

if (bf) {
u16 ihs = skb_headlen(skb);
@@ -135,7 +147,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
return skb_headlen(skb);
}

return MLX5E_MIN_INLINE;
return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
@@ -341,6 +353,35 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
return mlx5e_sq_xmit(sq, skb);
}

void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
{
struct mlx5e_tx_wqe_info *wi;
struct sk_buff *skb;
u16 ci;
int i;

while (sq->cc != sq->pc) {
ci = sq->cc & sq->wq.sz_m1;
skb = sq->skb[ci];
wi = &sq->wqe_info[ci];

if (!skb) { /* nop */
sq->cc++;
continue;
}

for (i = 0; i < wi->num_dma; i++) {
struct mlx5e_sq_dma *dma =
mlx5e_dma_get(sq, sq->dma_fifo_cc++);

mlx5e_tx_dma_unmap(sq->pdev, dma);
}

dev_kfree_skb_any(skb);
sq->cc += wi->num_wqebbs;
}
}

bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_sq *sq;
@@ -352,6 +393,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)

sq = container_of(cq, struct mlx5e_sq, cq);

if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
return false;

npkts = 0;
nbytes = 0;

@@ -108,15 +108,21 @@ static int in_fatal(struct mlx5_core_dev *dev)

void mlx5_enter_error_state(struct mlx5_core_dev *dev)
{
mutex_lock(&dev->intf_state_mutex);
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
return;
goto unlock;

mlx5_core_err(dev, "start\n");
if (pci_channel_offline(dev->pdev) || in_fatal(dev))
if (pci_channel_offline(dev->pdev) || in_fatal(dev)) {
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
trigger_cmd_completions(dev);
}

mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
mlx5_core_err(dev, "end\n");

unlock:
mutex_unlock(&dev->intf_state_mutex);
}

static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
@@ -245,7 +251,6 @@ static void poll_health(unsigned long data)
u32 count;

if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
trigger_cmd_completions(dev);
mod_timer(&health->timer, get_next_poll_jiffies());
return;
}

@@ -1422,46 +1422,31 @@ void mlx5_disable_device(struct mlx5_core_dev *dev)
mlx5_pci_err_detected(dev->pdev, 0);
}

/* wait for the device to show vital signs. For now we check
* that we can read the device ID and that the health buffer
* shows a non zero value which is different than 0xffffffff
/* wait for the device to show vital signs by waiting
* for the health counter to start counting.
*/
static void wait_vital(struct pci_dev *pdev)
static int wait_vital(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct mlx5_core_health *health = &dev->priv.health;
const int niter = 100;
u32 last_count = 0;
u32 count;
u16 did;
int i;

/* Wait for firmware to be ready after reset */
msleep(1000);
for (i = 0; i < niter; i++) {
if (pci_read_config_word(pdev, 2, &did)) {
dev_warn(&pdev->dev, "failed reading config word\n");
break;
}
if (did == pdev->device) {
dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
break;
}
msleep(50);
}
if (i == niter)
dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);

for (i = 0; i < niter; i++) {
count = ioread32be(health->health_counter);
if (count && count != 0xffffffff) {
if (last_count && last_count != count) {
dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
break;
return 0;
}
last_count = count;
}
msleep(50);
}

if (i == niter)
dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
return -ETIMEDOUT;
}

static void mlx5_pci_resume(struct pci_dev *pdev)
@@ -1473,7 +1458,11 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
dev_info(&pdev->dev, "%s was called\n", __func__);

pci_save_state(pdev);
wait_vital(pdev);
err = wait_vital(pdev);
if (err) {
dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
return;
}

err = mlx5_load_one(dev, priv);
if (err)

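The rewritten wait_vital() no longer probes the PCI device ID; it declares the device alive once the firmware health counter is observed to advance between two reads. That "value changed, not merely non-zero" test is the key point, since a dead device can still return a constant. A minimal sketch of the same loop, with the read function as an assumed stand-in:

    #include <stdint.h>
    #include <unistd.h>

    /* Poll until the counter advances; 0 on success, -1 on timeout. */
    static int wait_counter_alive(uint32_t (*read_counter)(void *), void *dev)
    {
            uint32_t last = 0, cur;
            int i;

            for (i = 0; i < 100; i++) {
                    cur = read_counter(dev);
                    if (cur && cur != 0xffffffff) {
                            if (last && last != cur)
                                    return 0; /* counter is ticking */
                            last = cur;
                    }
                    usleep(50 * 1000);
            }
            return -1;
    }
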
@@ -345,7 +345,6 @@ retry:
func_id, npages, err);
goto out_4k;
}
dev->priv.fw_pages += npages;

err = mlx5_cmd_status_to_err(&out.hdr);
if (err) {
@@ -373,6 +372,33 @@ out_free:
return err;
}

static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
struct mlx5_manage_pages_inbox *in, int in_size,
struct mlx5_manage_pages_outbox *out, int out_size)
{
struct fw_page *fwp;
struct rb_node *p;
u32 npages;
u32 i = 0;

if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
(u32 *)out, out_size);

npages = be32_to_cpu(in->num_entries);

p = rb_first(&dev->priv.page_root);
while (p && i < npages) {
fwp = rb_entry(p, struct fw_page, rb_node);
out->pas[i] = cpu_to_be64(fwp->addr);
p = rb_next(p);
i++;
}

out->num_entries = cpu_to_be32(i);
return 0;
}

static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int *nclaimed)
{
@@ -398,15 +424,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
in.func_id = cpu_to_be16(func_id);
in.num_entries = cpu_to_be32(npages);
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
if (err) {
mlx5_core_err(dev, "failed reclaiming pages\n");
goto out_free;
}
dev->priv.fw_pages -= npages;

if (out->hdr.status) {
err = mlx5_cmd_status_to_err(&out->hdr);
mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
goto out_free;
}

@@ -417,13 +437,15 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
err = -EINVAL;
goto out_free;
}
if (nclaimed)
*nclaimed = num_claimed;

for (i = 0; i < num_claimed; i++) {
addr = be64_to_cpu(out->pas[i]);
free_4k(dev, addr);
}

if (nclaimed)
*nclaimed = num_claimed;

dev->priv.fw_pages -= num_claimed;
if (func_id)
dev->priv.vfs_pages -= num_claimed;
@@ -514,14 +536,10 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
p = rb_first(&dev->priv.page_root);
if (p) {
fwp = rb_entry(p, struct fw_page, rb_node);
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
free_4k(dev, fwp->addr);
nclaimed = 1;
} else {
err = reclaim_pages(dev, fwp->func_id,
optimal_reclaimed_pages(),
&nclaimed);
}

if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
err);
@@ -536,6 +554,13 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
}
} while (p);

WARN(dev->priv.fw_pages,
"FW pages counter is %d after reclaiming all pages\n",
dev->priv.fw_pages);
WARN(dev->priv.vfs_pages,
"VFs FW pages counter is %d after reclaiming all pages\n",
dev->priv.vfs_pages);

return 0;
}

@@ -513,7 +513,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
{
int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
void *nic_vport_context;
u8 *guid;
void *in;
int err;

@@ -535,8 +534,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,

nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);
guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context,
node_guid);
MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);

err = mlx5_modify_nic_vport_context(mdev, in, inlen);

@@ -1151,7 +1151,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
enc28j60_phy_read(priv, PHIR);
}
/* TX complete handler */
if ((intflags & EIR_TXIF) != 0) {
if (((intflags & EIR_TXIF) != 0) &&
((intflags & EIR_TXERIF) == 0)) {
bool err = false;
loop++;
if (netif_msg_intr(priv))
@@ -1203,7 +1204,7 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
enc28j60_tx_clear(ndev, true);
} else
enc28j60_tx_clear(ndev, true);
locked_reg_bfclr(priv, EIR, EIR_TXERIF);
locked_reg_bfclr(priv, EIR, EIR_TXERIF | EIR_TXIF);
}
/* RX Error handler */
if ((intflags & EIR_RXERIF) != 0) {
@@ -1238,6 +1239,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
*/
static void enc28j60_hw_tx(struct enc28j60_net *priv)
{
BUG_ON(!priv->tx_skb);

if (netif_msg_tx_queued(priv))
printk(KERN_DEBUG DRV_NAME
": Tx Packet Len:%d\n", priv->tx_skb->len);

@@ -772,6 +772,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tx_ring->tx_stats.tx_bytes += skb->len;
tx_ring->tx_stats.xmit_called++;

/* Ensure writes are complete before HW fetches Tx descriptors */
wmb();
qlcnic_update_cmd_producer(tx_ring);

return NETDEV_TX_OK;

@@ -2804,7 +2804,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
priv->tx_path_in_lpi_mode = true;
if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
priv->tx_path_in_lpi_mode = false;
if (status & CORE_IRQ_MTL_RX_OVERFLOW)
if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
priv->rx_tail_addr,
STMMAC_CHAN0);

@@ -1072,12 +1072,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)

static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
{
struct geneve_dev *geneve = netdev_priv(dev);
/* The max_mtu calculation does not take account of GENEVE
* options, to avoid excluding potentially valid
* configurations.
*/
int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr)
- dev->hard_header_len;
int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;

if (geneve->remote.sa.sa_family == AF_INET6)
max_mtu -= sizeof(struct ipv6hdr);
else
max_mtu -= sizeof(struct iphdr);

if (new_mtu < 68)
return -EINVAL;

@@ -2640,6 +2640,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.OutPktsUntagged++;
u64_stats_update_end(&secy_stats->syncp);
skb->dev = macsec->real_dev;
len = skb->len;
ret = dev_queue_xmit(skb);
count_tx(dev, ret, len);

@@ -57,6 +57,7 @@

/* PHY CTRL bits */
#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
#define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14)

/* RGMIIDCTL bits */
#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
@@ -133,8 +134,8 @@ static int dp83867_of_init(struct phy_device *phydev)
static int dp83867_config_init(struct phy_device *phydev)
{
struct dp83867_private *dp83867;
int ret;
u16 val, delay;
int ret, val;
u16 delay;

if (!phydev->priv) {
dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867),
@@ -151,8 +152,12 @@ static int dp83867_config_init(struct phy_device *phydev)
}

if (phy_interface_is_rgmii(phydev)) {
ret = phy_write(phydev, MII_DP83867_PHYCTRL,
(dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT));
val = phy_read(phydev, MII_DP83867_PHYCTRL);
if (val < 0)
return val;
val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
if (ret)
return ret;
}

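The dp83867 hunk replaces a blind phy_write() with a read-modify-write so the other PHYCTRL bits survive the FIFO-depth update. The generic update-only-one-field pattern, sketched on a plain register image:

    #include <stdint.h>

    #define FIFO_DEPTH_SHIFT 14
    #define FIFO_DEPTH_MASK  (3u << FIFO_DEPTH_SHIFT)

    /* Update only the FIFO-depth field of a control register image. */
    static uint16_t set_fifo_depth(uint16_t reg, unsigned int depth)
    {
            reg &= ~FIFO_DEPTH_MASK;
            reg |= (uint16_t)(depth << FIFO_DEPTH_SHIFT) & FIFO_DEPTH_MASK;
            return reg;
    }
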
@@ -854,6 +854,13 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
if (cdc_ncm_init(dev))
goto error2;

/* Some firmwares need a pause here or they will silently fail
* to set up the interface properly. This value was decided
* empirically on a Sierra Wireless MC7455 running 02.08.02.00
* firmware.
*/
usleep_range(10000, 20000);

/* configure data interface */
temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
if (temp) {

@@ -31,7 +31,7 @@
#define NETNEXT_VERSION "08"

/* Information for net */
#define NET_VERSION "4"
#define NET_VERSION "5"

#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -624,6 +624,7 @@ struct r8152 {
int (*eee_get)(struct r8152 *, struct ethtool_eee *);
int (*eee_set)(struct r8152 *, struct ethtool_eee *);
bool (*in_nway)(struct r8152 *);
void (*autosuspend_en)(struct r8152 *tp, bool enable);
} rtl_ops;

int intr_interval;
@@ -2408,9 +2409,6 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
if (enable) {
u32 ocp_data;

r8153_u1u2en(tp, false);
r8153_u2p3en(tp, false);

__rtl_set_wol(tp, WAKE_ANY);

ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
@@ -2421,7 +2419,28 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)

ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
} else {
u32 ocp_data;

__rtl_set_wol(tp, tp->saved_wolopts);

ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);

ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
ocp_data &= ~LINK_OFF_WAKE_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);

ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
}
}

static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
{
rtl_runtime_suspend_enable(tp, enable);

if (enable) {
r8153_u1u2en(tp, false);
r8153_u2p3en(tp, false);
} else {
r8153_u2p3en(tp, true);
r8153_u1u2en(tp, true);
}
@@ -3512,7 +3531,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
napi_disable(&tp->napi);
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
rtl_stop_rx(tp);
rtl_runtime_suspend_enable(tp, true);
tp->rtl_ops.autosuspend_en(tp, true);
} else {
cancel_delayed_work_sync(&tp->schedule);
tp->rtl_ops.down(tp);
@@ -3538,7 +3557,7 @@ static int rtl8152_resume(struct usb_interface *intf)

if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
rtl_runtime_suspend_enable(tp, false);
tp->rtl_ops.autosuspend_en(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
napi_disable(&tp->napi);
set_bit(WORK_ENABLE, &tp->flags);
@@ -3557,7 +3576,7 @@ static int rtl8152_resume(struct usb_interface *intf)
usb_submit_urb(tp->intr_urb, GFP_KERNEL);
} else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
if (tp->netdev->flags & IFF_UP)
rtl_runtime_suspend_enable(tp, false);
tp->rtl_ops.autosuspend_en(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
}

@@ -4137,6 +4156,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->eee_get = r8152_get_eee;
ops->eee_set = r8152_set_eee;
ops->in_nway = rtl8152_in_nway;
ops->autosuspend_en = rtl_runtime_suspend_enable;
break;

case RTL_VER_03:
@@ -4152,6 +4172,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->eee_get = r8153_get_eee;
ops->eee_set = r8153_set_eee;
ops->in_nway = rtl8153_in_nway;
ops->autosuspend_en = rtl8153_runtime_enable;
break;

default:

@@ -395,8 +395,11 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
dev->hard_mtu = net->mtu + net->hard_header_len;
if (dev->rx_urb_size == old_hard_mtu) {
dev->rx_urb_size = dev->hard_mtu;
if (dev->rx_urb_size > old_rx_urb_size)
if (dev->rx_urb_size > old_rx_urb_size) {
usbnet_pause_rx(dev);
usbnet_unlink_rx_urbs(dev);
usbnet_resume_rx(dev);
}
}

/* max qlen depend on hard_mtu and rx_urb_size */
@@ -1508,8 +1511,9 @@ static void usbnet_bh (unsigned long param)
} else if (netif_running (dev->net) &&
netif_device_present (dev->net) &&
netif_carrier_ok(dev->net) &&
!timer_pending (&dev->delay) &&
!test_bit (EVENT_RX_HALT, &dev->flags)) {
!timer_pending(&dev->delay) &&
!test_bit(EVENT_RX_PAUSED, &dev->flags) &&
!test_bit(EVENT_RX_HALT, &dev->flags)) {
int temp = dev->rxq.qlen;

if (temp < RX_QLEN(dev)) {

@@ -151,13 +151,19 @@ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
goto exit;
}

if (u_cmd.outsize != s_cmd->outsize ||
u_cmd.insize != s_cmd->insize) {
ret = -EINVAL;
goto exit;
}

s_cmd->command += ec->cmd_offset;
ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
/* Only copy data to userland if data was received. */
if (ret < 0)
goto exit;

if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + u_cmd.insize))
if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize))
ret = -EFAULT;
exit:
kfree(s_cmd);

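The cros_ec ioctl fix pairs two classic hardening moves: reject the request if the sizes in the userspace struct changed between the two copies (a double-fetch guard), and bound the copy-out by the kernel-side size rather than the userspace-supplied one. A sketch of the first check, with an illustrative struct layout:

    #include <errno.h>
    #include <stdint.h>

    struct cmd { uint32_t outsize, insize; };

    /* Sizes must match what was validated on the first fetch. */
    static int validate_refetched(const struct cmd *first,
                                  const struct cmd *second)
    {
            if (first->outsize != second->outsize ||
                first->insize != second->insize)
                    return -EINVAL;
            return 0;
    }
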
@@ -1051,6 +1051,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
qeth_l2_set_offline(cgdev);

if (card->dev) {
netif_napi_del(&card->napi);
unregister_netdev(card->dev);
card->dev = NULL;
}

@@ -3226,6 +3226,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_l3_set_offline(cgdev);

if (card->dev) {
netif_napi_del(&card->napi);
unregister_netdev(card->dev);
card->dev = NULL;
}

@@ -10093,6 +10093,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
ioa_cfg->intr_flag = IPR_USE_MSI;
else {
ioa_cfg->intr_flag = IPR_USE_LSI;
ioa_cfg->clear_isr = 1;
ioa_cfg->nvectors = 1;
dev_info(&pdev->dev, "Cannot enable MSI.\n");
}

@@ -2548,7 +2548,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
if (!vha->flags.online)
return;

if (rsp->msix->cpuid != smp_processor_id()) {
if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
/* if kernel does not notify qla of IRQ's CPU change,
* then set it here.
*/

@@ -429,7 +429,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
* here, and we don't know what device it is
* trying to work with, leave it as-is.
*/
vmax = 8; /* max length of vendor */
vmax = sizeof(devinfo->vendor);
vskip = vendor;
while (vmax > 0 && *vskip == ' ') {
vmax--;
@@ -439,7 +439,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
while (vmax > 0 && vskip[vmax - 1] == ' ')
--vmax;

mmax = 16; /* max length of model */
mmax = sizeof(devinfo->model);
mskip = model;
while (mmax > 0 && *mskip == ' ') {
mmax--;
@@ -455,10 +455,12 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
* Behave like the older version of get_device_flags.
*/
if (memcmp(devinfo->vendor, vskip, vmax) ||
devinfo->vendor[vmax])
(vmax < sizeof(devinfo->vendor) &&
devinfo->vendor[vmax]))
continue;
if (memcmp(devinfo->model, mskip, mmax) ||
devinfo->model[mmax])
(mmax < sizeof(devinfo->model) &&
devinfo->model[mmax]))
continue;
return devinfo;
} else {

@@ -423,36 +423,7 @@ upload:

return 0;
}
static int __init check_prereq(void)
{
struct cpuinfo_x86 *c = &cpu_data(0);

if (!xen_initial_domain())
return -ENODEV;

if (!acpi_gbl_FADT.smi_command)
return -ENODEV;

if (c->x86_vendor == X86_VENDOR_INTEL) {
if (!cpu_has(c, X86_FEATURE_EST))
return -ENODEV;

return 0;
}
if (c->x86_vendor == X86_VENDOR_AMD) {
/* Copied from powernow-k8.h, can't include ../cpufreq/powernow
* as we get compile warnings for the static functions.
*/
#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007
#define USE_HW_PSTATE 0x00000080
u32 eax, ebx, ecx, edx;
cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
return -ENODEV;
return 0;
}
return -ENODEV;
}
/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

@@ -509,10 +480,10 @@ struct notifier_block xen_acpi_processor_resume_nb = {
static int __init xen_acpi_processor_init(void)
{
unsigned int i;
int rc = check_prereq();
int rc;

if (rc)
return rc;
if (!xen_initial_domain())
return -ENODEV;

nr_acpi_bits = get_max_acpi_id() + 1;
acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);

@@ -316,10 +316,17 @@ static int xenbus_write_transaction(unsigned msg_type,
rc = -ENOMEM;
goto out;
}
} else {
list_for_each_entry(trans, &u->transactions, list)
if (trans->handle.id == u->u.msg.tx_id)
break;
if (&trans->list == &u->transactions)
return -ESRCH;
}

reply = xenbus_dev_request_and_reply(&u->u.msg);
if (IS_ERR(reply)) {
if (msg_type == XS_TRANSACTION_START)
kfree(trans);
rc = PTR_ERR(reply);
goto out;
@@ -333,12 +340,7 @@ static int xenbus_write_transaction(unsigned msg_type,
list_add(&trans->list, &u->transactions);
}
} else if (u->u.msg.type == XS_TRANSACTION_END) {
list_for_each_entry(trans, &u->transactions, list)
if (trans->handle.id == u->u.msg.tx_id)
break;
BUG_ON(&trans->list == &u->transactions);
list_del(&trans->list);

kfree(trans);
}

@@ -232,10 +232,10 @@ static void transaction_resume(void)
void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
{
void *ret;
struct xsd_sockmsg req_msg = *msg;
enum xsd_sockmsg_type type = msg->type;
int err;

if (req_msg.type == XS_TRANSACTION_START)
if (type == XS_TRANSACTION_START)
transaction_start();

mutex_lock(&xs_state.request_mutex);
@@ -249,12 +249,8 @@ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)

mutex_unlock(&xs_state.request_mutex);

if (IS_ERR(ret))
return ret;

if ((msg->type == XS_TRANSACTION_END) ||
((req_msg.type == XS_TRANSACTION_START) &&
(msg->type == XS_ERROR)))
((type == XS_TRANSACTION_START) && (msg->type == XS_ERROR)))
transaction_end();

return ret;

@@ -357,8 +357,6 @@ configfs_write_bin_file(struct file *file, const char __user *buf,

len = simple_write_to_buffer(buffer->bin_buffer,
buffer->bin_buffer_size, ppos, buf, count);
if (len > 0)
*ppos += len;
out:
mutex_unlock(&buffer->mutex);
return len;

@@ -45,7 +45,7 @@
  * ecryptfs_to_hex
  * @dst: Buffer to take hex character representation of contents of
  *       src; must be at least of size (src_size * 2)
- * @src: Buffer to be converted to a hex string respresentation
+ * @src: Buffer to be converted to a hex string representation
  * @src_size: number of bytes to convert
  */
 void ecryptfs_to_hex(char *dst, char *src, size_t src_size)

@@ -60,7 +60,7 @@ void ecryptfs_to_hex(char *dst, char *src, size_t src_size)
  * ecryptfs_from_hex
  * @dst: Buffer to take the bytes from src hex; must be at least of
  *       size (src_size / 2)
- * @src: Buffer to be converted from a hex string respresentation to raw value
+ * @src: Buffer to be converted from a hex string representation to raw value
  * @dst_size: size of dst buffer, or number of hex characters pairs to convert
  */
 void ecryptfs_from_hex(char *dst, char *src, int dst_size)

@@ -953,7 +953,7 @@ struct ecryptfs_cipher_code_str_map_elem {
 };
 
 /* Add support for additional ciphers by adding elements here. The
- * cipher_code is whatever OpenPGP applicatoins use to identify the
+ * cipher_code is whatever OpenPGP applications use to identify the
  * ciphers.  List in order of probability. */
 static struct ecryptfs_cipher_code_str_map_elem
 ecryptfs_cipher_code_str_map[] = {

@@ -1410,7 +1410,7 @@ int ecryptfs_read_and_validate_xattr_region(struct dentry *dentry,
  *
  * Common entry point for reading file metadata. From here, we could
  * retrieve the header information from the header region of the file,
- * the xattr region of the file, or some other repostory that is
+ * the xattr region of the file, or some other repository that is
  * stored separately from the file itself. The current implementation
  * supports retrieving the metadata information from the file contents
  * and from the xattr region.

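The documented contract for the two helpers is purely size arithmetic: to_hex needs src_size * 2 output characters, and from_hex consumes two input characters per output byte. A standalone userspace sketch of the same conversion (not the kernel implementation):

	#include <stdio.h>

	static void to_hex(char *dst, const unsigned char *src, size_t n)
	{
		for (size_t i = 0; i < n; i++)
			sprintf(dst + i * 2, "%02x", (unsigned)src[i]);	/* 2 chars per byte */
	}

	int main(void)
	{
		unsigned char raw[2] = { 0xde, 0xad };
		char hex[2 * sizeof(raw) + 1];	/* src_size * 2, plus NUL */

		to_hex(hex, raw, sizeof(raw));
		printf("%s\n", hex);		/* prints "dead" */
		return 0;
	}
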
@@ -169,9 +169,22 @@ out:
 	return rc;
 }
 
+static int ecryptfs_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct file *lower_file = ecryptfs_file_to_lower(file);
+	/*
+	 * Don't allow mmap on top of file systems that don't support it
+	 * natively.  If FILESYSTEM_MAX_STACK_DEPTH > 2 or ecryptfs
+	 * allows recursive mounting, this will need to be extended.
+	 */
+	if (!lower_file->f_op->mmap)
+		return -ENODEV;
+	return generic_file_mmap(file, vma);
+}
+
 /**
  * ecryptfs_open
- * @inode: inode speciying file to open
+ * @inode: inode specifying file to open
  * @file: Structure to return filled in
  *
  * Opens the file specified by inode.

@@ -240,7 +253,7 @@ out:
 
 /**
  * ecryptfs_dir_open
- * @inode: inode speciying file to open
+ * @inode: inode specifying file to open
  * @file: Structure to return filled in
  *
  * Opens the file specified by inode.

@@ -403,7 +416,7 @@ const struct file_operations ecryptfs_main_fops = {
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = ecryptfs_compat_ioctl,
 #endif
-	.mmap = generic_file_mmap,
+	.mmap = ecryptfs_mmap,
 	.open = ecryptfs_open,
 	.flush = ecryptfs_flush,
 	.release = ecryptfs_release,

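From userspace the new guard surfaces as a clean failure instead of a later fault: mmap() on an eCryptfs file whose lower filesystem has no mmap handler now fails with ENODEV. A small sketch of what a caller sees (assumed behaviour per the hunk above):

	#include <errno.h>
	#include <sys/mman.h>

	/* Returns 1 if fd could be mapped, 0 if the lower fs cannot back
	 * a mapping (the new -ENODEV case), -1 on any other error. */
	static int try_map(int fd, size_t length)
	{
		void *p = mmap(NULL, length, PROT_READ, MAP_SHARED, fd, 0);

		if (p == MAP_FAILED)
			return errno == ENODEV ? 0 : -1;
		munmap(p, length);
		return 1;
	}
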
@@ -25,7 +25,6 @@
 #include <linux/slab.h>
 #include <linux/wait.h>
 #include <linux/mount.h>
-#include <linux/file.h>
 #include "ecryptfs_kernel.h"
 
 struct ecryptfs_open_req {

@@ -148,7 +147,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
 	flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR;
 	(*lower_file) = dentry_open(&req.path, flags, cred);
 	if (!IS_ERR(*lower_file))
-		goto have_file;
+		goto out;
 	if ((flags & O_ACCMODE) == O_RDONLY) {
 		rc = PTR_ERR((*lower_file));
 		goto out;

@@ -166,16 +165,8 @@ int ecryptfs_privileged_open(struct file **lower_file,
 	mutex_unlock(&ecryptfs_kthread_ctl.mux);
 	wake_up(&ecryptfs_kthread_ctl.wait);
 	wait_for_completion(&req.done);
-	if (IS_ERR(*lower_file)) {
+	if (IS_ERR(*lower_file))
 		rc = PTR_ERR(*lower_file);
-		goto out;
-	}
-have_file:
-	if ((*lower_file)->f_op->mmap == NULL) {
-		fput(*lower_file);
-		*lower_file = NULL;
-		rc = -EMEDIUMTYPE;
-	}
 out:
 	return rc;
 }

@@ -738,7 +738,6 @@ static void ecryptfs_free_kmem_caches(void)
 		struct ecryptfs_cache_info *info;
 
 		info = &ecryptfs_cache_infos[i];
-		if (*(info->cache))
 			kmem_cache_destroy(*(info->cache));
 	}
 }

@@ -483,9 +483,9 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
 		goto out_free;
 	}
 	inode->i_state |= I_WB_SWITCH;
+	__iget(inode);
 	spin_unlock(&inode->i_lock);
 
-	ihold(inode);
 	isw->inode = inode;
 
 	atomic_inc(&isw_nr_in_flight);

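The reordering matters because taking the reference only after dropping i_lock leaves a window in which the inode can proceed toward eviction, and ihold() asserts that i_count is still non-zero. The pattern being enforced, in sketch form (state check simplified relative to the real function):

	spin_lock(&inode->i_lock);
	if (inode->i_state & (I_FREEING | I_WILL_FREE)) {
		spin_unlock(&inode->i_lock);
		return;			/* too late to pin this inode */
	}
	inode->i_state |= I_WB_SWITCH;
	__iget(inode);			/* ref taken while i_lock is still held */
	spin_unlock(&inode->i_lock);
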
@@ -78,6 +78,7 @@
 
 /* ACPI PCI Interrupt Link (pci_link.c) */
 
+int acpi_irq_penalty_init(void);
 int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
 			       int *polarity, char **name);
 int acpi_pci_link_free_irq(acpi_handle handle);

@@ -629,6 +629,7 @@ struct mlx5_cmd_work_ent {
 	void		       *uout;
 	int			uout_size;
 	mlx5_cmd_cbk_t		callback;
+	struct delayed_work	cb_timeout_work;
 	void		       *context;
 	int			idx;
 	struct completion	done;

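Judging by its name, the new field follows the common watchdog pattern for asynchronous commands: arm a delayed work when the request is posted and let whichever of the real completion and the timeout work runs first win. A generic sketch of that pattern, not the mlx5 code (handler and helper names hypothetical):

	INIT_DELAYED_WORK(&ent->cb_timeout_work, cmd_timeout_handler);
	schedule_delayed_work(&ent->cb_timeout_work, timeout_jiffies);

	/* completion path: finish only if the watchdog had not fired;
	 * cancel_delayed_work() returns true iff the work was still
	 * pending */
	if (cancel_delayed_work(&ent->cb_timeout_work))
		finish_command(ent);
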
@@ -1062,6 +1062,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
 }
 
 void __skb_get_hash(struct sk_buff *skb);
+u32 __skb_get_hash_symmetric(struct sk_buff *skb);
 u32 skb_get_poff(const struct sk_buff *skb);
 u32 __skb_get_poff(const struct sk_buff *skb, void *data,
 		   const struct flow_keys *keys, int hlen);

@@ -2869,6 +2870,25 @@ static inline void skb_postpush_rcsum(struct sk_buff *skb,
 	skb->csum = csum_partial(start, len, skb->csum);
 }
 
+/**
+ *	skb_push_rcsum - push skb and update receive checksum
+ *	@skb: buffer to update
+ *	@len: length of data pulled
+ *
+ *	This function performs an skb_push on the packet and updates
+ *	the CHECKSUM_COMPLETE checksum.  It should be used on
+ *	receive path processing instead of skb_push unless you know
+ *	that the checksum difference is zero (e.g., a valid IP header)
+ *	or you are setting ip_summed to CHECKSUM_NONE.
+ */
+static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
+					    unsigned int len)
+{
+	skb_push(skb, len);
+	skb_postpush_rcsum(skb, skb->data, len);
+	return skb->data;
+}
+
 /**
  *	pskb_trim_rcsum - trim received skb and update checksum
  *	@skb: buffer to trim

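A typical caller is receive-path code that restores a header it previously pulled, for example exposing the link-layer header again before handing the skb to a tap; because the pushed bytes are the original ones, folding them back into skb->csum keeps CHECKSUM_COMPLETE exact. Sketch of such a call (modeled on af_packet-style use, not taken from this diff):

	/* skb->data currently points at the network header; put the
	 * link-layer header back without invalidating the checksum */
	skb_push_rcsum(skb, skb->data - skb_mac_header(skb));
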
@@ -34,6 +34,9 @@
 
 #define BOND_DEFAULT_MIIMON	100
 
+#ifndef __long_aligned
+#define __long_aligned __attribute__((aligned((sizeof(long)))))
+#endif
 /*
  * Less bad way to call ioctl from within the kernel; this needs to be
  * done some other way to get the call out of interrupt context.

@@ -138,7 +141,9 @@ struct bond_params {
 	struct reciprocal_value reciprocal_packets_per_slave;
 	u16 ad_actor_sys_prio;
 	u16 ad_user_port_key;
-	u8 ad_actor_system[ETH_ALEN];
+
+	/* 2 bytes of padding : see ether_addr_equal_64bits() */
+	u8 ad_actor_system[ETH_ALEN + 2];
 };
 
 struct bond_parm_tbl {

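The two extra bytes exist because ether_addr_equal_64bits() compares 6-byte addresses with a single 8-byte load on 64-bit machines, so both operands must guarantee two readable bytes of tail room. Sketch of the layout requirement (illustrative struct, not from this diff):

	struct has_padding {
		u8 addr[ETH_ALEN + 2];	/* 8 readable bytes: safe for the
					 * wide load; a bare addr[ETH_ALEN]
					 * at the end of an allocation would
					 * let the load run past it */
	} __long_aligned;
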
@@ -313,10 +313,9 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
 	return min(dst->dev->mtu, IP_MAX_MTU);
 }
 
-static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
+static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
+					   const struct sk_buff *skb)
 {
-	struct sock *sk = skb->sk;
-
 	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
 		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
 

@@ -1458,6 +1458,7 @@ config KALLSYMS_ALL
 
 config KALLSYMS_ABSOLUTE_PERCPU
 	bool
+	depends on KALLSYMS
 	default X86_64 && SMP
 
 config KALLSYMS_BASE_RELATIVE

@@ -1678,12 +1678,33 @@ static bool is_orphaned_event(struct perf_event *event)
 	return event->state == PERF_EVENT_STATE_DEAD;
 }
 
-static inline int pmu_filter_match(struct perf_event *event)
+static inline int __pmu_filter_match(struct perf_event *event)
 {
 	struct pmu *pmu = event->pmu;
 	return pmu->filter_match ? pmu->filter_match(event) : 1;
 }
 
+/*
+ * Check whether we should attempt to schedule an event group based on
+ * PMU-specific filtering. An event group can consist of HW and SW events,
+ * potentially with a SW leader, so we must check all the filters, to
+ * determine whether a group is schedulable:
+ */
+static inline int pmu_filter_match(struct perf_event *event)
+{
+	struct perf_event *child;
+
+	if (!__pmu_filter_match(event))
+		return 0;
+
+	list_for_each_entry(child, &event->sibling_list, group_entry) {
+		if (!__pmu_filter_match(child))
+			return 0;
+	}
+
+	return 1;
+}
+
 static inline int
 event_filter_match(struct perf_event *event)
 {

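For context, a pmu::filter_match callback answers "can this event count on the current CPU right now"; heterogeneous systems use it for PMUs that only cover a subset of CPUs, which is why every sibling in a mixed HW/SW group must be checked. A hedged sketch of such a callback (my_pmu_cpumask is hypothetical):

	static int my_pmu_filter_match(struct perf_event *event)
	{
		/* schedulable only on CPUs this PMU actually covers */
		return cpumask_test_cpu(smp_processor_id(), &my_pmu_cpumask);
	}
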
@@ -735,8 +735,6 @@ void post_init_entity_util_avg(struct sched_entity *se)
 	}
 }
 
-static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
-static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
 #else
 void init_entity_runnable_average(struct sched_entity *se)
 {

@@ -2499,28 +2497,22 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 # ifdef CONFIG_SMP
-static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
-{
-	long tg_weight;
-
-	/*
-	 * Use this CPU's real-time load instead of the last load contribution
-	 * as the updating of the contribution is delayed, and we will use the
-	 * the real-time load to calc the share. See update_tg_load_avg().
-	 */
-	tg_weight = atomic_long_read(&tg->load_avg);
-	tg_weight -= cfs_rq->tg_load_avg_contrib;
-	tg_weight += cfs_rq->load.weight;
-
-	return tg_weight;
-}
-
 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
 	long tg_weight, load, shares;
 
-	tg_weight = calc_tg_weight(tg, cfs_rq);
-	load = cfs_rq->load.weight;
+	/*
+	 * This really should be: cfs_rq->avg.load_avg, but instead we use
+	 * cfs_rq->load.weight, which is its upper bound. This helps ramp up
+	 * the shares for small weight interactive tasks.
+	 */
+	load = scale_load_down(cfs_rq->load.weight);
+
+	tg_weight = atomic_long_read(&tg->load_avg);
+
+	/* Ensure tg_weight >= load */
+	tg_weight -= cfs_rq->tg_load_avg_contrib;
+	tg_weight += load;
 
 	shares = (tg->shares * load);
 	if (tg_weight)

@@ -2539,6 +2531,7 @@ static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 	return tg->shares;
 }
+# endif /* CONFIG_SMP */
 
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight)
 {

@@ -4946,19 +4939,24 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 		return wl;
 
 	for_each_sched_entity(se) {
-		long w, W;
+		struct cfs_rq *cfs_rq = se->my_q;
+		long W, w = cfs_rq_load_avg(cfs_rq);
 
-		tg = se->my_q->tg;
+		tg = cfs_rq->tg;
 
 		/*
 		 * W = @wg + \Sum rw_j
 		 */
-		W = wg + calc_tg_weight(tg, se->my_q);
+		W = wg + atomic_long_read(&tg->load_avg);
+
+		/* Ensure \Sum rw_j >= rw_i */
+		W -= cfs_rq->tg_load_avg_contrib;
+		W += w;
 
 		/*
 		 * w = rw_i + @wl
 		 */
-		w = cfs_rq_load_avg(se->my_q) + wl;
+		w += wl;
 
 		/*
 		 * wl = S * s'_i; see (2)

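The shares computation reduces to shares = tg->shares * load / tg_weight, with this cfs_rq's stale contribution swapped out of the group sum and its current load swapped in. Worked through with made-up numbers:

	/* tg->shares = 1024, global tg->load_avg = 300, of which this
	 * cfs_rq had contributed 100; its current (scaled) load is 200 */
	long load      = 200;
	long tg_weight = 300 - 100 + 200;		/* = 400, >= load by construction */
	long shares    = (1024 * load) / tg_weight;	/* = 512 */

	/* were tg_weight allowed to drop below load, shares would exceed
	 * tg->shares; the "Ensure tg_weight >= load" step above rules
	 * that out */
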
@@ -2225,9 +2225,11 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 		error = shmem_getpage(inode, index, &page, SGP_FALLOC);
 		if (error) {
 			/* Remove the !PageUptodate pages we added */
+			if (index > start) {
 				shmem_undo_range(inode,
 					(loff_t)start << PAGE_SHIFT,
 					((loff_t)index << PAGE_SHIFT) - 1, true);
+			}
 			goto undone;
 		}
 

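The undo range is plain page-index arithmetic; a worked example assuming PAGE_SHIFT == 12:

	/* fallocate began at page 2; shmem_getpage() failed at index 5,
	 * so only pages 2..4 were instantiated */
	loff_t lstart = (loff_t)2 << 12;	/*  8192: first byte of page 2 */
	loff_t lend   = ((loff_t)5 << 12) - 1;	/* 20479: last byte of page 4  */

	/* the inclusive range covers pages 2..4 and not one byte of
	 * page 5.  Had the very first page failed (index == start), lend
	 * would compute to lstart - 1, an inverted range; the new
	 * "index > start" guard skips the undo entirely in that case */
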
@@ -700,7 +700,7 @@ static int
 br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 		  int (*output)(struct net *, struct sock *, struct sk_buff *))
 {
-	unsigned int mtu = ip_skb_dst_mtu(skb);
+	unsigned int mtu = ip_skb_dst_mtu(sk, skb);
 	struct iphdr *iph = ip_hdr(skb);
 
 	if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||

Some files were not shown because too many files changed in this diff.