Merge branches 'arm/smmu', 'core', 'x86/vt-d', 'arm/shmobile', 'x86/amd', 'ppc/pamu', 'iommu/fixes' and 'arm/msm' into next

Joerg Roedel 2014-01-09 13:06:59 +01:00
162 changed files with 1689 additions and 938 deletions

View file

@@ -783,7 +783,7 @@ F: arch/arm/boot/dts/sama*.dts
F: arch/arm/boot/dts/sama*.dtsi
ARM/CALXEDA HIGHBANK ARCHITECTURE
M: Rob Herring <rob.herring@calxeda.com>
M: Rob Herring <robh@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-highbank/
@@ -1368,6 +1368,9 @@ T: git git://git.xilinx.com/linux-xlnx.git
S: Supported
F: arch/arm/mach-zynq/
F: drivers/cpuidle/cpuidle-zynq.c
N: zynq
N: xilinx
F: drivers/clocksource/cadence_ttc_timer.c
ARM SMMU DRIVER
M: Will Deacon <will.deacon@arm.com>
@@ -6256,7 +6259,7 @@ F: drivers/i2c/busses/i2c-ocores.c
OPEN FIRMWARE AND FLATTENED DEVICE TREE
M: Grant Likely <grant.likely@linaro.org>
M: Rob Herring <rob.herring@calxeda.com>
M: Rob Herring <robh+dt@kernel.org>
L: devicetree@vger.kernel.org
W: http://fdt.secretlab.ca
T: git git://git.secretlab.ca/git/linux-2.6.git
@@ -6268,7 +6271,7 @@ K: of_get_property
K: of_match_table
OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
M: Rob Herring <rob.herring@calxeda.com>
M: Rob Herring <robh+dt@kernel.org>
M: Pawel Moll <pawel.moll@arm.com>
M: Mark Rutland <mark.rutland@arm.com>
M: Ian Campbell <ijc+devicetree@hellion.org.uk>

View file

@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 13
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = One Giant Leap for Frogkind
# *DOCUMENTATION*

View file

@@ -58,7 +58,6 @@
compatible = "fsl,mpc5121-immr";
#address-cells = <1>;
#size-cells = <1>;
#interrupt-cells = <2>;
ranges = <0x0 0x80000000 0x400000>;
reg = <0x80000000 0x400000>;
bus-frequency = <66000000>; // 66 MHz ips bus
@@ -189,6 +188,10 @@
reg = <0xA000 0x1000>;
};
// disable USB1 port
// TODO:
// correct pinmux config and fix USB3320 ulpi dependency
// before re-enabling it
usb@3000 {
compatible = "fsl,mpc5121-usb2-dr";
reg = <0x3000 0x400>;
@@ -197,6 +200,7 @@
interrupts = <43 0x8>;
dr_mode = "host";
phy_type = "ulpi";
status = "disabled";
};
// 5125 PSCs are not 52xx or 5121 PSC compatible

View file

@@ -284,7 +284,7 @@ do_kvm_##n: \
subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
beq- 1f; \
ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
1: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \
blt+ cr1,3f; /* abort if it is */ \
li r1,(n); /* will be reloaded later */ \
sth r1,PACA_TRAP_SAVE(r13); \
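The widened check guards against a bogus user stack pointer smaller than INT_FRAME_SIZE: once the frame is carved out, such a pointer wraps negative and the old r1 < 0 test would mistake it for a kernel address. A minimal userspace sketch of the two predicates (the frame size below is illustrative, not the real constant):

#include <stdint.h>

#define INT_FRAME_SIZE 0x1C0                    /* illustrative value only */

/* signed 64-bit view: kernel addresses are negative, user ones are not */
static int is_kernel_sp_old(int64_t r1) { return r1 < 0; }
static int is_kernel_sp_new(int64_t r1) { return r1 < -INT_FRAME_SIZE; }

int demo(void)
{
	int64_t user_sp = 0x10;                 /* bogus tiny user SP */
	int64_t r1 = user_sp - INT_FRAME_SIZE;  /* wraps below zero */

	/* the old check misclassifies r1 as a kernel stack pointer;
	 * comparing against -INT_FRAME_SIZE does not */
	return is_kernel_sp_old(r1) && !is_kernel_sp_new(r1);   /* 1 */
}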

View file

@@ -4,13 +4,18 @@
#ifdef __KERNEL__
/*
* The PowerPC can do unaligned accesses itself in big endian mode.
* The PowerPC can do unaligned accesses itself based on its endian mode.
*/
#include <linux/unaligned/access_ok.h>
#include <linux/unaligned/generic.h>
#ifdef __LITTLE_ENDIAN__
#define get_unaligned __get_unaligned_le
#define put_unaligned __put_unaligned_le
#else
#define get_unaligned __get_unaligned_be
#define put_unaligned __put_unaligned_be
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_UNALIGNED_H */
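A userspace analogue of what the selected accessors do, assuming the generic helpers behave like byte-wise loads in the named byte order (the function bodies below are illustrative, not the kernel's):

#include <stdint.h>

static uint32_t load_le32(const uint8_t *p)
{
	return p[0] | p[1] << 8 | p[2] << 16 | (uint32_t)p[3] << 24;
}

static uint32_t load_be32(const uint8_t *p)
{
	return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

/* choosing the variant that matches the CPU's byte order makes an
 * unaligned get_unaligned() agree with a native aligned load, which
 * is exactly what the #ifdef above selects at compile time */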

View file

@@ -80,6 +80,7 @@ END_FTR_SECTION(0, 1)
* of the function that the cpu should jump to to continue
* initialization.
*/
.balign 8
.globl __secondary_hold_spinloop
__secondary_hold_spinloop:
.llong 0x0
@@ -470,6 +471,7 @@ _STATIC(__after_prom_start)
mtctr r8
bctr
.balign 8
p_end: .llong _end - _stext
4: /* Now copy the rest of the kernel up to _end */

View file

@@ -9,6 +9,14 @@
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#ifdef __BIG_ENDIAN__
#define sLd sld /* Shift towards low-numbered address. */
#define sHd srd /* Shift towards high-numbered address. */
#else
#define sLd srd /* Shift towards low-numbered address. */
#define sHd sld /* Shift towards high-numbered address. */
#endif
.align 7
_GLOBAL(__copy_tofrom_user)
BEGIN_FTR_SECTION
@@ -118,10 +126,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */
25: ld r0,8(r4)
sld r6,r9,r10
sLd r6,r9,r10
26: ldu r9,16(r4)
srd r7,r0,r11
sld r8,r0,r10
sHd r7,r0,r11
sLd r8,r0,r10
or r7,r7,r6
blt cr6,79f
27: ld r0,8(r4)
@@ -129,35 +137,35 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */
29: ldu r9,8(r4)
sld r8,r0,r10
sLd r8,r0,r10
addi r3,r3,-8
blt cr6,5f
30: ld r0,8(r4)
srd r12,r9,r11
sld r6,r9,r10
sHd r12,r9,r11
sLd r6,r9,r10
31: ldu r9,16(r4)
or r12,r8,r12
srd r7,r0,r11
sld r8,r0,r10
sHd r7,r0,r11
sLd r8,r0,r10
addi r3,r3,16
beq cr6,78f
1: or r7,r7,r6
32: ld r0,8(r4)
76: std r12,8(r3)
2: srd r12,r9,r11
sld r6,r9,r10
2: sHd r12,r9,r11
sLd r6,r9,r10
33: ldu r9,16(r4)
or r12,r8,r12
77: stdu r7,16(r3)
srd r7,r0,r11
sld r8,r0,r10
sHd r7,r0,r11
sLd r8,r0,r10
bdnz 1b
78: std r12,8(r3)
or r7,r7,r6
79: std r7,16(r3)
5: srd r12,r9,r11
5: sHd r12,r9,r11
or r12,r8,r12
80: std r12,24(r3)
bne 6f
@@ -165,23 +173,38 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
blr
6: cmpwi cr1,r5,8
addi r3,r3,32
sld r9,r9,r10
sLd r9,r9,r10
ble cr1,7f
34: ld r0,8(r4)
srd r7,r0,r11
sHd r7,r0,r11
or r9,r7,r9
7:
bf cr7*4+1,1f
#ifdef __BIG_ENDIAN__
rotldi r9,r9,32
#endif
94: stw r9,0(r3)
#ifdef __LITTLE_ENDIAN__
rotrdi r9,r9,32
#endif
addi r3,r3,4
1: bf cr7*4+2,2f
#ifdef __BIG_ENDIAN__
rotldi r9,r9,16
#endif
95: sth r9,0(r3)
#ifdef __LITTLE_ENDIAN__
rotrdi r9,r9,16
#endif
addi r3,r3,2
2: bf cr7*4+3,3f
#ifdef __BIG_ENDIAN__
rotldi r9,r9,8
#endif
96: stb r9,0(r3)
#ifdef __LITTLE_ENDIAN__
rotrdi r9,r9,8
#endif
3: li r3,0
blr
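Why the shift directions flip: byte 0 of a doubleword sits in the register's high bits on big-endian but in the low bits on little-endian, so shifting "towards the low-numbered address" (sLd above) is a left shift on BE and a right shift on LE. A small userspace illustration that prints 2 either way:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t mem[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	uint64_t reg;

	memcpy(&reg, mem, 8);
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	reg <<= 8;              /* sLd: drop mem[0], move bytes down */
#else
	reg >>= 8;              /* the same sLd on little-endian */
#endif
	memcpy(mem, &reg, 8);
	printf("%u\n", mem[0]); /* byte 2 now sits at the low address */
	return 0;
}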

View file

@@ -36,7 +36,6 @@
#include "powernv.h"
#include "pci.h"
static char *hub_diag = NULL;
static int ioda_eeh_nb_init = 0;
static int ioda_eeh_event(struct notifier_block *nb,
@@ -140,15 +139,6 @@ static int ioda_eeh_post_init(struct pci_controller *hose)
ioda_eeh_nb_init = 1;
}
/* We needn't HUB diag-data on PHB3 */
if (phb->type == PNV_PHB_IODA1 && !hub_diag) {
hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
if (!hub_diag) {
pr_err("%s: Out of memory !\n", __func__);
return -ENOMEM;
}
}
#ifdef CONFIG_DEBUG_FS
if (phb->dbgfs) {
debugfs_create_file("err_injct_outbound", 0600,
@@ -633,11 +623,10 @@ static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
static void ioda_eeh_hub_diag(struct pci_controller *hose)
{
struct pnv_phb *phb = hose->private_data;
struct OpalIoP7IOCErrorData *data;
struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
long rc;
data = (struct OpalIoP7IOCErrorData *)ioda_eeh_hub_diag;
rc = opal_pci_get_hub_diag_data(phb->hub_id, data, PAGE_SIZE);
rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
if (rc != OPAL_SUCCESS) {
pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n",
__func__, phb->hub_id, rc);
@@ -820,14 +809,15 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose)
struct OpalIoPhbErrorCommon *common;
long rc;
common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
rc = opal_pci_get_phb_diag_data2(phb->opal_id, common, PAGE_SIZE);
rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
PNV_PCI_DIAG_BUF_SIZE);
if (rc != OPAL_SUCCESS) {
pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
__func__, hose->global_number, rc);
return;
}
common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
switch (common->ioType) {
case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
ioda_eeh_p7ioc_phb_diag(hose, common);

View file

@@ -172,11 +172,13 @@ struct pnv_phb {
} ioda;
};
/* PHB status structure */
/* PHB and hub status structure */
union {
unsigned char blob[PNV_PCI_DIAG_BUF_SIZE];
struct OpalIoP7IOCPhbErrorData p7ioc;
struct OpalIoP7IOCErrorData hub_diag;
} diag;
};
extern struct pci_ops pnv_pci_ops;

View file

@@ -135,7 +135,6 @@ config S390
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16 if 32BIT
select HAVE_VIRT_CPU_ACCOUNTING
select INIT_ALL_POSSIBLE
select KTIME_SCALAR if 32BIT
select MODULES_USE_ELF_RELA
select OLD_SIGACTION

View file

@@ -31,6 +31,7 @@ extern void smp_yield(void);
extern void smp_stop_cpu(void);
extern void smp_cpu_set_polarization(int cpu, int val);
extern int smp_cpu_get_polarization(int cpu);
extern void smp_fill_possible_mask(void);
#else /* CONFIG_SMP */
@@ -50,6 +51,7 @@ static inline int smp_vcpu_scheduled(int cpu) { return 1; }
static inline void smp_yield_cpu(int cpu) { }
static inline void smp_yield(void) { }
static inline void smp_stop_cpu(void) { }
static inline void smp_fill_possible_mask(void) { }
#endif /* CONFIG_SMP */

View file

@@ -1023,6 +1023,7 @@ void __init setup_arch(char **cmdline_p)
setup_vmcoreinfo();
setup_lowcore();
smp_fill_possible_mask();
cpu_init();
s390_init_cpu_topology();

View file

@@ -721,18 +721,14 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
return 0;
}
static int __init setup_possible_cpus(char *s)
{
int max, cpu;
static unsigned int setup_possible_cpus __initdata;
if (kstrtoint(s, 0, &max) < 0)
return 0;
init_cpu_possible(cpumask_of(0));
for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++)
set_cpu_possible(cpu, true);
static int __init _setup_possible_cpus(char *s)
{
get_option(&s, &setup_possible_cpus);
return 0;
}
early_param("possible_cpus", setup_possible_cpus);
early_param("possible_cpus", _setup_possible_cpus);
#ifdef CONFIG_HOTPLUG_CPU
@@ -775,6 +771,17 @@ void __noreturn cpu_die(void)
#endif /* CONFIG_HOTPLUG_CPU */
void __init smp_fill_possible_mask(void)
{
unsigned int possible, cpu;
possible = setup_possible_cpus;
if (!possible)
possible = MACHINE_IS_VM ? 64 : nr_cpu_ids;
for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
set_cpu_possible(cpu, true);
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
/* request the 0x1201 emergency signal external interrupt */

View file

@@ -75,6 +75,7 @@ void zpci_event_availability(void *data)
if (!zdev || zdev->state == ZPCI_FN_STATE_CONFIGURED)
break;
zdev->state = ZPCI_FN_STATE_CONFIGURED;
zdev->fh = ccdf->fh;
ret = zpci_enable_device(zdev);
if (ret)
break;
@@ -101,6 +102,7 @@ void zpci_event_availability(void *data)
if (pdev)
pci_stop_and_remove_bus_device(pdev);
zdev->fh = ccdf->fh;
zpci_disable_device(zdev);
zdev->state = ZPCI_FN_STATE_STANDBY;
break;

View file

@@ -20,6 +20,11 @@ EXPORT_SYMBOL(csum_partial_copy_generic);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(empty_zero_page);
#ifdef CONFIG_FLATMEM
/* need in pfn_valid macro */
EXPORT_SYMBOL(min_low_pfn);
EXPORT_SYMBOL(max_low_pfn);
#endif
#define DECLARE_EXPORT(name) \
extern void name(void);EXPORT_SYMBOL(name)

View file

@@ -1350,6 +1350,10 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
return;
}
if (!kvm_vcpu_is_bsp(apic->vcpu))
value &= ~MSR_IA32_APICBASE_BSP;
vcpu->arch.apic_base = value;
/* update jump label if enable bit changes */
if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) {
if (value & MSR_IA32_APICBASE_ENABLE)
@@ -1359,10 +1363,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
recalculate_apic_map(vcpu->kvm);
}
if (!kvm_vcpu_is_bsp(apic->vcpu))
value &= ~MSR_IA32_APICBASE_BSP;
vcpu->arch.apic_base = value;
if ((old_value ^ value) & X2APIC_ENABLE) {
if (value & X2APIC_ENABLE) {
u32 id = kvm_apic_id(apic);

View file

@@ -8283,8 +8283,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
kvm_set_cr4(vcpu, vmcs12->host_cr4);
if (nested_cpu_has_ept(vmcs12))
nested_ept_uninit_mmu_context(vcpu);
nested_ept_uninit_mmu_context(vcpu);
kvm_set_cr3(vcpu, vmcs12->host_cr3);
kvm_mmu_reset_context(vcpu);

View file

@@ -156,6 +156,16 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data)
}
EXPORT_SYMBOL(acpi_bus_get_private_data);
void acpi_bus_no_hotplug(acpi_handle handle)
{
struct acpi_device *adev = NULL;
acpi_bus_get_device(handle, &adev);
if (adev)
adev->flags.no_hotplug = true;
}
EXPORT_SYMBOL_GPL(acpi_bus_no_hotplug);
static void acpi_print_osc_error(acpi_handle handle,
struct acpi_osc_context *context, char *error)
{

View file

@@ -87,6 +87,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0CF3, 0xE004) },
{ USB_DEVICE(0x0CF3, 0xE005) },
{ USB_DEVICE(0x0930, 0x0219) },
{ USB_DEVICE(0x0930, 0x0220) },
{ USB_DEVICE(0x0489, 0xe057) },
{ USB_DEVICE(0x13d3, 0x3393) },
{ USB_DEVICE(0x0489, 0xe04e) },
@@ -129,6 +130,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },

View file

@@ -154,6 +154,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },

View file

@@ -839,9 +839,6 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
/* set default policy */
ret = cpufreq_set_policy(policy, &new_policy);
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
if (ret) {
pr_debug("setting policy failed\n");
if (cpufreq_driver->exit)
@@ -1016,15 +1013,17 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
if (frozen)
/* Restore the saved policy when doing light-weight init */
policy = cpufreq_policy_restore(cpu);
else
/*
* Restore the saved policy when doing light-weight init and fall back
* to the full init if that fails.
*/
policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
if (!policy) {
frozen = false;
policy = cpufreq_policy_alloc();
if (!policy)
goto nomem_out;
if (!policy)
goto nomem_out;
}
/*
* In the resume path, since we restore a saved policy, the assignment
@@ -1069,8 +1068,10 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
*/
cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
policy->user_policy.min = policy->min;
policy->user_policy.max = policy->max;
if (!frozen) {
policy->user_policy.min = policy->min;
policy->user_policy.max = policy->max;
}
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_START, policy);
@@ -1101,6 +1102,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
cpufreq_init_policy(policy);
if (!frozen) {
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
}
kobject_uevent(&policy->kobj, KOBJ_ADD);
up_read(&cpufreq_rwsem);
@@ -1118,8 +1124,11 @@ err_get_freq:
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
err_set_policy_cpu:
if (frozen)
if (frozen) {
/* Do not leave stale fallback data behind. */
per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
cpufreq_policy_put_kobj(policy);
}
cpufreq_policy_free(policy);
nomem_out:

View file

@@ -614,6 +614,11 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
cpu = all_cpu_data[cpunum];
intel_pstate_get_cpu_pstates(cpu);
if (!cpu->pstate.current_pstate) {
all_cpu_data[cpunum] = NULL;
kfree(cpu);
return -ENODATA;
}
cpu->cpu = cpunum;

View file

@@ -65,7 +65,7 @@ static struct cpuidle_driver calxeda_idle_driver = {
.state_count = 2,
};
static int __init calxeda_cpuidle_probe(struct platform_device *pdev)
static int calxeda_cpuidle_probe(struct platform_device *pdev)
{
return cpuidle_register(&calxeda_idle_driver, NULL);
}

View file

@@ -1410,14 +1410,12 @@ static const struct platform_device_info ixp_dev_info __initdata = {
static int __init ixp_module_init(void)
{
int num = ARRAY_SIZE(ixp4xx_algos);
int i, err ;
int i, err;
pdev = platform_device_register_full(&ixp_dev_info);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
dev = &pdev->dev;
spin_lock_init(&desc_lock);
spin_lock_init(&emerg_lock);

View file

@@ -817,7 +817,15 @@ int ioat_dma_self_test(struct ioatdma_device *device)
}
dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_src)) {
dev_err(dev, "mapping src buffer failed\n");
goto free_resources;
}
dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dma_dest)) {
dev_err(dev, "mapping dest buffer failed\n");
goto unmap_src;
}
flags = DMA_PREP_INTERRUPT;
tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
IOAT_TEST_SIZE, flags);
@@ -855,8 +863,9 @@ int ioat_dma_self_test(struct ioatdma_device *device)
}
unmap_dma:
dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
unmap_src:
dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
dma->device_free_chan_resources(dma_chan);
out:
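The added checks follow the usual DMA API contract: every successful dma_map_single() needs a matching dma_unmap_single(), and a failed mapping must unwind only what was already mapped, in reverse order. A condensed sketch of the pattern (hypothetical helper around the standard kernel DMA API):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int map_src_dst(struct device *dev, void *src, void *dst, size_t len,
		       dma_addr_t *dma_src, dma_addr_t *dma_dst)
{
	*dma_src = dma_map_single(dev, src, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma_src))
		return -ENOMEM;

	*dma_dst = dma_map_single(dev, dst, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma_dst)) {
		/* only the source mapping exists at this point */
		dma_unmap_single(dev, *dma_src, len, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	return 0;
}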

View file

@@ -4,6 +4,7 @@ config DRM_MSM
depends on DRM
depends on ARCH_MSM
depends on ARCH_MSM8960
depends on MSM_IOMMU
select DRM_KMS_HELPER
select SHMEM
select TMPFS

View file

@@ -51,6 +51,7 @@ static struct nouveau_dsm_priv {
bool dsm_detected;
bool optimus_detected;
acpi_handle dhandle;
acpi_handle other_handle;
acpi_handle rom_handle;
} nouveau_dsm_priv;
@@ -260,9 +261,10 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
if (!dhandle)
return false;
if (!acpi_has_method(dhandle, "_DSM"))
if (!acpi_has_method(dhandle, "_DSM")) {
nouveau_dsm_priv.other_handle = dhandle;
return false;
}
if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
retval |= NOUVEAU_DSM_HAS_MUX;
@@ -338,6 +340,16 @@ static bool nouveau_dsm_detect(void)
printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
acpi_method_name);
nouveau_dsm_priv.dsm_detected = true;
/*
* On some systems hotplug events are generated for the device
* being switched off when _DSM is executed. They cause ACPI
* hotplug to trigger and attempt to remove the device from
* the system, which causes it to break down. Prevent that from
* happening by setting the no_hotplug flag for the involved
* ACPI device objects.
*/
acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle);
acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle);
ret = true;
}

View file

@@ -1143,31 +1143,53 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
}
if (tiling_flags & RADEON_TILING_MACRO) {
if (rdev->family >= CHIP_BONAIRE)
tmp = rdev->config.cik.tile_config;
else if (rdev->family >= CHIP_TAHITI)
tmp = rdev->config.si.tile_config;
else if (rdev->family >= CHIP_CAYMAN)
tmp = rdev->config.cayman.tile_config;
else
tmp = rdev->config.evergreen.tile_config;
evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
switch ((tmp & 0xf0) >> 4) {
case 0: /* 4 banks */
fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
break;
case 1: /* 8 banks */
default:
fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
break;
case 2: /* 16 banks */
fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
break;
/* Set NUM_BANKS. */
if (rdev->family >= CHIP_BONAIRE) {
unsigned tileb, index, num_banks, tile_split_bytes;
/* Calculate the macrotile mode index. */
tile_split_bytes = 64 << tile_split;
tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
tileb = min(tile_split_bytes, tileb);
for (index = 0; tileb > 64; index++) {
tileb >>= 1;
}
if (index >= 16) {
DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
target_fb->bits_per_pixel, tile_split);
return -EINVAL;
}
num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
} else {
/* SI and older. */
if (rdev->family >= CHIP_TAHITI)
tmp = rdev->config.si.tile_config;
else if (rdev->family >= CHIP_CAYMAN)
tmp = rdev->config.cayman.tile_config;
else
tmp = rdev->config.evergreen.tile_config;
switch ((tmp & 0xf0) >> 4) {
case 0: /* 4 banks */
fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
break;
case 1: /* 8 banks */
default:
fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
break;
case 2: /* 16 banks */
fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
break;
}
}
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
@@ -1180,19 +1202,12 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
if (rdev->family >= CHIP_BONAIRE) {
u32 num_pipe_configs = rdev->config.cik.max_tile_pipes;
u32 num_rb = rdev->config.cik.max_backends_per_se;
if (num_pipe_configs > 8)
num_pipe_configs = 8;
if (num_pipe_configs == 8)
fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P8_32x32_16x16);
else if (num_pipe_configs == 4) {
if (num_rb == 4)
fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_16x16);
else if (num_rb < 4)
fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_8x16);
} else if (num_pipe_configs == 2)
fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P2);
/* Read the pipe config from the 2D TILED SCANOUT mode.
* It should be the same for the other modes too, but not all
* modes set the pipe config field. */
u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f;
fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config);
} else if ((rdev->family == CHIP_TAHITI) ||
(rdev->family == CHIP_PITCAIRN))
fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
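A worked instance of the macrotile index computation above, assuming a 32 bpp scanout buffer and tile_split = 2, i.e. a 256-byte split (min() is the kernel macro):

unsigned tile_split_bytes = 64 << 2;            /* 256 */
unsigned tileb = 8 * 8 * 32 / 8;                /* 256 bytes per 8x8 tile */
unsigned index = 0;

tileb = min(tile_split_bytes, tileb);           /* still 256 */
while (tileb > 64) {                            /* 256 -> 128 -> 64 */
	tileb >>= 1;
	index++;
}
/* index ends at 2, selecting macrotile_mode_array[2] for NUM_BANKS */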

View file

@@ -3057,7 +3057,7 @@ static u32 cik_create_bitmask(u32 bit_width)
* Returns the disabled RB bitmask.
*/
static u32 cik_get_rb_disabled(struct radeon_device *rdev,
u32 max_rb_num, u32 se_num,
u32 max_rb_num_per_se,
u32 sh_per_se)
{
u32 data, mask;
@@ -3071,7 +3071,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
data >>= BACKEND_DISABLE_SHIFT;
mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se);
mask = cik_create_bitmask(max_rb_num_per_se / sh_per_se);
return data & mask;
}
@@ -3088,7 +3088,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
*/
static void cik_setup_rb(struct radeon_device *rdev,
u32 se_num, u32 sh_per_se,
u32 max_rb_num)
u32 max_rb_num_per_se)
{
int i, j;
u32 data, mask;
@@ -3098,7 +3098,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
cik_select_se_sh(rdev, i, j);
data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
if (rdev->family == CHIP_HAWAII)
disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
else
@@ -3108,12 +3108,14 @@ static void cik_setup_rb(struct radeon_device *rdev,
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
mask = 1;
for (i = 0; i < max_rb_num; i++) {
for (i = 0; i < max_rb_num_per_se * se_num; i++) {
if (!(disabled_rbs & mask))
enabled_rbs |= mask;
mask <<= 1;
}
rdev->config.cik.backend_enable_mask = enabled_rbs;
for (i = 0; i < se_num; i++) {
cik_select_se_sh(rdev, i, 0xffffffff);
data = 0;
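The loop-bound change matters on multi-SE parts: with two shader engines and two RBs per SE, the old bound scanned only max_rb_num_per_se = 2 mask bits and ignored the second engine's RBs. A sketch of the corrected scan (simplified from the diff):

static unsigned enabled_rb_mask(unsigned disabled_rbs, unsigned se_num,
				unsigned max_rb_num_per_se)
{
	unsigned i, mask = 1, enabled = 0;

	/* 2 SEs x 2 RBs per SE => 4 bits scanned, not 2 */
	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
		if (!(disabled_rbs & mask))
			enabled |= mask;
		mask <<= 1;
	}
	return enabled;
}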

View file

@@ -1940,7 +1940,7 @@ struct si_asic {
unsigned sc_earlyz_tile_fifo_size;
unsigned num_tile_pipes;
unsigned num_backends_per_se;
unsigned backend_enable_mask;
unsigned backend_disable_mask_per_asic;
unsigned backend_map;
unsigned num_texture_channel_caches;
@@ -1970,7 +1970,7 @@ struct cik_asic {
unsigned sc_earlyz_tile_fifo_size;
unsigned num_tile_pipes;
unsigned num_backends_per_se;
unsigned backend_enable_mask;
unsigned backend_disable_mask_per_asic;
unsigned backend_map;
unsigned num_texture_channel_caches;

View file

@@ -33,6 +33,7 @@ static struct radeon_atpx_priv {
bool atpx_detected;
/* handle for device - and atpx */
acpi_handle dhandle;
acpi_handle other_handle;
struct radeon_atpx atpx;
} radeon_atpx_priv;
@@ -451,9 +452,10 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
return false;
status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
if (ACPI_FAILURE(status))
if (ACPI_FAILURE(status)) {
radeon_atpx_priv.other_handle = dhandle;
return false;
}
radeon_atpx_priv.dhandle = dhandle;
radeon_atpx_priv.atpx.handle = atpx_handle;
return true;
@@ -530,6 +532,16 @@ static bool radeon_atpx_detect(void)
printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
acpi_method_name);
radeon_atpx_priv.atpx_detected = true;
/*
* On some systems hotplug events are generated for the device
* being switched off when ATPX is executed. They cause ACPI
* hotplug to trigger and attempt to remove the device from
* the system, which causes it to break down. Prevent that from
* happening by setting the no_hotplug flag for the involved
* ACPI device objects.
*/
acpi_bus_no_hotplug(radeon_atpx_priv.dhandle);
acpi_bus_no_hotplug(radeon_atpx_priv.other_handle);
return true;
}
return false;

View file

@@ -77,9 +77,10 @@
* 2.33.0 - Add SI tiling mode array query
* 2.34.0 - Add CIK tiling mode array query
* 2.35.0 - Add CIK macrotile mode array query
* 2.36.0 - Fix CIK DCE tiling setup
*/
#define KMS_DRIVER_MAJOR 2
#define KMS_DRIVER_MINOR 35
#define KMS_DRIVER_MINOR 36
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);

View file

@@ -461,6 +461,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case RADEON_INFO_SI_CP_DMA_COMPUTE:
*value = 1;
break;
case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
if (rdev->family >= CHIP_BONAIRE) {
*value = rdev->config.cik.backend_enable_mask;
} else if (rdev->family >= CHIP_TAHITI) {
*value = rdev->config.si.backend_enable_mask;
} else {
DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
}
break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;

View file

@@ -473,7 +473,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
return -EINVAL;
}
if ((start >> 28) != (end >> 28)) {
if ((start >> 28) != ((end - 1) >> 28)) {
DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
start, end);
return -EINVAL;
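The off-by-one fix: end is exclusive here, so a buffer stopping exactly on a 256MB boundary has its last byte at end - 1, still inside the lower segment. A minimal form of the corrected predicate:

#include <stdint.h>

/* nonzero only if the buffer actually straddles a 256MB boundary */
static int crosses_256mb(uint64_t start, uint64_t end /* exclusive */)
{
	return (start >> 28) != ((end - 1) >> 28);
}
/* start = 0x0FFFFF00, end = 0x10000000: the old test flagged a
 * crossing; this one correctly does not */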

View file

@@ -2811,7 +2811,7 @@ static void si_setup_spi(struct radeon_device *rdev,
}
static u32 si_get_rb_disabled(struct radeon_device *rdev,
u32 max_rb_num, u32 se_num,
u32 max_rb_num_per_se,
u32 sh_per_se)
{
u32 data, mask;
@@ -2825,14 +2825,14 @@ static u32 si_get_rb_disabled(struct radeon_device *rdev,
data >>= BACKEND_DISABLE_SHIFT;
mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
return data & mask;
}
static void si_setup_rb(struct radeon_device *rdev,
u32 se_num, u32 sh_per_se,
u32 max_rb_num)
u32 max_rb_num_per_se)
{
int i, j;
u32 data, mask;
@@ -2842,19 +2842,21 @@ static void si_setup_rb(struct radeon_device *rdev,
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
si_select_se_sh(rdev, i, j);
data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
}
}
si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
mask = 1;
for (i = 0; i < max_rb_num; i++) {
for (i = 0; i < max_rb_num_per_se * se_num; i++) {
if (!(disabled_rbs & mask))
enabled_rbs |= mask;
mask <<= 1;
}
rdev->config.si.backend_enable_mask = enabled_rbs;
for (i = 0; i < se_num; i++) {
si_select_se_sh(rdev, i, 0xffffffff);
data = 0;

View file

@@ -524,50 +524,6 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
#define VLAN_NONE 0xfff
#define FILTER_SEL_VLAN_NONE 0xffff
#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
#define FILTER_SEL_WIDTH_VIN_P_FC \
(6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
#define FILTER_SEL_WIDTH_TAG_P_FC \
(3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
struct l2t_entry *l2t)
{
unsigned int ntuple = 0;
u32 viid;
switch (dev->rdev.lldi.filt_mode) {
/* default filter mode */
case HW_TPL_FR_MT_PR_IV_P_FC:
if (l2t->vlan == VLAN_NONE)
ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
else {
ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
ntuple |= 1 << FILTER_SEL_WIDTH_TAG_P_FC;
}
ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
FILTER_SEL_WIDTH_VLD_TAG_P_FC;
break;
case HW_TPL_FR_MT_PR_OV_P_FC: {
viid = cxgb4_port_viid(l2t->neigh->dev);
ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
FILTER_SEL_WIDTH_VLD_TAG_P_FC;
break;
}
default:
break;
}
return ntuple;
}
static int send_connect(struct c4iw_ep *ep)
{
struct cpl_act_open_req *req;
@@ -641,8 +597,9 @@ static int send_connect(struct c4iw_ep *ep)
req->local_ip = la->sin_addr.s_addr;
req->peer_ip = ra->sin_addr.s_addr;
req->opt0 = cpu_to_be64(opt0);
req->params = cpu_to_be32(select_ntuple(ep->com.dev,
ep->dst, ep->l2t));
req->params = cpu_to_be32(cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
ep->l2t));
req->opt2 = cpu_to_be32(opt2);
} else {
req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
@@ -662,9 +619,9 @@ static int send_connect(struct c4iw_ep *ep)
req6->peer_ip_lo = *((__be64 *)
(ra6->sin6_addr.s6_addr + 8));
req6->opt0 = cpu_to_be64(opt0);
req6->params = cpu_to_be32(
select_ntuple(ep->com.dev, ep->dst,
ep->l2t));
req6->params = cpu_to_be32(cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
ep->l2t));
req6->opt2 = cpu_to_be32(opt2);
}
} else {
@@ -681,8 +638,9 @@ static int send_connect(struct c4iw_ep *ep)
t5_req->peer_ip = ra->sin_addr.s_addr;
t5_req->opt0 = cpu_to_be64(opt0);
t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
select_ntuple(ep->com.dev,
ep->dst, ep->l2t)));
cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
ep->l2t)));
t5_req->opt2 = cpu_to_be32(opt2);
} else {
t5_req6 = (struct cpl_t5_act_open_req6 *)
@@ -703,7 +661,9 @@ static int send_connect(struct c4iw_ep *ep)
(ra6->sin6_addr.s6_addr + 8));
t5_req6->opt0 = cpu_to_be64(opt0);
t5_req6->params = (__force __be64)cpu_to_be32(
select_ntuple(ep->com.dev, ep->dst, ep->l2t));
cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
ep->l2t));
t5_req6->opt2 = cpu_to_be32(opt2);
}
}
@@ -1630,7 +1590,8 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
memset(req, 0, sizeof(*req));
req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
ep->l2t));
sin = (struct sockaddr_in *)&ep->com.local_addr;
req->le.lport = sin->sin_port;
@@ -2938,7 +2899,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
/*
* Allocate a server TID.
*/
if (dev->rdev.lldi.enable_fw_ofld_conn)
if (dev->rdev.lldi.enable_fw_ofld_conn &&
ep->com.local_addr.ss_family == AF_INET)
ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
cm_id->local_addr.ss_family, ep);
else
@@ -3323,9 +3285,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
/*
* Calculate the server tid from filter hit index from cpl_rx_pkt.
*/
stid = (__force int) cpu_to_be32((__force u32) rss->hash_val)
- dev->rdev.lldi.tids->sftid_base
+ dev->rdev.lldi.tids->nstids;
stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
if (!lep) {
@@ -3397,7 +3357,9 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
window = (__force u16) htons((__force u16)tcph->window);
/* Calculate filter portion for LE region. */
filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e));
filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
dev->rdev.lldi.ports[0],
e));
/*
* Synthesize the cpl_pass_accept_req. We have everything except the

View file

@@ -1871,6 +1871,10 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
break;
case EV_ABS:
input_alloc_absinfo(dev);
if (!dev->absinfo)
return;
__set_bit(code, dev->absbit);
break;

View file

@@ -455,7 +455,18 @@ static void zforce_complete(struct zforce_ts *ts, int cmd, int result)
}
}
static irqreturn_t zforce_interrupt(int irq, void *dev_id)
static irqreturn_t zforce_irq(int irq, void *dev_id)
{
struct zforce_ts *ts = dev_id;
struct i2c_client *client = ts->client;
if (ts->suspended && device_may_wakeup(&client->dev))
pm_wakeup_event(&client->dev, 500);
return IRQ_WAKE_THREAD;
}
static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
{
struct zforce_ts *ts = dev_id;
struct i2c_client *client = ts->client;
@@ -465,12 +476,10 @@ static irqreturn_t zforce_interrupt(int irq, void *dev_id)
u8 *payload;
/*
* When suspended, emit a wakeup signal if necessary and return.
* When still suspended, return.
* Due to the level-interrupt we will get re-triggered later.
*/
if (ts->suspended) {
if (device_may_wakeup(&client->dev))
pm_wakeup_event(&client->dev, 500);
msleep(20);
return IRQ_HANDLED;
}
@@ -763,8 +772,8 @@ static int zforce_probe(struct i2c_client *client,
* Therefore we can trigger the interrupt anytime it is low and do
* not need to limit it to the interrupt edge.
*/
ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
zforce_interrupt,
ret = devm_request_threaded_irq(&client->dev, client->irq,
zforce_irq, zforce_irq_thread,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
input_dev->name, ts);
if (ret) {

View file

@@ -207,6 +207,7 @@ config SHMOBILE_IOMMU
bool "IOMMU for Renesas IPMMU/IPMMUI"
default n
depends on ARM
depends on SH_MOBILE || COMPILE_TEST
select IOMMU_API
select ARM_DMA_USE_IOMMU
select SHMOBILE_IPMMU

View file

@@ -248,8 +248,8 @@ static bool check_device(struct device *dev)
if (!dev || !dev->dma_mask)
return false;
/* No device or no PCI device */
if (dev->bus != &pci_bus_type)
/* No PCI device */
if (!dev_is_pci(dev))
return false;
devid = get_device_id(dev);

View file

@@ -52,6 +52,9 @@ LIST_HEAD(dmar_drhd_units);
struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
/*
@@ -100,7 +103,6 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
if (!pdev) {
pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
segment, scope->bus, path->device, path->function);
*dev = NULL;
return 0;
}
if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
@@ -151,7 +153,7 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
ret = dmar_parse_one_dev_scope(scope,
&(*devices)[index], segment);
if (ret) {
kfree(*devices);
dmar_free_dev_scope(devices, cnt);
return ret;
}
index ++;
@@ -162,6 +164,17 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
return 0;
}
void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
{
if (*devices && *cnt) {
while (--*cnt >= 0)
pci_dev_put((*devices)[*cnt]);
kfree(*devices);
*devices = NULL;
*cnt = 0;
}
}
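Each entry in the scope array holds a PCI device reference taken during parsing, so the teardown above drops one reference per entry before freeing the array and resets the out-parameters. A usage sketch (variable names are illustrative):

struct pci_dev **devices = NULL;
int cnt = 0;

/* ... populated by dmar_parse_dev_scope(start, end, &cnt, &devices, segment) ... */

dmar_free_dev_scope(&devices, &cnt);
/* afterwards: devices == NULL, cnt == 0, every pci_dev_put() issued */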
/**
* dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
* structure which uniquely represent one DMA remapping hardware unit
@@ -193,25 +206,28 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
return 0;
}
static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
if (dmaru->devices && dmaru->devices_cnt)
dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
if (dmaru->iommu)
free_iommu(dmaru->iommu);
kfree(dmaru);
}
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
struct acpi_dmar_hardware_unit *drhd;
int ret = 0;
drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
if (dmaru->include_all)
return 0;
ret = dmar_parse_dev_scope((void *)(drhd + 1),
((void *)drhd) + drhd->header.length,
&dmaru->devices_cnt, &dmaru->devices,
drhd->segment);
if (ret) {
list_del(&dmaru->list);
kfree(dmaru);
}
return ret;
return dmar_parse_dev_scope((void *)(drhd + 1),
((void *)drhd) + drhd->header.length,
&dmaru->devices_cnt, &dmaru->devices,
drhd->segment);
}
#ifdef CONFIG_ACPI_NUMA
@@ -423,7 +439,7 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
int __init dmar_dev_scope_init(void)
{
static int dmar_dev_scope_initialized;
struct dmar_drhd_unit *drhd, *drhd_n;
struct dmar_drhd_unit *drhd;
int ret = -ENODEV;
if (dmar_dev_scope_initialized)
@@ -432,7 +448,7 @@ int __init dmar_dev_scope_init(void)
if (list_empty(&dmar_drhd_units))
goto fail;
list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
list_for_each_entry(drhd, &dmar_drhd_units, list) {
ret = dmar_parse_dev(drhd);
if (ret)
goto fail;
@@ -456,24 +472,23 @@ int __init dmar_table_init(void)
static int dmar_table_initialized;
int ret;
if (dmar_table_initialized)
return 0;
if (dmar_table_initialized == 0) {
ret = parse_dmar_table();
if (ret < 0) {
if (ret != -ENODEV)
pr_info("parse DMAR table failure.\n");
} else if (list_empty(&dmar_drhd_units)) {
pr_info("No DMAR devices found\n");
ret = -ENODEV;
}
dmar_table_initialized = 1;
ret = parse_dmar_table();
if (ret) {
if (ret != -ENODEV)
pr_info("parse DMAR table failure.\n");
return ret;
if (ret < 0)
dmar_table_initialized = ret;
else
dmar_table_initialized = 1;
}
if (list_empty(&dmar_drhd_units)) {
pr_info("No DMAR devices found\n");
return -ENODEV;
}
return 0;
return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}
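The rework turns dmar_table_init() into a memoized initializer: the first call's outcome, success or failure, is cached so later callers get a stable answer without reparsing the table. The shape of the pattern, reduced to essentials (names are hypothetical):

static int table_state;	/* 0 = never ran, 1 = ok, < 0 = cached errno */

int table_init_once(void)
{
	if (table_state == 0) {
		int ret = parse_table();	/* hypothetical parser */
		table_state = ret < 0 ? ret : 1;
	}
	return table_state < 0 ? table_state : 0;
}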
static void warn_invalid_dmar(u64 addr, const char *message)
@@ -488,7 +503,7 @@ static void warn_invalid_dmar(u64 addr, const char *message)
dmi_get_system_info(DMI_PRODUCT_VERSION));
}
int __init check_zero_address(void)
static int __init check_zero_address(void)
{
struct acpi_table_dmar *dmar;
struct acpi_dmar_header *entry_header;
@@ -546,14 +561,6 @@ int __init detect_intel_iommu(void)
if (ret)
ret = check_zero_address();
{
struct acpi_table_dmar *dmar;
dmar = (struct acpi_table_dmar *) dmar_tbl;
if (ret && irq_remapping_enabled && cpu_has_x2apic &&
dmar->flags & 0x1)
pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
iommu_detected = 1;
/* Make sure ACS will be enabled */
@@ -565,7 +572,7 @@ int __init detect_intel_iommu(void)
x86_init.iommu.iommu_init = intel_iommu_init;
#endif
}
early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
dmar_tbl = NULL;
return ret ? 1 : -ENODEV;
@@ -647,7 +654,7 @@ out:
return err;
}
int alloc_iommu(struct dmar_drhd_unit *drhd)
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
u32 ver, sts;
@@ -721,12 +728,19 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
return err;
}
void free_iommu(struct intel_iommu *iommu)
static void free_iommu(struct intel_iommu *iommu)
{
if (!iommu)
return;
if (iommu->irq) {
free_irq(iommu->irq, iommu);
irq_set_handler_data(iommu->irq, NULL);
destroy_irq(iommu->irq);
}
free_dmar_iommu(iommu);
if (iommu->qi) {
free_page((unsigned long)iommu->qi->desc);
kfree(iommu->qi->desc_status);
kfree(iommu->qi);
}
if (iommu->reg)
unmap_iommu(iommu);
@@ -1050,7 +1064,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
if (!desc_page) {
kfree(qi);
iommu->qi = 0;
iommu->qi = NULL;
return -ENOMEM;
}
@@ -1060,7 +1074,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
if (!qi->desc_status) {
free_page((unsigned long) qi->desc);
kfree(qi);
iommu->qi = 0;
iommu->qi = NULL;
return -ENOMEM;
}
@@ -1111,9 +1125,7 @@ static const char *irq_remap_fault_reasons[] =
"Blocked an interrupt request due to source-id verification failure",
};
#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
if (fault_reason >= 0x20 && (fault_reason - 0x20 <
ARRAY_SIZE(irq_remap_fault_reasons))) {
@@ -1303,15 +1315,14 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
int __init enable_drhd_fault_handling(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
/*
* Enable fault control interrupt.
*/
for_each_drhd_unit(drhd) {
int ret;
struct intel_iommu *iommu = drhd->iommu;
for_each_iommu(iommu, drhd) {
u32 fault_status;
ret = dmar_set_interrupt(iommu);
int ret = dmar_set_interrupt(iommu);
if (ret) {
pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
@@ -1366,4 +1377,22 @@ int __init dmar_ir_support(void)
return 0;
return dmar->flags & 0x1;
}
static int __init dmar_free_unused_resources(void)
{
struct dmar_drhd_unit *dmaru, *dmaru_n;
/* DMAR units are in use */
if (irq_remapping_enabled || intel_iommu_enabled)
return 0;
list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
list_del(&dmaru->list);
dmar_free_drhd(dmaru);
}
return 0;
}
late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);

View file

@@ -691,7 +691,7 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
* Use LIODN of the PCI controller while attaching a
* PCI device.
*/
if (dev->bus == &pci_bus_type) {
if (dev_is_pci(dev)) {
pdev = to_pci_dev(dev);
pci_ctl = pci_bus_to_host(pdev->bus);
/*
@@ -729,7 +729,7 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain,
* Use LIODN of the PCI controller while detaching a
* PCI device.
*/
if (dev->bus == &pci_bus_type) {
if (dev_is_pci(dev)) {
pdev = to_pci_dev(dev);
pci_ctl = pci_bus_to_host(pdev->bus);
/*
@@ -1056,7 +1056,7 @@ static int fsl_pamu_add_device(struct device *dev)
* For platform devices we allocate a separate group for
* each of the devices.
*/
if (dev->bus == &pci_bus_type) {
if (dev_is_pci(dev)) {
pdev = to_pci_dev(dev);
/* Don't create device groups for virtual PCI bridges */
if (pdev->subordinate)

View file

@@ -63,6 +63,7 @@
#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
@@ -106,12 +107,12 @@ static inline int agaw_to_level(int agaw)
static inline int agaw_to_width(int agaw)
{
return 30 + agaw * LEVEL_STRIDE;
return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}
static inline int width_to_agaw(int width)
{
return (width - 30) / LEVEL_STRIDE;
return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}
static inline unsigned int level_to_offset_bits(int level)
@@ -141,7 +142,7 @@ static inline unsigned long align_to_level(unsigned long pfn, int level)
static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
return 1 << ((lvl - 1) * LEVEL_STRIDE);
return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
@@ -288,26 +289,6 @@ static inline void dma_clear_pte(struct dma_pte *pte)
pte->val = 0;
}
static inline void dma_set_pte_readable(struct dma_pte *pte)
{
pte->val |= DMA_PTE_READ;
}
static inline void dma_set_pte_writable(struct dma_pte *pte)
{
pte->val |= DMA_PTE_WRITE;
}
static inline void dma_set_pte_snp(struct dma_pte *pte)
{
pte->val |= DMA_PTE_SNP;
}
static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
pte->val = (pte->val & ~3) | (prot & 3);
}
static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
@@ -318,11 +299,6 @@ static inline u64 dma_pte_addr(struct dma_pte *pte)
#endif
}
static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}
static inline bool dma_pte_present(struct dma_pte *pte)
{
return (pte->val & 3) != 0;
@@ -406,7 +382,7 @@ struct device_domain_info {
static void flush_unmaps_timeout(unsigned long data);
DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
@ -652,9 +628,7 @@ static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
struct dmar_drhd_unit *drhd = NULL;
int i;
for_each_drhd_unit(drhd) {
if (drhd->ignored)
continue;
for_each_active_drhd_unit(drhd) {
if (segment != drhd->segment)
continue;
@@ -865,7 +839,6 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
unsigned int large_page = 1;
struct dma_pte *first_pte, *pte;
int order;
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -890,8 +863,7 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
} while (start_pfn && start_pfn <= last_pfn);
order = (large_page - 1) * 9;
return order;
return min_t(int, (large_page - 1) * 9, MAX_AGAW_PFN_WIDTH);
}
static void dma_pte_free_level(struct dmar_domain *domain, int level,
@@ -1255,8 +1227,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
unsigned long nlongs;
ndomains = cap_ndoms(iommu->cap);
pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
ndomains);
pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
iommu->seq_id, ndomains);
nlongs = BITS_TO_LONGS(ndomains);
spin_lock_init(&iommu->lock);
@@ -1266,13 +1238,17 @@ static int iommu_init_domains(struct intel_iommu *iommu)
*/
iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
if (!iommu->domain_ids) {
printk(KERN_ERR "Allocating domain id array failed\n");
pr_err("IOMMU%d: allocating domain id array failed\n",
iommu->seq_id);
return -ENOMEM;
}
iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
GFP_KERNEL);
if (!iommu->domains) {
printk(KERN_ERR "Allocating domain array failed\n");
pr_err("IOMMU%d: allocating domain array failed\n",
iommu->seq_id);
kfree(iommu->domain_ids);
iommu->domain_ids = NULL;
return -ENOMEM;
}
@@ -1289,10 +1265,10 @@ static int iommu_init_domains(struct intel_iommu *iommu)
static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);
void free_dmar_iommu(struct intel_iommu *iommu)
static void free_dmar_iommu(struct intel_iommu *iommu)
{
struct dmar_domain *domain;
int i;
int i, count;
unsigned long flags;
if ((iommu->domains) && (iommu->domain_ids)) {
@@ -1301,28 +1277,24 @@ void free_dmar_iommu(struct intel_iommu *iommu)
clear_bit(i, iommu->domain_ids);
spin_lock_irqsave(&domain->iommu_lock, flags);
if (--domain->iommu_count == 0) {
count = --domain->iommu_count;
spin_unlock_irqrestore(&domain->iommu_lock, flags);
if (count == 0) {
if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
vm_domain_exit(domain);
else
domain_exit(domain);
}
spin_unlock_irqrestore(&domain->iommu_lock, flags);
}
}
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
if (iommu->irq) {
irq_set_handler_data(iommu->irq, NULL);
/* This will mask the irq */
free_irq(iommu->irq, iommu);
destroy_irq(iommu->irq);
}
kfree(iommu->domains);
kfree(iommu->domain_ids);
iommu->domains = NULL;
iommu->domain_ids = NULL;
g_iommus[iommu->seq_id] = NULL;
@@ -2245,8 +2217,6 @@ static int __init si_domain_init(int hw)
if (!si_domain)
return -EFAULT;
pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
for_each_active_iommu(iommu, drhd) {
ret = iommu_attach_domain(si_domain, iommu);
if (ret) {
@@ -2261,6 +2231,8 @@ static int __init si_domain_init(int hw)
}
si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
pr_debug("IOMMU: identity mapping domain is domain %d\n",
si_domain->id);
if (hw)
return 0;
@@ -2492,11 +2464,7 @@ static int __init init_dmars(void)
goto error;
}
for_each_drhd_unit(drhd) {
if (drhd->ignored)
continue;
iommu = drhd->iommu;
for_each_active_iommu(iommu, drhd) {
g_iommus[iommu->seq_id] = iommu;
ret = iommu_init_domains(iommu);
@@ -2520,12 +2488,7 @@ static int __init init_dmars(void)
/*
* Start from the sane iommu hardware state.
*/
for_each_drhd_unit(drhd) {
if (drhd->ignored)
continue;
iommu = drhd->iommu;
for_each_active_iommu(iommu, drhd) {
/*
* If the queued invalidation is already initialized by us
* (for example, while enabling interrupt-remapping) then
@@ -2545,12 +2508,7 @@ static int __init init_dmars(void)
dmar_disable_qi(iommu);
}
for_each_drhd_unit(drhd) {
if (drhd->ignored)
continue;
iommu = drhd->iommu;
for_each_active_iommu(iommu, drhd) {
if (dmar_enable_qi(iommu)) {
/*
* Queued Invalidate not enabled, use Register Based
@@ -2633,17 +2591,16 @@ static int __init init_dmars(void)
* global invalidate iotlb
* enable translation
*/
for_each_drhd_unit(drhd) {
for_each_iommu(iommu, drhd) {
if (drhd->ignored) {
/*
* we always have to disable PMRs or DMA may fail on
* this device
*/
if (force_on)
iommu_disable_protect_mem_regions(drhd->iommu);
iommu_disable_protect_mem_regions(iommu);
continue;
}
iommu = drhd->iommu;
iommu_flush_write_buffer(iommu);
@@ -2665,12 +2622,9 @@ static int __init init_dmars(void)
return 0;
error:
for_each_drhd_unit(drhd) {
if (drhd->ignored)
continue;
iommu = drhd->iommu;
free_iommu(iommu);
}
for_each_active_iommu(iommu, drhd)
free_dmar_iommu(iommu);
kfree(deferred_flush);
kfree(g_iommus);
return ret;
}
@@ -2758,7 +2712,7 @@ static int iommu_no_mapping(struct device *dev)
struct pci_dev *pdev;
int found;
if (unlikely(dev->bus != &pci_bus_type))
if (unlikely(!dev_is_pci(dev)))
return 1;
pdev = to_pci_dev(dev);
@@ -3318,9 +3272,9 @@ static void __init init_no_remapping_devices(void)
}
}
for_each_drhd_unit(drhd) {
for_each_active_drhd_unit(drhd) {
int i;
if (drhd->ignored || drhd->include_all)
if (drhd->include_all)
continue;
for (i = 0; i < drhd->devices_cnt; i++)
@@ -3514,18 +3468,12 @@ static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
struct acpi_dmar_reserved_memory *rmrr;
int ret;
rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
ret = dmar_parse_dev_scope((void *)(rmrr + 1),
((void *)rmrr) + rmrr->header.length,
&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
if (ret || (rmrru->devices_cnt == 0)) {
list_del(&rmrru->list);
kfree(rmrru);
}
return ret;
return dmar_parse_dev_scope((void *)(rmrr + 1),
((void *)rmrr) + rmrr->header.length,
&rmrru->devices_cnt, &rmrru->devices,
rmrr->segment);
}
static LIST_HEAD(dmar_atsr_units);
@@ -3550,23 +3498,39 @@ int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
int rc;
struct acpi_dmar_atsr *atsr;
if (atsru->include_all)
return 0;
atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
rc = dmar_parse_dev_scope((void *)(atsr + 1),
(void *)atsr + atsr->header.length,
&atsru->devices_cnt, &atsru->devices,
atsr->segment);
if (rc || !atsru->devices_cnt) {
list_del(&atsru->list);
kfree(atsru);
return dmar_parse_dev_scope((void *)(atsr + 1),
(void *)atsr + atsr->header.length,
&atsru->devices_cnt, &atsru->devices,
atsr->segment);
}
static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{
dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
kfree(atsru);
}
static void intel_iommu_free_dmars(void)
{
struct dmar_rmrr_unit *rmrru, *rmrr_n;
struct dmar_atsr_unit *atsru, *atsr_n;
list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
list_del(&rmrru->list);
dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
kfree(rmrru);
}
return rc;
list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
list_del(&atsru->list);
intel_iommu_free_atsr(atsru);
}
}
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
@@ -3610,17 +3574,17 @@ found:
int __init dmar_parse_rmrr_atsr_dev(void)
{
struct dmar_rmrr_unit *rmrr, *rmrr_n;
struct dmar_atsr_unit *atsr, *atsr_n;
struct dmar_rmrr_unit *rmrr;
struct dmar_atsr_unit *atsr;
int ret = 0;
list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
list_for_each_entry(rmrr, &dmar_rmrr_units, list) {
ret = rmrr_parse_dev(rmrr);
if (ret)
return ret;
}
list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
list_for_each_entry(atsr, &dmar_atsr_units, list) {
ret = atsr_parse_dev(atsr);
if (ret)
return ret;
@@ -3667,8 +3631,9 @@ static struct notifier_block device_nb = {
int __init intel_iommu_init(void)
{
int ret = 0;
int ret = -ENODEV;
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
/* VT-d is required for a TXT/tboot launch, so enforce that */
force_on = tboot_force_iommu();
@@ -3676,36 +3641,29 @@ int __init intel_iommu_init(void)
if (dmar_table_init()) {
if (force_on)
panic("tboot: Failed to initialize DMAR table\n");
return -ENODEV;
goto out_free_dmar;
}
/*
* Disable translation if already enabled prior to OS handover.
*/
for_each_drhd_unit(drhd) {
struct intel_iommu *iommu;
if (drhd->ignored)
continue;
iommu = drhd->iommu;
for_each_active_iommu(iommu, drhd)
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
}
if (dmar_dev_scope_init() < 0) {
if (force_on)
panic("tboot: Failed to initialize DMAR device scope\n");
return -ENODEV;
goto out_free_dmar;
}
if (no_iommu || dmar_disabled)
return -ENODEV;
goto out_free_dmar;
if (iommu_init_mempool()) {
if (force_on)
panic("tboot: Failed to initialize iommu memory\n");
return -ENODEV;
goto out_free_dmar;
}
if (list_empty(&dmar_rmrr_units))
@@ -3717,7 +3675,7 @@ int __init intel_iommu_init(void)
if (dmar_init_reserved_ranges()) {
if (force_on)
panic("tboot: Failed to reserve iommu ranges\n");
return -ENODEV;
goto out_free_mempool;
}
init_no_remapping_devices();
@@ -3727,9 +3685,7 @@ int __init intel_iommu_init(void)
if (force_on)
panic("tboot: Failed to initialize DMARs\n");
printk(KERN_ERR "IOMMU: dmar init failed\n");
put_iova_domain(&reserved_iova_list);
iommu_exit_mempool();
return ret;
goto out_free_reserved_range;
}
printk(KERN_INFO
"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
@@ -3749,6 +3705,14 @@ int __init intel_iommu_init(void)
intel_iommu_enabled = 1;
return 0;
out_free_reserved_range:
put_iova_domain(&reserved_iova_list);
out_free_mempool:
iommu_exit_mempool();
out_free_dmar:
intel_iommu_free_dmars();
return ret;
}
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
@@ -3877,7 +3841,7 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
}
/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;
static atomic_t vm_domid = ATOMIC_INIT(0);
static struct dmar_domain *iommu_alloc_vm_domain(void)
{
@@ -3887,7 +3851,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void)
if (!domain)
return NULL;
domain->id = vm_domid++;
domain->id = atomic_inc_return(&vm_domid);
domain->nid = -1;
memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
@@ -3934,11 +3898,7 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
unsigned long i;
unsigned long ndomains;
for_each_drhd_unit(drhd) {
if (drhd->ignored)
continue;
iommu = drhd->iommu;
for_each_active_iommu(iommu, drhd) {
ndomains = cap_ndoms(iommu->cap);
for_each_set_bit(i, iommu->domain_ids, ndomains) {
if (iommu->domains[i] == domain) {

View file

@@ -40,13 +40,15 @@ static int ir_ioapic_num, ir_hpet_num;
static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
static int __init parse_ioapics_under_ir(void);
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
struct irq_cfg *cfg = irq_get_chip_data(irq);
return cfg ? &cfg->irq_2_iommu : NULL;
}
int get_irte(int irq, struct irte *entry)
static int get_irte(int irq, struct irte *entry)
{
struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
unsigned long flags;
@@ -69,19 +71,13 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
struct ir_table *table = iommu->ir_table;
struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
struct irq_cfg *cfg = irq_get_chip_data(irq);
u16 index, start_index;
unsigned int mask = 0;
unsigned long flags;
int i;
int index;
if (!count || !irq_iommu)
return -1;
/*
* start the IRTE search from index 0.
*/
index = start_index = 0;
if (count > 1) {
count = __roundup_pow_of_two(count);
mask = ilog2(count);
@@ -96,32 +92,17 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
}
raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
do {
for (i = index; i < index + count; i++)
if (table->base[i].present)
break;
/* empty index found */
if (i == index + count)
break;
index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
if (index == start_index) {
raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
printk(KERN_ERR "can't allocate an IRTE\n");
return -1;
}
} while (1);
for (i = index; i < index + count; i++)
table->base[i].present = 1;
cfg->remapped = 1;
irq_iommu->iommu = iommu;
irq_iommu->irte_index = index;
irq_iommu->sub_handle = 0;
irq_iommu->irte_mask = mask;
index = bitmap_find_free_region(table->bitmap,
INTR_REMAP_TABLE_ENTRIES, mask);
if (index < 0) {
pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
} else {
cfg->remapped = 1;
irq_iommu->iommu = iommu;
irq_iommu->irte_index = index;
irq_iommu->sub_handle = 0;
irq_iommu->irte_mask = mask;
}
raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return index;
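
For the allocator rewrite above: the deleted loop walked the IRTE table circularly looking for count consecutive free slots, while bitmap_find_free_region() grabs a naturally aligned run of 1 << mask entries in one call (and clear_entries(), in the next hunk, hands the region back via bitmap_release_region()). A rough user-space sketch of those semantics — the table size and function names here are illustrative, not the kernel API:

#include <stdio.h>

#define TABLE_ENTRIES 64
static unsigned char used[TABLE_ENTRIES];

/* Find a free, naturally aligned run of (1 << order) slots; -1 if none. */
static int find_free_region(int order)
{
	int count = 1 << order;
	for (int base = 0; base + count <= TABLE_ENTRIES; base += count) {
		int busy = 0;
		for (int i = base; i < base + count; i++)
			busy |= used[i];
		if (!busy) {
			for (int i = base; i < base + count; i++)
				used[i] = 1;
			return base;
		}
	}
	return -1;
}

static void release_region(int base, int order)
{
	for (int i = base; i < base + (1 << order); i++)
		used[i] = 0;
}

int main(void)
{
	int a = find_free_region(2);	/* 4 aligned slots, like count=4/mask=2 */
	int b = find_free_region(0);	/* a single slot */
	printf("a=%d b=%d\n", a, b);	/* a=0 b=4 */
	release_region(a, 2);
	return 0;
}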
@ -254,6 +235,8 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
set_64bit(&entry->low, 0);
set_64bit(&entry->high, 0);
}
bitmap_release_region(iommu->ir_table->bitmap, index,
irq_iommu->irte_mask);
return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
@ -336,7 +319,7 @@ static int set_ioapic_sid(struct irte *irte, int apic)
return -1;
}
set_irte_sid(irte, 1, 0, sid);
set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);
return 0;
}
@ -453,6 +436,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
{
struct ir_table *ir_table;
struct page *pages;
unsigned long *bitmap;
ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
GFP_ATOMIC);
@ -464,13 +448,23 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
INTR_REMAP_PAGE_ORDER);
if (!pages) {
printk(KERN_ERR "failed to allocate pages of order %d\n",
INTR_REMAP_PAGE_ORDER);
pr_err("IR%d: failed to allocate pages of order %d\n",
iommu->seq_id, INTR_REMAP_PAGE_ORDER);
kfree(iommu->ir_table);
return -ENOMEM;
}
bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
sizeof(long), GFP_ATOMIC);
if (bitmap == NULL) {
pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
__free_pages(pages, INTR_REMAP_PAGE_ORDER);
kfree(ir_table);
return -ENOMEM;
}
ir_table->base = page_address(pages);
ir_table->bitmap = bitmap;
iommu_set_irq_remapping(iommu, mode);
return 0;
@ -521,6 +515,7 @@ static int __init dmar_x2apic_optout(void)
static int __init intel_irq_remapping_supported(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
if (disable_irq_remap)
return 0;
@ -539,12 +534,9 @@ static int __init intel_irq_remapping_supported(void)
if (!dmar_ir_support())
return 0;
for_each_drhd_unit(drhd) {
struct intel_iommu *iommu = drhd->iommu;
for_each_iommu(iommu, drhd)
if (!ecap_ir_support(iommu->ecap))
return 0;
}
return 1;
}
@ -552,6 +544,7 @@ static int __init intel_irq_remapping_supported(void)
static int __init intel_enable_irq_remapping(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
bool x2apic_present;
int setup = 0;
int eim = 0;
@ -564,6 +557,8 @@ static int __init intel_enable_irq_remapping(void)
}
if (x2apic_present) {
pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
eim = !dmar_x2apic_optout();
if (!eim)
printk(KERN_WARNING
@ -572,9 +567,7 @@ static int __init intel_enable_irq_remapping(void)
"Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
}
for_each_drhd_unit(drhd) {
struct intel_iommu *iommu = drhd->iommu;
for_each_iommu(iommu, drhd) {
/*
* If the queued invalidation is already initialized,
* shouldn't disable it.
@ -599,9 +592,7 @@ static int __init intel_enable_irq_remapping(void)
/*
* check for the Interrupt-remapping support
*/
for_each_drhd_unit(drhd) {
struct intel_iommu *iommu = drhd->iommu;
for_each_iommu(iommu, drhd) {
if (!ecap_ir_support(iommu->ecap))
continue;
@ -615,10 +606,8 @@ static int __init intel_enable_irq_remapping(void)
/*
* Enable queued invalidation for all the DRHD's.
*/
for_each_drhd_unit(drhd) {
int ret;
struct intel_iommu *iommu = drhd->iommu;
ret = dmar_enable_qi(iommu);
for_each_iommu(iommu, drhd) {
int ret = dmar_enable_qi(iommu);
if (ret) {
printk(KERN_ERR "DRHD %Lx: failed to enable queued, "
@ -631,9 +620,7 @@ static int __init intel_enable_irq_remapping(void)
/*
* Setup Interrupt-remapping for all the DRHD's now.
*/
for_each_drhd_unit(drhd) {
struct intel_iommu *iommu = drhd->iommu;
for_each_iommu(iommu, drhd) {
if (!ecap_ir_support(iommu->ecap))
continue;
@ -774,22 +761,20 @@ static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
* Finds the association between IOAPIC's and its Interrupt-remapping
* hardware unit.
*/
int __init parse_ioapics_under_ir(void)
static int __init parse_ioapics_under_ir(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
int ir_supported = 0;
int ioapic_idx;
for_each_drhd_unit(drhd) {
struct intel_iommu *iommu = drhd->iommu;
for_each_iommu(iommu, drhd)
if (ecap_ir_support(iommu->ecap)) {
if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
return -1;
ir_supported = 1;
}
}
if (!ir_supported)
return 0;
@ -807,7 +792,7 @@ int __init parse_ioapics_under_ir(void)
return 1;
}
int __init ir_dev_scope_init(void)
static int __init ir_dev_scope_init(void)
{
if (!irq_remapping_enabled)
return 0;

View file

@ -150,7 +150,7 @@ static int irq_remapping_setup_msi_irqs(struct pci_dev *dev,
return do_setup_msix_irqs(dev, nvec);
}
void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
static void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
{
/*
* Intr-remapping uses pin number as the virtual vector
@ -295,8 +295,8 @@ int setup_ioapic_remapped_entry(int irq,
vector, attr);
}
int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
static int set_remapped_irq_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
if (!config_enabled(CONFIG_SMP) || !remap_ops ||
!remap_ops->set_affinity)

View file

@ -20,6 +20,7 @@
#include <linux/export.h>
#include <linux/limits.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
/**
* of_get_dma_window - Parse *dma-window property and returns 0 if found.

View file

@ -380,14 +380,13 @@ int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu)
kmem_cache_destroy(l1cache);
return -ENOMEM;
}
archdata = kmalloc(sizeof(*archdata), GFP_KERNEL);
archdata = kzalloc(sizeof(*archdata), GFP_KERNEL);
if (!archdata) {
kmem_cache_destroy(l1cache);
kmem_cache_destroy(l2cache);
return -ENOMEM;
}
spin_lock_init(&archdata->attach_lock);
archdata->attached = NULL;
archdata->ipmmu = ipmmu;
ipmmu_archdata = archdata;
bus_set_iommu(&platform_bus_type, &shmobile_iommu_ops);

View file

@ -35,12 +35,12 @@ void ipmmu_tlb_flush(struct shmobile_ipmmu *ipmmu)
if (!ipmmu)
return;
mutex_lock(&ipmmu->flush_lock);
spin_lock(&ipmmu->flush_lock);
if (ipmmu->tlb_enabled)
ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH | IMCTR1_TLBEN);
else
ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH);
mutex_unlock(&ipmmu->flush_lock);
spin_unlock(&ipmmu->flush_lock);
}
void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
@ -49,7 +49,7 @@ void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
if (!ipmmu)
return;
mutex_lock(&ipmmu->flush_lock);
spin_lock(&ipmmu->flush_lock);
switch (size) {
default:
ipmmu->tlb_enabled = 0;
@ -85,7 +85,7 @@ void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
}
ipmmu_reg_write(ipmmu, IMTTBR, phys);
ipmmu_reg_write(ipmmu, IMASID, asid);
mutex_unlock(&ipmmu->flush_lock);
spin_unlock(&ipmmu->flush_lock);
}
static int ipmmu_probe(struct platform_device *pdev)
@ -104,7 +104,7 @@ static int ipmmu_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "cannot allocate device data\n");
return -ENOMEM;
}
mutex_init(&ipmmu->flush_lock);
spin_lock_init(&ipmmu->flush_lock);
ipmmu->dev = &pdev->dev;
ipmmu->ipmmu_base = devm_ioremap_nocache(&pdev->dev, res->start,
resource_size(res));

View file

@ -14,7 +14,7 @@ struct shmobile_ipmmu {
struct device *dev;
void __iomem *ipmmu_base;
int tlb_enabled;
struct mutex flush_lock;
spinlock_t flush_lock;
const char * const *dev_names;
unsigned int num_dev_names;
};

View file

@ -2201,20 +2201,25 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
port = &(SLAVE_AD_INFO(slave).port);
// if slave is null, the whole port is not initialized
/* if slave is null, the whole port is not initialized */
if (!port->slave) {
pr_warning("Warning: %s: speed changed for uninitialized port on %s\n",
slave->bond->dev->name, slave->dev->name);
return;
}
__get_state_machine_lock(port);
port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
port->actor_oper_port_key = port->actor_admin_port_key |=
(__get_link_speed(port) << 1);
pr_debug("Port %d changed speed\n", port->actor_port_number);
// there is no need to reselect a new aggregator, just signal the
// state machines to reinitialize
/* there is no need to reselect a new aggregator, just signal the
* state machines to reinitialize
*/
port->sm_vars |= AD_PORT_BEGIN;
__release_state_machine_lock(port);
}
/**
@ -2229,20 +2234,25 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
port = &(SLAVE_AD_INFO(slave).port);
// if slave is null, the whole port is not initialized
/* if slave is null, the whole port is not initialized */
if (!port->slave) {
pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n",
slave->bond->dev->name, slave->dev->name);
return;
}
__get_state_machine_lock(port);
port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
port->actor_oper_port_key = port->actor_admin_port_key |=
__get_duplex(port);
pr_debug("Port %d changed duplex\n", port->actor_port_number);
// there is no need to reselect a new aggregator, just signal the
// state machines to reinitialize
/* there is no need to reselect a new aggregator, just signal the
* state machines to reinitialize
*/
port->sm_vars |= AD_PORT_BEGIN;
__release_state_machine_lock(port);
}
/**
@ -2258,15 +2268,21 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
port = &(SLAVE_AD_INFO(slave).port);
// if slave is null, the whole port is not initialized
/* if slave is null, the whole port is not initialized */
if (!port->slave) {
pr_warning("Warning: %s: link status changed for uninitialized port on %s\n",
slave->bond->dev->name, slave->dev->name);
return;
}
// on link down we are zeroing duplex and speed since some of the adaptors(ce1000.lan) report full duplex/speed instead of N/A(duplex) / 0(speed)
// on link up we are forcing recheck on the duplex and speed since some of he adaptors(ce1000.lan) report
__get_state_machine_lock(port);
/* on link down we are zeroing duplex and speed since
* some of the adaptors(ce1000.lan) report full duplex/speed
* instead of N/A(duplex) / 0(speed).
*
* on link up we are forcing recheck on the duplex and speed since
* some of the adaptors(ce1000.lan) report.
*/
if (link == BOND_LINK_UP) {
port->is_enabled = true;
port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
@ -2282,10 +2298,15 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
port->actor_oper_port_key = (port->actor_admin_port_key &=
~AD_SPEED_KEY_BITS);
}
//BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN")));
// there is no need to reselect a new aggregator, just signal the
// state machines to reinitialize
pr_debug("Port %d changed link status to %s",
port->actor_port_number,
(link == BOND_LINK_UP) ? "UP" : "DOWN");
/* there is no need to reselect a new aggregator, just signal the
* state machines to reinitialize
*/
port->sm_vars |= AD_PORT_BEGIN;
__release_state_machine_lock(port);
}
/*

View file

@ -565,6 +565,8 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
/* Make sure pointer to data buffer is set */
wmb();
skb_tx_timestamp(skb);
*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
/* Increment index to point to the next BD */
@ -579,8 +581,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
arc_reg_set(priv, R_STATUS, TXPL_MASK);
skb_tx_timestamp(skb);
return NETDEV_TX_OK;
}

View file

@ -145,9 +145,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
* Mask some pcie error bits
*/
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
if (pos) {
pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
}
/* clear error status */
pcie_capability_write_word(pdev, PCI_EXP_DEVSTA,
PCI_EXP_DEVSTA_NFED |

Просмотреть файл

@ -208,7 +208,7 @@ static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
return -EINVAL;
}
BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg);
DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
*vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;

View file

@ -7622,7 +7622,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
u32 base = (u32) mapping & 0xffffffff;
return (base > 0xffffdcc0) && (base + len + 8 < base);
return base + len + 8 < base;
}
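
The tg3 hunk above drops the magic 0xffffdcc0 bound and keeps only the wraparound test: in 32-bit arithmetic, base + len + 8 comes out smaller than base exactly when the sum wrapped past 2^32, i.e. when the buffer (plus 8 bytes of slack) straddles a 4 GB boundary. A self-contained illustration:

#include <stdint.h>
#include <stdio.h>

static int crosses_4g(uint32_t base, uint32_t len)
{
	/* True iff base + len + 8 wraps around 2^32. */
	return (uint32_t)(base + len + 8) < base;
}

int main(void)
{
	printf("%d\n", crosses_4g(0xfffffff0u, 0x20));	/* 1: wraps past 4 GB */
	printf("%d\n", crosses_4g(0x10000000u, 0x20));	/* 0: well inside */
	return 0;
}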
/* Test for TSO DMA buffers that cross into regions which are within MSS bytes

View file

@ -228,6 +228,25 @@ struct tp_params {
uint32_t dack_re; /* DACK timer resolution */
unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */
u32 ingress_config; /* cached TP_INGRESS_CONFIG */
/* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a
* subset of the set of fields which may be present in the Compressed
* Filter Tuple portion of filters and TCP TCB connections. The
* fields which are present are controlled by the TP_VLAN_PRI_MAP.
* Since a variable number of fields may or may not be present, their
* shifted field positions within the Compressed Filter Tuple may
* vary, or not even be present if the field isn't selected in
* TP_VLAN_PRI_MAP. Since some of these fields are needed in various
* places we store their offsets here, or a -1 if the field isn't
* present.
*/
int vlan_shift;
int vnic_shift;
int port_shift;
int protocol_shift;
};
struct vpd_params {
@ -926,6 +945,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
const u8 *fw_data, unsigned int fw_size,
struct fw_hdr *card_fw, enum dev_state state, int *reset);
int t4_prep_adapter(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,

View file

@ -2986,7 +2986,14 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
if (stid >= 0) {
t->stid_tab[stid].data = data;
stid += t->stid_base;
t->stids_in_use++;
/* IPv6 requires max of 520 bits or 16 cells in TCAM
* This is equivalent to 4 TIDs. With CLIP enabled it
* needs 2 TIDs.
*/
if (family == PF_INET)
t->stids_in_use++;
else
t->stids_in_use += 4;
}
spin_unlock_bh(&t->stid_lock);
return stid;
@ -3012,7 +3019,8 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
}
if (stid >= 0) {
t->stid_tab[stid].data = data;
stid += t->stid_base;
stid -= t->nstids;
stid += t->sftid_base;
t->stids_in_use++;
}
spin_unlock_bh(&t->stid_lock);
@ -3024,14 +3032,24 @@ EXPORT_SYMBOL(cxgb4_alloc_sftid);
*/
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
stid -= t->stid_base;
/* Is it a server filter TID? */
if (t->nsftids && (stid >= t->sftid_base)) {
stid -= t->sftid_base;
stid += t->nstids;
} else {
stid -= t->stid_base;
}
spin_lock_bh(&t->stid_lock);
if (family == PF_INET)
__clear_bit(stid, t->stid_bmap);
else
bitmap_release_region(t->stid_bmap, stid, 2);
t->stid_tab[stid].data = NULL;
t->stids_in_use--;
if (family == PF_INET)
t->stids_in_use--;
else
t->stids_in_use -= 4;
spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
@ -3134,6 +3152,7 @@ static int tid_init(struct tid_info *t)
size_t size;
unsigned int stid_bmap_size;
unsigned int natids = t->natids;
struct adapter *adap = container_of(t, struct adapter, tids);
stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
size = t->ntids * sizeof(*t->tid_tab) +
@ -3167,6 +3186,11 @@ static int tid_init(struct tid_info *t)
t->afree = t->atid_tab;
}
bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
/* Reserve stid 0 for T4/T5 adapters */
if (!t->stid_base &&
(is_t4(adap->params.chip) || is_t5(adap->params.chip)))
__set_bit(0, t->stid_bmap);
return 0;
}
@ -3731,7 +3755,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
(adap->fn * 4));
lli.filt_mode = adap->filter_mode;
lli.filt_mode = adap->params.tp.vlan_pri_map;
/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
for (i = 0; i < NCHAN; i++)
lli.tx_modq[i] = i;
@ -4179,7 +4203,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
adap = netdev2adap(dev);
/* Adjust stid to correct filter index */
stid -= adap->tids.nstids;
stid -= adap->tids.sftid_base;
stid += adap->tids.nftids;
/* Check to make sure the filter requested is writable ...
@ -4205,12 +4229,17 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
f->fs.val.lip[i] = val[i];
f->fs.mask.lip[i] = ~0;
}
if (adap->filter_mode & F_PORT) {
if (adap->params.tp.vlan_pri_map & F_PORT) {
f->fs.val.iport = port;
f->fs.mask.iport = mask;
}
}
if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
f->fs.val.proto = IPPROTO_TCP;
f->fs.mask.proto = ~0;
}
f->fs.dirsteer = 1;
f->fs.iq = queue;
/* Mark filter as locked */
@ -4237,7 +4266,7 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
adap = netdev2adap(dev);
/* Adjust stid to correct filter index */
stid -= adap->tids.nstids;
stid -= adap->tids.sftid_base;
stid += adap->tids.nftids;
f = &adap->tids.ftid_tab[stid];
@ -5092,7 +5121,7 @@ static int adap_init0(struct adapter *adap)
enum dev_state state;
u32 params[7], val[7];
struct fw_caps_config_cmd caps_cmd;
int reset = 1, j;
int reset = 1;
/*
* Contact FW, advertising Master capability (and potentially forcing
@ -5434,21 +5463,11 @@ static int adap_init0(struct adapter *adap)
/*
* These are finalized by FW initialization, load their values now.
*/
v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
adap->params.tp.tre = TIMERRESOLUTION_GET(v);
adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
adap->params.b_wnd);
/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
for (j = 0; j < NCHAN; j++)
adap->params.tp.tx_modq[j] = j;
t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
&adap->filter_mode, 1,
TP_VLAN_PRI_MAP);
t4_init_tp_params(adap);
adap->flags |= FW_OK;
return 0;

View file

@ -131,7 +131,14 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
{
stid -= t->stid_base;
/* Is it a server filter TID? */
if (t->nsftids && (stid >= t->sftid_base)) {
stid -= t->sftid_base;
stid += t->nstids;
} else {
stid -= t->stid_base;
}
return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL;
}
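
For the lookup_stid() change above, a simplified model of the rebasing may help: server-filter TIDs occupy their own hardware range starting at sftid_base, and the fix maps them onto the table slots just past the regular stids so one stid_tab covers both. The bases and sizes below are invented for illustration, and the nsftids guard is elided:

#include <stdio.h>

#define NSTIDS		128	/* regular server TIDs (assumed) */
#define STID_BASE	512	/* hw value of first regular stid (assumed) */
#define SFTID_BASE	900	/* hw value of first filter stid (assumed) */

static int stid_to_index(unsigned int stid)
{
	if (stid >= SFTID_BASE)			/* server filter TID */
		return stid - SFTID_BASE + NSTIDS;
	return stid - STID_BASE;		/* regular server TID */
}

int main(void)
{
	printf("%d\n", stid_to_index(512));	/* 0: first regular entry */
	printf("%d\n", stid_to_index(900));	/* 128: first filter entry */
	return 0;
}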

View file

@ -45,6 +45,7 @@
#include "l2t.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4_regs.h"
#define VLAN_NONE 0xfff
@ -411,6 +412,40 @@ done:
}
EXPORT_SYMBOL(cxgb4_l2t_get);
u64 cxgb4_select_ntuple(struct net_device *dev,
const struct l2t_entry *l2t)
{
struct adapter *adap = netdev2adap(dev);
struct tp_params *tp = &adap->params.tp;
u64 ntuple = 0;
/* Initialize each of the fields which we care about which are present
* in the Compressed Filter Tuple.
*/
if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
ntuple |= (F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
if (tp->port_shift >= 0)
ntuple |= (u64)l2t->lport << tp->port_shift;
if (tp->protocol_shift >= 0)
ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
if (tp->vnic_shift >= 0) {
u32 viid = cxgb4_port_viid(dev);
u32 vf = FW_VIID_VIN_GET(viid);
u32 pf = FW_VIID_PFN_GET(viid);
u32 vld = FW_VIID_VIVLD_GET(viid);
ntuple |= (u64)(V_FT_VNID_ID_VF(vf) |
V_FT_VNID_ID_PF(pf) |
V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
}
return ntuple;
}
EXPORT_SYMBOL(cxgb4_select_ntuple);
/*
* Called when address resolution fails for an L2T entry to handle packets
* on the arpq head. If a packet specifies a failure handler it is invoked,

View file

@ -98,7 +98,8 @@ int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
const struct net_device *physdev,
unsigned int priority);
u64 cxgb4_select_ntuple(struct net_device *dev,
const struct l2t_entry *l2t);
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,

View file

@ -3808,6 +3808,109 @@ int t4_prep_adapter(struct adapter *adapter)
return 0;
}
/**
* t4_init_tp_params - initialize adap->params.tp
* @adap: the adapter
*
* Initialize various fields of the adapter's TP Parameters structure.
*/
int t4_init_tp_params(struct adapter *adap)
{
int chan;
u32 v;
v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
adap->params.tp.tre = TIMERRESOLUTION_GET(v);
adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
for (chan = 0; chan < NCHAN; chan++)
adap->params.tp.tx_modq[chan] = chan;
/* Cache the adapter's Compressed Filter Mode and global Ingress
* Configuration.
*/
t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
&adap->params.tp.vlan_pri_map, 1,
TP_VLAN_PRI_MAP);
t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
&adap->params.tp.ingress_config, 1,
TP_INGRESS_CONFIG);
/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
* shift positions of several elements of the Compressed Filter Tuple
* for this adapter which we need frequently ...
*/
adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
F_PROTOCOL);
/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
* represents the presence of an Outer VLAN instead of a VNIC ID.
*/
if ((adap->params.tp.ingress_config & F_VNIC) == 0)
adap->params.tp.vnic_shift = -1;
return 0;
}
/**
* t4_filter_field_shift - calculate filter field shift
* @adap: the adapter
* @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
*
* Return the shift position of a filter field within the Compressed
* Filter Tuple. The filter field is specified via its selection bit
* within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
*/
int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
{
unsigned int filter_mode = adap->params.tp.vlan_pri_map;
unsigned int sel;
int field_shift;
if ((filter_mode & filter_sel) == 0)
return -1;
for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
switch (filter_mode & sel) {
case F_FCOE:
field_shift += W_FT_FCOE;
break;
case F_PORT:
field_shift += W_FT_PORT;
break;
case F_VNIC_ID:
field_shift += W_FT_VNIC_ID;
break;
case F_VLAN:
field_shift += W_FT_VLAN;
break;
case F_TOS:
field_shift += W_FT_TOS;
break;
case F_PROTOCOL:
field_shift += W_FT_PROTOCOL;
break;
case F_ETHERTYPE:
field_shift += W_FT_ETHERTYPE;
break;
case F_MACMATCH:
field_shift += W_FT_MACMATCH;
break;
case F_MPSHITTYPE:
field_shift += W_FT_MPSHITTYPE;
break;
case F_FRAGMENTATION:
field_shift += W_FT_FRAGMENTATION;
break;
}
}
return field_shift;
}
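
t4_filter_field_shift() is easiest to see with a worked example: the shift of a selected field is the sum of the widths of every lower-order field that is also enabled in the filter mode. A standalone model over a subset of the fields, with widths mirroring the W_FT_* constants added later in this commit:

#include <stdio.h>

enum { FCOE = 1 << 0, PORT = 1 << 1, VNIC_ID = 1 << 2,
       VLAN = 1 << 3, TOS = 1 << 4, PROTOCOL = 1 << 5 };

static int width(int field)
{
	switch (field) {
	case FCOE:	return 1;
	case PORT:	return 3;
	case VNIC_ID:	return 17;
	case VLAN:	return 17;
	case TOS:	return 8;
	case PROTOCOL:	return 8;
	}
	return 0;	/* field absent from the mode */
}

static int field_shift(unsigned int mode, int sel)
{
	int shift = 0;

	if (!(mode & sel))
		return -1;	/* field not present in this filter mode */
	for (int bit = 1; bit < sel; bit <<= 1)
		shift += width(mode & bit);
	return shift;
}

int main(void)
{
	unsigned int mode = PORT | VLAN | PROTOCOL;

	/* VLAN sits above only PORT (3 bits wide), so its shift is 3;
	 * PROTOCOL sits above PORT and VLAN, so 3 + 17 = 20. */
	printf("vlan=%d protocol=%d\n",
	       field_shift(mode, VLAN), field_shift(mode, PROTOCOL));
	return 0;
}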
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
u8 addr[6];

View file

@ -1171,10 +1171,50 @@
#define A_TP_TX_SCHED_PCMD 0x25
#define S_VNIC 11
#define V_VNIC(x) ((x) << S_VNIC)
#define F_VNIC V_VNIC(1U)
#define S_FRAGMENTATION 9
#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION)
#define F_FRAGMENTATION V_FRAGMENTATION(1U)
#define S_MPSHITTYPE 8
#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE)
#define F_MPSHITTYPE V_MPSHITTYPE(1U)
#define S_MACMATCH 7
#define V_MACMATCH(x) ((x) << S_MACMATCH)
#define F_MACMATCH V_MACMATCH(1U)
#define S_ETHERTYPE 6
#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE)
#define F_ETHERTYPE V_ETHERTYPE(1U)
#define S_PROTOCOL 5
#define V_PROTOCOL(x) ((x) << S_PROTOCOL)
#define F_PROTOCOL V_PROTOCOL(1U)
#define S_TOS 4
#define V_TOS(x) ((x) << S_TOS)
#define F_TOS V_TOS(1U)
#define S_VLAN 3
#define V_VLAN(x) ((x) << S_VLAN)
#define F_VLAN V_VLAN(1U)
#define S_VNIC_ID 2
#define V_VNIC_ID(x) ((x) << S_VNIC_ID)
#define F_VNIC_ID V_VNIC_ID(1U)
#define S_PORT 1
#define V_PORT(x) ((x) << S_PORT)
#define F_PORT V_PORT(1U)
#define S_FCOE 0
#define V_FCOE(x) ((x) << S_FCOE)
#define F_FCOE V_FCOE(1U)
#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
@ -1213,4 +1253,37 @@
#define V_CHIPID(x) ((x) << S_CHIPID)
#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID)
/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
* Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP
* selects for a particular field being present. These fields, when present
* in the Compressed Filter Tuple, have the following widths in bits.
*/
#define W_FT_FCOE 1
#define W_FT_PORT 3
#define W_FT_VNIC_ID 17
#define W_FT_VLAN 17
#define W_FT_TOS 8
#define W_FT_PROTOCOL 8
#define W_FT_ETHERTYPE 16
#define W_FT_MACMATCH 9
#define W_FT_MPSHITTYPE 3
#define W_FT_FRAGMENTATION 1
/* Some of the Compressed Filter Tuple fields have internal structure. These
* bit shifts/masks describe those structures. All shifts are relative to the
* base position of the fields within the Compressed Filter Tuple
*/
#define S_FT_VLAN_VLD 16
#define V_FT_VLAN_VLD(x) ((x) << S_FT_VLAN_VLD)
#define F_FT_VLAN_VLD V_FT_VLAN_VLD(1U)
#define S_FT_VNID_ID_VF 0
#define V_FT_VNID_ID_VF(x) ((x) << S_FT_VNID_ID_VF)
#define S_FT_VNID_ID_PF 7
#define V_FT_VNID_ID_PF(x) ((x) << S_FT_VNID_ID_PF)
#define S_FT_VNID_ID_VLD 16
#define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD)
#endif /* __T4_REGS_H */

View file

@ -428,6 +428,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* If this was the last BD in the ring, start at the beginning again. */
bdp = fec_enet_get_nextdesc(bdp, fep);
skb_tx_timestamp(skb);
fep->cur_tx = bdp;
if (fep->cur_tx == fep->dirty_tx)
@ -436,8 +438,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Trigger transmission start */
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
skb_tx_timestamp(skb);
return NETDEV_TX_OK;
}
@ -2049,6 +2049,8 @@ static void fec_reset_phy(struct platform_device *pdev)
int err, phy_reset;
int msec = 1;
struct device_node *np = pdev->dev.of_node;
enum of_gpio_flags flags;
bool port;
if (!np)
return;
@ -2058,18 +2060,22 @@ static void fec_reset_phy(struct platform_device *pdev)
if (msec > 1000)
msec = 1;
phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
phy_reset = of_get_named_gpio_flags(np, "phy-reset-gpios", 0, &flags);
if (!gpio_is_valid(phy_reset))
return;
err = devm_gpio_request_one(&pdev->dev, phy_reset,
GPIOF_OUT_INIT_LOW, "phy-reset");
if (flags & OF_GPIO_ACTIVE_LOW)
port = GPIOF_OUT_INIT_LOW;
else
port = GPIOF_OUT_INIT_HIGH;
err = devm_gpio_request_one(&pdev->dev, phy_reset, port, "phy-reset");
if (err) {
dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
return;
}
msleep(msec);
gpio_set_value(phy_reset, 1);
gpio_set_value(phy_reset, !port);
}
#else /* CONFIG_OF */
static void fec_reset_phy(struct platform_device *pdev)

View file

@ -718,8 +718,11 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
e1000_release_phy_80003es2lan(hw);
/* Disable IBIST slave mode (far-end loopback) */
e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
&kum_reg_data);
ret_val =
e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
&kum_reg_data);
if (ret_val)
return ret_val;
kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
kum_reg_data);

View file

@ -6174,7 +6174,7 @@ static int __e1000_resume(struct pci_dev *pdev)
return 0;
}
#ifdef CONFIG_PM_SLEEP
#ifdef CONFIG_PM
static int e1000_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@ -6193,7 +6193,7 @@ static int e1000_resume(struct device *dev)
return __e1000_resume(pdev);
}
#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_RUNTIME
static int e1000_runtime_suspend(struct device *dev)

View file

@ -1757,19 +1757,23 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
* it across the board.
*/
ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val)
if (ret_val) {
/* If the first read fails, another entity may have
* ownership of the resources, wait and try again to
* see if they have relinquished the resources yet.
*/
udelay(usec_interval);
if (usec_interval >= 1000)
msleep(usec_interval / 1000);
else
udelay(usec_interval);
}
ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val)
break;
if (phy_status & BMSR_LSTATUS)
break;
if (usec_interval >= 1000)
mdelay(usec_interval / 1000);
msleep(usec_interval / 1000);
else
udelay(usec_interval);
}

View file

@ -291,7 +291,9 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
{
struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
int err;
#ifdef CONFIG_PCI_IOV
u32 current_flags = adapter->flags;
#endif
err = ixgbe_disable_sriov(adapter);

View file

@ -92,6 +92,12 @@ static int orion_mdio_wait_ready(struct mii_bus *bus)
if (time_is_before_jiffies(end))
++timedout;
} else {
/* wait_event_timeout does not guarantee a delay of at
* least one whole jiffie, so timeout must be no less
* than two.
*/
if (timeout < 2)
timeout = 2;
wait_event_timeout(dev->smi_busy_wait,
orion_mdio_smi_is_done(dev),
timeout);
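
The clamp added above covers a subtle jiffies property: a 1-jiffy sleep may expire almost immediately if the current tick is nearly over, so the shortest wait that guarantees a full jiffy of delay is 2. A toy model of the conversion and clamp — HZ=100 and the round-up formula are assumptions here; the kernel's usecs_to_jiffies() differs in detail:

#include <stdio.h>

#define HZ 100	/* assumed tick rate */

static unsigned long usecs_to_jiffies_sketch(unsigned long us)
{
	return (us * HZ + 999999) / 1000000;	/* round up to whole jiffies */
}

int main(void)
{
	unsigned long timeout = usecs_to_jiffies_sketch(10000);	/* 10 ms -> 1 */

	if (timeout < 2)
		timeout = 2;	/* guarantee at least one full tick of sleep */
	printf("timeout = %lu jiffies\n", timeout);
	return 0;
}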

View file

@ -1604,13 +1604,13 @@ netxen_process_lro(struct netxen_adapter *adapter,
u32 seq_number;
u8 vhdr_len = 0;
if (unlikely(ring > adapter->max_rds_rings))
if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
rds_ring = &recv_ctx->rds_rings[ring];
index = netxen_get_lro_sts_refhandle(sts_data0);
if (unlikely(index > rds_ring->num_desc))
if (unlikely(index >= rds_ring->num_desc))
return NULL;
buffer = &rds_ring->rx_buf_arr[index];
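
Both netxen checks above are the same off-by-one: for an array of N entries the valid indices are 0..N-1, so the reject test must be >=, not >. In miniature:

#include <stdio.h>

#define NUM_DESC 4
static int rx_buf[NUM_DESC];

static int *lookup(unsigned int index)
{
	if (index >= NUM_DESC)	/* the old `>` let index == NUM_DESC through */
		return NULL;
	return &rx_buf[index];
}

int main(void)
{
	printf("%p\n", (void *)lookup(NUM_DESC));	/* NULL now */
	printf("%p\n", (void *)lookup(NUM_DESC - 1));	/* last valid entry */
	return 0;
}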

View file

@ -622,17 +622,15 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
if (netif_msg_hw(priv)) {
if (priv->dma_cap.time_stamp) {
pr_debug("IEEE 1588-2002 Time Stamp supported\n");
priv->adv_ts = 0;
}
if (priv->dma_cap.atime_stamp && priv->extend_desc) {
pr_debug
("IEEE 1588-2008 Advanced Time Stamp supported\n");
priv->adv_ts = 1;
}
}
priv->adv_ts = 0;
if (priv->dma_cap.atime_stamp && priv->extend_desc)
priv->adv_ts = 1;
if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
pr_debug("IEEE 1588-2002 Time Stamp supported\n");
if (netif_msg_hw(priv) && priv->adv_ts)
pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
priv->hw->ptp = &stmmac_ptp;
priv->hwts_tx_en = 0;

View file

@ -56,7 +56,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
priv->hw->ptp->config_addend(priv->ioaddr, addend);
spin_unlock_irqrestore(&priv->lock, flags);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
return 0;
}
@ -91,7 +91,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
spin_unlock_irqrestore(&priv->lock, flags);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
return 0;
}

View file

@ -740,6 +740,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
/* set speed_in input in case RMII mode is used in 100Mbps */
if (phy->speed == 100)
mac_control |= BIT(15);
else if (phy->speed == 10)
mac_control |= BIT(18); /* In Band mode */
*link = true;
} else {
@ -2106,7 +2108,7 @@ static int cpsw_probe(struct platform_device *pdev)
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
for (i = res->start; i <= res->end; i++) {
if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
dev_name(priv->dev), priv)) {
dev_name(&pdev->dev), priv)) {
dev_err(priv->dev, "error attaching irq\n");
goto clean_ale_ret;
}

View file

@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case HDLCDRVCTL_CALIBRATE:
if(!capable(CAP_SYS_RAWIO))
return -EPERM;
if (bi.data.calibrate > INT_MAX / s->par.bitrate)
return -EINVAL;
s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
return 0;
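
The new check above rejects calibrate values whose product with the bitrate would overflow before the divide — the standard compare-against-INT_MAX/multiplier idiom. Modeled in user space:

#include <limits.h>
#include <stdio.h>

static int set_calibrate(unsigned int calibrate, int bitrate)
{
	if (bitrate > 0 && calibrate > (unsigned int)(INT_MAX / bitrate))
		return -1;	/* would overflow; the driver returns -EINVAL */
	return (int)(calibrate * bitrate / 16);
}

int main(void)
{
	printf("%d\n", set_calibrate(10, 9600));	/* ok: 6000 */
	printf("%d\n", set_calibrate(4000000, 9600));	/* rejected: -1 */
	return 0;
}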

View file

@ -1057,6 +1057,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
case SIOCYAMGCFG:
memset(&yi, 0, sizeof(yi));
yi.cfg.mask = 0xffffffff;
yi.cfg.iobase = yp->iobase;
yi.cfg.irq = yp->irq;

View file

@ -261,9 +261,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
struct sk_buff *skb;
net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
if (!net) {
netdev_err(net, "got receive callback but net device"
" not initialized yet\n");
if (!net || net->reg_state != NETREG_REGISTERED) {
packet->status = NVSP_STAT_FAIL;
return 0;
}
@ -435,19 +433,11 @@ static int netvsc_probe(struct hv_device *dev,
SET_ETHTOOL_OPS(net, &ethtool_ops);
SET_NETDEV_DEV(net, &dev->device);
ret = register_netdev(net);
if (ret != 0) {
pr_err("Unable to register netdev.\n");
free_netdev(net);
goto out;
}
/* Notify the netvsc driver of the new device */
device_info.ring_size = ring_size;
ret = rndis_filter_device_add(dev, &device_info);
if (ret != 0) {
netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
unregister_netdev(net);
free_netdev(net);
hv_set_drvdata(dev, NULL);
return ret;
@ -456,7 +446,13 @@ static int netvsc_probe(struct hv_device *dev,
netif_carrier_on(net);
out:
ret = register_netdev(net);
if (ret != 0) {
pr_err("Unable to register netdev.\n");
rndis_filter_device_remove(dev);
free_netdev(net);
}
return ret;
}

View file

@ -690,8 +690,19 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct macvlan_dev *vlan = netdev_priv(dev);
netdev_features_t mask;
return features & (vlan->set_features | ~MACVLAN_FEATURES);
features |= NETIF_F_ALL_FOR_ALL;
features &= (vlan->set_features | ~MACVLAN_FEATURES);
mask = features;
features = netdev_increment_features(vlan->lowerdev->features,
features,
mask);
if (!vlan->fwd_priv)
features |= NETIF_F_LLTX;
return features;
}
static const struct ethtool_ops macvlan_ethtool_ops = {
@ -1019,9 +1030,8 @@ static int macvlan_device_event(struct notifier_block *unused,
break;
case NETDEV_FEAT_CHANGE:
list_for_each_entry(vlan, &port->vlans, list) {
vlan->dev->features = dev->features & MACVLAN_FEATURES;
vlan->dev->gso_max_size = dev->gso_max_size;
netdev_features_change(vlan->dev);
netdev_update_features(vlan->dev);
}
break;
case NETDEV_UNREGISTER:

View file

@ -565,10 +565,8 @@ int phy_start_interrupts(struct phy_device *phydev)
int err = 0;
atomic_set(&phydev->irq_disable, 0);
if (request_irq(phydev->irq, phy_interrupt,
IRQF_SHARED,
"phy_interrupt",
phydev) < 0) {
if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
phydev) < 0) {
pr_warn("%s: Can't get IRQ %d (PHY)\n",
phydev->bus->name, phydev->irq);
phydev->irq = PHY_POLL;

View file

@ -276,12 +276,12 @@ config USB_NET_CDC_MBIM
module will be called cdc_mbim.
config USB_NET_DM9601
tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices"
tristate "Davicom DM96xx based USB 10/100 ethernet devices"
depends on USB_USBNET
select CRC32
help
This option adds support for Davicom DM9601 based USB 1.1
10/100 Ethernet adapters.
This option adds support for Davicom DM9601/DM9620/DM9621A
based USB 10/100 Ethernet adapters.
config USB_NET_SR9700
tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices"

View file

@ -1,5 +1,5 @@
/*
* Davicom DM9601 USB 1.1 10/100Mbps ethernet devices
* Davicom DM96xx USB 10/100Mbps ethernet devices
*
* Peter Korsgaard <jacmet@sunsite.dk>
*
@ -364,7 +364,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->ethtool_ops = &dm9601_ethtool_ops;
dev->net->hard_header_len += DM_TX_OVERHEAD;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD;
/* dm9620/21a require room for 4 byte padding, even in dm9601
* mode, so we need +1 to be able to receive full size
* ethernet frames.
*/
dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1;
dev->mii.dev = dev->net;
dev->mii.mdio_read = dm9601_mdio_read;
@ -468,7 +473,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags)
{
int len;
int len, pad;
/* format:
b1: packet length low
@ -476,12 +481,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
b3..n: packet data
*/
len = skb->len;
len = skb->len + DM_TX_OVERHEAD;
if (skb_headroom(skb) < DM_TX_OVERHEAD) {
/* workaround for dm962x errata with tx fifo getting out of
* sync if a USB bulk transfer retry happens right after a
* packet with odd / maxpacket length by adding up to 3 bytes
* padding.
*/
while ((len & 1) || !(len % dev->maxpacket))
len++;
len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */
pad = len - skb->len;
if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) {
struct sk_buff *skb2;
skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags);
skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags);
dev_kfree_skb_any(skb);
skb = skb2;
if (!skb)
@ -490,10 +506,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
__skb_push(skb, DM_TX_OVERHEAD);
/* usbnet adds padding if length is a multiple of packet size
if so, adjust length value in header */
if ((skb->len % dev->maxpacket) == 0)
len++;
if (pad) {
memset(skb->data + skb->len, 0, pad);
__skb_put(skb, pad);
}
skb->data[0] = len;
skb->data[1] = len >> 8;
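
The tx_fixup() rework above is easiest to follow numerically: the total USB transfer (payload plus the 2-byte hardware header) is padded until it is even and not an exact multiple of the bulk maxpacket size, and the header then carries the padded length with the header itself excluded. A standalone model:

#include <stdio.h>

#define DM_TX_OVERHEAD 2	/* 2-byte hw length header */

static int padded_len(int skb_len, int maxpacket)
{
	int len = skb_len + DM_TX_OVERHEAD;

	/* dm962x errata workaround: avoid odd and maxpacket-aligned
	 * transfer lengths by adding up to 3 bytes of padding. */
	while ((len & 1) || !(len % maxpacket))
		len++;
	return len - DM_TX_OVERHEAD;	/* header doesn't count as length */
}

int main(void)
{
	/* 62-byte frame + 2-byte header = 64 = maxpacket: pad by 2 */
	printf("%d\n", padded_len(62, 64));	/* 64 */
	printf("%d\n", padded_len(60, 64));	/* 60: already fine */
	return 0;
}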
@ -543,7 +559,7 @@ static int dm9601_link_reset(struct usbnet *dev)
}
static const struct driver_info dm9601_info = {
.description = "Davicom DM9601 USB Ethernet",
.description = "Davicom DM96xx USB 10/100 Ethernet",
.flags = FLAG_ETHER | FLAG_LINK_INTR,
.bind = dm9601_bind,
.rx_fixup = dm9601_rx_fixup,
@ -594,6 +610,10 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */
.driver_info = (unsigned long)&dm9601_info,
},
{
USB_DEVICE(0x0a46, 0x9621), /* DM9621A USB to Fast Ethernet Adapter */
.driver_info = (unsigned long)&dm9601_info,
},
{}, // END
};
@ -612,5 +632,5 @@ static struct usb_driver dm9601_driver = {
module_usb_driver(dm9601_driver);
MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
MODULE_DESCRIPTION("Davicom DM9601 USB 1.1 ethernet devices");
MODULE_DESCRIPTION("Davicom DM96xx USB 10/100 ethernet devices");
MODULE_LICENSE("GPL");

View file

@ -76,9 +76,16 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
mask2 |= ATH9K_INT_CST;
if (isr2 & AR_ISR_S2_TSFOOR)
mask2 |= ATH9K_INT_TSFOOR;
if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
REG_WRITE(ah, AR_ISR_S2, isr2);
isr &= ~AR_ISR_BCNMISC;
}
}
isr = REG_READ(ah, AR_ISR_RAC);
if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
isr = REG_READ(ah, AR_ISR_RAC);
if (isr == 0xffffffff) {
*masked = 0;
return false;
@ -97,11 +104,23 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
*masked |= ATH9K_INT_TX;
s0_s = REG_READ(ah, AR_ISR_S0_S);
if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
s0_s = REG_READ(ah, AR_ISR_S0_S);
s1_s = REG_READ(ah, AR_ISR_S1_S);
} else {
s0_s = REG_READ(ah, AR_ISR_S0);
REG_WRITE(ah, AR_ISR_S0, s0_s);
s1_s = REG_READ(ah, AR_ISR_S1);
REG_WRITE(ah, AR_ISR_S1, s1_s);
isr &= ~(AR_ISR_TXOK |
AR_ISR_TXDESC |
AR_ISR_TXERR |
AR_ISR_TXEOL);
}
ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
s1_s = REG_READ(ah, AR_ISR_S1_S);
ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
}
@ -114,13 +133,15 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
*masked |= mask2;
}
if (AR_SREV_9100(ah))
return true;
if (isr & AR_ISR_GENTMR) {
if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) {
u32 s5_s;
s5_s = REG_READ(ah, AR_ISR_S5_S);
if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
s5_s = REG_READ(ah, AR_ISR_S5_S);
} else {
s5_s = REG_READ(ah, AR_ISR_S5);
}
ah->intr_gen_timer_trigger =
MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
@ -133,8 +154,21 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
*masked |= ATH9K_INT_TIM_TIMER;
if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
REG_WRITE(ah, AR_ISR_S5, s5_s);
isr &= ~AR_ISR_GENTMR;
}
}
if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
REG_WRITE(ah, AR_ISR, isr);
REG_READ(ah, AR_ISR);
}
if (AR_SREV_9100(ah))
return true;
if (sync_cause) {
ath9k_debug_sync_cause(common, sync_cause);
fatal_int =

View file

@ -127,21 +127,26 @@ static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
struct ath9k_vif_iter_data *iter_data = data;
int i;
for (i = 0; i < ETH_ALEN; i++)
iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
if (iter_data->hw_macaddr != NULL) {
for (i = 0; i < ETH_ALEN; i++)
iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
} else {
iter_data->hw_macaddr = mac;
}
}
static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv,
struct ieee80211_vif *vif)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
struct ath9k_vif_iter_data iter_data;
/*
* Use the hardware MAC address as reference, the hardware uses it
* together with the BSSID mask when matching addresses.
* Pick the MAC address of the first interface as the new hardware
* MAC address. The hardware will use it together with the BSSID mask
* when matching addresses.
*/
iter_data.hw_macaddr = common->macaddr;
iter_data.hw_macaddr = NULL;
memset(&iter_data.mask, 0xff, ETH_ALEN);
if (vif)
@ -153,6 +158,10 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
ath9k_htc_bssid_iter, &iter_data);
memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
if (iter_data.hw_macaddr)
memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN);
ath_hw_setbssidmask(common);
}
@ -1063,7 +1072,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
goto out;
}
ath9k_htc_set_bssid_mask(priv, vif);
ath9k_htc_set_mac_bssid_mask(priv, vif);
priv->vif_slot |= (1 << avp->index);
priv->nvifs++;
@ -1128,7 +1137,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
ath9k_htc_set_opmode(priv);
ath9k_htc_set_bssid_mask(priv, vif);
ath9k_htc_set_mac_bssid_mask(priv, vif);
/*
* Stop ANI only if there are no associated station interfaces.

View file

@ -965,8 +965,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
struct ath_common *common = ath9k_hw_common(ah);
/*
* Use the hardware MAC address as reference, the hardware uses it
* together with the BSSID mask when matching addresses.
* Pick the MAC address of the first interface as the new hardware
* MAC address. The hardware will use it together with the BSSID mask
* when matching addresses.
*/
memset(iter_data, 0, sizeof(*iter_data));
memset(&iter_data->mask, 0xff, ETH_ALEN);

View file

@ -740,6 +740,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
};
int index = rtlpci->rx_ring[rx_queue_idx].idx;
if (rtlpci->driver_is_goingto_unload)
return;
/*RX NORMAL PKT */
while (count--) {
/*rx descriptor */
@ -1636,6 +1638,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
*/
set_hal_stop(rtlhal);
rtlpci->driver_is_goingto_unload = true;
rtlpriv->cfg->ops->disable_interrupt(hw);
cancel_work_sync(&rtlpriv->works.lps_change_work);
@ -1653,7 +1656,6 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
ppsc->rfchange_inprogress = true;
spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
rtlpci->driver_is_goingto_unload = true;
rtlpriv->cfg->ops->hw_disable(hw);
/* some things are not needed if firmware not available */
if (!rtlpriv->max_fw_size)

View file

@ -101,6 +101,13 @@ struct xenvif_rx_meta {
#define MAX_PENDING_REQS 256
/* It's possible for an skb to have a maximal number of frags
* but still be less than MAX_BUFFER_OFFSET in size. Thus the
* worst-case number of copy operations is MAX_SKB_FRAGS per
* ring slot.
*/
#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
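
Some back-of-envelope arithmetic shows why MAX_GRANT_COPY_OPS pushes grant_copy_op out of struct xenvif and into a separate vmalloc() (see the interface.c hunk below). The figures used here — MAX_SKB_FRAGS of 17, a 256-entry RX ring, roughly 40 bytes per gnttab_copy — are typical 3.13-era values and are assumptions, not taken from this commit:

#include <stdio.h>

int main(void)
{
	long max_skb_frags = 17;	/* typical MAX_SKB_FRAGS (assumed) */
	long ring_size	   = 256;	/* XEN_NETIF_RX_RING_SIZE (assumed) */
	long op_size	   = 40;	/* approx sizeof(struct gnttab_copy) */
	long ops = max_skb_frags * ring_size;

	/* ~4352 ops, ~170 KiB -- far too large to embed in the vif
	 * structure, hence the separate allocation. */
	printf("%ld ops, ~%ld KiB\n", ops, ops * op_size / 1024);
	return 0;
}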
struct xenvif {
/* Unique identifier for this interface. */
domid_t domid;
@ -143,13 +150,13 @@ struct xenvif {
*/
RING_IDX rx_req_cons_peek;
/* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
* head/fragment page uses 2 copy operations because it
* straddles two buffers in the frontend.
*/
struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
/* This array is allocated separately as it is large */
struct gnttab_copy *grant_copy_op;
/* We create one meta structure per ring request we consume, so
* the maximum number is the same as the ring size.
*/
struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
u8 fe_dev_addr[6];

View file

@ -307,6 +307,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
SET_NETDEV_DEV(dev, parent);
vif = netdev_priv(dev);
vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
MAX_GRANT_COPY_OPS);
if (vif->grant_copy_op == NULL) {
pr_warn("Could not allocate grant copy space for %s\n", name);
free_netdev(dev);
return ERR_PTR(-ENOMEM);
}
vif->domid = domid;
vif->handle = handle;
vif->can_sg = 1;
@ -487,6 +496,7 @@ void xenvif_free(struct xenvif *vif)
unregister_netdev(vif->dev);
vfree(vif->grant_copy_op);
free_netdev(vif->dev);
module_put(THIS_MODULE);

View file

@ -608,7 +608,7 @@ void xenvif_rx_action(struct xenvif *vif)
if (!npo.copy_prod)
return;
BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op));
BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
@ -1209,8 +1209,10 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
goto out;
if (!skb_partial_csum_set(skb, off,
offsetof(struct tcphdr, check)))
offsetof(struct tcphdr, check))) {
err = -EPROTO;
goto out;
}
if (recalculate_partial_csum)
tcp_hdr(skb)->check =
@ -1227,8 +1229,10 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
goto out;
if (!skb_partial_csum_set(skb, off,
offsetof(struct udphdr, check)))
offsetof(struct udphdr, check))) {
err = -EPROTO;
goto out;
}
if (recalculate_partial_csum)
udp_hdr(skb)->check =
@ -1350,8 +1354,10 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
goto out;
if (!skb_partial_csum_set(skb, off,
offsetof(struct tcphdr, check)))
offsetof(struct tcphdr, check))) {
err = -EPROTO;
goto out;
}
if (recalculate_partial_csum)
tcp_hdr(skb)->check =
@ -1368,8 +1374,10 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
goto out;
if (!skb_partial_csum_set(skb, off,
offsetof(struct udphdr, check)))
offsetof(struct udphdr, check))) {
err = -EPROTO;
goto out;
}
if (recalculate_partial_csum)
udp_hdr(skb)->check =

Просмотреть файл

@ -20,7 +20,7 @@ config OF_SELFTEST
depends on OF_IRQ
help
This option builds in test cases for the device tree infrastructure
that are executed one at boot time, and the results dumped to the
that are executed once at boot time, and the results dumped to the
console.
If unsure, say N here, but this option is safe to enable.

View file

@ -69,14 +69,6 @@ static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
(unsigned long long)cp, (unsigned long long)s,
(unsigned long long)da);
/*
* If the number of address cells is larger than 2 we assume the
* mapping doesn't specify a physical address. Rather, the address
* specifies an identifier that must match exactly.
*/
if (na > 2 && memcmp(range, addr, na * 4) != 0)
return OF_BAD_ADDR;
if (da < cp || da >= (cp + s))
return OF_BAD_ADDR;
return da - cp;

View file

@ -922,8 +922,16 @@ void __init unflatten_device_tree(void)
*/
void __init unflatten_and_copy_device_tree(void)
{
int size = __be32_to_cpu(initial_boot_params->totalsize);
void *dt = early_init_dt_alloc_memory_arch(size,
int size;
void *dt;
if (!initial_boot_params) {
pr_warn("No valid device tree found, continuing without\n");
return;
}
size = __be32_to_cpu(initial_boot_params->totalsize);
dt = early_init_dt_alloc_memory_arch(size,
__alignof__(struct boot_param_header));
if (dt) {

View file

@ -165,7 +165,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
if (of_get_property(ipar, "interrupt-controller", NULL) !=
NULL) {
pr_debug(" -> got it !\n");
of_node_put(old);
return 0;
}
@ -250,8 +249,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
* Successfully parsed an interrupt-map translation; copy new
* interrupt specifier into the out_irq structure
*/
of_node_put(out_irq->np);
out_irq->np = of_node_get(newpar);
out_irq->np = newpar;
match_array = imap - newaddrsize - newintsize;
for (i = 0; i < newintsize; i++)
@ -268,7 +266,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
}
fail:
of_node_put(ipar);
of_node_put(out_irq->np);
of_node_put(newpar);
return -EINVAL;

View file

@ -279,7 +279,9 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
if (ACPI_FAILURE(status)) {
acpi_handle_warn(handle, "can't evaluate _ADR (%#x)\n", status);
if (status != AE_NOT_FOUND)
acpi_handle_warn(handle,
"can't evaluate _ADR (%#x)\n", status);
return AE_OK;
}
@ -643,6 +645,24 @@ static void disable_slot(struct acpiphp_slot *slot)
slot->flags &= (~SLOT_ENABLED);
}
static bool acpiphp_no_hotplug(acpi_handle handle)
{
struct acpi_device *adev = NULL;
acpi_bus_get_device(handle, &adev);
return adev && adev->flags.no_hotplug;
}
static bool slot_no_hotplug(struct acpiphp_slot *slot)
{
struct acpiphp_func *func;
list_for_each_entry(func, &slot->funcs, sibling)
if (acpiphp_no_hotplug(func_to_handle(func)))
return true;
return false;
}
/**
* get_slot_status - get ACPI slot status
@ -701,7 +721,8 @@ static void trim_stale_devices(struct pci_dev *dev)
unsigned long long sta;
status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
alive = ACPI_SUCCESS(status) && sta == ACPI_STA_ALL;
alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL)
|| acpiphp_no_hotplug(handle);
}
if (!alive) {
u32 v;
@ -741,8 +762,9 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
struct pci_dev *dev, *tmp;
mutex_lock(&slot->crit_sect);
/* wake up all functions */
if (get_slot_status(slot) == ACPI_STA_ALL) {
if (slot_no_hotplug(slot)) {
; /* do nothing */
} else if (get_slot_status(slot) == ACPI_STA_ALL) {
/* remove stale devices if any */
list_for_each_entry_safe(dev, tmp, &bus->devices,
bus_list)

View file

@ -330,29 +330,32 @@ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
static void pci_acpi_setup(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
acpi_handle handle = ACPI_HANDLE(dev);
struct acpi_device *adev;
struct acpi_device *adev = ACPI_COMPANION(dev);
if (acpi_bus_get_device(handle, &adev) || !adev->wakeup.flags.valid)
if (!adev)
return;
pci_acpi_add_pm_notifier(adev, pci_dev);
if (!adev->wakeup.flags.valid)
return;
device_set_wakeup_capable(dev, true);
acpi_pci_sleep_wake(pci_dev, false);
pci_acpi_add_pm_notifier(adev, pci_dev);
if (adev->wakeup.flags.run_wake)
device_set_run_wake(dev, true);
}
static void pci_acpi_cleanup(struct device *dev)
{
acpi_handle handle = ACPI_HANDLE(dev);
struct acpi_device *adev;
struct acpi_device *adev = ACPI_COMPANION(dev);
if (!acpi_bus_get_device(handle, &adev) && adev->wakeup.flags.valid) {
if (!adev)
return;
pci_acpi_remove_pm_notifier(adev);
if (adev->wakeup.flags.valid) {
device_set_wakeup_capable(dev, false);
device_set_run_wake(dev, false);
pci_acpi_remove_pm_notifier(adev);
}
}

View file

@ -196,6 +196,7 @@ config BATTERY_MAX17040
config BATTERY_MAX17042
tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge"
depends on I2C
select REGMAP_I2C
help
MAX17042 is fuel-gauge systems for lithium-ion (Li+) batteries
in handheld and portable equipment. The MAX17042 is configured

View file

@ -511,6 +511,10 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
dev_set_drvdata(dev, psy);
psy->dev = dev;
rc = dev_set_name(dev, "%s", psy->name);
if (rc)
goto dev_set_name_failed;
INIT_WORK(&psy->changed_work, power_supply_changed_work);
rc = power_supply_check_supplies(psy);
@ -524,10 +528,6 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
if (rc)
goto wakeup_init_failed;
rc = kobject_set_name(&dev->kobj, "%s", psy->name);
if (rc)
goto kobject_set_name_failed;
rc = device_add(dev);
if (rc)
goto device_add_failed;
@ -553,11 +553,11 @@ create_triggers_failed:
register_cooler_failed:
psy_unregister_thermal(psy);
register_thermal_failed:
wakeup_init_failed:
device_del(dev);
kobject_set_name_failed:
device_add_failed:
wakeup_init_failed:
check_supplies_failed:
dev_set_name_failed:
put_device(dev);
success:
return rc;
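
The power-supply hunk above is a pure error-ladder fix: dev_set_name() moves ahead of everything that later needs unwinding, and each label is repositioned so it undoes exactly the steps that completed, in reverse order. A minimal model of the pattern (the step names are invented):

#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("%s\n", name);
	return fail ? -1 : 0;
}

static int do_register(int fail_at)
{
	int rc;

	rc = step("set_name", fail_at == 1);
	if (rc)
		goto set_name_failed;
	rc = step("device_add", fail_at == 2);
	if (rc)
		goto device_add_failed;
	return 0;

device_add_failed:	/* nothing extra to undo; fall through */
set_name_failed:
	printf("put_device\n");	/* final put releases the embedded object */
	return rc;
}

int main(void)
{
	return do_register(2) ? 1 : 0;
}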

View file

@ -942,7 +942,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
return rc;
}
tp->screen = tty3270_alloc_screen(tp->view.cols, tp->view.rows);
tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols);
if (IS_ERR(tp->screen)) {
rc = PTR_ERR(tp->screen);
raw3270_put_view(&tp->view);

View file

@ -477,9 +477,10 @@ extern int CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon,
const int netfid, __u64 *pExtAttrBits, __u64 *pMask);
extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb);
extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr);
extern int CIFSCheckMFSymlink(struct cifs_fattr *fattr,
const unsigned char *path,
struct cifs_sb_info *cifs_sb, unsigned int xid);
extern int CIFSCheckMFSymlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb,
struct cifs_fattr *fattr,
const unsigned char *path);
extern int mdfour(unsigned char *, unsigned char *, int);
extern int E_md4hash(const unsigned char *passwd, unsigned char *p16,
const struct nls_table *codepage);

View file

@ -4010,7 +4010,7 @@ QFileInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc);
cifs_dbg(FYI, "Send error in QFileInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@ -4179,7 +4179,7 @@ UnixQFileInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc);
cifs_dbg(FYI, "Send error in UnixQFileInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@ -4263,7 +4263,7 @@ UnixQPathInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc);
cifs_dbg(FYI, "Send error in UnixQPathInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);

View file

@ -193,7 +193,7 @@ check_name(struct dentry *direntry)
static int
cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
struct tcon_link *tlink, unsigned oflags, umode_t mode,
__u32 *oplock, struct cifs_fid *fid, int *created)
__u32 *oplock, struct cifs_fid *fid)
{
int rc = -ENOENT;
int create_options = CREATE_NOT_DIR;
@ -349,7 +349,6 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
.device = 0,
};
*created |= FILE_CREATED;
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
args.uid = current_fsuid();
if (inode->i_mode & S_ISGID)
@ -480,13 +479,16 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
cifs_add_pending_open(&fid, tlink, &open);
rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
&oplock, &fid, opened);
&oplock, &fid);
if (rc) {
cifs_del_pending_open(&open);
goto out;
}
if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
*opened |= FILE_CREATED;
rc = finish_open(file, direntry, generic_file_open, opened);
if (rc) {
if (server->ops->close)
@ -529,7 +531,6 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
struct TCP_Server_Info *server;
struct cifs_fid fid;
__u32 oplock;
int created = FILE_CREATED;
cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %s and dentry = 0x%p\n",
inode, direntry->d_name.name, direntry);
@ -546,7 +547,7 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
server->ops->new_lease_key(&fid);
rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
&oplock, &fid, &created);
&oplock, &fid);
if (!rc && server->ops->close)
server->ops->close(xid, tcon, &fid);

View file

@ -383,7 +383,8 @@ int cifs_get_inode_info_unix(struct inode **pinode,
/* check for Minshall+French symlinks */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
int tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid);
int tmprc = CIFSCheckMFSymlink(xid, tcon, cifs_sb, &fattr,
full_path);
if (tmprc)
cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc);
}
@ -799,7 +800,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
/* check for Minshall+French symlinks */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid);
tmprc = CIFSCheckMFSymlink(xid, tcon, cifs_sb, &fattr,
full_path);
if (tmprc)
cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc);
}

View file

@ -354,34 +354,30 @@ open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
int
CIFSCheckMFSymlink(struct cifs_fattr *fattr,
const unsigned char *path,
struct cifs_sb_info *cifs_sb, unsigned int xid)
CIFSCheckMFSymlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
const unsigned char *path)
{
int rc = 0;
int rc;
u8 *buf = NULL;
unsigned int link_len = 0;
unsigned int bytes_read = 0;
struct cifs_tcon *ptcon;
if (!CIFSCouldBeMFSymlink(fattr))
/* it's not a symlink */
return 0;
buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
if (!buf) {
rc = -ENOMEM;
goto out;
}
if (!buf)
return -ENOMEM;
ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb));
if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink))
rc = ptcon->ses->server->ops->query_mf_symlink(path, buf,
&bytes_read, cifs_sb, xid);
if (tcon->ses->server->ops->query_mf_symlink)
rc = tcon->ses->server->ops->query_mf_symlink(path, buf,
&bytes_read, cifs_sb, xid);
else
goto out;
rc = -ENOSYS;
if (rc != 0)
if (rc)
goto out;
if (bytes_read == 0) /* not a symlink */

View file

@ -1907,10 +1907,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
}
}
}
if (op == EPOLL_CTL_DEL && is_file_epoll(tf.file)) {
tep = tf.file->private_data;
mutex_lock_nested(&tep->mtx, 1);
}
/*
* Try to lookup the file inside our RB tree, Since we grabbed "mtx"

Some files were not shown because too many files changed in this diff