Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial: (47 commits)
  doc: CONFIG_UNEVICTABLE_LRU doesn't exist anymore
  Update cpuset info & webiste for cgroups
  dcdbas: force SMI to happen when expected
  arch/arm/Kconfig: remove one to many l's in the word.
  asm-generic/user.h: Fix spelling in comment
  drm: fix printk typo 'sracth'
  Remove one to many n's in a word
  Documentation/filesystems/romfs.txt: fixing link to genromfs
  drivers:scsi Change printk typo initate -> initiate
  serial, pch uart: Remove duplicate inclusion of linux/pci.h header
  fs/eventpoll.c: fix spelling
  mm: Fix out-of-date comments which refers non-existent functions
  drm: Fix printk typo 'failled'
  coh901318.c: Change initate to initiate.
  mbox-db5500.c Change initate to initiate.
  edac: correct i82975x error-info reported
  edac: correct i82975x mci initialisation
  edac: correct commented info
  fs: update comments to point correct document
  target: remove duplicate include of target/target_core_device.h from drivers/target/target_core_hba.c
  ...

Trivial conflict in fs/eventpoll.c (spelling vs addition)
This commit is contained in: commit e16b396ce3
@@ -693,7 +693,7 @@ There are ways to query or modify cpusets:
    - via the C library libcgroup.
      (http://sourceforge.net/projects/libcg/)
    - via the python application cset.
-     (http://developer.novell.com/wiki/index.php/Cpuset)
+     (http://code.google.com/p/cpuset/)

 The sched_setaffinity calls can also be done at the shell prompt using
 SGI's runon or Robert Love's taskset.  The mbind and set_mempolicy
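
For instance, pinning a command to particular CPUs from the shell with taskset (from util-linux; the command name and CPU list here are made up for illustration):

	# run 'my_workload' restricted to CPUs 0 and 1
	taskset -c 0,1 ./my_workload

	# query (or, with a mask argument, change) the affinity of a running PID
	taskset -p 1234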
@@ -725,13 +725,14 @@ Now you want to do something with this cpuset.

 In this directory you can find several files:
 # ls
-cpuset.cpu_exclusive   cpuset.memory_spread_slab
-cpuset.cpus            cpuset.mems
-cpuset.mem_exclusive   cpuset.sched_load_balance
-cpuset.mem_hardwall    cpuset.sched_relax_domain_level
-cpuset.memory_migrate  notify_on_release
-cpuset.memory_pressure tasks
-cpuset.memory_spread_page
+cgroup.clone_children  cpuset.memory_pressure
+cgroup.event_control   cpuset.memory_spread_page
+cgroup.procs           cpuset.memory_spread_slab
+cpuset.cpu_exclusive   cpuset.mems
+cpuset.cpus            cpuset.sched_load_balance
+cpuset.mem_exclusive   cpuset.sched_relax_domain_level
+cpuset.mem_hardwall    notify_on_release
+cpuset.memory_migrate  tasks

 Reading them will give you information about the state of this cpuset:
 the CPUs and Memory Nodes it can use, the processes that are using
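
For example (a sketch; the values shown depend entirely on the machine and cpuset configuration):

	# cat cpuset.cpus
	0-3
	# cat cpuset.mems
	0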
@@ -485,8 +485,9 @@ The feature can be disabled by

 # echo 0 > memory.use_hierarchy

-NOTE1: Enabling/disabling will fail if the cgroup already has other
-cgroups created below it.
+NOTE1: Enabling/disabling will fail if either the cgroup already has other
+cgroups created below it, or if the parent cgroup has use_hierarchy
+enabled.

 NOTE2: When panic_on_oom is set to "2", the whole system will panic in
 case of an OOM event in any cgroup.
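
A shell sketch of the failure modes NOTE1 describes (hierarchy mount point and group names are hypothetical):

	# mkdir /cgroups/A
	# echo 1 > /cgroups/A/memory.use_hierarchy
	# mkdir /cgroups/A/B
	# echo 0 > /cgroups/A/memory.use_hierarchy    # fails: A already has child B
	# echo 0 > /cgroups/A/B/memory.use_hierarchy  # fails: parent A has it enabled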
@@ -41,7 +41,7 @@ Example scripts
 ===============
 LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
 encryption with dm-crypt using the 'cryptsetup' utility, see
-http://clemens.endorphin.org/cryptography
+http://code.google.com/p/cryptsetup/

 [[
 #!/bin/sh
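
For orientation, a minimal LUKS session with cryptsetup looks roughly like this (device and mapping names are hypothetical):

	# cryptsetup luksFormat /dev/sdX2
	# cryptsetup luksOpen /dev/sdX2 cryptdisk
	# mkfs.ext3 /dev/mapper/cryptdisk
	# cryptsetup luksClose cryptdisk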
@@ -17,8 +17,7 @@ comparison, an actual rescue disk used up 3202 blocks with ext2, while
 with romfs, it needed 3079 blocks.

 To create such a file system, you'll need a user program named
-genromfs. It is available via anonymous ftp on sunsite.unc.edu and
-its mirrors, in the /pub/Linux/system/recovery/ directory.
+genromfs. It is available on http://romfs.sourceforge.net/

 As the name suggests, romfs could be also used (space-efficiently) on
 various read-only media, like (E)EPROM disks if someone will have the
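
A typical invocation, assuming hypothetical paths (see the genromfs man page for the full option list):

	genromfs -d rootdir -f romfs.img -V rescue
	mount -t romfs -o loop romfs.img /mnt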
@@ -146,7 +146,7 @@ INSTALL_MOD_STRIP
 	INSTALL_MOD_STRIP, if defined, will cause modules to be
 	stripped after they are installed.  If INSTALL_MOD_STRIP is '1', then
 	the default option --strip-debug will be used.  Otherwise,
-	INSTALL_MOD_STRIP will used as the options to the strip command.
+	INSTALL_MOD_STRIP value will be used as the options to the strip command.

 INSTALL_FW_PATH
 --------------------------------------------------
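
In practice, from an already-built tree:

	make INSTALL_MOD_STRIP=1 modules_install
	# or pass explicit strip options:
	make INSTALL_MOD_STRIP=--strip-unneeded modules_install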
@@ -1325,7 +1325,8 @@ The top Makefile exports the following variables:
     If this variable is specified, will cause modules to be stripped
     after they are installed.  If INSTALL_MOD_STRIP is '1', then the
     default option --strip-debug will be used.  Otherwise,
-    INSTALL_MOD_STRIP will used as the option(s) to the strip command.
+    INSTALL_MOD_STRIP value will be used as the option(s) to the strip
+    command.


=== 9 Makefile language

@@ -166,7 +166,7 @@ Returns: 0 on success, -1 on error

 This ioctl is obsolete and has been removed.

-4.6 KVM_CREATE_VCPU
+4.7 KVM_CREATE_VCPU

 Capability: basic
 Architectures: all
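
As a sketch of how the basic ioctls described in this section chain together from userspace (error handling trimmed to the bare minimum; vcpu id 0 assumed):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm  = open("/dev/kvm", O_RDWR);      /* system fd */
		int vm   = ioctl(kvm, KVM_CREATE_VM, 0);  /* vm fd     */
		/* the vcpu id must lie in [0, max_vcpus), as noted above */
		int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0); /* vcpu fd   */
		return (kvm < 0 || vm < 0 || vcpu < 0);
	}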
@@ -177,7 +177,7 @@ Returns: vcpu fd on success, -1 on error
 This API adds a vcpu to a virtual machine.  The vcpu id is a small integer
 in the range [0, max_vcpus).

-4.7 KVM_GET_DIRTY_LOG (vm ioctl)
+4.8 KVM_GET_DIRTY_LOG (vm ioctl)

 Capability: basic
 Architectures: x86

@@ -200,7 +200,7 @@ since the last call to this ioctl.  Bit 0 is the first page in the
 memory slot.  Ensure the entire structure is cleared to avoid padding
 issues.

-4.8 KVM_SET_MEMORY_ALIAS
+4.9 KVM_SET_MEMORY_ALIAS

 Capability: basic
 Architectures: x86

@@ -210,7 +210,7 @@ Returns: 0 (success), -1 (error)

 This ioctl is obsolete and has been removed.

-4.9 KVM_RUN
+4.10 KVM_RUN

 Capability: basic
 Architectures: all

@@ -226,7 +226,7 @@ obtained by mmap()ing the vcpu fd at offset 0, with the size given by
 KVM_GET_VCPU_MMAP_SIZE.  The parameter block is formatted as a 'struct
 kvm_run' (see below).

-4.10 KVM_GET_REGS
+4.11 KVM_GET_REGS

 Capability: basic
 Architectures: all

@@ -246,7 +246,7 @@ struct kvm_regs {
 	__u64 rip, rflags;
 };

-4.11 KVM_SET_REGS
+4.12 KVM_SET_REGS

 Capability: basic
 Architectures: all

@@ -258,7 +258,7 @@ Writes the general purpose registers into the vcpu.

 See KVM_GET_REGS for the data structure.

-4.12 KVM_GET_SREGS
+4.13 KVM_GET_SREGS

 Capability: basic
 Architectures: x86

@@ -283,7 +283,7 @@ interrupt_bitmap is a bitmap of pending external interrupts.  At most
 one bit may be set.  This interrupt has been acknowledged by the APIC
 but not yet injected into the cpu core.

-4.13 KVM_SET_SREGS
+4.14 KVM_SET_SREGS

 Capability: basic
 Architectures: x86

@@ -294,7 +294,7 @@ Returns: 0 on success, -1 on error
 Writes special registers into the vcpu.  See KVM_GET_SREGS for the
 data structures.

-4.14 KVM_TRANSLATE
+4.15 KVM_TRANSLATE

 Capability: basic
 Architectures: x86

@@ -317,7 +317,7 @@ struct kvm_translation {
 	__u8  pad[5];
 };

-4.15 KVM_INTERRUPT
+4.16 KVM_INTERRUPT

 Capability: basic
 Architectures: x86, ppc

@@ -365,7 +365,7 @@ c) KVM_INTERRUPT_SET_LEVEL
 Note that any value for 'irq' other than the ones stated above is invalid
 and incurs unexpected behavior.

-4.16 KVM_DEBUG_GUEST
+4.17 KVM_DEBUG_GUEST

 Capability: basic
 Architectures: none

@@ -375,7 +375,7 @@ Returns: -1 on error

 Support for this has been removed.  Use KVM_SET_GUEST_DEBUG instead.

-4.17 KVM_GET_MSRS
+4.18 KVM_GET_MSRS

 Capability: basic
 Architectures: x86

@@ -403,7 +403,7 @@ Application code should set the 'nmsrs' member (which indicates the
 size of the entries array) and the 'index' member of each array entry.
 kvm will fill in the 'data' member.

-4.18 KVM_SET_MSRS
+4.19 KVM_SET_MSRS

 Capability: basic
 Architectures: x86

@@ -418,7 +418,7 @@ Application code should set the 'nmsrs' member (which indicates the
 size of the entries array), and the 'index' and 'data' members of each
 array entry.

-4.19 KVM_SET_CPUID
+4.20 KVM_SET_CPUID

 Capability: basic
 Architectures: x86

@@ -446,7 +446,7 @@ struct kvm_cpuid {
 	struct kvm_cpuid_entry entries[0];
 };

-4.20 KVM_SET_SIGNAL_MASK
+4.21 KVM_SET_SIGNAL_MASK

 Capability: basic
 Architectures: x86

@@ -468,7 +468,7 @@ struct kvm_signal_mask {
 	__u8  sigset[0];
 };

-4.21 KVM_GET_FPU
+4.22 KVM_GET_FPU

 Capability: basic
 Architectures: x86

@@ -493,7 +493,7 @@ struct kvm_fpu {
 	__u32 pad2;
 };

-4.22 KVM_SET_FPU
+4.23 KVM_SET_FPU

 Capability: basic
 Architectures: x86

@@ -518,7 +518,7 @@ struct kvm_fpu {
 	__u32 pad2;
 };

-4.23 KVM_CREATE_IRQCHIP
+4.24 KVM_CREATE_IRQCHIP

 Capability: KVM_CAP_IRQCHIP
 Architectures: x86, ia64

@@ -531,7 +531,7 @@ ioapic, a virtual PIC (two PICs, nested), and sets up future vcpus to have a
 local APIC.  IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23
 only go to the IOAPIC.  On ia64, a IOSAPIC is created.

-4.24 KVM_IRQ_LINE
+4.25 KVM_IRQ_LINE

 Capability: KVM_CAP_IRQCHIP
 Architectures: x86, ia64

@@ -552,7 +552,7 @@ struct kvm_irq_level {
 	__u32 level;		/* 0 or 1 */
 };

-4.25 KVM_GET_IRQCHIP
+4.26 KVM_GET_IRQCHIP

 Capability: KVM_CAP_IRQCHIP
 Architectures: x86, ia64

@@ -573,7 +573,7 @@ struct kvm_irqchip {
 	} chip;
 };

-4.26 KVM_SET_IRQCHIP
+4.27 KVM_SET_IRQCHIP

 Capability: KVM_CAP_IRQCHIP
 Architectures: x86, ia64

@@ -594,7 +594,7 @@ struct kvm_irqchip {
 	} chip;
 };

-4.27 KVM_XEN_HVM_CONFIG
+4.28 KVM_XEN_HVM_CONFIG

 Capability: KVM_CAP_XEN_HVM
 Architectures: x86

@@ -618,7 +618,7 @@ struct kvm_xen_hvm_config {
 	__u8 pad2[30];
 };

-4.27 KVM_GET_CLOCK
+4.29 KVM_GET_CLOCK

 Capability: KVM_CAP_ADJUST_CLOCK
 Architectures: x86

@@ -636,7 +636,7 @@ struct kvm_clock_data {
 	__u32 pad[9];
 };

-4.28 KVM_SET_CLOCK
+4.30 KVM_SET_CLOCK

 Capability: KVM_CAP_ADJUST_CLOCK
 Architectures: x86

@@ -654,7 +654,7 @@ struct kvm_clock_data {
 	__u32 pad[9];
 };

-4.29 KVM_GET_VCPU_EVENTS
+4.31 KVM_GET_VCPU_EVENTS

 Capability: KVM_CAP_VCPU_EVENTS
 Extended by: KVM_CAP_INTR_SHADOW

@@ -693,7 +693,7 @@ struct kvm_vcpu_events {
 KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that
 interrupt.shadow contains a valid state. Otherwise, this field is undefined.

-4.30 KVM_SET_VCPU_EVENTS
+4.32 KVM_SET_VCPU_EVENTS

 Capability: KVM_CAP_VCPU_EVENTS
 Extended by: KVM_CAP_INTR_SHADOW

@@ -719,7 +719,7 @@ If KVM_CAP_INTR_SHADOW is available, KVM_VCPUEVENT_VALID_SHADOW can be set in
 the flags field to signal that interrupt.shadow contains a valid state and
 shall be written into the VCPU.

-4.32 KVM_GET_DEBUGREGS
+4.33 KVM_GET_DEBUGREGS

 Capability: KVM_CAP_DEBUGREGS
 Architectures: x86

@@ -737,7 +737,7 @@ struct kvm_debugregs {
 	__u64 reserved[9];
 };

-4.33 KVM_SET_DEBUGREGS
+4.34 KVM_SET_DEBUGREGS

 Capability: KVM_CAP_DEBUGREGS
 Architectures: x86

@@ -750,7 +750,7 @@ Writes debug registers into the vcpu.
 See KVM_GET_DEBUGREGS for the data structure. The flags field is unused
 yet and must be cleared on entry.

-4.34 KVM_SET_USER_MEMORY_REGION
+4.35 KVM_SET_USER_MEMORY_REGION

 Capability: KVM_CAP_USER_MEM
 Architectures: all

@@ -796,7 +796,7 @@ It is recommended to use this API instead of the KVM_SET_MEMORY_REGION ioctl.
 The KVM_SET_MEMORY_REGION does not allow fine grained control over memory
 allocation and is deprecated.

-4.35 KVM_SET_TSS_ADDR
+4.36 KVM_SET_TSS_ADDR

 Capability: KVM_CAP_SET_TSS_ADDR
 Architectures: x86

@@ -814,7 +814,7 @@ This ioctl is required on Intel-based hosts.  This is needed on Intel hardware
 because of a quirk in the virtualization implementation (see the internals
 documentation when it pops into existence).

-4.36 KVM_ENABLE_CAP
+4.37 KVM_ENABLE_CAP

 Capability: KVM_CAP_ENABLE_CAP
 Architectures: ppc

@@ -849,7 +849,7 @@ function properly, this is the place to put them.
 	__u8  pad[64];
 };

-4.37 KVM_GET_MP_STATE
+4.38 KVM_GET_MP_STATE

 Capability: KVM_CAP_MP_STATE
 Architectures: x86, ia64

@@ -879,7 +879,7 @@ Possible values are:
 This ioctl is only useful after KVM_CREATE_IRQCHIP.  Without an in-kernel
 irqchip, the multiprocessing state must be maintained by userspace.

-4.38 KVM_SET_MP_STATE
+4.39 KVM_SET_MP_STATE

 Capability: KVM_CAP_MP_STATE
 Architectures: x86, ia64

@@ -893,7 +893,7 @@ arguments.
 This ioctl is only useful after KVM_CREATE_IRQCHIP.  Without an in-kernel
 irqchip, the multiprocessing state must be maintained by userspace.

-4.39 KVM_SET_IDENTITY_MAP_ADDR
+4.40 KVM_SET_IDENTITY_MAP_ADDR

 Capability: KVM_CAP_SET_IDENTITY_MAP_ADDR
 Architectures: x86

@@ -911,7 +911,7 @@ This ioctl is required on Intel-based hosts.  This is needed on Intel hardware
 because of a quirk in the virtualization implementation (see the internals
 documentation when it pops into existence).

-4.40 KVM_SET_BOOT_CPU_ID
+4.41 KVM_SET_BOOT_CPU_ID

 Capability: KVM_CAP_SET_BOOT_CPU_ID
 Architectures: x86, ia64

@@ -923,7 +923,7 @@ Define which vcpu is the Bootstrap Processor (BSP).  Values are the same
 as the vcpu id in KVM_CREATE_VCPU.  If this ioctl is not called, the default
 is vcpu 0.

-4.41 KVM_GET_XSAVE
+4.42 KVM_GET_XSAVE

 Capability: KVM_CAP_XSAVE
 Architectures: x86

@@ -937,7 +937,7 @@ struct kvm_xsave {

 This ioctl would copy current vcpu's xsave struct to the userspace.

-4.42 KVM_SET_XSAVE
+4.43 KVM_SET_XSAVE

 Capability: KVM_CAP_XSAVE
 Architectures: x86

@@ -951,7 +951,7 @@ struct kvm_xsave {

 This ioctl would copy userspace's xsave struct to the kernel.

-4.43 KVM_GET_XCRS
+4.44 KVM_GET_XCRS

 Capability: KVM_CAP_XCRS
 Architectures: x86

@@ -974,7 +974,7 @@ struct kvm_xcrs {

 This ioctl would copy current vcpu's xcrs to the userspace.

-4.44 KVM_SET_XCRS
+4.45 KVM_SET_XCRS

 Capability: KVM_CAP_XCRS
 Architectures: x86

@@ -997,7 +997,7 @@ struct kvm_xcrs {

 This ioctl would set vcpu's xcr to the value userspace specified.

-4.45 KVM_GET_SUPPORTED_CPUID
+4.46 KVM_GET_SUPPORTED_CPUID

 Capability: KVM_CAP_EXT_CPUID
 Architectures: x86

@@ -1062,7 +1062,7 @@ emulate them efficiently. The fields in each entry are defined as follows:
    eax, ebx, ecx, edx: the values returned by the cpuid instruction for
          this function/index combination

-4.46 KVM_PPC_GET_PVINFO
+4.47 KVM_PPC_GET_PVINFO

 Capability: KVM_CAP_PPC_GET_PVINFO
 Architectures: ppc

@@ -1085,7 +1085,7 @@ of 4 instructions that make up a hypercall.
 If any additional field gets added to this structure later on, a bit for that
 additional piece of information will be set in the flags bitmap.

-4.47 KVM_ASSIGN_PCI_DEVICE
+4.48 KVM_ASSIGN_PCI_DEVICE

 Capability: KVM_CAP_DEVICE_ASSIGNMENT
 Architectures: x86 ia64

@@ -1113,7 +1113,7 @@ following flags are specified:
 /* Depends on KVM_CAP_IOMMU */
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)

-4.48 KVM_DEASSIGN_PCI_DEVICE
+4.49 KVM_DEASSIGN_PCI_DEVICE

 Capability: KVM_CAP_DEVICE_DEASSIGNMENT
 Architectures: x86 ia64

@@ -1126,7 +1126,7 @@ Ends PCI device assignment, releasing all associated resources.
 See KVM_CAP_DEVICE_ASSIGNMENT for the data structure. Only assigned_dev_id is
 used in kvm_assigned_pci_dev to identify the device.

-4.49 KVM_ASSIGN_DEV_IRQ
+4.50 KVM_ASSIGN_DEV_IRQ

 Capability: KVM_CAP_ASSIGN_DEV_IRQ
 Architectures: x86 ia64

@@ -1164,7 +1164,7 @@ The following flags are defined:
 It is not valid to specify multiple types per host or guest IRQ. However, the
 IRQ type of host and guest can differ or can even be null.

-4.50 KVM_DEASSIGN_DEV_IRQ
+4.51 KVM_DEASSIGN_DEV_IRQ

 Capability: KVM_CAP_ASSIGN_DEV_IRQ
 Architectures: x86 ia64

@@ -1178,7 +1178,7 @@ See KVM_ASSIGN_DEV_IRQ for the data structure. The target device is specified
 by assigned_dev_id, flags must correspond to the IRQ type specified on
 KVM_ASSIGN_DEV_IRQ. Partial deassignment of host or guest IRQ is allowed.

-4.51 KVM_SET_GSI_ROUTING
+4.52 KVM_SET_GSI_ROUTING

 Capability: KVM_CAP_IRQ_ROUTING
 Architectures: x86 ia64

@@ -1226,7 +1226,7 @@ struct kvm_irq_routing_msi {
 	__u32 pad;
 };

-4.52 KVM_ASSIGN_SET_MSIX_NR
+4.53 KVM_ASSIGN_SET_MSIX_NR

 Capability: KVM_CAP_DEVICE_MSIX
 Architectures: x86 ia64

@@ -1245,7 +1245,7 @@ struct kvm_assigned_msix_nr {

 #define KVM_MAX_MSIX_PER_DEV		256

-4.53 KVM_ASSIGN_SET_MSIX_ENTRY
+4.54 KVM_ASSIGN_SET_MSIX_ENTRY

 Capability: KVM_CAP_DEVICE_MSIX
 Architectures: x86 ia64
@@ -367,7 +367,7 @@ the different loglevels.

 - console_loglevel: messages with a higher priority than
   this will be printed to the console
-- default_message_level: messages without an explicit priority
+- default_message_loglevel: messages without an explicit priority
   will be printed with this priority
- minimum_console_loglevel: minimum (highest) value to which
   console_loglevel can be set
@@ -84,8 +84,7 @@ indicate that the page is being managed on the unevictable list.

 The PG_unevictable flag is analogous to, and mutually exclusive with, the
 PG_active flag in that it indicates on which LRU list a page resides when
-PG_lru is set.  The unevictable list is compile-time configurable based on the
-UNEVICTABLE_LRU Kconfig option.
+PG_lru is set.

 The Unevictable LRU infrastructure maintains unevictable pages on an additional
 LRU list for a few reasons:
MAINTAINERS (15 lines changed)
@@ -2376,7 +2376,7 @@ F:	include/linux/edac_mce.h

 EDAC-I82975X
 M:	Ranganathan Desikan <ravi@jetztechnologies.com>
-M:	"Arvind R." <arvind@jetztechnologies.com>
+M:	"Arvind R." <arvino55@gmail.com>
 L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
 W:	bluesmoke.sourceforge.net
 S:	Maintained

@@ -3472,6 +3472,7 @@ F:	net/ipx/
 IRDA SUBSYSTEM
 M:	Samuel Ortiz <samuel@sortiz.org>
 L:	irda-users@lists.sourceforge.net (subscribers-only)
+L:	netdev@vger.kernel.org
 W:	http://irda.sourceforge.net/
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sameo/irda-2.6.git

@@ -3909,6 +3910,12 @@ L:	linux-security-module@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/chrisw/lsm-2.6.git
 S:	Supported

+LIS3LV02D ACCELEROMETER DRIVER
+M:	Eric Piel <eric.piel@tremplin-utc.net>
+S:	Maintained
+F:	Documentation/hwmon/lis3lv02d
+F:	drivers/hwmon/lis3lv02d.*
+
 LLC (802.2)
 M:	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 S:	Maintained

@@ -3916,12 +3923,6 @@ F:	include/linux/llc.h
 F:	include/net/llc*
 F:	net/llc/

-LIS3LV02D ACCELEROMETER DRIVER
-M:	Eric Piel <eric.piel@tremplin-utc.net>
-S:	Maintained
-F:	Documentation/hwmon/lis3lv02d
-F:	drivers/hwmon/lis3lv02d.*
-
 LM73 HARDWARE MONITOR DRIVER
 M:	Guillaume Ligneul <guillaume.ligneul@gmail.com>
 L:	lm-sensors@lm-sensors.org
Makefile (2 lines changed)
@@ -666,7 +666,7 @@ export MODLIB
 #  INSTALL_MOD_STRIP, if defined, will cause modules to be
 #  stripped after they are installed.  If INSTALL_MOD_STRIP is '1', then
 #  the default option --strip-debug will be used.  Otherwise,
-#  INSTALL_MOD_STRIP will used as the options to the strip command.
+#  INSTALL_MOD_STRIP value will be used as the options to the strip command.

 ifdef INSTALL_MOD_STRIP
 ifeq ($(INSTALL_MOD_STRIP),1)
@@ -63,7 +63,7 @@ extern void flush_icache_user_range(struct vm_area_struct *vma,
 			struct page *page, unsigned long addr, int len);
 #endif

-/* This is used only in do_no_page and do_swap_page. */
+/* This is used only in __do_fault and do_swap_page. */
 #define flush_icache_page(vma, page) \
 	flush_icache_user_range((vma), (page), 0, 0)

@@ -498,7 +498,7 @@ struct mbox *mbox_setup(u8 mbox_id, mbox_recv_cb_t *mbox_cb, void *priv)
 #endif

 	dev_info(&(mbox->pdev->dev),
-		 "Mailbox driver with index %d initated!\n", mbox_id);
+		 "Mailbox driver with index %d initiated!\n", mbox_id);

 exit:
 	return mbox;
@@ -54,7 +54,7 @@ config OMAP_SMARTREFLEX
 	  user must write 1 to
 	  /debug/voltage/vdd_<X>/smartreflex/autocomp,
 	  where X is mpu or core for OMAP3.
-	  Optionallly autocompensation can be enabled in the kernel
+	  Optionally autocompensation can be enabled in the kernel
 	  by default during system init via the enable_on_init flag
 	  which an be passed as platform data to the smartreflex driver.

@@ -113,7 +113,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
 }

 /*
- * This one is called from do_no_page(), do_swap_page() and install_page().
+ * This one is called from __do_fault() and do_swap_page().
  */
 void flush_icache_page(struct vm_area_struct *vma, struct page *page)
 {
@@ -241,7 +241,7 @@ flush_etrax_cacherange(void *startadr, int length)
 }

 /* Due to a bug in Etrax100(LX) all versions, receiving DMA buffers
- * will occationally corrupt certain CPU writes if the DMA buffers
+ * will occasionally corrupt certain CPU writes if the DMA buffers
  * happen to be hot in the cache.
  *
  * As a workaround, we have to flush the relevant parts of the cache
@@ -7,7 +7,7 @@
 #define _ASM_IA64_PERFMON_H

 /*
- * perfmon comamnds supported on all CPU models
+ * perfmon commands supported on all CPU models
  */
 #define PFM_WRITE_PMCS		0x01
 #define PFM_WRITE_PMDS		0x02
@@ -3983,7 +3983,7 @@ int mp_find_ioapic_pin(int ioapic, u32 gsi)
 static __init int bad_ioapic(unsigned long address)
 {
 	if (nr_ioapics >= MAX_IO_APICS) {
-		printk(KERN_WARNING "WARING: Max # of I/O APICs (%d) exceeded "
+		printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded "
 		       "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics);
 		return 1;
 	}
@@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
 #endif
 }

-static int inline addr_increment(void)
+static inline int addr_increment(void)
 {
 #ifdef CONFIG_SMP
 	return smp_num_siblings == 2 ? 2 : 1;
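
Several hunks in this merge reorder declaration specifiers like this one. Both orderings compile with GCC, but keeping 'inline' between the storage class and the return type is the conventional kernel style and is what checkers such as checkpatch.pl flag; a minimal illustration (hypothetical function names):

	/* unconventional: 'inline' trails the return type */
	static int inline addr_increment_unconventional(void) { return 1; }

	/* preferred: storage class, then 'inline', then return type */
	static inline int addr_increment_preferred(void) { return 1; }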
@@ -598,7 +598,7 @@ CONFIG_DEBUG_NOMMU_REGIONS=y
 # CONFIG_CONTEXT_SWITCH_TRACER is not set
 # CONFIG_BOOT_TRACER is not set
 # CONFIG_TRACE_BRANCH_PROFILING is not set
-# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+# CONFIG_DYNAMIC_DEBUG is not set
 # CONFIG_SAMPLES is not set

 #
@@ -1031,7 +1031,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
 	/* We now use the "submit_command" function to submit commands to
 	   the firestream. There is a define up near the definition of
 	   that routine that switches this routine between immediate write
-	   to the immediate comamnd registers and queuing the commands in
+	   to the immediate command registers and queuing the commands in
 	   the HPTXQ for execution. This last technique might be more
 	   efficient if we know we're going to submit a whole lot of
 	   commands in one go, but this driver is not setup to be able to
@@ -95,7 +95,7 @@ static unsigned long smart4_completed(ctlr_info_t *h)
 /*
  *  This hardware returns interrupt pending at a different place and
  *  it does not tell us if the fifo is empty, we will have check
- *  that by getting a 0 back from the comamnd_completed call.
+ *  that by getting a 0 back from the command_completed call.
  */
 static unsigned long smart4_intr_pending(ctlr_info_t *h)
 {
@@ -433,7 +433,7 @@ static void btusb_isoc_complete(struct urb *urb)
 	}
 }

-static void inline __fill_isoc_descriptor(struct urb *urb, int len, int mtu)
+static inline void __fill_isoc_descriptor(struct urb *urb, int len, int mtu)
 {
 	int i, offset = 0;

@@ -780,7 +780,7 @@ static void btusb_notify(struct hci_dev *hdev, unsigned int evt)
 	}
 }

-static int inline __set_isoc_interface(struct hci_dev *hdev, int altsetting)
+static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting)
 {
 	struct btusb_data *data = hdev->driver_data;
 	struct usb_interface *intf = data->isoc;
@@ -300,7 +300,7 @@ static struct kobj_type ktype_state_cpuidle = {
 	.release = cpuidle_state_sysfs_release,
 };

-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
 {
 	kobject_put(&device->kobjs[i]->kobj);
 	wait_for_completion(&device->kobjs[i]->kobj_unregister);
@@ -849,7 +849,7 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)

 			/* Must clear TC interrupt before calling
 			 * dma_tc_handle
-			 * in case tc_handle initate a new dma job
+			 * in case tc_handle initiate a new dma job
 			 */
 			__set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);

@@ -894,7 +894,7 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
 			}
 			/* Must clear TC interrupt before calling
 			 * dma_tc_handle
-			 * in case tc_handle initate a new dma job
+			 * in case tc_handle initiate a new dma job
 			 */
 			__set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);

@@ -750,7 +750,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 		return;
 	}

-	/* Find the first not transferred desciptor */
+	/* Find the first not transferred descriptor */
 	list_for_each_entry(desc, &sh_chan->ld_queue, node)
 		if (desc->mark == DESC_SUBMITTED) {
 			dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
@@ -629,7 +629,7 @@ static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 			desc_node)
 		list_move(&td_desc->desc_node, &td_chan->free_list);

-	/* now tear down the runnning */
+	/* now tear down the running */
 	__td_finish(td_chan);
 	spin_unlock_bh(&td_chan->lock);

@@ -162,7 +162,7 @@ static struct edac_pci_ctl_info *i7300_pci;
 #define AMBPRESENT_0	0x64
 #define AMBPRESENT_1	0x66

-const static u16 mtr_regs[MAX_SLOTS] = {
+static const u16 mtr_regs[MAX_SLOTS] = {
 	0x80, 0x84, 0x88, 0x8c,
 	0x82, 0x86, 0x8a, 0x8e
 };
@@ -160,8 +160,8 @@ NOTE: Only ONE of the three must be enabled
  *		3:2  Rank 1 architecture
  *		1:0  Rank 0 architecture
  *
- *		00 => x16 devices; i.e 4 banks
- *		01 => x8  devices; i.e 8 banks
+ *		00 => 4 banks
+ *		01 => 8 banks
  */
 #define I82975X_C0BNKARC	0x10e
 #define I82975X_C1BNKARC	0x18e

@@ -278,6 +278,7 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
 		struct i82975x_error_info *info, int handle_errors)
 {
 	int row, multi_chan, chan;
+	unsigned long offst, page;

 	multi_chan = mci->csrows[0].nr_channels - 1;

@@ -292,17 +293,19 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
 		info->errsts = info->errsts2;
 	}

-	chan = info->eap & 1;
-	info->eap >>= 1;
-	if (info->xeap )
-		info->eap |= 0x80000000;
-	info->eap >>= PAGE_SHIFT;
-	row = edac_mc_find_csrow_by_page(mci, info->eap);
+	page = (unsigned long) info->eap;
+	if (info->xeap & 1)
+		page |= 0x100000000ul;
+	chan = page & 1;
+	page >>= 1;
+	offst = page & ((1 << PAGE_SHIFT) - 1);
+	page >>= PAGE_SHIFT;
+	row = edac_mc_find_csrow_by_page(mci, page);

 	if (info->errsts & 0x0002)
-		edac_mc_handle_ue(mci, info->eap, 0, row, "i82975x UE");
+		edac_mc_handle_ue(mci, page, offst , row, "i82975x UE");
 	else
-		edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
+		edac_mc_handle_ce(mci, page, offst, info->derrsyn, row,
 				multi_chan ? chan : 0,
 				"i82975x CE");

@@ -344,11 +347,7 @@ static int dual_channel_active(void __iomem *mch_window)
 static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
 {
 	/*
-	 * ASUS P5W DH either does not program this register or programs
-	 * it wrong!
-	 * ECC is possible on i92975x ONLY with DEV_X8 which should mean 'val'
-	 * for each rank should be 01b - the LSB of the word should be 0x55;
-	 * but it reads 0!
+	 * ECC is possible on i92975x ONLY with DEV_X8
 	 */
 	return DEV_X8;
 }

@@ -356,11 +355,15 @@ static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
 static void i82975x_init_csrows(struct mem_ctl_info *mci,
 		struct pci_dev *pdev, void __iomem *mch_window)
 {
+	static const char *labels[4] = {
+			"DIMM A1", "DIMM A2",
+			"DIMM B1", "DIMM B2"
+		};
 	struct csrow_info *csrow;
 	unsigned long last_cumul_size;
 	u8 value;
 	u32 cumul_size;
-	int index;
+	int index, chan;

 	last_cumul_size = 0;

@@ -369,11 +372,7 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
 	 * The dram row boundary (DRB) reg values are boundary address
 	 * for each DRAM row with a granularity of 32 or 64MB (single/dual
 	 * channel operation).  DRB regs are cumulative; therefore DRB7 will
-	 * contain the total memory contained in all eight rows.
-	 *
-	 * FIXME:
-	 *   EDAC currently works for Dual-channel Interleaved configuration.
-	 *   Other configurations, which the chip supports, need fixing/testing.
+	 * contain the total memory contained in all rows.
 	 *
 	 */

@@ -384,8 +383,26 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
 				((index >= 4) ? 0x80 : 0));
 		cumul_size = value;
 		cumul_size <<= (I82975X_DRB_SHIFT - PAGE_SHIFT);
+		/*
+		 * Adjust cumul_size w.r.t number of channels
+		 *
+		 */
+		if (csrow->nr_channels > 1)
+			cumul_size <<= 1;
 		debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
 			cumul_size);

+		/*
+		 * Initialise dram labels
+		 * index values:
+		 *   [0-7] for single-channel; i.e. csrow->nr_channels = 1
+		 *   [0-3] for dual-channel; i.e. csrow->nr_channels = 2
+		 */
+		for (chan = 0; chan < csrow->nr_channels; chan++)
+			strncpy(csrow->channels[chan].label,
+					labels[(index >> 1) + (chan * 2)],
+					EDAC_MC_LABEL_LEN);
+
 		if (cumul_size == last_cumul_size)
 			continue;	/* not populated */

@@ -393,8 +410,8 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
 		csrow->last_page = cumul_size - 1;
 		csrow->nr_pages = cumul_size - last_cumul_size;
 		last_cumul_size = cumul_size;
-		csrow->grain = 1 << 7;	/* I82975X_EAP has 128B resolution */
-		csrow->mtype = MEM_DDR; /* i82975x supports only DDR2 */
+		csrow->grain = 1 << 6;	/* I82975X_EAP has 64B resolution */
+		csrow->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
 		csrow->dtype = i82975x_dram_type(mch_window, index);
 		csrow->edac_mode = EDAC_SECDED; /* only supported */
 	}

@@ -515,18 +532,20 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)

 	debugf3("%s(): init mci\n", __func__);
 	mci->dev = &pdev->dev;
-	mci->mtype_cap = MEM_FLAG_DDR;
+	mci->mtype_cap = MEM_FLAG_DDR2;
 	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
 	mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
 	mci->mod_name = EDAC_MOD_STR;
 	mci->mod_ver = I82975X_REVISION;
 	mci->ctl_name = i82975x_devs[dev_idx].ctl_name;
+	mci->dev_name = pci_name(pdev);
 	mci->edac_check = i82975x_check;
 	mci->ctl_page_to_phys = NULL;
 	debugf3("%s(): init pvt\n", __func__);
 	pvt = (struct i82975x_pvt *) mci->pvt_info;
 	pvt->mch_window = mch_window;
 	i82975x_init_csrows(mci, pdev, mch_window);
+	mci->scrub_mode = SCRUB_HW_SRC;
 	i82975x_get_error_info(mci, &discard);  /* clear counters */

 	/* finalize this instance of memory controller with edac core */

@@ -664,7 +683,7 @@ module_init(i82975x_init);
 module_exit(i82975x_exit);

 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Arvind R. <arvind@acarlab.com>");
+MODULE_AUTHOR("Arvind R. <arvino55@gmail.com>");
 MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers");

 module_param(edac_op_state, int, 0444);
@@ -268,8 +268,10 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
 	}

 	/* generate SMI */
+	/* inb to force posted write through and make SMI happen now */
 	asm volatile (
-		"outb %b0,%w1"
+		"outb %b0,%w1\n"
+		"inb %w1"
 		: /* no output args */
 		: "a" (smi_cmd->command_code),
 		  "d" (smi_cmd->command_address),
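
The fix above works because port writes can be posted (buffered) by intervening bridges; a read from the same port cannot complete until the write has reached the device, so the dummy inb forces the SMI to be raised before the driver continues. A minimal sketch of the pattern, with a hypothetical cmd/port pair (the real driver must also pass the command buffer address in registers, hence the inline asm shown in the hunk):

	#include <asm/io.h>

	static void trigger_smi_sketch(u8 cmd, u16 port)
	{
		outb(cmd, port);	/* may be posted */
		(void)inb(port);	/* the read forces the write through */
	}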
@@ -59,9 +59,7 @@ drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
 {
 	int ret = 0;

-	sman->mm = (struct drm_sman_mm *) kcalloc(num_managers,
-						  sizeof(*sman->mm),
-						  GFP_KERNEL);
+	sman->mm = kcalloc(num_managers, sizeof(*sman->mm), GFP_KERNEL);
 	if (!sman->mm) {
 		ret = -ENOMEM;
 		goto out;
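
This and several later hunks drop redundant casts on kmalloc()/kcalloc()/vmalloc(). In C, unlike C++, void * converts implicitly to any object pointer type, so the cast is pure noise and can even mask bugs such as a missing prototype silently returning int. A minimal sketch with a hypothetical struct:

	#include <linux/slab.h>

	struct foo { int x; };

	static struct foo *alloc_foo(void)
	{
		/* no cast needed: kmalloc() returns void * */
		return kmalloc(sizeof(struct foo), GFP_KERNEL);
	}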
@@ -2987,7 +2987,7 @@ int evergreen_resume(struct radeon_device *rdev)

 	r = r600_ib_test(rdev);
 	if (r) {
-		DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
 		return r;
 	}

@@ -3617,7 +3617,7 @@ int r100_ib_test(struct radeon_device *rdev)
 	if (i < rdev->usec_timeout) {
 		DRM_INFO("ib test succeeded in %u usecs\n", i);
 	} else {
-		DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
+		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
 			  scratch, tmp);
 		r = -EINVAL;
 	}

@@ -3637,13 +3637,13 @@ int r100_ib_init(struct radeon_device *rdev)

 	r = radeon_ib_pool_init(rdev);
 	if (r) {
-		dev_err(rdev->dev, "failled initializing IB pool (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r);
 		r100_ib_fini(rdev);
 		return r;
 	}
 	r = r100_ib_test(rdev);
 	if (r) {
-		dev_err(rdev->dev, "failled testing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
 		r100_ib_fini(rdev);
 		return r;
 	}

@@ -3799,12 +3799,12 @@ static int r100_startup(struct radeon_device *rdev)
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
-		dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 	}
 	r = r100_ib_init(rdev);
 	if (r) {
-		dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
 		return r;
 	}
 	return 0;

@@ -1401,12 +1401,12 @@ static int r300_startup(struct radeon_device *rdev)
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
-		dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 	}
 	r = r100_ib_init(rdev);
 	if (r) {
-		dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
 		return r;
 	}
 	return 0;

@@ -260,13 +260,13 @@ static int r420_startup(struct radeon_device *rdev)
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
-		dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 	}
 	r420_cp_errata_init(rdev);
 	r = r100_ib_init(rdev);
 	if (r) {
-		dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
 		return r;
 	}
 	return 0;

@@ -193,12 +193,12 @@ static int r520_startup(struct radeon_device *rdev)
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
-		dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 	}
 	r = r100_ib_init(rdev);
 	if (r) {
-		dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
 		return r;
 	}
 	return 0;

@@ -2464,7 +2464,7 @@ int r600_resume(struct radeon_device *rdev)

 	r = r600_ib_test(rdev);
 	if (r) {
-		DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
 		return r;
 	}

@@ -151,7 +151,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 	/* 64 dwords should be enough for fence too */
 	r = radeon_ring_lock(rdev, 64);
 	if (r) {
-		DRM_ERROR("radeon: scheduling IB failled (%d).\n", r);
+		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
 		return r;
 	}
 	radeon_ring_ib_execute(rdev, ib);

@@ -412,12 +412,12 @@ static int rs400_startup(struct radeon_device *rdev)
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
-		dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 	}
 	r = r100_ib_init(rdev);
 	if (r) {
-		dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
 		return r;
 	}
 	return 0;

@@ -865,12 +865,12 @@ static int rs600_startup(struct radeon_device *rdev)
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
-		dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 	}
 	r = r100_ib_init(rdev);
 	if (r) {
-		dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
 		return r;
 	}

@@ -627,12 +627,12 @@ static int rs690_startup(struct radeon_device *rdev)
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
-		dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 	}
 	r = r100_ib_init(rdev);
 	if (r) {
-		dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
 		return r;
 	}

@@ -398,12 +398,12 @@ static int rv515_startup(struct radeon_device *rdev)
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
-		dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 	}
 	r = r100_ib_init(rdev);
 	if (r) {
-		dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
 		return r;
 	}
 	return 0;

@@ -1209,7 +1209,7 @@ int rv770_resume(struct radeon_device *rdev)

 	r = r600_ib_test(rdev);
 	if (r) {
-		DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
 		return r;
 	}

@@ -206,7 +206,7 @@ recv_Bchannel(struct bchannel *bch, unsigned int id)
 	hh->id = id;
 	if (bch->rcount >= 64) {
 		printk(KERN_WARNING "B-channel %p receive queue overflow, "
-			"fushing!\n", bch);
+			"flushing!\n", bch);
 		skb_queue_purge(&bch->rqueue);
 		bch->rcount = 0;
 		return;

@@ -231,7 +231,7 @@ recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
 {
 	if (bch->rcount >= 64) {
 		printk(KERN_WARNING "B-channel %p receive queue overflow, "
-			"fushing!\n", bch);
+			"flushing!\n", bch);
 		skb_queue_purge(&bch->rqueue);
 		bch->rcount = 0;
 	}

@@ -279,7 +279,7 @@ confirm_Bsend(struct bchannel *bch)

 	if (bch->rcount >= 64) {
 		printk(KERN_WARNING "B-channel %p receive queue overflow, "
-			"fushing!\n", bch);
+			"flushing!\n", bch);
 		skb_queue_purge(&bch->rqueue);
 		bch->rcount = 0;
 	}

@@ -1044,8 +1044,7 @@ static long i2o_cfg_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)

 static int cfg_open(struct inode *inode, struct file *file)
 {
-	struct i2o_cfg_info *tmp =
-	    (struct i2o_cfg_info *)kmalloc(sizeof(struct i2o_cfg_info),
+	struct i2o_cfg_info *tmp = kmalloc(sizeof(struct i2o_cfg_info),
 					   GFP_KERNEL);
 	unsigned long flags;

@@ -722,9 +722,8 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
 		/*
 		 * MXC NANDFC can only perform full page+spare or
 		 * spare-only read/write.  When the upper layers
-		 * layers perform a read/write buf operation,
-		 * we will used the saved column address to index into
-		 * the full page.
+		 * perform a read/write buf operation, the saved column
+		 * address is used to index into the full page.
 		 */
 		host->send_addr(host, 0, page_addr == -1);
 		if (mtd->writesize > 512)
@@ -265,7 +265,7 @@ struct atl1c_recv_ret_status {
 	__le32	word3;
 };

-/* RFD desciptor */
+/* RFD descriptor */
 struct atl1c_rx_free_desc {
 	__le64	buffer_addr;
 };

@@ -531,7 +531,7 @@ struct atl1c_rfd_ring {
 	struct atl1c_buffer *buffer_info;
 };

-/* receive return desciptor (rrd) ring */
+/* receive return descriptor (rrd) ring */
 struct atl1c_rrd_ring {
 	void *desc;		/* descriptor ring virtual address */
 	dma_addr_t dma;		/* descriptor ring physical address */
@@ -2460,7 +2460,7 @@ map_error:
 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
 * in the IOCB plus a chain of outbound address lists (OAL) that
 * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
-* will used to point to an OAL when more ALP entries are required.
+* will be used to point to an OAL when more ALP entries are required.
 * The IOCB is always the top of the chain followed by one or more
 * OALs (when necessary).
 */
@@ -843,7 +843,7 @@ struct gem_txd {

 /* GEM requires that RX descriptors are provided four at a time,
  * aligned.  Also, the RX ring may not wrap around.  This means that
- * there will be at least 4 unused desciptor entries in the middle
+ * there will be at least 4 unused descriptor entries in the middle
  * of the RX ring at all times.
  *
  * Similar to HME, GEM assumes that it can write garbage bytes before
@@ -39,7 +39,6 @@
 #include <linux/slab.h>
 #include <linux/input.h>
 #include <linux/input/sparse-keymap.h>
-#include <linux/dmi.h>

 #include <acpi/acpi_drivers.h>

@@ -672,7 +672,7 @@ struct scb_data {
 /************************ Target Mode Definitions *****************************/

 /*
- * Connection desciptor for select-in requests in target mode.
+ * Connection descriptor for select-in requests in target mode.
  */
 struct target_cmd {
 	uint8_t scsiid;		/* Our ID and the initiator's ID */

@@ -618,7 +618,7 @@ struct scb_data {
 /************************ Target Mode Definitions *****************************/

 /*
- * Connection desciptor for select-in requests in target mode.
+ * Connection descriptor for select-in requests in target mode.
  */
 struct target_cmd {
 	uint8_t scsiid;		/* Our ID and the initiator's ID */
@@ -4780,7 +4780,7 @@ ahc_init_scbdata(struct ahc_softc *ahc)
 	SLIST_INIT(&scb_data->sg_maps);

 	/* Allocate SCB resources */
-	scb_data->scbarray = (struct scb *)kmalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, GFP_ATOMIC);
+	scb_data->scbarray = kmalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, GFP_ATOMIC);
 	if (scb_data->scbarray == NULL)
 		return (ENOMEM);
 	memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC);
@@ -1412,7 +1412,7 @@ megaraid_isr_memmapped(int irq, void *devp)
 * @nstatus	- number of completed commands
 * @status	- status of the last command completed
 *
- * Complete the comamnds and call the scsi mid-layer callback hooks.
+ * Complete the commands and call the scsi mid-layer callback hooks.
 */
static void
mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)

@@ -4296,7 +4296,7 @@ mega_support_cluster(adapter_t *adapter)
 * @adapter	- pointer to our soft state
 * @dma_handle	- DMA address of the buffer
 *
- * Issue internal comamnds while interrupts are available.
+ * Issue internal commands while interrupts are available.
 * We only issue direct mailbox commands from within the driver. ioctl()
 * interface using these routines can issue passthru commands.
 */
@@ -890,7 +890,7 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
 * @instance:			Adapter soft state
 * @cmd_to_abort:		Previously issued cmd to be aborted
 *
- * MFI firmware can abort previously issued AEN comamnd (automatic event
+ * MFI firmware can abort previously issued AEN command (automatic event
 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
 * cmd and waits for return status.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
@@ -1484,7 +1484,7 @@ static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct osst
 	int			dbg              = debugging;
 #endif

-	if ((buffer = (unsigned char *)vmalloc((nframes + 1) * OS_DATA_SIZE)) == NULL)
+	if ((buffer = vmalloc((nframes + 1) * OS_DATA_SIZE)) == NULL)
 		return (-EIO);

 	printk(KERN_INFO "%s:I: Reading back %d frames from drive buffer%s\n",

@@ -2296,7 +2296,7 @@ static int osst_write_header(struct osst_tape * STp, struct osst_request ** aSRp
 	if (STp->raw) return 0;

 	if (STp->header_cache == NULL) {
-		if ((STp->header_cache = (os_header_t *)vmalloc(sizeof(os_header_t))) == NULL) {
+		if ((STp->header_cache = vmalloc(sizeof(os_header_t))) == NULL) {
 			printk(KERN_ERR "%s:E: Failed to allocate header cache\n", name);
 			return (-ENOMEM);
 		}

@@ -2484,7 +2484,7 @@ static int __osst_analyze_headers(struct osst_tape * STp, struct osst_request **
 			  name, ppos, update_frame_cntr);
 #endif
 	if (STp->header_cache == NULL) {
-		if ((STp->header_cache = (os_header_t *)vmalloc(sizeof(os_header_t))) == NULL) {
+		if ((STp->header_cache = vmalloc(sizeof(os_header_t))) == NULL) {
 			printk(KERN_ERR "%s:E: Failed to allocate header cache\n", name);
 			return 0;
 		}

@@ -5851,9 +5851,7 @@ static int osst_probe(struct device *dev)
 	/* if this is the first attach, build the infrastructure */
 	write_lock(&os_scsi_tapes_lock);
 	if (os_scsi_tapes == NULL) {
-		os_scsi_tapes =
-			(struct osst_tape **)kmalloc(osst_max_dev * sizeof(struct osst_tape *),
-				   GFP_ATOMIC);
+		os_scsi_tapes = kmalloc(osst_max_dev * sizeof(struct osst_tape *), GFP_ATOMIC);
 		if (os_scsi_tapes == NULL) {
 			write_unlock(&os_scsi_tapes_lock);
 			printk(KERN_ERR "osst :E: Unable to allocate array for OnStream SCSI tapes.\n");
@@ -1027,7 +1027,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
 				    ((ddb_entry->default_time2wait +
 				      4) * HZ);

-				DEBUG2(printk("scsi%ld: ddb [%d] initate"
+				DEBUG2(printk("scsi%ld: ddb [%d] initiate"
 					      " RELOGIN after %d seconds\n",
 					      ha->host_no,
 					      ddb_entry->fw_ddb_index,

@@ -812,7 +812,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
 				    );
 			start_dpc++;
 			DEBUG(printk("scsi%ld:%d:%d: ddb [%d] "
-				     "initate relogin after"
+				     "initiate relogin after"
 				     " %d seconds\n",
 				     ha->host_no, ddb_entry->bus,
 				     ddb_entry->target,
@@ -37,7 +37,6 @@

 #include <target/target_core_base.h>
 #include <target/target_core_device.h>
-#include <target/target_core_device.h>
 #include <target/target_core_tpg.h>
 #include <target/target_core_transport.h>

@@ -292,7 +292,7 @@ struct hvcs_struct {
 	/*
 	 * Any variable below the kref is valid before a tty is connected and
 	 * stays valid after the tty is disconnected.  These shouldn't be
-	 * whacked until the koject refcount reaches zero though some entries
+	 * whacked until the kobject refcount reaches zero though some entries
 	 * may be changed via sysfs initiatives.
 	 */
 	struct kref kref; /* ref count & hvcs_struct lifetime */
@@ -15,7 +15,6 @@
 *Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */
 #include <linux/serial_reg.h>
-#include <linux/pci.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/serial_core.h>
@@ -220,7 +220,7 @@ module_exit(watchdog_exit);
 MODULE_AUTHOR("Calin A. Culianu <calin@ajvar.org>");
 MODULE_DESCRIPTION("Hardware Watchdog Device for Winsystems EPX-C3 SBC.  "
 	"Note that there is no way to probe for this device -- "
-	"so only use it if you are *sure* you are runnning on this specific "
+	"so only use it if you are *sure* you are running on this specific "
 	"SBC system from Winsystems!  It writes to IO ports 0x1ee and 0x1ef!");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
@@ -2493,7 +2493,7 @@ int close_ctree(struct btrfs_root *root)
 	 *    ERROR state on disk.
 	 *
 	 * 2. when btrfs flips readonly just in btrfs_commit_super,
-	 *    and in such case, btrfs cannnot write sb via btrfs_commit_super,
+	 *    and in such case, btrfs cannot write sb via btrfs_commit_super,
 	 *    and since fs_state has been set BTRFS_SUPER_FLAG_ERROR flag,
 	 *    btrfs will cleanup all FS resources first and write sb then.
 	 */
@@ -1808,7 +1808,7 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
 * false-negative result. d_lookup() protects against concurrent
 * renames using rename_lock seqlock.
 *
- * See Documentation/vfs/dcache-locking.txt for more details.
+ * See Documentation/filesystems/path-lookup.txt for more details.
 */
	hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
		struct inode *i;

@@ -1928,7 +1928,7 @@ struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
 * false-negative result. d_lookup() protects against concurrent
 * renames using rename_lock seqlock.
 *
- * See Documentation/vfs/dcache-locking.txt for more details.
+ * See Documentation/filesystems/path-lookup.txt for more details.
 */
	rcu_read_lock();

@@ -645,11 +645,11 @@ static int dio_send_cur_page(struct dio *dio)
 	/*
 	 * See whether this new request is contiguous with the old.
 	 *
-	 * Btrfs cannot handl having logically non-contiguous requests
-	 * submitted.  For exmple if you have
+	 * Btrfs cannot handle having logically non-contiguous requests
+	 * submitted.  For example if you have
 	 *
 	 * Logical:  [0-4095][HOLE][8192-12287]
-	 * Phyiscal: [0-4095]      [4096-8181]
+	 * Physical: [0-4095]      [4096-8191]
 	 *
 	 * We cannot submit those pages together as one BIO.  So if our
 	 * current logical offset in the file does not equal what would
@ -62,7 +62,7 @@
|
|||
* This mutex is acquired by ep_free() during the epoll file
|
||||
* cleanup path and it is also acquired by eventpoll_release_file()
|
||||
* if a file has been pushed inside an epoll set and it is then
|
||||
* close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL).
|
||||
* close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
|
||||
* It is also acquired when inserting an epoll fd onto another epoll
|
||||
* fd. We do this so that we walk the epoll tree and ensure that this
|
||||
* insertion does not create a cycle of epoll file descriptors, which
|
||||
|
@ -152,11 +152,11 @@ struct epitem {
|
|||
|
||||
/*
|
||||
* This structure is stored inside the "private_data" member of the file
|
||||
* structure and rapresent the main data sructure for the eventpoll
|
||||
* structure and represents the main data structure for the eventpoll
|
||||
* interface.
|
||||
*/
|
||||
struct eventpoll {
|
||||
/* Protect the this structure access */
|
||||
/* Protect the access to this structure */
|
||||
spinlock_t lock;
|
||||
|
||||
/*
|
||||
|
@@ -793,7 +793,7 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)

 /*
  * This is the callback that is passed to the wait queue wakeup
- * machanism. It is called by the stored file descriptors when they
+ * mechanism. It is called by the stored file descriptors when they
  * have events to report.
  */
 static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)

@@ -824,9 +824,9 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
		goto out_unlock;

	/*
-	 * If we are trasfering events to userspace, we can hold no locks
+	 * If we are transferring events to userspace, we can hold no locks
	 * (because we're accessing user memory, and because of linux f_op->poll()
-	 * semantics). All the events that happens during that period of time are
+	 * semantics). All the events that happen during that period of time are
	 * chained in ep->ovflist and requeued later on.
	 */
	if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {

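The overflow-list scheme this comment describes is a general concurrency pattern: while one thread hands events to userspace with the lock dropped, new arrivals are parked on a side list and spliced back afterwards. A rough userspace sketch, with a pthread mutex standing in for the kernel spinlock and all names invented:

#include <pthread.h>
#include <stddef.h>

struct item { struct item *next; };

struct evqueue {
	pthread_mutex_t lock;
	struct item *ready;	/* normal ready list */
	struct item *ovflist;	/* arrivals parked here while draining */
	int draining;		/* set while events are being handed out */
};

/* Producer side: divert new events to the overflow list during a drain. */
static void report_event(struct evqueue *q, struct item *it)
{
	pthread_mutex_lock(&q->lock);
	if (q->draining) {
		it->next = q->ovflist;
		q->ovflist = it;
	} else {
		it->next = q->ready;
		q->ready = it;
	}
	pthread_mutex_unlock(&q->lock);
}

When the drain finishes, the consumer retakes the lock, clears draining, and splices ovflist back onto ready, which is in spirit what eventpoll does with ep->ovflist.
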
@@ -131,7 +131,7 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
		 * fragmenting the file system's free space. Maybe we
		 * should have some hueristics or some way to allow
		 * userspace to pass a hint to file system,
-		 * especiially if the latter case turns out to be
+		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;

@@ -2844,7 +2844,7 @@ fix_extent_len:
 * ext4_get_blocks_dio_write() when DIO to write
 * to an uninitialized extent.
 *
- * Writing to an uninitized extent may result in splitting the uninitialized
+ * Writing to an uninitialized extent may result in splitting the uninitialized
 * extent into multiple /initialized uninitialized extents (up to three)
 * There are three possibilities:
 *  a> There is no split required: Entire extent should be uninitialized

@@ -458,7 +458,7 @@ static void cuse_fc_release(struct fuse_conn *fc)
 * @file:	file struct being opened
 *
 * Userland CUSE server can create a CUSE device by opening /dev/cuse
- * and replying to the initilaization request kernel sends. This
+ * and replying to the initialization request kernel sends. This
 * function is responsible for handling CUSE device initialization.
 * Because the fd opened by this function is used during
 * initialization, this function only creates cuse_conn and sends

@@ -876,7 +876,7 @@ SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
 #endif

 /*
- * fanotify_user_setup - Our initialization function.  Note that we cannnot return
+ * fanotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */

@@ -841,7 +841,7 @@ out:
 }

 /*
- * inotify_user_setup - Our initialization function.  Note that we cannnot return
+ * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */

@@ -354,7 +354,7 @@ static inline int ocfs2_match(int len,
 /*
  * Returns 0 if not found, -1 on failure, and 1 on success
  */
-static int inline ocfs2_search_dirblock(struct buffer_head *bh,
+static inline int ocfs2_search_dirblock(struct buffer_head *bh,
					struct inode *dir,
					const char *name, int namelen,
					unsigned long offset,

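Several hunks in this series make the same mechanical change, so a note on why: static int inline is legal C, since declaration specifiers may appear in any order, but gcc's -Wold-style-declaration (enabled by -Wextra) warns that 'inline' is not at the beginning of the declaration. A two-line illustration with invented function names; the same reordering recurs in the ring-buffer, IPv6, mac80211, and au88x0 hunks below.

/* Both compile; only the first draws the gcc warning. */
static int inline old_way(int x) { return x * 2; }	/* warns */
static inline int new_way(int x) { return x * 2; }	/* preferred */
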
@@ -1,8 +1,8 @@
 #ifndef __ASM_GENERIC_USER_H
 #define __ASM_GENERIC_USER_H
 /*
- * This file may define a 'struct user' structure. However, it it only
- * used for a.out file, which are not supported on new architectures.
+ * This file may define a 'struct user' structure. However, it is only
+ * used for a.out files, which are not supported on new architectures.
 */

 #endif /* __ASM_GENERIC_USER_H */

@@ -472,7 +472,7 @@ static inline int zone_is_oom_locked(const struct zone *zone)
 #ifdef CONFIG_NUMA

 /*
- * The NUMA zonelists are doubled becausse we need zonelists that restrict the
+ * The NUMA zonelists are doubled because we need zonelists that restrict the
 * allocations to a single node for GFP_THISNODE.
 *
 * [0]	: Zonelist with fallback

@@ -745,9 +745,9 @@ config BLK_CGROUP

	  This option only enables generic Block IO controller infrastructure.
	  One needs to also enable actual IO controlling logic/policy. For
-	  enabling proportional weight division of disk bandwidth in CFQ seti
-	  CONFIG_CFQ_GROUP_IOSCHED=y and for enabling throttling policy set
-	  CONFIG_BLK_THROTTLE=y.
+	  enabling proportional weight division of disk bandwidth in CFQ, set
+	  CONFIG_CFQ_GROUP_IOSCHED=y; for enabling throttling policy, set
+	  CONFIG_BLK_DEV_THROTTLING=y.

	  See Documentation/cgroups/blkio-controller.txt for more information.

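With the symbol names corrected, a configuration enabling both policies would plausibly contain the following fragment (illustrative only; exact symbols vary by kernel version):

CONFIG_BLK_CGROUP=y
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_BLK_DEV_THROTTLING=y
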
@@ -668,7 +668,7 @@ static struct list_head *rb_list_head(struct list_head *list)
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
-static int inline
+static inline int
 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		struct buffer_page *page, struct list_head *list)
 {

@@ -2172,10 +2172,10 @@ EXPORT_SYMBOL_GPL(apply_to_page_range);
 * handle_pte_fault chooses page fault handler according to an entry
 * which was read non-atomically.  Before making any commitment, on
 * those architectures or configurations (e.g. i386 with PAE) which
- * might give a mix of unmatched parts, do_swap_page and do_file_page
+ * might give a mix of unmatched parts, do_swap_page and do_nonlinear_fault
 * must check under lock before unmapping the pte and proceeding
 * (but do_wp_page is only called after already making such a check;
- * and do_anonymous_page and do_no_page can safely check later on).
+ * and do_anonymous_page can safely check later on).
 */
 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
				pte_t *page_table, pte_t orig_pte)

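The situation the corrected comment describes is a torn read: on i386 with PAE a pte is 64 bits wide but is read 32 bits at a time, so an unlocked read can observe a mix of old and new halves. The defensive shape is to re-read and compare under the lock before acting, sketched here in plain C with invented names and a pthread mutex in place of the page-table lock:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Returns true if the entry still holds the value we read earlier
 * without the lock; callers proceed only on a positive answer.
 */
static bool entry_still_same(pthread_mutex_t *ptl,
			     const uint64_t *slot, uint64_t seen)
{
	bool same;

	pthread_mutex_lock(ptl);
	same = (*slot == seen);
	pthread_mutex_unlock(ptl);
	return same;
}
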
@@ -2371,7 +2371,7 @@ reuse:
		 * bit after it clear all dirty ptes, but before a racing
		 * do_wp_page installs a dirty pte.
		 *
-		 * do_no_page is protected similarly.
+		 * __do_fault is protected similarly.
		 */
		if (!page_mkwrite) {
			wait_on_page_locked(dirty_page);

@@ -993,7 +993,7 @@ int do_migrate_pages(struct mm_struct *mm,
	 * most recent <s, d> pair that moved (s != d). If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
-	 * Otherwise when we finish scannng from_tmp, we at least have the
+	 * Otherwise when we finish scanning from_tmp, we at least have the
	 * most recent <s, d> pair that moved. If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.

@@ -779,7 +779,7 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
		 * If truncating down to a partial page, then
		 * if that page is already allocated, hold it
		 * in memory until the truncation is over, so
-		 * truncate_partial_page cannnot miss it were
+		 * truncate_partial_page cannot miss it were
		 * it assigned to swap.
		 */
		if (newsize & (PAGE_CACHE_SIZE-1)) {

@@ -357,8 +357,8 @@ EXPORT_SYMBOL(dev_addr_add_multiple);
 /**
 *	dev_addr_del_multiple - Delete device addresses by another device
 *	@to_dev: device where the addresses will be deleted
-*	@from_dev: device by which addresses the addresses will be deleted
-*	@addr_type: address type - 0 means type will used from from_dev
+*	@from_dev: device supplying the addresses to be deleted
+*	@addr_type: address type - 0 means type will be used from from_dev
 *
 *	Deletes addresses in to device by the list of addresses in from device.
 *

@@ -124,7 +124,7 @@ out:
 }
 EXPORT_SYMBOL(__inet6_lookup_established);

-static int inline compute_score(struct sock *sk, struct net *net,
+static inline int compute_score(struct sock *sk, struct net *net,
				const unsigned short hnum,
				const struct in6_addr *daddr,
				const int dif)

@@ -169,7 +169,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
	return cpu_to_le16(dur);
 }

-static int inline is_ieee80211_device(struct ieee80211_local *local,
+static inline int is_ieee80211_device(struct ieee80211_local *local,
				      struct net_device *dev)
 {
	return local == wdev_priv(dev->ieee80211_ptr);

@@ -211,7 +211,7 @@ static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma);
 //static void vortex_adbdma_stopfifo(vortex_t *vortex, int adbdma);
 static void vortex_adbdma_pausefifo(vortex_t * vortex, int adbdma);
 static void vortex_adbdma_resumefifo(vortex_t * vortex, int adbdma);
-static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma);
+static inline int vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma);
 static void vortex_adbdma_resetup(vortex_t *vortex, int adbdma);

 #ifndef CHIP_AU8810

@@ -219,7 +219,7 @@ static void vortex_wtdma_startfifo(vortex_t * vortex, int wtdma);
 static void vortex_wtdma_stopfifo(vortex_t * vortex, int wtdma);
 static void vortex_wtdma_pausefifo(vortex_t * vortex, int wtdma);
 static void vortex_wtdma_resumefifo(vortex_t * vortex, int wtdma);
-static int inline vortex_wtdma_getlinearpos(vortex_t * vortex, int wtdma);
+static inline int vortex_wtdma_getlinearpos(vortex_t * vortex, int wtdma);
 #endif

 /* global stuff. */

@@ -1249,7 +1249,7 @@ static void vortex_adbdma_resetup(vortex_t *vortex, int adbdma) {
	}
 }

-static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma)
+static inline int vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma)
 {
	stream_t *dma = &vortex->dma_adb[adbdma];
	int temp, page, delta;

@@ -1506,7 +1506,7 @@ static int vortex_wtdma_getcursubuffer(vortex_t * vortex, int wtdma)
		   POS_SHIFT) & POS_MASK);
 }
 #endif
-static int inline vortex_wtdma_getlinearpos(vortex_t * vortex, int wtdma)
+static inline int vortex_wtdma_getlinearpos(vortex_t * vortex, int wtdma)
 {
	stream_t *dma = &vortex->dma_wt[wtdma];
	int temp;