Merge branch 'sched/core' into cpus4096

Conflicts:
	include/linux/ftrace.h
	kernel/sched.c
This commit is contained in:
Ingo Molnar 2008-12-12 13:48:57 +01:00
Parents 81444a7995 d65bd5ecb2
Commit 45ab6b0c76
166 changed files with 3020 additions and 1777 deletions

View file

@ -149,7 +149,7 @@ static void do_test_timer(unsigned long data)
int cpu;
/* Increment the counters */
on_each_cpu(test_each, NULL, 0, 1);
on_each_cpu(test_each, NULL, 1);
/* Read all the counters */
printk("Counters read from CPU %d\n", smp_processor_id());
for_each_online_cpu(cpu) {
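
This hunk reflects the tree-wide on_each_cpu() API change merged in this window: the unused "retry" argument was dropped, leaving on_each_cpu(func, info, wait). A minimal sketch of the new calling convention (the per-CPU counter is an illustrative assumption, not this driver's code):

static DEFINE_PER_CPU(unsigned long, test_count);

static void test_each(void *unused)
{
        __get_cpu_var(test_count)++;    /* runs once on every online CPU */
}

static void kick_all_cpus(void)
{
        /* third argument: 1 == wait until all CPUs have finished */
        on_each_cpu(test_each, NULL, 1);
}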

View file

@ -3759,6 +3759,15 @@ M: drzeus-sdhci@drzeus.cx
L: sdhci-devel@list.drzeus.cx
S: Maintained
SECURITY SUBSYSTEM
F: security/
P: James Morris
M: jmorris@namei.org
L: linux-kernel@vger.kernel.org
L: linux-security-module@vger.kernel.org (suggested Cc:)
T: git kernel.org:pub/scm/linux/kernel/git/jmorris/security-testing-2.6.git
S: Supported
SECURITY CONTACT
P: Security Officers
M: security@kernel.org

View file

@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 28
EXTRAVERSION = -rc7
EXTRAVERSION = -rc8
NAME = Erotic Pickled Herring
# *DOCUMENTATION*

View file

@ -237,6 +237,7 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
#if __LINUX_ARM_ARCH__ < 5
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/ffs.h>
@ -277,16 +278,19 @@ static inline int constant_fls(int x)
* the clz instruction for much better code efficiency.
*/
#define __fls(x) \
( __builtin_constant_p(x) ? constant_fls(x) : \
({ int __r; asm("clz\t%0, %1" : "=r"(__r) : "r"(x) : "cc"); 32-__r; }) )
/* Implement fls() in C so that 64-bit args are suitably truncated */
static inline int fls(int x)
{
return __fls(x);
int ret;
if (__builtin_constant_p(x))
return constant_fls(x);
asm("clz\t%0, %1" : "=r" (ret) : "r" (x) : "cc");
ret = 32 - ret;
return ret;
}
#define __fls(x) (fls(x) - 1)
#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
#define __ffs(x) (ffs(x) - 1)
#define ffz(x) __ffs( ~(x) )
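
With fls() now a real C function (so 64-bit arguments are truncated to int instead of mis-handled by the old macro), the rest of the bit-scan family is derived from it: __fls() is the zero-based variant, ffs() isolates the lowest set bit with x & -x before reusing fls(), and ffz() scans the complement. Expected values for one input, as an illustrative sanity check:

static void bitops_example(void)
{
        /* x = 0x90 is binary 1001 0000 */
        BUG_ON(fls(0x90) != 8);         /* highest set bit is bit 7, one-based 8 */
        BUG_ON(__fls(0x90) != 7);       /* zero-based position of that bit */
        BUG_ON(ffs(0x90) != 5);         /* lowest set bit is bit 4, one-based 5 */
        BUG_ON(__ffs(0x90) != 4);
        BUG_ON(ffz(~0x90UL) != 4);      /* first zero bit of the complement */
}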

View file

@ -23,7 +23,7 @@
#include <asm/types.h>
#ifdef __KERNEL__
#define STACK_TOP ((current->personality == PER_LINUX_32BIT) ? \
#define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \
TASK_SIZE : TASK_SIZE_26)
#define STACK_TOP_MAX TASK_SIZE
#endif

View file

@ -128,7 +128,7 @@ void __init omap1_map_common_io(void)
* Common low-level hardware init for omap1. This should only get called from
* board specific init.
*/
void __init omap1_init_common_hw()
void __init omap1_init_common_hw(void)
{
/* REVISIT: Refer to OMAP5910 Errata, Advisory SYS_1: "Timeout Abort
* on a Posted Write in the TIPB Bridge".

View file

@ -70,6 +70,10 @@ static unsigned long ai_dword;
static unsigned long ai_multi;
static int ai_usermode;
#define UM_WARN (1 << 0)
#define UM_FIXUP (1 << 1)
#define UM_SIGNAL (1 << 2)
#ifdef CONFIG_PROC_FS
static const char *usermode_action[] = {
"ignored",
@ -754,7 +758,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
user:
ai_user += 1;
if (ai_usermode & 1)
if (ai_usermode & UM_WARN)
printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
"Address=0x%08lx FSR 0x%03x\n", current->comm,
task_pid_nr(current), instrptr,
@ -762,10 +766,10 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
thumb_mode(regs) ? tinstr : instr,
addr, fsr);
if (ai_usermode & 2)
if (ai_usermode & UM_FIXUP)
goto fixup;
if (ai_usermode & 4)
if (ai_usermode & UM_SIGNAL)
force_sig(SIGBUS, current);
else
set_cr(cr_no_alignment);
@ -796,6 +800,22 @@ static int __init alignment_init(void)
res->write_proc = proc_alignment_write;
#endif
/*
* ARMv6 and later CPUs can perform unaligned accesses for
* most single load and store instructions up to word size.
* LDM, STM, LDRD and STRD still need to be handled.
*
* Ignoring the alignment fault is not an option on these
* CPUs since we spin re-faulting the instruction without
* making any progress.
*/
if (cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U)) {
cr_alignment &= ~CR_A;
cr_no_alignment &= ~CR_A;
set_cr(cr_alignment);
ai_usermode = UM_FIXUP;
}
hook_fault_code(1, do_alignment, SIGILL, "alignment exception");
hook_fault_code(3, do_alignment, SIGILL, "alignment exception");
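
The bare constants 1, 2 and 4 tested in do_alignment() become the named UM_WARN, UM_FIXUP and UM_SIGNAL bits, and ARMv6+ CPUs with the U bit set now default to fixup-only mode, since ignoring the fault on those cores just re-traps forever. How the policy bits combine, as a sketch (in the real driver they are set by writing to /proc/cpu/alignment):

static void alignment_policy_example(void)
{
        /* equivalent of "echo 3 > /proc/cpu/alignment": warn, then fix up */
        ai_usermode = UM_WARN | UM_FIXUP;

        /* UM_SIGNAL would deliver SIGBUS instead of emulating the access */
        if (ai_usermode & UM_SIGNAL)
                force_sig(SIGBUS, current);
}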

View file

@ -353,8 +353,8 @@ struct omapfb_device {
u32 pseudo_palette[17];
struct lcd_panel *panel; /* LCD panel */
struct lcd_ctrl *ctrl; /* LCD controller */
struct lcd_ctrl *int_ctrl; /* internal LCD ctrl */
const struct lcd_ctrl *ctrl; /* LCD controller */
const struct lcd_ctrl *int_ctrl; /* internal LCD ctrl */
struct lcd_ctrl_extif *ext_if; /* LCD ctrl external
interface */
struct device *dev;

View file

@ -255,7 +255,7 @@ void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl)
if (!_omap_sram_reprogram_clock)
omap_sram_error();
return _omap_sram_reprogram_clock(dpllctl, ckctl);
_omap_sram_reprogram_clock(dpllctl, ckctl);
}
int __init omap1_sram_init(void)
@ -282,7 +282,7 @@ void omap2_sram_ddr_init(u32 *slow_dll_ctrl, u32 fast_dll_ctrl,
if (!_omap2_sram_ddr_init)
omap_sram_error();
return _omap2_sram_ddr_init(slow_dll_ctrl, fast_dll_ctrl,
_omap2_sram_ddr_init(slow_dll_ctrl, fast_dll_ctrl,
base_cs, force_unlock);
}
@ -294,7 +294,7 @@ void omap2_sram_reprogram_sdrc(u32 perf_level, u32 dll_val, u32 mem_type)
if (!_omap2_sram_reprogram_sdrc)
omap_sram_error();
return _omap2_sram_reprogram_sdrc(perf_level, dll_val, mem_type);
_omap2_sram_reprogram_sdrc(perf_level, dll_val, mem_type);
}
static u32 (*_omap2_set_prcm)(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass);
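
These three hunks drop "return" before calls through void function pointers: ISO C forbids a return statement with an expression, even a void-typed one, in a function whose return type is void, so the old form only compiled as a GCC extension. A minimal illustration with hypothetical names:

static void (*_sram_hook)(u32 val);     /* hypothetical SRAM-resident function */

static void call_sram_hook(u32 val)
{
        _sram_hook(val);        /* plain call; "return _sram_hook(val);"
                                 * is rejected by strict ISO C compilers */
}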

View file

@ -35,7 +35,7 @@
#define PCIE_CONF_REG(r) ((((r) & 0xf00) << 16) | ((r) & 0xfc))
#define PCIE_CONF_BUS(b) (((b) & 0xff) << 16)
#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 11)
#define PCIE_CONF_FUNC(f) (((f) & 0x3) << 8)
#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 8)
#define PCIE_CONF_DATA_OFF 0x18fc
#define PCIE_MASK_OFF 0x1910
#define PCIE_CTRL_OFF 0x1a00
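
The function mask widens from 0x3 to 0x7: a PCI function number occupies three bits (functions 0-7 of a multi-function device), so the old mask silently aliased functions 4-7 onto 0-3. Composing a config-space address with the fixed macros, using illustrative values:

static u32 pcie_conf_example(void)
{
        /* bus 1, device 0x1f, function 7, register 0xfc */
        return PCIE_CONF_BUS(1) | PCIE_CONF_DEV(0x1f) |
               PCIE_CONF_FUNC(7) | PCIE_CONF_REG(0xfc);
}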

View file

@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.27-rc1
# Mon Aug 4 15:38:01 2008
# Linux kernel version: 2.6.28-rc7
# Mon Dec 8 08:12:07 2008
#
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
@ -26,6 +26,7 @@ CONFIG_LOG_BUF_SHIFT=20
CONFIG_CGROUPS=y
# CONFIG_CGROUP_DEBUG is not set
# CONFIG_CGROUP_NS is not set
# CONFIG_CGROUP_FREEZER is not set
# CONFIG_CGROUP_DEVICE is not set
CONFIG_CPUSETS=y
# CONFIG_GROUP_SCHED is not set
@ -46,7 +47,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
# CONFIG_EMBEDDED is not set
CONFIG_SYSCTL_SYSCALL=y
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set
@ -63,7 +63,9 @@ CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
CONFIG_SLUB_DEBUG=y
# CONFIG_SLAB is not set
CONFIG_SLUB=y
@ -72,15 +74,11 @@ CONFIG_SLUB=y
# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
# CONFIG_HAVE_IOREMAP_PROT is not set
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
# CONFIG_HAVE_ARCH_TRACEHOOK is not set
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_USE_GENERIC_SMP_HELPERS=y
# CONFIG_HAVE_CLK is not set
CONFIG_PROC_PAGE_MONITOR=y
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
@ -113,6 +111,7 @@ CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="anticipatory"
CONFIG_CLASSIC_RCU=y
# CONFIG_FREEZER is not set
#
# Processor type and features
@ -125,8 +124,6 @@ CONFIG_MMU=y
CONFIG_SWIOTLB=y
CONFIG_IOMMU_HELPER=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_HUGETLB_PAGE_SIZE_VARIABLE=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
@ -139,13 +136,16 @@ CONFIG_GENERIC_IOMAP=y
CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
CONFIG_IA64_UNCACHED_ALLOCATOR=y
CONFIG_AUDIT_ARCH=y
# CONFIG_PARAVIRT_GUEST is not set
CONFIG_IA64_GENERIC=y
# CONFIG_IA64_DIG is not set
# CONFIG_IA64_DIG_VTD is not set
# CONFIG_IA64_HP_ZX1 is not set
# CONFIG_IA64_HP_ZX1_SWIOTLB is not set
# CONFIG_IA64_SGI_SN2 is not set
# CONFIG_IA64_SGI_UV is not set
# CONFIG_IA64_HP_SIM is not set
# CONFIG_IA64_XEN_GUEST is not set
# CONFIG_ITANIUM is not set
CONFIG_MCKINLEY=y
# CONFIG_IA64_PAGE_SIZE_4KB is not set
@ -182,16 +182,17 @@ CONFIG_DISCONTIGMEM_MANUAL=y
CONFIG_DISCONTIGMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_NEED_MULTIPLE_NODES=y
# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
CONFIG_RESOURCES_64BIT=y
CONFIG_PHYS_ADDR_T_64BIT=y
CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
CONFIG_NR_QUICK=1
CONFIG_VIRT_TO_BUS=y
CONFIG_UNEVICTABLE_LRU=y
CONFIG_MMU_NOTIFIER=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
@ -231,12 +232,12 @@ CONFIG_EFI_VARS=y
CONFIG_EFI_PCDP=y
CONFIG_DMIID=y
CONFIG_BINFMT_ELF=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_HAVE_AOUT is not set
CONFIG_BINFMT_MISC=m
# CONFIG_DMAR is not set
#
# Power management and ACPI
# Power management and ACPI options
#
CONFIG_PM=y
# CONFIG_PM_DEBUG is not set
@ -248,7 +249,6 @@ CONFIG_ACPI_PROC_EVENT=y
CONFIG_ACPI_BUTTON=m
CONFIG_ACPI_FAN=m
CONFIG_ACPI_DOCK=y
# CONFIG_ACPI_BAY is not set
CONFIG_ACPI_PROCESSOR=m
CONFIG_ACPI_HOTPLUG_CPU=y
CONFIG_ACPI_THERMAL=m
@ -256,9 +256,7 @@ CONFIG_ACPI_NUMA=y
# CONFIG_ACPI_CUSTOM_DSDT is not set
CONFIG_ACPI_BLACKLIST_YEAR=0
# CONFIG_ACPI_DEBUG is not set
CONFIG_ACPI_EC=y
# CONFIG_ACPI_PCI_SLOT is not set
CONFIG_ACPI_POWER=y
CONFIG_ACPI_SYSTEM=y
CONFIG_ACPI_CONTAINER=m
@ -275,7 +273,7 @@ CONFIG_PCI_DOMAINS=y
CONFIG_PCI_SYSCALL=y
# CONFIG_PCIEPORTBUS is not set
CONFIG_ARCH_SUPPORTS_MSI=y
# CONFIG_PCI_MSI is not set
CONFIG_PCI_MSI=y
CONFIG_PCI_LEGACY=y
# CONFIG_PCI_DEBUG is not set
CONFIG_HOTPLUG_PCI=m
@ -286,6 +284,7 @@ CONFIG_HOTPLUG_PCI_ACPI=m
# CONFIG_HOTPLUG_PCI_SHPC is not set
# CONFIG_HOTPLUG_PCI_SGI is not set
# CONFIG_PCCARD is not set
CONFIG_DMAR=y
CONFIG_NET=y
#
@ -333,6 +332,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
# CONFIG_NET_DSA is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
@ -353,11 +353,10 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
#
# Wireless
#
# CONFIG_PHONET is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
CONFIG_WIRELESS_OLD_REGULATORY=y
# CONFIG_WIRELESS_EXT is not set
# CONFIG_MAC80211 is not set
# CONFIG_IEEE80211 is not set
@ -385,7 +384,7 @@ CONFIG_PROC_EVENTS=y
# CONFIG_MTD is not set
# CONFIG_PARPORT is not set
CONFIG_PNP=y
# CONFIG_PNP_DEBUG is not set
# CONFIG_PNP_DEBUG_MESSAGES is not set
#
# Protocols
@ -419,10 +418,9 @@ CONFIG_SGI_XP=m
# CONFIG_HP_ILO is not set
CONFIG_SGI_GRU=m
# CONFIG_SGI_GRU_DEBUG is not set
# CONFIG_C2PORT is not set
CONFIG_HAVE_IDE=y
CONFIG_IDE=y
CONFIG_IDE_MAX_HWIFS=4
CONFIG_BLK_DEV_IDE=y
#
# Please see Documentation/ide/ide.txt for help/info on IDE drives
@ -430,12 +428,12 @@ CONFIG_BLK_DEV_IDE=y
CONFIG_IDE_TIMINGS=y
CONFIG_IDE_ATAPI=y
# CONFIG_BLK_DEV_IDE_SATA is not set
CONFIG_BLK_DEV_IDEDISK=y
# CONFIG_IDEDISK_MULTI_MODE is not set
CONFIG_IDE_GD=y
CONFIG_IDE_GD_ATA=y
# CONFIG_IDE_GD_ATAPI is not set
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y
# CONFIG_BLK_DEV_IDETAPE is not set
CONFIG_BLK_DEV_IDEFLOPPY=y
CONFIG_BLK_DEV_IDESCSI=m
# CONFIG_BLK_DEV_IDEACPI is not set
# CONFIG_IDE_TASK_IOCTL is not set
@ -705,6 +703,9 @@ CONFIG_TULIP=m
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
@ -725,11 +726,11 @@ CONFIG_E100=m
# CONFIG_TLAN is not set
# CONFIG_VIA_RHINE is not set
# CONFIG_SC92031 is not set
# CONFIG_ATL2 is not set
CONFIG_NETDEV_1000=y
# CONFIG_ACENIC is not set
# CONFIG_DL2K is not set
CONFIG_E1000=y
# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
# CONFIG_E1000E is not set
# CONFIG_IP1000 is not set
CONFIG_IGB=y
@ -747,18 +748,22 @@ CONFIG_TIGON3=y
# CONFIG_QLA3XXX is not set
# CONFIG_ATL1 is not set
# CONFIG_ATL1E is not set
# CONFIG_JME is not set
CONFIG_NETDEV_10000=y
# CONFIG_CHELSIO_T1 is not set
# CONFIG_CHELSIO_T3 is not set
# CONFIG_ENIC is not set
# CONFIG_IXGBE is not set
# CONFIG_IXGB is not set
# CONFIG_S2IO is not set
# CONFIG_MYRI10GE is not set
# CONFIG_NETXEN_NIC is not set
# CONFIG_NIU is not set
# CONFIG_MLX4_EN is not set
# CONFIG_MLX4_CORE is not set
# CONFIG_TEHUTI is not set
# CONFIG_BNX2X is not set
# CONFIG_QLGE is not set
# CONFIG_SFC is not set
# CONFIG_TR is not set
@ -826,9 +831,11 @@ CONFIG_MOUSE_PS2_LOGIPS2PP=y
CONFIG_MOUSE_PS2_SYNAPTICS=y
CONFIG_MOUSE_PS2_LIFEBOOK=y
CONFIG_MOUSE_PS2_TRACKPOINT=y
# CONFIG_MOUSE_PS2_ELANTECH is not set
# CONFIG_MOUSE_PS2_TOUCHKIT is not set
# CONFIG_MOUSE_SERIAL is not set
# CONFIG_MOUSE_APPLETOUCH is not set
# CONFIG_MOUSE_BCM5974 is not set
# CONFIG_MOUSE_VSXXXAA is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
@ -942,15 +949,16 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_VT8231 is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
# CONFIG_SENSORS_LIS3LV02D is not set
# CONFIG_HWMON_DEBUG_CHIP is not set
CONFIG_THERMAL=m
# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
#
# Sonics Silicon Backplane
#
CONFIG_SSB_POSSIBLE=y
# CONFIG_SSB is not set
#
@ -959,6 +967,8 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_REGULATOR is not set
#
# Multimedia devices
@ -1009,6 +1019,7 @@ CONFIG_VGA_CONSOLE=y
# CONFIG_VGACON_SOFT_SCROLLBACK is not set
CONFIG_DUMMY_CONSOLE=y
CONFIG_SOUND=m
CONFIG_SOUND_OSS_CORE=y
CONFIG_SND=m
CONFIG_SND_TIMER=m
CONFIG_SND_PCM=m
@ -1113,8 +1124,7 @@ CONFIG_HID=y
# USB Input Devices
#
CONFIG_USB_HID=m
# CONFIG_USB_HIDINPUT_POWERBOOK is not set
# CONFIG_HID_FF is not set
# CONFIG_HID_PID is not set
# CONFIG_USB_HIDDEV is not set
#
@ -1122,6 +1132,34 @@ CONFIG_USB_HID=m
#
# CONFIG_USB_KBD is not set
# CONFIG_USB_MOUSE is not set
#
# Special HID drivers
#
CONFIG_HID_COMPAT=y
CONFIG_HID_A4TECH=m
CONFIG_HID_APPLE=m
CONFIG_HID_BELKIN=m
CONFIG_HID_BRIGHT=m
CONFIG_HID_CHERRY=m
CONFIG_HID_CHICONY=m
CONFIG_HID_CYPRESS=m
CONFIG_HID_DELL=m
CONFIG_HID_EZKEY=m
CONFIG_HID_GYRATION=m
CONFIG_HID_LOGITECH=m
# CONFIG_LOGITECH_FF is not set
# CONFIG_LOGIRUMBLEPAD2_FF is not set
CONFIG_HID_MICROSOFT=m
CONFIG_HID_MONTEREY=m
CONFIG_HID_PANTHERLORD=m
# CONFIG_PANTHERLORD_FF is not set
CONFIG_HID_PETALYNX=m
CONFIG_HID_SAMSUNG=m
CONFIG_HID_SONY=m
CONFIG_HID_SUNPLUS=m
# CONFIG_THRUSTMASTER_FF is not set
# CONFIG_ZEROPLUS_FF is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
@ -1138,6 +1176,9 @@ CONFIG_USB_DEVICE_CLASS=y
# CONFIG_USB_DYNAMIC_MINORS is not set
# CONFIG_USB_SUSPEND is not set
# CONFIG_USB_OTG is not set
CONFIG_USB_MON=y
# CONFIG_USB_WUSB is not set
# CONFIG_USB_WUSB_CBAF is not set
#
# USB Host Controller Drivers
@ -1155,6 +1196,12 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
CONFIG_USB_UHCI_HCD=m
# CONFIG_USB_SL811_HCD is not set
# CONFIG_USB_R8A66597_HCD is not set
# CONFIG_USB_WHCI_HCD is not set
# CONFIG_USB_HWA_HCD is not set
#
# Enable Host or Gadget support to see Inventra options
#
#
# USB Device Class drivers
@ -1162,13 +1209,14 @@ CONFIG_USB_UHCI_HCD=m
# CONFIG_USB_ACM is not set
# CONFIG_USB_PRINTER is not set
# CONFIG_USB_WDM is not set
# CONFIG_USB_TMC is not set
#
# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
#
#
# may also be needed; see USB_STORAGE Help for more information
# see USB_STORAGE Help for more information
#
CONFIG_USB_STORAGE=m
# CONFIG_USB_STORAGE_DEBUG is not set
@ -1191,7 +1239,6 @@ CONFIG_USB_STORAGE=m
#
# CONFIG_USB_MDC800 is not set
# CONFIG_USB_MICROTEK is not set
CONFIG_USB_MON=y
#
# USB port drivers
@ -1204,7 +1251,7 @@ CONFIG_USB_MON=y
# CONFIG_USB_EMI62 is not set
# CONFIG_USB_EMI26 is not set
# CONFIG_USB_ADUTUX is not set
# CONFIG_USB_AUERSWALD is not set
# CONFIG_USB_SEVSEG is not set
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
@ -1222,7 +1269,9 @@ CONFIG_USB_MON=y
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
# CONFIG_UWB is not set
# CONFIG_MMC is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
@ -1246,6 +1295,15 @@ CONFIG_INFINIBAND_IPOIB_DEBUG=y
# CONFIG_RTC_CLASS is not set
# CONFIG_DMADEVICES is not set
# CONFIG_UIO is not set
# CONFIG_STAGING is not set
CONFIG_STAGING_EXCLUDE_BUILD=y
#
# HP Simulator drivers
#
# CONFIG_HP_SIMETH is not set
# CONFIG_HP_SIMSERIAL is not set
# CONFIG_HP_SIMSCSI is not set
CONFIG_MSPEC=m
#
@ -1260,7 +1318,7 @@ CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_XATTR=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
# CONFIG_EXT4DEV_FS is not set
# CONFIG_EXT4_FS is not set
CONFIG_JBD=y
CONFIG_FS_MBCACHE=y
CONFIG_REISERFS_FS=y
@ -1271,6 +1329,7 @@ CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
CONFIG_FILE_LOCKING=y
CONFIG_XFS_FS=y
# CONFIG_XFS_QUOTA is not set
# CONFIG_XFS_POSIX_ACL is not set
@ -1282,8 +1341,8 @@ CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
# CONFIG_QUOTA is not set
CONFIG_AUTOFS_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
# CONFIG_FUSE_FS is not set
#
@ -1314,6 +1373,7 @@ CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
CONFIG_PROC_VMCORE=y
CONFIG_PROC_SYSCTL=y
CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
@ -1356,6 +1416,7 @@ CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=m
CONFIG_SUNRPC_GSS=m
CONFIG_SUNRPC_XPRT_RDMA=m
# CONFIG_SUNRPC_REGISTER_V4 is not set
CONFIG_RPCSEC_GSS_KRB5=m
# CONFIG_RPCSEC_GSS_SPKM3 is not set
CONFIG_SMB_FS=m
@ -1433,38 +1494,6 @@ CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
# CONFIG_DLM is not set
CONFIG_HAVE_KVM=y
CONFIG_VIRTUALIZATION=y
# CONFIG_KVM is not set
#
# Library routines
#
CONFIG_BITREVERSE=y
# CONFIG_GENERIC_FIND_FIRST_BIT is not set
# CONFIG_CRC_CCITT is not set
# CONFIG_CRC16 is not set
CONFIG_CRC_T10DIF=y
CONFIG_CRC_ITU_T=m
CONFIG_CRC32=y
# CONFIG_CRC7 is not set
# CONFIG_LIBCRC32C is not set
CONFIG_GENERIC_ALLOCATOR=y
CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_GENERIC_PENDING_IRQ=y
CONFIG_IRQ_PER_CPU=y
#
# HP Simulator drivers
#
# CONFIG_HP_SIMETH is not set
# CONFIG_HP_SIMSERIAL is not set
# CONFIG_HP_SIMSCSI is not set
#
# Kernel hacking
@ -1503,8 +1532,19 @@ CONFIG_DEBUG_MEMORY_INIT=y
# CONFIG_DEBUG_SG is not set
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_FAULT_INJECTION is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
#
# Tracers
#
# CONFIG_SCHED_TRACER is not set
# CONFIG_CONTEXT_SWITCH_TRACER is not set
# CONFIG_BOOT_TRACER is not set
# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_IA64_GRANULE_16MB=y
# CONFIG_IA64_GRANULE_64MB is not set
@ -1519,14 +1559,19 @@ CONFIG_SYSVIPC_COMPAT=y
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_FILE_CAPABILITIES is not set
CONFIG_CRYPTO=y
#
# Crypto core or helper
#
# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_AEAD=m
CONFIG_CRYPTO_BLKCIPHER=m
CONFIG_CRYPTO_HASH=m
CONFIG_CRYPTO_RNG=m
CONFIG_CRYPTO_MANAGER=m
# CONFIG_CRYPTO_GF128MUL is not set
# CONFIG_CRYPTO_NULL is not set
@ -1599,5 +1644,36 @@ CONFIG_CRYPTO_DES=m
#
# CONFIG_CRYPTO_DEFLATE is not set
# CONFIG_CRYPTO_LZO is not set
#
# Random Number Generation
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
# CONFIG_CRYPTO_DEV_HIFN_795X is not set
CONFIG_HAVE_KVM=y
CONFIG_VIRTUALIZATION=y
# CONFIG_KVM is not set
# CONFIG_VIRTIO_PCI is not set
# CONFIG_VIRTIO_BALLOON is not set
#
# Library routines
#
CONFIG_BITREVERSE=y
# CONFIG_CRC_CCITT is not set
# CONFIG_CRC16 is not set
CONFIG_CRC_T10DIF=y
CONFIG_CRC_ITU_T=m
CONFIG_CRC32=y
# CONFIG_CRC7 is not set
# CONFIG_LIBCRC32C is not set
CONFIG_GENERIC_ALLOCATOR=y
CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_GENERIC_PENDING_IRQ=y
CONFIG_IRQ_PER_CPU=y

View file

@ -83,7 +83,6 @@ extern unsigned long ia64_native_getreg_func(int regnum);
#define paravirt_getreg(reg) \
({ \
unsigned long res; \
BUILD_BUG_ON(!__builtin_constant_p(reg)); \
if ((reg) == _IA64_REG_IP) \
res = ia64_native_getreg(_IA64_REG_IP); \
else \

View file

@ -53,10 +53,12 @@ int __ref arch_register_cpu(int num)
}
EXPORT_SYMBOL(arch_register_cpu);
void arch_unregister_cpu(int num)
void __ref arch_unregister_cpu(int num)
{
unregister_cpu(&sysfs_cpus[num].cpu);
#ifdef CONFIG_ACPI
unmap_cpu_from_node(num, cpu_to_node(num));
#endif
}
EXPORT_SYMBOL(arch_unregister_cpu);
#else

View file

@ -5,7 +5,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/irq.h>
@ -375,6 +375,7 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
int cpu = nasid_slice_to_cpuid(nasid, slice);
#ifdef CONFIG_SMP
int cpuphys;
irq_desc_t *desc;
#endif
pci_dev_get(pci_dev);
@ -391,6 +392,12 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
#ifdef CONFIG_SMP
cpuphys = cpu_physical_id(cpu);
set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0);
desc = irq_to_desc(sn_irq_info->irq_irq);
/*
* Affinity was set by the PROM, prevent it from
* being reset by the request_irq() path.
*/
desc->status |= IRQ_AFFINITY_SET;
#endif
}

View file

@ -200,7 +200,7 @@ static int __cpuinitdata shub_1_1_found;
* Set flag for enabling shub specific wars
*/
static inline int __init is_shub_1_1(int nasid)
static inline int __cpuinit is_shub_1_1(int nasid)
{
unsigned long id;
int rev;
@ -212,7 +212,7 @@ static inline int __init is_shub_1_1(int nasid)
return rev <= 2;
}
static void __init sn_check_for_wars(void)
static void __cpuinit sn_check_for_wars(void)
{
int cnode;
@ -512,7 +512,6 @@ static void __init sn_init_pdas(char **cmdline_p)
for_each_online_node(cnode) {
nodepdaindr[cnode] =
alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t));
memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
memset(nodepdaindr[cnode]->phys_cpuid, -1,
sizeof(nodepdaindr[cnode]->phys_cpuid));
spin_lock_init(&nodepdaindr[cnode]->ptc_lock);
@ -521,11 +520,9 @@ static void __init sn_init_pdas(char **cmdline_p)
/*
* Allocate & initialize nodepda for TIOs. For now, put them on node 0.
*/
for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++) {
for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++)
nodepdaindr[cnode] =
alloc_bootmem_node(NODE_DATA(0), sizeof(nodepda_t));
memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
}
/*
* Now copy the array of nodepda pointers to each nodepda.

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View file

@ -79,6 +79,11 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
/* We don't do dynamic PCI IRQ allocation */
}
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
/*
* Dynamic DMA mapping stuff.
* MIPS has everything mapped statically.

View file

@ -262,14 +262,11 @@ bad_alignment:
LEAF(sys_syscall)
subu t0, a0, __NR_O32_Linux # check syscall number
sltiu v0, t0, __NR_O32_Linux_syscalls + 1
beqz t0, einval # do not recurse
sll t1, t0, 3
beqz v0, einval
lw t2, sys_call_table(t1) # syscall routine
li v1, 4000 - __NR_O32_Linux # index of sys_syscall
beq t0, v1, einval # do not recurse
/* Some syscalls like execve get their arguments from struct pt_regs
and claim zero arguments in the syscall table. Thus we have to
assume the worst case and shuffle around all potential arguments.
@ -627,7 +624,7 @@ einval: li v0, -ENOSYS
sys sys_pselect6 6
sys sys_ppoll 5
sys sys_unshare 1
sys sys_splice 4
sys sys_splice 6
sys sys_sync_file_range 7 /* 4305 */
sys sys_tee 4
sys sys_vmsplice 4
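
The rewritten guard folds the recursion check into the range check: after "subu t0, a0, __NR_O32_Linux", a zero t0 means syscall() invoking itself, which is rejected before the table lookup; the hunk also corrects sys_splice's argument count from 4 to 6 so all six arguments get shuffled in. The logic in rough C (a model of the assembly, not actual kernel code):

static long indirect_syscall_model(long nr)
{
        long idx = nr - __NR_O32_Linux;                 /* base is 4000 */

        if (idx == 0)                                   /* syscall(4000, ...) */
                return -ENOSYS;                         /* do not recurse */
        if ((unsigned long)idx > __NR_O32_Linux_syscalls)
                return -ENOSYS;                         /* out of range */
        return 0;                                       /* would dispatch here */
}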

View file

@ -390,7 +390,7 @@ EXPORT(sysn32_call_table)
PTR sys_splice
PTR sys_sync_file_range
PTR sys_tee
PTR sys_vmsplice /* 6270 */
PTR compat_sys_vmsplice /* 6270 */
PTR sys_move_pages
PTR compat_sys_set_robust_list
PTR compat_sys_get_robust_list

View file

@ -174,14 +174,12 @@ not_o32_scall:
END(handle_sys)
LEAF(sys32_syscall)
sltu v0, a0, __NR_O32_Linux + __NR_O32_Linux_syscalls + 1
subu t0, a0, __NR_O32_Linux # check syscall number
sltiu v0, t0, __NR_O32_Linux_syscalls + 1
beqz t0, einval # do not recurse
dsll t1, t0, 3
beqz v0, einval
dsll v0, a0, 3
ld t2, (sys_call_table - (__NR_O32_Linux * 8))(v0)
li v1, 4000 # indirect syscall number
beq a0, v1, einval # do not recurse
ld t2, sys_call_table(t1) # syscall routine
move a0, a1 # shift argument registers
move a1, a2
@ -198,7 +196,7 @@ LEAF(sys32_syscall)
jr t2
/* Unreached */
einval: li v0, -EINVAL
einval: li v0, -ENOSYS
jr ra
END(sys32_syscall)
@ -512,7 +510,7 @@ sys_call_table:
PTR sys_splice
PTR sys32_sync_file_range /* 4305 */
PTR sys_tee
PTR sys_vmsplice
PTR compat_sys_vmsplice
PTR compat_sys_move_pages
PTR compat_sys_set_robust_list
PTR compat_sys_get_robust_list /* 4310 */

View file

@ -1134,7 +1134,7 @@ static int vpe_release(struct inode *inode, struct file *filp)
/* It's good to be able to run the SP and if it chokes have a look at
the /dev/rt?. But if we reset the pointer to the shared struct we
loose what has happened. So perhaps if garbage is sent to the vpe
lose what has happened. So perhaps if garbage is sent to the vpe
device, use it as a trigger for the reset. Hopefully a nice
executable will be along shortly. */
if (ret < 0)

View file

@ -7,9 +7,8 @@
#
obj-y := malta-amon.o malta-cmdline.o \
malta-display.o malta-init.o malta-int.o \
malta-memory.o malta-mtd.o \
malta-platform.o malta-reset.o \
malta-setup.o malta-time.o
malta-memory.o malta-platform.o \
malta-reset.o malta-setup.o malta-time.o
obj-$(CONFIG_EARLY_PRINTK) += malta-console.o
obj-$(CONFIG_PCI) += malta-pci.o

View file

@ -1,63 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006 MIPS Technologies, Inc.
* written by Ralf Baechle <ralf@linux-mips.org>
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <mtd/mtd-abi.h>
static struct mtd_partition malta_mtd_partitions[] = {
{
.name = "YAMON",
.offset = 0x0,
.size = 0x100000,
.mask_flags = MTD_WRITEABLE
}, {
.name = "User FS",
.offset = 0x100000,
.size = 0x2e0000
}, {
.name = "Board Config",
.offset = 0x3e0000,
.size = 0x020000,
.mask_flags = MTD_WRITEABLE
}
};
static struct physmap_flash_data malta_flash_data = {
.width = 4,
.nr_parts = ARRAY_SIZE(malta_mtd_partitions),
.parts = malta_mtd_partitions
};
static struct resource malta_flash_resource = {
.start = 0x1e000000,
.end = 0x1e3fffff,
.flags = IORESOURCE_MEM
};
static struct platform_device malta_flash = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &malta_flash_data,
},
.num_resources = 1,
.resource = &malta_flash_resource,
};
static int __init malta_mtd_init(void)
{
platform_device_register(&malta_flash);
return 0;
}
module_init(malta_mtd_init)

View file

@ -3,10 +3,14 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 MIPS Technologies, Inc.
* Copyright (C) 2006, 07 MIPS Technologies, Inc.
* written by Ralf Baechle (ralf@linux-mips.org)
* written by Ralf Baechle <ralf@linux-mips.org>
*
* Probe driver for the Malta's UART ports:
* Copyright (C) 2008 Wind River Systems, Inc.
* updated by Tiejun Chen <tiejun.chen@windriver.com>
*
* 1. Probe driver for the Malta's UART ports:
*
* o 2 ports in the SMC SuperIO
* o 1 port in the CBUS UART, a discrete 16550 which normally is only used
@ -14,10 +18,17 @@
*
* We don't use 8250_platform.c on Malta as it would result in the CBUS
* UART becoming ttyS0.
*
* 2. Register RTC-CMOS platform device on Malta.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/serial_8250.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>
#include <mtd/mtd-abi.h>
#define SMC_PORT(base, int) \
{ \
@ -45,21 +56,93 @@ static struct plat_serial8250_port uart8250_data[] = {
{ },
};
static struct platform_device uart8250_device = {
static struct platform_device malta_uart8250_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM2,
.id = PLAT8250_DEV_PLATFORM,
.dev = {
.platform_data = uart8250_data,
},
};
static int __init uart8250_init(void)
struct resource malta_rtc_resources[] = {
{
return platform_device_register(&uart8250_device);
.start = RTC_PORT(0),
.end = RTC_PORT(7),
.flags = IORESOURCE_IO,
}, {
.start = RTC_IRQ,
.end = RTC_IRQ,
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device malta_rtc_device = {
.name = "rtc_cmos",
.id = -1,
.resource = malta_rtc_resources,
.num_resources = ARRAY_SIZE(malta_rtc_resources),
};
static struct mtd_partition malta_mtd_partitions[] = {
{
.name = "YAMON",
.offset = 0x0,
.size = 0x100000,
.mask_flags = MTD_WRITEABLE
}, {
.name = "User FS",
.offset = 0x100000,
.size = 0x2e0000
}, {
.name = "Board Config",
.offset = 0x3e0000,
.size = 0x020000,
.mask_flags = MTD_WRITEABLE
}
};
static struct physmap_flash_data malta_flash_data = {
.width = 4,
.nr_parts = ARRAY_SIZE(malta_mtd_partitions),
.parts = malta_mtd_partitions
};
static struct resource malta_flash_resource = {
.start = 0x1e000000,
.end = 0x1e3fffff,
.flags = IORESOURCE_MEM
};
static struct platform_device malta_flash_device = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &malta_flash_data,
},
.num_resources = 1,
.resource = &malta_flash_resource,
};
static struct platform_device *malta_devices[] __initdata = {
&malta_uart8250_device,
&malta_rtc_device,
&malta_flash_device,
};
static int __init malta_add_devices(void)
{
int err;
err = platform_add_devices(malta_devices, ARRAY_SIZE(malta_devices));
if (err)
return err;
/*
* Set RTC to BCD mode to support current alarm code.
*/
CMOS_WRITE(CMOS_READ(RTC_CONTROL) & ~RTC_DM_BINARY, RTC_CONTROL);
return 0;
}
module_init(uart8250_init);
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("8250 UART probe driver for the Malta CBUS UART");
device_initcall(malta_add_devices);

View file

@ -354,6 +354,30 @@ EXPORT_SYMBOL(PCIBIOS_MIN_IO);
EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
#endif
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
unsigned long prot;
/*
* I/O space can be accessed via normal processor loads and stores on
* this platform but for now we elect not to do this and portable
* drivers should not do this anyway.
*/
if (mmap_state == pci_mmap_io)
return -EINVAL;
/*
* Ignore write-combine; for now only return uncached mappings.
*/
prot = pgprot_val(vma->vm_page_prot);
prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
vma->vm_page_prot = __pgprot(prot);
return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
char * (*pcibios_plat_setup)(char *str) __devinitdata;
char *__devinit pcibios_setup(char *str)
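
This supplies the HAVE_PCI_MMAP backend for MIPS: I/O-space mappings are refused, and memory mappings are forced uncached regardless of the write-combine hint. From userspace the hook is reached by mmap()ing a device node under /proc/bus/pci; a sketch (path and size are illustrative, and real callers typically issue ioctl(fd, PCIIOC_MMAP_IS_MEM) first):

#include <fcntl.h>
#include <sys/mman.h>

static void *map_bar(void)
{
        int fd = open("/proc/bus/pci/00/0a.0", O_RDWR);

        if (fd < 0)
                return NULL;
        /* the file offset selects where in the BAR the mapping starts */
        return mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}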

View file

@ -180,6 +180,7 @@ ENTRY(resume_userspace)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
__cli
mov (TI_preempt_count,a2),d0 # non-zero preempt_count ?
cmp 0,d0
bne restore_all
@ -190,7 +191,7 @@ need_resched:
mov (REG_EPSW,fp),d0
and EPSW_IM,d0
cmp EPSW_IM_7,d0 # interrupts off (exception path) ?
beq restore_all
bne restore_all
call preempt_schedule_irq[],0
jmp need_resched
#endif

View file

@ -99,6 +99,7 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
try_again:
/* pull chars out of the buffer */
ix = gdbstub_rx_outp;
barrier();
if (ix == gdbstub_rx_inp) {
if (nonblock)
return -EAGAIN;
@ -110,6 +111,7 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
ch = gdbstub_rx_buffer[ix++];
st = gdbstub_rx_buffer[ix++];
barrier();
gdbstub_rx_outp = ix & 0x00000fff;
if (st & UART_LSR_BI) {
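
The added barrier() calls repair a lock-free single-producer/single-consumer ring buffer: without them the compiler may cache or reorder the index and data accesses, so the reader could observe a published index before the bytes behind it. The pattern in isolation, with simplified names:

#define RX_SIZE 0x1000                          /* power of two */
static unsigned char rx_buffer[RX_SIZE];
static volatile unsigned int rx_inp, rx_outp;   /* producer / consumer */

static int rx_byte(unsigned char *ch)
{
        unsigned int ix = rx_outp;

        barrier();                      /* re-read indices around the test */
        if (ix == rx_inp)
                return -EAGAIN;         /* buffer empty */
        *ch = rx_buffer[ix++];
        barrier();                      /* consume data before publishing */
        rx_outp = ix & (RX_SIZE - 1);
        return 0;
}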

View file

@ -566,6 +566,11 @@ static void mn10300_serial_transmit_interrupt(struct mn10300_serial_port *port)
{
_enter("%s", port->name);
if (!port->uart.info || !port->uart.info->port.tty) {
mn10300_serial_dis_tx_intr(port);
return;
}
if (uart_tx_stopped(&port->uart) ||
uart_circ_empty(&port->uart.info->xmit))
mn10300_serial_dis_tx_intr(port);

View file

@ -161,7 +161,7 @@ void __init setup_arch(char **cmdline_p)
reserve the page it is occupying. */
if (CONFIG_INTERRUPT_VECTOR_BASE >= CONFIG_KERNEL_RAM_BASE_ADDRESS &&
CONFIG_INTERRUPT_VECTOR_BASE < memory_end)
reserve_bootmem(CONFIG_INTERRUPT_VECTOR_BASE, 1,
reserve_bootmem(CONFIG_INTERRUPT_VECTOR_BASE, PAGE_SIZE,
BOOTMEM_DEFAULT);
reserve_bootmem(PAGE_ALIGN(PFN_PHYS(free_pfn)), bootmap_size,

View file

@ -11,6 +11,7 @@
#define __VMLINUX_LDS__
#include <asm-generic/vmlinux.lds.h>
#include <asm/thread_info.h>
#include <asm/page.h>
OUTPUT_FORMAT("elf32-am33lin", "elf32-am33lin", "elf32-am33lin")
OUTPUT_ARCH(mn10300)
@ -55,13 +56,13 @@ SECTIONS
CONSTRUCTORS
}
. = ALIGN(4096);
. = ALIGN(PAGE_SIZE);
__nosave_begin = .;
.data_nosave : { *(.data.nosave) }
. = ALIGN(4096);
. = ALIGN(PAGE_SIZE);
__nosave_end = .;
. = ALIGN(4096);
. = ALIGN(PAGE_SIZE);
.data.page_aligned : { *(.data.idt) }
. = ALIGN(32);
@ -78,7 +79,7 @@ SECTIONS
.data.init_task : { *(.data.init_task) }
/* might get freed after init */
. = ALIGN(4096);
. = ALIGN(PAGE_SIZE);
.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
__smp_locks = .;
*(.smp_locks)
@ -86,7 +87,7 @@ SECTIONS
}
/* will be freed after init */
. = ALIGN(4096); /* Init code and data */
. = ALIGN(PAGE_SIZE); /* Init code and data */
__init_begin = .;
.init.text : {
_sinittext = .;
@ -120,17 +121,14 @@ SECTIONS
.exit.data : { *(.exit.data) }
#ifdef CONFIG_BLK_DEV_INITRD
. = ALIGN(4096);
. = ALIGN(PAGE_SIZE);
__initramfs_start = .;
.init.ramfs : { *(.init.ramfs) }
__initramfs_end = .;
#endif
. = ALIGN(32);
__per_cpu_start = .;
.data.percpu : { *(.data.percpu) }
__per_cpu_end = .;
. = ALIGN(4096);
PERCPU(32)
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */
@ -145,7 +143,7 @@ SECTIONS
_end = . ;
/* This is where the kernel creates the early boot page tables */
. = ALIGN(4096);
. = ALIGN(PAGE_SIZE);
pg0 = .;
/* Sections to be discarded */

View file

@ -723,7 +723,7 @@ CONFIG_CICADA_PHY=y
# CONFIG_BROADCOM_PHY is not set
# CONFIG_ICPLUS_PHY is not set
# CONFIG_REALTEK_PHY is not set
# CONFIG_FIXED_PHY is not set
CONFIG_FIXED_PHY=y
# CONFIG_MDIO_BITBANG is not set
# CONFIG_NET_ETHERNET is not set
CONFIG_NETDEV_1000=y

View file

@ -682,7 +682,7 @@ CONFIG_VITESSE_PHY=y
# CONFIG_BROADCOM_PHY is not set
CONFIG_ICPLUS_PHY=y
# CONFIG_REALTEK_PHY is not set
# CONFIG_FIXED_PHY is not set
CONFIG_FIXED_PHY=y
# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
CONFIG_MII=y

View file

@ -40,6 +40,7 @@ _GLOBAL(__setup_cpu_460gt)
mtlr r4
blr
_GLOBAL(__setup_cpu_440x5)
_GLOBAL(__setup_cpu_440gx)
_GLOBAL(__setup_cpu_440spe)
b __fixup_440A_mcheck

View file

@ -39,6 +39,7 @@ extern void __setup_cpu_440epx(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_440gx(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_440grx(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_440spe(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_440x5(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_460ex(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_460gt(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec);
@ -1500,6 +1501,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440x5,
.machine_check = machine_check_440A,
.platform = "ppc440",
},
{ /* 460EX */

View file

@ -212,7 +212,7 @@ static void update_cpu_core_map(void)
cpu_core_map[cpu] = cpu_coregroup_map(cpu);
}
void arch_update_cpu_topology(void)
int arch_update_cpu_topology(void)
{
struct tl_info *info = tl_info;
struct sys_device *sysdev;
@ -221,7 +221,7 @@ void arch_update_cpu_topology(void)
if (!machine_has_topology) {
update_cpu_core_map();
topology_update_polarization_simple();
return;
return 0;
}
stsi(info, 15, 1, 2);
tl_to_cores(info);
@ -230,6 +230,7 @@ void arch_update_cpu_topology(void)
sysdev = get_cpu_sysdev(cpu);
kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
}
return 1;
}
static void topology_work_fn(struct work_struct *work)

View file

@ -98,6 +98,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

View file

@ -1014,7 +1014,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_SETFPREGS64:
ret = copy_regset_to_user(child, view, REGSET_FP,
ret = copy_regset_from_user(child, view, REGSET_FP,
0 * sizeof(u64),
33 * sizeof(u64),
fps);

View file

@ -131,7 +131,7 @@
#define VIS_OPF_SHIFT 5
#define VIS_OPF_MASK (0x1ff << VIS_OPF_SHIFT)
#define RS1(INSN) (((INSN) >> 24) & 0x1f)
#define RS1(INSN) (((INSN) >> 14) & 0x1f)
#define RS2(INSN) (((INSN) >> 0) & 0x1f)
#define RD(INSN) (((INSN) >> 25) & 0x1f)
@ -445,7 +445,7 @@ static void pdist(struct pt_regs *regs, unsigned int insn)
unsigned long i;
rs1 = fpd_regval(f, RS1(insn));
rs2 = fpd_regval(f, RS1(insn));
rs2 = fpd_regval(f, RS2(insn));
rd = fpd_regaddr(f, RD(insn));
rd_val = *rd;
@ -807,6 +807,8 @@ int vis_emul(struct pt_regs *regs, unsigned int insn)
if (get_user(insn, (u32 __user *) pc))
return -EFAULT;
save_and_clear_fpu();
opf = (insn & VIS_OPF_MASK) >> VIS_OPF_SHIFT;
switch (opf) {
default:
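
Two one-line decode bugs: RS1() extracted bits 28:24 instead of the architectural rs1 field at bits 18:14, and pdist() read rs2 from the rs1 field; the emulator also gains a save_and_clear_fpu() before touching FPU state. With the fixed macros, decoding an illustrative encoding behaves as expected:

static void vis_decode_example(void)
{
        /* rd = bits 29:25, rs1 = bits 18:14, rs2 = bits 4:0 */
        unsigned int insn = (10u << 25) | (3u << 14) | 7u;

        BUG_ON(RD(insn) != 10);
        BUG_ON(RS1(insn) != 3);         /* the old ">> 24" read 20 here */
        BUG_ON(RS2(insn) != 7);
}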

View file

@ -16,6 +16,8 @@
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/utsname.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
@ -785,7 +787,7 @@ static int __init mconsole_init(void)
/* long to avoid size mismatch warnings from gcc */
long sock;
int err;
char file[256];
char file[UNIX_PATH_MAX];
if (umid_file_name("mconsole", file, sizeof(file)))
return -1;
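
The buffer shrinks from 256 bytes to UNIX_PATH_MAX (108) because the name is ultimately copied into a struct sockaddr_un, whose sun_path field is exactly that long; a longer umid path would have been silently truncated. The relationship, per the definitions in linux/un.h:

#include <linux/un.h>

static struct sockaddr_un mconsole_addr;

static void mconsole_size_check(void)
{
        /* sun_path is declared as char[UNIX_PATH_MAX], i.e. 108 bytes */
        BUILD_BUG_ON(sizeof(mconsole_addr.sun_path) != UNIX_PATH_MAX);
}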

View file

@ -251,13 +251,6 @@ struct amd_iommu {
/* Pointer to PCI device of this IOMMU */
struct pci_dev *dev;
/*
* Capability pointer. There could be more than one IOMMU per PCI
* device function if there are more than one AMD IOMMU capability
* pointers.
*/
u16 cap_ptr;
/* physical address of MMIO space */
u64 mmio_phys;
/* virtual address of MMIO space */
@ -266,6 +259,13 @@ struct amd_iommu {
/* capabilities of that IOMMU read from ACPI */
u32 cap;
/*
* Capability pointer. There could be more than one IOMMU per PCI
* device function if there are more than one AMD IOMMU capability
* pointers.
*/
u16 cap_ptr;
/* pci domain of this IOMMU */
u16 pci_seg;
@ -284,19 +284,19 @@ struct amd_iommu {
/* size of command buffer */
u32 cmd_buf_size;
/* event buffer virtual address */
u8 *evt_buf;
/* size of event buffer */
u32 evt_buf_size;
/* event buffer virtual address */
u8 *evt_buf;
/* MSI number for event interrupt */
u16 evt_msi_num;
/* if one, we need to send a completion wait command */
int need_sync;
/* true if interrupts for this IOMMU are already enabled */
bool int_enabled;
/* if one, we need to send a completion wait command */
int need_sync;
/* default dma_ops domain for that IOMMU */
struct dma_ops_domain *default_dom;
};

View file

@ -71,15 +71,13 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_32
return 0;
#else
#ifdef CONFIG_X86_64
struct dma_mapping_ops *ops = get_dma_ops(dev);
if (ops->mapping_error)
return ops->mapping_error(dev, dma_addr);
return (dma_addr == bad_dma_address);
#endif
return (dma_addr == bad_dma_address);
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)

View file

@ -239,7 +239,7 @@ struct pci_bus;
void set_pci_bus_resources_arch_default(struct pci_bus *b);
#ifdef CONFIG_SMP
#define mc_capable() (boot_cpu_data.x86_max_cores > 1)
#define mc_capable() (cpus_weight(per_cpu(cpu_core_map, 0)) != nr_cpu_ids)
#define smt_capable() (smp_num_siblings > 1)
#endif

View file

@ -187,6 +187,8 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
spin_lock_irqsave(&iommu->lock, flags);
ret = __iommu_queue_command(iommu, cmd);
if (!ret)
iommu->need_sync = 1;
spin_unlock_irqrestore(&iommu->lock, flags);
return ret;
@ -210,10 +212,13 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
iommu->need_sync = 0;
spin_lock_irqsave(&iommu->lock, flags);
if (!iommu->need_sync)
goto out;
iommu->need_sync = 0;
ret = __iommu_queue_command(iommu, &cmd);
if (ret)
@ -254,8 +259,6 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
ret = iommu_queue_command(iommu, &cmd);
iommu->need_sync = 1;
return ret;
}
@ -281,8 +284,6 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
ret = iommu_queue_command(iommu, &cmd);
iommu->need_sync = 1;
return ret;
}
@ -343,7 +344,7 @@ static int iommu_map(struct protection_domain *dom,
u64 __pte, *pte, *page;
bus_addr = PAGE_ALIGN(bus_addr);
phys_addr = PAGE_ALIGN(bus_addr);
phys_addr = PAGE_ALIGN(phys_addr);
/* only support 512GB address spaces for now */
if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
@ -599,7 +600,7 @@ static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
continue;
p2 = IOMMU_PTE_PAGE(p1[i]);
for (j = 0; j < 512; ++i) {
for (j = 0; j < 512; ++j) {
if (!IOMMU_PTE_PRESENT(p2[j]))
continue;
p3 = IOMMU_PTE_PAGE(p2[j]);
@ -762,8 +763,6 @@ static void set_device_domain(struct amd_iommu *iommu,
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
iommu_queue_inv_dev_entry(iommu, devid);
iommu->need_sync = 1;
}
/*****************************************************************************
@ -858,6 +857,9 @@ static int get_device_resources(struct device *dev,
print_devid(_bdf, 1);
}
if (domain_for_device(_bdf) == NULL)
set_device_domain(*iommu, *domain, _bdf);
return 1;
}
@ -908,7 +910,7 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
if (address >= dom->aperture_size)
return;
WARN_ON(address & 0xfffULL || address > dom->aperture_size);
WARN_ON(address & ~PAGE_MASK || address >= dom->aperture_size);
pte = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
pte += IOMMU_PTE_L0_INDEX(address);
@ -920,8 +922,8 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
/*
* This function contains common code for mapping of a physically
* contiguous memory region into DMA address space. It is uses by all
* mapping functions provided by this IOMMU driver.
* contiguous memory region into DMA address space. It is used by all
* mapping functions provided with this IOMMU driver.
* Must be called with the domain lock held.
*/
static dma_addr_t __map_single(struct device *dev,
@ -981,7 +983,8 @@ static void __unmap_single(struct amd_iommu *iommu,
dma_addr_t i, start;
unsigned int pages;
if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
if ((dma_addr == bad_dma_address) ||
(dma_addr + size > dma_dom->aperture_size))
return;
pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
@ -1031,7 +1034,6 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
if (addr == bad_dma_address)
goto out;
if (unlikely(iommu->need_sync))
iommu_completion_wait(iommu);
out:
@ -1060,7 +1062,6 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
__unmap_single(iommu, domain->priv, dma_addr, size, dir);
if (unlikely(iommu->need_sync))
iommu_completion_wait(iommu);
spin_unlock_irqrestore(&domain->lock, flags);
@ -1127,7 +1128,6 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
goto unmap;
}
if (unlikely(iommu->need_sync))
iommu_completion_wait(iommu);
out:
@ -1173,7 +1173,6 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
s->dma_address = s->dma_length = 0;
}
if (unlikely(iommu->need_sync))
iommu_completion_wait(iommu);
spin_unlock_irqrestore(&domain->lock, flags);
@ -1225,7 +1224,6 @@ static void *alloc_coherent(struct device *dev, size_t size,
goto out;
}
if (unlikely(iommu->need_sync))
iommu_completion_wait(iommu);
out:
@ -1257,7 +1255,6 @@ static void free_coherent(struct device *dev, size_t size,
__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
if (unlikely(iommu->need_sync))
iommu_completion_wait(iommu);
spin_unlock_irqrestore(&domain->lock, flags);
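
The need_sync rework closes a race: the flag is now set inside iommu->lock whenever a command is actually queued (instead of by each caller afterwards, outside the lock), and iommu_completion_wait() tests and clears it under that same lock, so a command queued by a concurrent CPU can no longer miss its completion wait. The locking shape, reduced to its essentials (sketch):

static int queue_and_mark(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&iommu->lock, flags);
        ret = __iommu_queue_command(iommu, cmd);
        if (!ret)
                iommu->need_sync = 1;   /* set once, under the lock */
        spin_unlock_irqrestore(&iommu->lock, flags);
        return ret;
}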

View file

@ -604,6 +604,9 @@ static void __init __get_smp_config(unsigned int early)
printk(KERN_INFO "Using ACPI for processor (LAPIC) "
"configuration information\n");
if (!mpf)
return;
printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
mpf->mpf_specification);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)

View file

@ -7,7 +7,8 @@
#include <asm/paravirt.h>
static void default_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
static inline void
default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
__raw_spin_lock(lock);
}

View file

@ -123,6 +123,8 @@ static void free_iommu(unsigned long offset, int size)
spin_lock_irqsave(&iommu_bitmap_lock, flags);
iommu_area_free(iommu_gart_bitmap, offset, size);
if (offset >= next_bit)
next_bit = offset + size;
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

View file

@ -401,14 +401,13 @@ static int __init ppro_init(char **cpu_type)
*cpu_type = "i386/pii";
break;
case 6 ... 8:
case 10 ... 11:
*cpu_type = "i386/piii";
break;
case 9:
case 13:
*cpu_type = "i386/p6_mobile";
break;
case 10 ... 13:
*cpu_type = "i386/p6";
break;
case 14:
*cpu_type = "i386/core";
break;

View file

@ -156,6 +156,8 @@ static void ppro_start(struct op_msrs const * const msrs)
unsigned int low, high;
int i;
if (!reset_value)
return;
for (i = 0; i < num_counters; ++i) {
if (reset_value[i]) {
CTRL_READ(low, high, msrs, i);
@ -171,6 +173,8 @@ static void ppro_stop(struct op_msrs const * const msrs)
unsigned int low, high;
int i;
if (!reset_value)
return;
for (i = 0; i < num_counters; ++i) {
if (!reset_value[i])
continue;

View file

@ -202,6 +202,8 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
rq->timeout = q->sg_timeout;
if (!rq->timeout)
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
if (rq->timeout < BLK_MIN_SG_TIMEOUT)
rq->timeout = BLK_MIN_SG_TIMEOUT;
return 0;
}
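
Both the bsg and SG_IO paths gain the same lower bound: a user-supplied timeout is still defaulted when zero, but is now also clamped so a tiny value cannot make every command appear to time out under load. The resulting policy, stated compactly (assuming BLK_MIN_SG_TIMEOUT is defined alongside BLK_DEFAULT_SG_TIMEOUT in blkdev.h):

static unsigned int effective_sg_timeout(unsigned int user_t, unsigned int q_t)
{
        unsigned int t = user_t ? user_t : q_t;

        if (!t)
                t = BLK_DEFAULT_SG_TIMEOUT;
        if (t < BLK_MIN_SG_TIMEOUT)
                t = BLK_MIN_SG_TIMEOUT;         /* the new lower bound */
        return t;
}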

View file

@ -677,6 +677,29 @@ static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
case DVD_WRITE_STRUCT:
case DVD_AUTH:
arg = (unsigned long)compat_ptr(arg);
/* These interpret arg as an unsigned long, not as a pointer,
* so we must not do compat_ptr() conversion. */
case HDIO_SET_MULTCOUNT:
case HDIO_SET_UNMASKINTR:
case HDIO_SET_KEEPSETTINGS:
case HDIO_SET_32BIT:
case HDIO_SET_NOWERR:
case HDIO_SET_DMA:
case HDIO_SET_PIO_MODE:
case HDIO_SET_NICE:
case HDIO_SET_WCACHE:
case HDIO_SET_ACOUSTIC:
case HDIO_SET_BUSSTATE:
case HDIO_SET_ADDRESS:
case CDROMEJECT_SW:
case CDROM_SET_OPTIONS:
case CDROM_CLEAR_OPTIONS:
case CDROM_SELECT_SPEED:
case CDROM_SELECT_DISC:
case CDROM_MEDIA_CHANGED:
case CDROM_DRIVE_STATUS:
case CDROM_LOCKDOOR:
case CDROM_DEBUG:
break;
default:
/* unknown ioctl number */
@ -699,8 +722,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
struct backing_dev_info *bdi;
loff_t size;
/*
* O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
* to update it before every ioctl.
*/
if (file->f_flags & O_NDELAY)
mode |= FMODE_NDELAY_NOW;
mode |= FMODE_NDELAY;
else
mode &= ~FMODE_NDELAY;
switch (cmd) {
case HDIO_GETGEO:
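
The new case labels matter because compat_ptr() is only correct for pointer arguments: HDIO_SET_* and these CDROM ioctls carry a plain integer in arg, and converting it as a 32-bit pointer would mangle the value. The userspace difference, for illustration:

#include <linux/hdreg.h>
#include <sys/ioctl.h>

static void hdio_examples(int fd)
{
        struct hd_geometry geo;

        ioctl(fd, HDIO_SET_32BIT, 1);   /* value: must not be compat_ptr()ed */
        ioctl(fd, HDIO_GETGEO, &geo);   /* pointer: does need translation */
}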

View file

@ -208,6 +208,8 @@ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
rq->timeout = q->sg_timeout;
if (!rq->timeout)
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
if (rq->timeout < BLK_MIN_SG_TIMEOUT)
rq->timeout = BLK_MIN_SG_TIMEOUT;
return 0;
}

View file

@ -31,35 +31,63 @@ config CRYPTO_FIPS
config CRYPTO_ALGAPI
tristate
select CRYPTO_ALGAPI2
help
This option provides the API for cryptographic algorithms.
config CRYPTO_ALGAPI2
tristate
config CRYPTO_AEAD
tristate
select CRYPTO_AEAD2
select CRYPTO_ALGAPI
config CRYPTO_AEAD2
tristate
select CRYPTO_ALGAPI2
config CRYPTO_BLKCIPHER
tristate
select CRYPTO_BLKCIPHER2
select CRYPTO_ALGAPI
select CRYPTO_RNG
config CRYPTO_BLKCIPHER2
tristate
select CRYPTO_ALGAPI2
select CRYPTO_RNG2
config CRYPTO_HASH
tristate
select CRYPTO_HASH2
select CRYPTO_ALGAPI
config CRYPTO_HASH2
tristate
select CRYPTO_ALGAPI2
config CRYPTO_RNG
tristate
select CRYPTO_RNG2
select CRYPTO_ALGAPI
config CRYPTO_RNG2
tristate
select CRYPTO_ALGAPI2
config CRYPTO_MANAGER
tristate "Cryptographic algorithm manager"
select CRYPTO_AEAD
select CRYPTO_HASH
select CRYPTO_BLKCIPHER
select CRYPTO_MANAGER2
help
Create default cryptographic template instantiations such as
cbc(aes).
config CRYPTO_MANAGER2
def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y)
select CRYPTO_AEAD2
select CRYPTO_HASH2
select CRYPTO_BLKCIPHER2
config CRYPTO_GF128MUL
tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
depends on EXPERIMENTAL

View file

@ -9,24 +9,24 @@ obj-$(CONFIG_CRYPTO_FIPS) += fips.o
crypto_algapi-$(CONFIG_PROC_FS) += proc.o
crypto_algapi-objs := algapi.o scatterwalk.o $(crypto_algapi-y)
obj-$(CONFIG_CRYPTO_ALGAPI) += crypto_algapi.o
obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o
obj-$(CONFIG_CRYPTO_AEAD) += aead.o
obj-$(CONFIG_CRYPTO_AEAD2) += aead.o
crypto_blkcipher-objs := ablkcipher.o
crypto_blkcipher-objs += blkcipher.o
obj-$(CONFIG_CRYPTO_BLKCIPHER) += crypto_blkcipher.o
obj-$(CONFIG_CRYPTO_BLKCIPHER) += chainiv.o
obj-$(CONFIG_CRYPTO_BLKCIPHER) += eseqiv.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
crypto_hash-objs := hash.o
crypto_hash-objs += ahash.o
obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o
obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
cryptomgr-objs := algboss.o testmgr.o
obj-$(CONFIG_CRYPTO_MANAGER) += cryptomgr.o
obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
@ -73,8 +73,8 @@ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o
obj-$(CONFIG_CRYPTO_RNG) += rng.o
obj-$(CONFIG_CRYPTO_RNG) += krng.o
obj-$(CONFIG_CRYPTO_RNG2) += rng.o
obj-$(CONFIG_CRYPTO_RNG2) += krng.o
obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o

View file

@ -174,15 +174,6 @@ static int acpi_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
val->intval = battery->current_now * 1000;
/* if power units are mW, convert to mA by
dividing by current voltage (mV/1000) */
if (!battery->power_unit) {
if (battery->voltage_now) {
val->intval /= battery->voltage_now;
val->intval *= 1000;
} else
val->intval = -1;
}
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:

View file

@ -153,7 +153,7 @@ config SATA_PROMISE
If unsure, say N.
config SATA_SX4
tristate "Promise SATA SX4 support"
tristate "Promise SATA SX4 support (Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for Promise Serial ATA SX4.
@ -219,8 +219,8 @@ config PATA_ACPI
otherwise unsupported hardware.
config PATA_ALI
tristate "ALi PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
tristate "ALi PATA support"
depends on PCI
help
This option enables support for the ALi ATA interfaces
found on the many ALi chipsets.
@ -263,7 +263,7 @@ config PATA_ATIIXP
If unsure, say N.
config PATA_CMD640_PCI
tristate "CMD640 PCI PATA support (Very Experimental)"
tristate "CMD640 PCI PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the CMD640 PCI IDE
@ -291,8 +291,8 @@ config PATA_CS5520
If unsure, say N.
config PATA_CS5530
tristate "CS5530 PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
tristate "CS5530 PATA support"
depends on PCI
help
This option enables support for the Cyrix/NatSemi/AMD CS5530
companion chip used with the MediaGX/Geode processor family.
@ -309,8 +309,8 @@ config PATA_CS5535
If unsure, say N.
config PATA_CS5536
tristate "CS5536 PATA support (Experimental)"
depends on PCI && X86 && !X86_64 && EXPERIMENTAL
tristate "CS5536 PATA support"
depends on PCI && X86 && !X86_64
help
This option enables support for the AMD CS5536
companion chip used with the Geode LX processor family.
@ -363,7 +363,7 @@ config PATA_HPT37X
If unsure, say N.
config PATA_HPT3X2N
tristate "HPT 372N/302N PATA support (Very Experimental)"
tristate "HPT 372N/302N PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the N variant HPT PATA
@ -389,8 +389,8 @@ config PATA_HPT3X3_DMA
problems with DMA on this chipset.
config PATA_ISAPNP
tristate "ISA Plug and Play PATA support (Experimental)"
depends on EXPERIMENTAL && ISAPNP
tristate "ISA Plug and Play PATA support"
depends on ISAPNP
help
This option enables support for ISA plug & play ATA
controllers such as those found on old soundcards.
@ -498,8 +498,8 @@ config PATA_NINJA32
If unsure, say N.
config PATA_NS87410
tristate "Nat Semi NS87410 PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
tristate "Nat Semi NS87410 PATA support"
depends on PCI
help
This option enables support for the National Semiconductor
NS87410 PCI-IDE controller.
@ -507,8 +507,8 @@ config PATA_NS87410
If unsure, say N.
config PATA_NS87415
tristate "Nat Semi NS87415 PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
tristate "Nat Semi NS87415 PATA support"
depends on PCI
help
This option enables support for the National Semiconductor
NS87415 PCI-IDE controller.
@ -544,8 +544,8 @@ config PATA_PCMCIA
If unsure, say N.
config PATA_PDC_OLD
tristate "Older Promise PATA controller support (Experimental)"
depends on PCI && EXPERIMENTAL
tristate "Older Promise PATA controller support"
depends on PCI
help
This option enables support for the Promise 20246, 20262, 20263,
20265 and 20267 adapters.
@ -559,7 +559,7 @@ config PATA_QDI
Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
config PATA_RADISYS
tristate "RADISYS 82600 PATA support (Very Experimental)"
tristate "RADISYS 82600 PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the RADISYS 82600
@ -586,8 +586,8 @@ config PATA_RZ1000
If unsure, say N.
config PATA_SC1200
tristate "SC1200 PATA support (Very Experimental)"
depends on PCI && EXPERIMENTAL
tristate "SC1200 PATA support"
depends on PCI
help
This option enables support for the NatSemi/AMD SC1200 SoC
companion chip used with the Geode processor family.
@ -620,8 +620,8 @@ config PATA_SIL680
If unsure, say N.
config PATA_SIS
tristate "SiS PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
tristate "SiS PATA support"
depends on PCI
help
This option enables support for SiS PATA controllers


@ -1072,7 +1072,14 @@ static int piix_broken_suspend(void)
* matching is necessary because dmi_system_id.matches is
* limited to four entries.
*/
if (!strcmp(dmi_get_system_info(DMI_SYS_VENDOR), "TOSHIBA") &&
if (dmi_get_system_info(DMI_SYS_VENDOR) &&
dmi_get_system_info(DMI_PRODUCT_NAME) &&
dmi_get_system_info(DMI_PRODUCT_VERSION) &&
dmi_get_system_info(DMI_PRODUCT_SERIAL) &&
dmi_get_system_info(DMI_BOARD_VENDOR) &&
dmi_get_system_info(DMI_BOARD_NAME) &&
dmi_get_system_info(DMI_BOARD_VERSION) &&
!strcmp(dmi_get_system_info(DMI_SYS_VENDOR), "TOSHIBA") &&
!strcmp(dmi_get_system_info(DMI_PRODUCT_NAME), "000000") &&
!strcmp(dmi_get_system_info(DMI_PRODUCT_VERSION), "000000") &&
!strcmp(dmi_get_system_info(DMI_PRODUCT_SERIAL), "000000") &&
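The checks added above matter because dmi_get_system_info() returns NULL for any DMI field the firmware left blank, and handing NULL to strcmp() would dereference a null pointer. A minimal standalone sketch of the same null-then-compare guard, with a made-up lookup function standing in for the DMI layer:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for dmi_get_system_info(): may return NULL. */
static const char *demo_get_field(int field)
{
        static const char *fields[] = { "TOSHIBA", NULL, "000000" };

        return (field >= 0 && field < 3) ? fields[field] : NULL;
}

/* NULL-safe equivalent of one !strcmp() term in the chain above. */
static int field_matches(int field, const char *want)
{
        const char *val = demo_get_field(field);

        return val != NULL && strcmp(val, want) == 0;
}

int main(void)
{
        /* Field 1 is absent; a bare strcmp() here would crash. */
        printf("field 0 matches: %d\n", field_matches(0, "TOSHIBA"));  /* 1 */
        printf("field 1 matches: %d\n", field_matches(1, "000000"));   /* 0 */
        return 0;
}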


@ -382,10 +382,10 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
/* PCI clocking determines the ATA timing values to use */
/* info_hpt366 is safe against re-entry so we can scribble on it */
switch((reg1 & 0x700) >> 8) {
case 5:
case 9:
hpriv = &hpt366_40;
break;
case 9:
case 5:
hpriv = &hpt366_25;
break;
default:


@ -44,7 +44,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_ninja32"
#define DRV_VERSION "0.1.1"
#define DRV_VERSION "0.1.3"
/**
@ -130,7 +130,8 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
return rc;
pci_set_master(dev);
/* Set up the register mappings */
/* Set up the register mappings. We use the I/O mapping as only the
older chips also have MMIO on BAR 1 */
base = host->iomap[0];
if (!base)
return -ENOMEM;
@ -167,8 +168,12 @@ static int ninja32_reinit_one(struct pci_dev *pdev)
#endif
static const struct pci_device_id ninja32[] = {
{ 0x10FC, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0x8008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0xf008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0xf02C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ },
};


@ -56,7 +56,6 @@ static const struct sis_laptop sis_laptop[] = {
{ 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */
{ 0x5513, 0x1734, 0x105F }, /* FSC Amilo A1630 */
{ 0x5513, 0x1071, 0x8640 }, /* EasyNote K5305 */
{ 0x5513, 0x1039, 0x5513 }, /* Targa Visionary 1000 */
/* end marker */
{ 0, }
};


@ -302,7 +302,7 @@ static struct kobj_type kobj_pkt_type_wqueue = {
static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
if (class_pktcdvd) {
pd->dev = device_create(class_pktcdvd, NULL, pd->pkt_dev, NULL,
pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
"%s", pd->name);
if (IS_ERR(pd->dev))
pd->dev = NULL;
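Passing MKDEV(0, 0) hands device_create() a zero dev_t, so the pktcdvd class device is no longer bound to a character-device number. For orientation, a Linux dev_t packs a 12-bit major above a 20-bit minor; a standalone model of that packing, with DEMO_* names standing in for the kernel's MKDEV/MAJOR/MINOR macros:

#include <assert.h>

#define DEMO_MINORBITS  20
#define DEMO_MKDEV(ma, mi)      (((ma) << DEMO_MINORBITS) | (mi))
#define DEMO_MAJOR(dev)         ((dev) >> DEMO_MINORBITS)
#define DEMO_MINOR(dev)         ((dev) & ((1u << DEMO_MINORBITS) - 1))

int main(void)
{
        unsigned int dev = DEMO_MKDEV(10u, 237u);

        assert(DEMO_MAJOR(dev) == 10u);
        assert(DEMO_MINOR(dev) == 237u);
        /* MKDEV(0, 0) therefore encodes "no device node at all". */
        assert(DEMO_MKDEV(0u, 0u) == 0u);
        return 0;
}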
@ -2790,7 +2790,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
return 0;
out_mem:
blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
return ret;
@ -2975,7 +2975,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
pkt_debugfs_dev_remove(pd);
pkt_sysfs_dev_remove(pd);
blkdev_put(pd->bdev, FMODE_READ|FMODE_WRITE);
blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
remove_proc_entry(pd->name, pkt_proc);
DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name);


@ -418,7 +418,7 @@ static irqreturn_t cd2401_rxerr_interrupt(int irq, void *dev_id)
TTY_OVERRUN);
/*
If the flip buffer itself is
overflowing, we still loose
overflowing, we still lose
the next incoming character.
*/
if (tty_buffer_request_room(tty, 1) !=


@ -974,6 +974,7 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
packet->ack = RCODE_SEND_ERROR;
return -1;
}
packet->payload_bus = payload_bus;
d[2].req_count = cpu_to_le16(packet->payload_length);
d[2].data_address = cpu_to_le32(payload_bus);
@ -1025,7 +1026,6 @@ static int handle_at_packet(struct context *context,
struct driver_data *driver_data;
struct fw_packet *packet;
struct fw_ohci *ohci = context->ohci;
dma_addr_t payload_bus;
int evt;
if (last->transfer_status == 0)
@ -1038,9 +1038,8 @@ static int handle_at_packet(struct context *context,
/* This packet was cancelled, just continue. */
return 1;
payload_bus = le32_to_cpu(last->data_address);
if (payload_bus != 0)
dma_unmap_single(ohci->card.device, payload_bus,
if (packet->payload_bus)
dma_unmap_single(ohci->card.device, packet->payload_bus,
packet->payload_length, DMA_TO_DEVICE);
evt = le16_to_cpu(last->transfer_status) & 0x1f;
@ -1697,6 +1696,10 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
if (packet->ack != 0)
goto out;
if (packet->payload_bus)
dma_unmap_single(ohci->card.device, packet->payload_bus,
packet->payload_length, DMA_TO_DEVICE);
log_ar_at_event('T', packet->speed, packet->header, 0x20);
driver_data->packet = NULL;
packet->ack = RCODE_CANCELLED;


@ -207,6 +207,7 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
packet->speed = speed;
packet->generation = generation;
packet->ack = 0;
packet->payload_bus = 0;
}
/**
@ -581,6 +582,8 @@ fw_fill_response(struct fw_packet *response, u32 *request_header,
BUG();
return;
}
response->payload_bus = 0;
}
EXPORT_SYMBOL(fw_fill_response);


@ -27,6 +27,7 @@
#include <linux/list.h>
#include <linux/spinlock_types.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
@ -153,6 +154,7 @@ struct fw_packet {
size_t header_length;
void *payload;
size_t payload_length;
dma_addr_t payload_bus;
u32 timestamp;
/*


@ -847,9 +847,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
* and the registers being closely associated.
*
* According to chipset errata, on the 965GM, MSI interrupts may
* be lost or delayed
* be lost or delayed, but we use them anyways to avoid
* stuck interrupts on some machines.
*/
if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev))
if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);
intel_opregion_init(dev);


@ -244,6 +244,10 @@ typedef struct drm_i915_private {
* List of objects currently involved in rendering from the
* ringbuffer.
*
* Includes buffers having the contents of their GPU caches
* flushed, not necessarily primitives. last_rendering_seqno
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
*/
struct list_head active_list;
@ -253,6 +257,8 @@ typedef struct drm_i915_private {
* still have a write_domain which needs to be flushed before
* unbinding.
*
* last_rendering_seqno is 0 while an object is in this list.
*
* A reference is held on the buffer while on this list.
*/
struct list_head flushing_list;
@ -261,6 +267,8 @@ typedef struct drm_i915_private {
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
* last_rendering_seqno is 0 while an object is in this list.
*
* A reference is not held on the buffer while on this list,
* as merely being GTT-bound shouldn't prevent its being
* freed, and we'll pull it off the list in the free path.
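The invariant these comments describe, and which the i915_gem_object_move_to_active/_to_flushing/_to_inactive helpers later in this diff maintain, is that last_rendering_seqno is nonzero exactly while an object sits on the active list. A toy standalone model of that state machine (names are illustrative):

#include <assert.h>
#include <stdint.h>

enum obj_state { INACTIVE, FLUSHING, ACTIVE };

struct demo_obj {
        enum obj_state state;
        uint32_t last_rendering_seqno;
};

static void move_to_active(struct demo_obj *o, uint32_t seqno)
{
        o->state = ACTIVE;
        o->last_rendering_seqno = seqno;        /* nonzero only here */
}

static void move_to_flushing(struct demo_obj *o)
{
        o->state = FLUSHING;
        o->last_rendering_seqno = 0;
}

static void move_to_inactive(struct demo_obj *o)
{
        o->state = INACTIVE;
        o->last_rendering_seqno = 0;
}

int main(void)
{
        struct demo_obj o = { INACTIVE, 0 };

        move_to_active(&o, 42);
        assert(o.last_rendering_seqno != 0);
        move_to_flushing(&o);
        assert(o.last_rendering_seqno == 0);
        move_to_inactive(&o);
        assert(o.state == INACTIVE && o.last_rendering_seqno == 0);
        return 0;
}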
@ -371,8 +379,8 @@ struct drm_i915_gem_object {
uint32_t agp_type;
/**
* Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
* GEM_DOMAIN_CPU is not in the object's read domain.
* If present, while GEM_DOMAIN_CPU is in the read domain this array
* flags which individual pages are valid.
*/
uint8_t *page_cpu_valid;
};
@ -394,9 +402,6 @@ struct drm_i915_gem_request {
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
/** Cache domains that were flushed at the start of the request. */
uint32_t flush_domains;
struct list_head list;
};


@ -33,21 +33,21 @@
#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
static int
i915_gem_object_set_domain_range(struct drm_gem_object *obj,
static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
int write);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
uint64_t offset,
uint64_t size,
uint32_t read_domains,
uint32_t write_domain);
static int
i915_gem_set_domain(struct drm_gem_object *obj,
struct drm_file *file_priv,
uint32_t read_domains,
uint32_t write_domain);
uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
@ -162,8 +162,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
I915_GEM_DOMAIN_CPU, 0);
ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
args->size);
if (ret != 0) {
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@ -260,8 +260,7 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
mutex_unlock(&dev->struct_mutex);
return ret;
}
ret = i915_gem_set_domain(obj, file_priv,
I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret)
goto fail;
@ -320,8 +319,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
mutex_lock(&dev->struct_mutex);
ret = i915_gem_set_domain(obj, file_priv,
I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
@ -397,7 +395,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
}
/**
* Called when user space prepares to use an object
* Called when user space prepares to use an object with the CPU, either
* through the mmap ioctl's mapping or a GTT mapping.
*/
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@ -405,11 +404,26 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_set_domain *args = data;
struct drm_gem_object *obj;
uint32_t read_domains = args->read_domains;
uint32_t write_domain = args->write_domain;
int ret;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
/* Only handle setting domains to types used by the CPU. */
if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
return -EINVAL;
if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
return -EINVAL;
/* Having something in the write domain implies it's in the read
* domain, and only that read domain. Enforce that in the request.
*/
if (write_domain != 0 && read_domains != write_domain)
return -EINVAL;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EBADF;
@ -417,10 +431,21 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
#if WATCH_BUF
DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
obj, obj->size, args->read_domains, args->write_domain);
obj, obj->size, read_domains, write_domain);
#endif
ret = i915_gem_set_domain(obj, file_priv,
args->read_domains, args->write_domain);
if (read_domains & I915_GEM_DOMAIN_GTT) {
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
/* Silently promote "you're not bound, there was nothing to do"
* to success, since the client was just asking us to
* make sure everything was done.
*/
if (ret == -EINVAL)
ret = 0;
} else {
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
@ -455,10 +480,9 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
obj_priv = obj->driver_private;
/* Pinned buffers may be scanout, so flush the cache */
if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
i915_gem_clflush_object(obj);
drm_agp_chipset_flush(dev);
}
if (obj_priv->pin_count)
i915_gem_object_flush_cpu_write_domain(obj);
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
@ -532,7 +556,7 @@ i915_gem_object_free_page_list(struct drm_gem_object *obj)
}
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj)
i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@ -546,8 +570,20 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj)
/* Move from whatever list we were on to the tail of execution. */
list_move_tail(&obj_priv->list,
&dev_priv->mm.active_list);
obj_priv->last_rendering_seqno = seqno;
}
static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
BUG_ON(!obj_priv->active);
list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
obj_priv->last_rendering_seqno = 0;
}
static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
@ -562,6 +598,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
else
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
obj_priv->last_rendering_seqno = 0;
if (obj_priv->active) {
obj_priv->active = 0;
drm_gem_object_unreference(obj);
@ -610,10 +647,28 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
request->seqno = seqno;
request->emitted_jiffies = jiffies;
request->flush_domains = flush_domains;
was_empty = list_empty(&dev_priv->mm.request_list);
list_add_tail(&request->list, &dev_priv->mm.request_list);
/* Associate any objects on the flushing list matching the write
* domain we're flushing with our flush.
*/
if (flush_domains != 0) {
struct drm_i915_gem_object *obj_priv, *next;
list_for_each_entry_safe(obj_priv, next,
&dev_priv->mm.flushing_list, list) {
struct drm_gem_object *obj = obj_priv->obj;
if ((obj->write_domain & flush_domains) ==
obj->write_domain) {
obj->write_domain = 0;
i915_gem_object_move_to_active(obj, seqno);
}
}
}
if (was_empty && !dev_priv->mm.suspended)
schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
return seqno;
@ -676,33 +731,13 @@ i915_gem_retire_request(struct drm_device *dev,
__func__, request->seqno, obj);
#endif
if (obj->write_domain != 0) {
list_move_tail(&obj_priv->list,
&dev_priv->mm.flushing_list);
} else {
if (obj->write_domain != 0)
i915_gem_object_move_to_flushing(obj);
else
i915_gem_object_move_to_inactive(obj);
}
}
if (request->flush_domains != 0) {
struct drm_i915_gem_object *obj_priv, *next;
/* Clear the write domain and activity from any buffers
* that are just waiting for a flush matching the one retired.
*/
list_for_each_entry_safe(obj_priv, next,
&dev_priv->mm.flushing_list, list) {
struct drm_gem_object *obj = obj_priv->obj;
if (obj->write_domain & request->flush_domains) {
obj->write_domain = 0;
i915_gem_object_move_to_inactive(obj);
}
}
}
}
/**
* Returns true if seq1 is later than seq2.
*/
@ -892,25 +927,10 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int ret;
/* If there are writes queued to the buffer, flush and
* create a new seqno to wait for.
/* This function only exists to support waiting for existing rendering,
* not for emitting required flushes.
*/
if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
uint32_t write_domain = obj->write_domain;
#if WATCH_BUF
DRM_INFO("%s: flushing object %p from write domain %08x\n",
__func__, obj, write_domain);
#endif
i915_gem_flush(dev, 0, write_domain);
i915_gem_object_move_to_active(obj);
obj_priv->last_rendering_seqno = i915_add_request(dev,
write_domain);
BUG_ON(obj_priv->last_rendering_seqno == 0);
#if WATCH_LRU
DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
#endif
}
BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
/* If there is rendering queued on the buffer being evicted, wait for
* it.
@ -950,23 +970,15 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return -EINVAL;
}
/* Wait for any rendering to complete
*/
ret = i915_gem_object_wait_rendering(obj);
if (ret) {
DRM_ERROR("wait_rendering failed: %d\n", ret);
return ret;
}
/* Move the object to the CPU domain to ensure that
* any possible CPU writes while it's not in the GTT
* are flushed when we go to remap it. This will
* also ensure that all pending GPU writes are finished
* before we unbind.
*/
ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
I915_GEM_DOMAIN_CPU);
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
if (ret) {
if (ret != -ERESTARTSYS)
DRM_ERROR("set_domain failed: %d\n", ret);
return ret;
}
@ -1082,6 +1094,21 @@ i915_gem_evict_something(struct drm_device *dev)
return ret;
}
static int
i915_gem_evict_everything(struct drm_device *dev)
{
int ret;
for (;;) {
ret = i915_gem_evict_something(dev);
if (ret != 0)
break;
}
if (ret == -ENOMEM)
return 0;
return ret;
}
static int
i915_gem_object_get_page_list(struct drm_gem_object *obj)
{
@ -1168,6 +1195,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
ret = i915_gem_evict_something(dev);
if (ret != 0) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to evict a buffer %d\n", ret);
return ret;
}
@ -1228,6 +1256,143 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
}
/** Flushes any GPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
uint32_t seqno;
if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
return;
/* Queue the GPU write cache flushing we need. */
i915_gem_flush(dev, 0, obj->write_domain);
seqno = i915_add_request(dev, obj->write_domain);
obj->write_domain = 0;
i915_gem_object_move_to_active(obj, seqno);
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
if (obj->write_domain != I915_GEM_DOMAIN_GTT)
return;
/* No actual flushing is required for the GTT write domain. Writes
* to it immediately go to main memory as far as we know, so there's
* no chipset flush. It also doesn't land in render cache.
*/
obj->write_domain = 0;
}
/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
if (obj->write_domain != I915_GEM_DOMAIN_CPU)
return;
i915_gem_clflush_object(obj);
drm_agp_chipset_flush(dev);
obj->write_domain = 0;
}
/**
* Moves a single object to the GTT read, and possibly write domain.
*
* This function returns when the move is complete, including waiting on
* flushes to occur.
*/
static int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int ret;
/* Not valid to be called on unbound objects. */
if (obj_priv->gtt_space == NULL)
return -EINVAL;
i915_gem_object_flush_gpu_write_domain(obj);
/* Wait on any GPU rendering and flushing to occur. */
ret = i915_gem_object_wait_rendering(obj);
if (ret != 0)
return ret;
/* If we're writing through the GTT domain, then CPU and GPU caches
* will need to be invalidated at next use.
*/
if (write)
obj->read_domains &= I915_GEM_DOMAIN_GTT;
i915_gem_object_flush_cpu_write_domain(obj);
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
obj->write_domain = I915_GEM_DOMAIN_GTT;
obj_priv->dirty = 1;
}
return 0;
}
/**
* Moves a single object to the CPU read, and possibly write domain.
*
* This function returns when the move is complete, including waiting on
* flushes to occur.
*/
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
struct drm_device *dev = obj->dev;
int ret;
i915_gem_object_flush_gpu_write_domain(obj);
/* Wait on any GPU rendering and flushing to occur. */
ret = i915_gem_object_wait_rendering(obj);
if (ret != 0)
return ret;
i915_gem_object_flush_gtt_write_domain(obj);
/* If we have a partially-valid cache of the object in the CPU,
* finish invalidating it and free the per-page flags.
*/
i915_gem_object_set_to_full_cpu_read_domain(obj);
/* Flush the CPU cache if it's still invalid. */
if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj);
drm_agp_chipset_flush(dev);
obj->read_domains |= I915_GEM_DOMAIN_CPU;
}
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
/* If we're writing through the CPU, then the GPU read domains will
* need to be invalidated at next use.
*/
if (write) {
obj->read_domains &= I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
}
return 0;
}
/*
* Set the next domain for the specified object. This
* may not actually perform the necessary flushing/invalidating though,
@ -1339,8 +1504,8 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
* MI_FLUSH
* drm_agp_chipset_flush
*/
static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain)
{
@ -1348,7 +1513,9 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
struct drm_i915_gem_object *obj_priv = obj->driver_private;
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
int ret;
BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
#if WATCH_BUF
DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
@ -1385,34 +1552,11 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
__func__, flush_domains, invalidate_domains);
#endif
/*
* If we're invalidating the CPU cache and flushing a GPU cache,
* then pause for rendering so that the GPU caches will be
* flushed before the cpu cache is invalidated
*/
if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
(flush_domains & ~(I915_GEM_DOMAIN_CPU |
I915_GEM_DOMAIN_GTT))) {
ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
}
i915_gem_clflush_object(obj);
}
if ((write_domain | flush_domains) != 0)
obj->write_domain = write_domain;
/* If we're invalidating the CPU domain, clear the per-page CPU
* domain list as well.
*/
if (obj_priv->page_cpu_valid != NULL &&
(write_domain != 0 ||
read_domains & I915_GEM_DOMAIN_CPU)) {
drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
DRM_MEM_DRIVER);
obj_priv->page_cpu_valid = NULL;
}
obj->read_domains = read_domains;
dev->invalidate_domains |= invalidate_domains;
@ -1423,47 +1567,94 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
obj->read_domains, obj->write_domain,
dev->invalidate_domains, dev->flush_domains);
#endif
return 0;
}
/**
* Set the read/write domain on a range of the object.
* Moves the object from a partially CPU read to a full one.
*
* Currently only implemented for CPU reads, otherwise drops to normal
* i915_gem_object_set_domain().
* Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
* and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
*/
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
if (!obj_priv->page_cpu_valid)
return;
/* If we're partially in the CPU read domain, finish moving it in.
*/
if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
int i;
for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
if (obj_priv->page_cpu_valid[i])
continue;
drm_clflush_pages(obj_priv->page_list + i, 1);
}
drm_agp_chipset_flush(dev);
}
/* Free the page_cpu_valid mappings which are now stale, whether
* or not we've got I915_GEM_DOMAIN_CPU.
*/
drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
DRM_MEM_DRIVER);
obj_priv->page_cpu_valid = NULL;
}
/**
* Set the CPU read domain on a range of the object.
*
* The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
* not entirely valid. The page_cpu_valid member of the object flags which
* pages have been flushed, and will be respected by
* i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
* of the whole object.
*
* This function returns when the move is complete, including waiting on
* flushes to occur.
*/
static int
i915_gem_object_set_domain_range(struct drm_gem_object *obj,
uint64_t offset,
uint64_t size,
uint32_t read_domains,
uint32_t write_domain)
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
uint64_t offset, uint64_t size)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int ret, i;
int i, ret;
if (obj->read_domains & I915_GEM_DOMAIN_CPU)
if (offset == 0 && size == obj->size)
return i915_gem_object_set_to_cpu_domain(obj, 0);
i915_gem_object_flush_gpu_write_domain(obj);
/* Wait on any GPU rendering and flushing to occur. */
ret = i915_gem_object_wait_rendering(obj);
if (ret != 0)
return ret;
i915_gem_object_flush_gtt_write_domain(obj);
/* If we're already fully in the CPU read domain, we're done. */
if (obj_priv->page_cpu_valid == NULL &&
(obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
return 0;
if (read_domains != I915_GEM_DOMAIN_CPU ||
write_domain != 0)
return i915_gem_object_set_domain(obj,
read_domains, write_domain);
/* Wait on any GPU rendering to the object to be flushed. */
ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
/* Otherwise, create/clear the per-page CPU read domain flag if we're
* newly adding I915_GEM_DOMAIN_CPU
*/
if (obj_priv->page_cpu_valid == NULL) {
obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
DRM_MEM_DRIVER);
}
if (obj_priv->page_cpu_valid == NULL)
return -ENOMEM;
} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
/* Flush the cache on any pages that are still invalid from the CPU's
* perspective.
*/
for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
i++) {
if (obj_priv->page_cpu_valid[i])
continue;
@ -1472,41 +1663,16 @@ i915_gem_object_set_domain_range(struct drm_gem_object *obj,
obj_priv->page_cpu_valid[i] = 1;
}
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
obj->read_domains |= I915_GEM_DOMAIN_CPU;
return 0;
}
/**
* Once all of the objects have been set in the proper domain,
* perform the necessary flush and invalidate operations.
*
* Returns the write domains flushed, for use in flush tracking.
*/
static uint32_t
i915_gem_dev_set_domain(struct drm_device *dev)
{
uint32_t flush_domains = dev->flush_domains;
/*
* Now that all the buffers are synced to the proper domains,
* flush and invalidate the collected domains
*/
if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
__func__,
dev->invalidate_domains,
dev->flush_domains);
#endif
i915_gem_flush(dev,
dev->invalidate_domains,
dev->flush_domains);
dev->invalidate_domains = 0;
dev->flush_domains = 0;
}
return flush_domains;
}
/**
* Pin an object to the GTT and evaluate the relocations landing in it.
*/
@ -1585,6 +1751,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
return -EINVAL;
}
if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
reloc.read_domains & I915_GEM_DOMAIN_CPU) {
DRM_ERROR("reloc with read/write CPU domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
obj, reloc.target_handle,
(int) reloc.offset,
reloc.read_domains,
reloc.write_domain);
return -EINVAL;
}
if (reloc.write_domain && target_obj->pending_write_domain &&
reloc.write_domain != target_obj->pending_write_domain) {
DRM_ERROR("Write domain conflict: "
@ -1625,19 +1803,11 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
continue;
}
/* Now that we're going to actually write some data in,
* make sure that any rendering using this buffer's contents
* is completed.
*/
i915_gem_object_wait_rendering(obj);
/* As we're writing through the gtt, flush
* any CPU writes before we write the relocations
*/
if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj);
drm_agp_chipset_flush(dev);
obj->write_domain = 0;
ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret != 0) {
drm_gem_object_unreference(target_obj);
i915_gem_object_unpin(obj);
return -EINVAL;
}
/* Map the page containing the relocation we're going to
@ -1779,6 +1949,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
int ret, i, pinned = 0;
uint64_t exec_offset;
uint32_t seqno, flush_domains;
int pin_tries;
#if WATCH_EXEC
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@ -1827,14 +1998,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -EBUSY;
}
/* Zero the gloabl flush/invalidate flags. These
* will be modified as each object is bound to the
* gtt
*/
dev->invalidate_domains = 0;
dev->flush_domains = 0;
/* Look up object handles and perform the relocations */
/* Look up object handles */
for (i = 0; i < args->buffer_count; i++) {
object_list[i] = drm_gem_object_lookup(dev, file_priv,
exec_list[i].handle);
@ -1844,17 +2008,39 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
ret = -EBADF;
goto err;
}
}
/* Pin and relocate */
for (pin_tries = 0; ; pin_tries++) {
ret = 0;
for (i = 0; i < args->buffer_count; i++) {
object_list[i]->pending_read_domains = 0;
object_list[i]->pending_write_domain = 0;
ret = i915_gem_object_pin_and_relocate(object_list[i],
file_priv,
&exec_list[i]);
if (ret) {
DRM_ERROR("object bind and relocate failed %d\n", ret);
if (ret)
break;
pinned = i + 1;
}
/* success */
if (ret == 0)
break;
/* error other than GTT full, or we've already tried again */
if (ret != -ENOMEM || pin_tries >= 1) {
DRM_ERROR("Failed to pin buffers %d\n", ret);
goto err;
}
pinned = i + 1;
/* unpin all of our buffers */
for (i = 0; i < pinned; i++)
i915_gem_object_unpin(object_list[i]);
/* evict everyone we can from the aperture */
ret = i915_gem_evict_everything(dev);
if (ret)
goto err;
}
/* Set the pending read domains for the batch buffer to COMMAND */
@ -1864,21 +2050,37 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
i915_verify_inactive(dev, __FILE__, __LINE__);
/* Zero the global flush/invalidate flags. These
* will be modified as new domains are computed
* for each object
*/
dev->invalidate_domains = 0;
dev->flush_domains = 0;
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
/* make sure all previous memory operations have passed */
ret = i915_gem_object_set_domain(obj,
/* Compute new gpu domains and update invalidate/flush */
i915_gem_object_set_to_gpu_domain(obj,
obj->pending_read_domains,
obj->pending_write_domain);
if (ret)
goto err;
}
i915_verify_inactive(dev, __FILE__, __LINE__);
/* Flush/invalidate caches and chipset buffer */
flush_domains = i915_gem_dev_set_domain(dev);
if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
__func__,
dev->invalidate_domains,
dev->flush_domains);
#endif
i915_gem_flush(dev,
dev->invalidate_domains,
dev->flush_domains);
if (dev->flush_domains)
(void)i915_add_request(dev, dev->flush_domains);
}
i915_verify_inactive(dev, __FILE__, __LINE__);
@ -1898,8 +2100,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
~0);
#endif
(void)i915_add_request(dev, flush_domains);
/* Exec the batchbuffer */
ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
if (ret) {
@ -1927,10 +2127,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
i915_file_priv->mm.last_gem_seqno = seqno;
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
struct drm_i915_gem_object *obj_priv = obj->driver_private;
i915_gem_object_move_to_active(obj);
obj_priv->last_rendering_seqno = seqno;
i915_gem_object_move_to_active(obj, seqno);
#if WATCH_LRU
DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
@ -2061,11 +2259,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
/* XXX - flush the CPU caches for pinned objects
* as the X server doesn't manage domains yet
*/
if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj);
drm_agp_chipset_flush(dev);
obj->write_domain = 0;
}
i915_gem_object_flush_cpu_write_domain(obj);
args->offset = obj_priv->gtt_offset;
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@ -2167,29 +2361,6 @@ void i915_gem_free_object(struct drm_gem_object *obj)
drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
}
static int
i915_gem_set_domain(struct drm_gem_object *obj,
struct drm_file *file_priv,
uint32_t read_domains,
uint32_t write_domain)
{
struct drm_device *dev = obj->dev;
int ret;
uint32_t flush_domains;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
if (ret)
return ret;
flush_domains = i915_gem_dev_set_domain(obj->dev);
if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
(void) i915_add_request(dev, flush_domains);
return 0;
}
/** Unbinds all objects that are on the given buffer list. */
static int
i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)


@ -166,10 +166,9 @@ static int i915_gem_request_info(char *buf, char **start, off_t offset,
list_for_each_entry(gem_request, &dev_priv->mm.request_list,
list)
{
DRM_PROC_PRINT(" %d @ %d %08x\n",
DRM_PROC_PRINT(" %d @ %d\n",
gem_request->seqno,
(int) (jiffies - gem_request->emitted_jiffies),
gem_request->flush_domains);
(int) (jiffies - gem_request->emitted_jiffies));
}
if (len > request + offset)
return request;


@ -119,9 +119,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
dcc & DCC_CHANNEL_XOR_DISABLE) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else if (IS_I965GM(dev) || IS_GM45(dev)) {
/* GM965 only does bit 11-based channel
* randomization
} else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
(dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
/* GM965/GM45 does either bit 11 or bit 17
* swizzling.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
swizzle_y = I915_BIT_6_SWIZZLE_9_11;
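A swizzle mode such as I915_BIT_6_SWIZZLE_9_10_11 names the high address bits the memory controller XORs into bit 6 when interleaving channels, so software tiling and untiling must apply the same transform. A standalone sketch of that transform (the helper is illustrative, not the driver's):

#include <stdio.h>
#include <stdint.h>

/* Fold bits 9, 10 and 11 into bit 6, as the interleave hardware does. */
static uint32_t swizzle_bit6_9_10_11(uint32_t addr)
{
        uint32_t bit = ((addr >> 9) ^ (addr >> 10) ^ (addr >> 11)) & 1;

        return addr ^ (bit << 6);
}

int main(void)
{
        /* Bits 9 and 11 set: the XOR is even, bit 6 is left alone. */
        printf("0x%04x -> 0x%04x\n", 0x0a00u,
               (unsigned)swizzle_bit6_9_10_11(0x0a00u));
        /* Only bit 9 set: the XOR is odd, bit 6 flips (0x0200 -> 0x0240). */
        printf("0x%04x -> 0x%04x\n", 0x0200u,
               (unsigned)swizzle_bit6_9_10_11(0x0200u));
        return 0;
}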


@ -522,6 +522,7 @@
#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
#define DCC_ADDRESSING_MODE_MASK (3 << 0)
#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
/** 965 MCH register controlling DRAM channel configuration */
#define C0DRB3 0x10206


@ -299,7 +299,6 @@ typedef struct drm_radeon_private {
atomic_t swi_emitted;
int vblank_crtc;
uint32_t irq_enable_reg;
int irq_enabled;
uint32_t r500_disp_irq_reg;
struct radeon_surface surfaces[RADEON_MAX_SURFACES];


@ -44,6 +44,7 @@ void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
else
dev_priv->irq_enable_reg &= ~mask;
if (!dev->irq_enabled)
RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
}
@ -56,6 +57,7 @@ static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
else
dev_priv->r500_disp_irq_reg &= ~mask;
if (!dev->irq_enabled)
RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
}
@ -355,8 +357,6 @@ void radeon_driver_irq_uninstall(struct drm_device * dev)
if (!dev_priv)
return;
dev_priv->irq_enabled = 0;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
/* Disable *all* interrupts */


@ -669,10 +669,12 @@ config BLK_DEV_CELLEB
endif
# TODO: BLK_DEV_IDEDMA_PCI -> BLK_DEV_IDEDMA_SFF
config BLK_DEV_IDE_PMAC
tristate "PowerMac on-board IDE support"
depends on PPC_PMAC && IDE=y
select IDE_TIMINGS
select BLK_DEV_IDEDMA_PCI
help
This driver provides support for the on-board IDE controller on
most of the recent Apple Power Macintoshes and PowerBooks.
@ -689,16 +691,6 @@ config BLK_DEV_IDE_PMAC_ATA100FIRST
CD-ROM on hda. This option changes this to more natural hda for
hard disk and hdc for CD-ROM.
config BLK_DEV_IDEDMA_PMAC
bool "PowerMac IDE DMA support"
depends on BLK_DEV_IDE_PMAC
select BLK_DEV_IDEDMA_PCI
help
This option allows the driver for the on-board IDE controller on
Power Macintoshes and PowerBooks to use DMA (direct memory access)
to transfer data to and from memory. Saying Y is safe and improves
performance.
config BLK_DEV_IDE_AU1XXX
bool "IDE for AMD Alchemy Au1200"
depends on SOC_AU1200
@ -912,7 +904,7 @@ config BLK_DEV_UMC8672
endif
config BLK_DEV_IDEDMA
def_bool BLK_DEV_IDEDMA_SFF || BLK_DEV_IDEDMA_PMAC || \
def_bool BLK_DEV_IDEDMA_SFF || \
BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
endif # IDE


@ -208,7 +208,9 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
*/
if (drive->hwif->dma_ops == NULL)
break;
if (drive->dev_flags & IDE_DFLAG_USING_DMA)
/*
* TODO: respect IDE_DFLAG_USING_DMA
*/
ide_set_dma(drive);
break;
}


@ -66,7 +66,6 @@ typedef struct pmac_ide_hwif {
struct macio_dev *mdev;
u32 timings[4];
volatile u32 __iomem * *kauai_fcr;
#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
/* Those fields are duplicating what is in hwif. We currently
* can't use the hwif ones because of some assumptions that are
* being done by the generic code about the kind of dma controller
@ -74,8 +73,6 @@ typedef struct pmac_ide_hwif {
*/
volatile struct dbdma_regs __iomem * dma_regs;
struct dbdma_cmd* dma_table_cpu;
#endif
} pmac_ide_hwif_t;
enum {
@ -222,8 +219,6 @@ static const char* model_name[] = {
#define KAUAI_FCR_UATA_RESET_N 0x00000002
#define KAUAI_FCR_UATA_ENABLE 0x00000001
#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
/* Rounded Multiword DMA timings
*
* I gave up finding a generic formula for all controller
@ -413,8 +408,6 @@ static int pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq);
static void pmac_ide_selectproc(ide_drive_t *drive);
static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
#define PMAC_IDE_REG(x) \
((void __iomem *)((drive)->hwif->io_ports.data_addr + (x)))
@ -584,8 +577,6 @@ pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
pmac_ide_do_update_timings(drive);
}
#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
/*
* Calculate KeyLargo ATA/66 UDMA timings
*/
@ -786,7 +777,6 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
drive->name, speed & 0xf, *timings);
#endif
}
#endif /* #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC */
static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
@ -804,7 +794,6 @@ static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
tl[0] = *timings;
tl[1] = *timings2;
#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
if (speed >= XFER_UDMA_0) {
if (pmif->kind == controller_kl_ata4)
ret = set_timings_udma_ata4(&tl[0], speed);
@ -817,7 +806,7 @@ static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
ret = -1;
} else
set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed);
#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
if (ret)
return;
@ -1008,9 +997,7 @@ static const struct ide_port_info pmac_port_info = {
.chipset = ide_pmac,
.tp_ops = &pmac_tp_ops,
.port_ops = &pmac_ide_port_ops,
#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
.dma_ops = &pmac_dma_ops,
#endif
.host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
IDE_HFLAG_POST_SET_MODE |
IDE_HFLAG_MMIO |
@ -1182,7 +1169,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
pmif->regbase = regbase;
pmif->irq = irq;
pmif->kauai_fcr = NULL;
#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
if (macio_resource_count(mdev) >= 2) {
if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
printk(KERN_WARNING "ide-pmac: can't request DMA "
@ -1192,7 +1179,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
} else
pmif->dma_regs = NULL;
#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
dev_set_drvdata(&mdev->ofdev.dev, pmif);
memset(&hw, 0, sizeof(hw));
@ -1300,9 +1287,7 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
base = ioremap(rbase, rlen);
pmif->regbase = (unsigned long) base + 0x2000;
#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
pmif->dma_regs = base + 0x1000;
#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
pmif->kauai_fcr = base;
pmif->irq = pdev->irq;
@ -1434,8 +1419,6 @@ out:
return error;
}
#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
/*
* pmac_ide_build_dmatable builds the DBDMA command list
* for a transfer and sets the DBDMA channel to point to it.
@ -1723,13 +1706,6 @@ static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
return 0;
}
#else
static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
const struct ide_port_info *d)
{
return -EOPNOTSUPP;
}
#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
module_init(pmac_ide_probe);


@ -550,7 +550,7 @@ static const struct ide_dma_ops sgiioc4_dma_ops = {
.dma_timeout = ide_dma_timeout,
};
static const struct ide_port_info sgiioc4_port_info __devinitdata = {
static const struct ide_port_info sgiioc4_port_info __devinitconst = {
.name = DRV_NAME,
.chipset = ide_pci,
.init_dma = ide_dma_sgiioc4,
@ -633,7 +633,7 @@ out:
return ret;
}
int
int __devinit
ioc4_ide_attach_one(struct ioc4_driver_data *idd)
{
/* PCI-RT does not bring out IDE connection.
@ -645,7 +645,7 @@ ioc4_ide_attach_one(struct ioc4_driver_data *idd)
return pci_init_sgiioc4(idd->idd_pdev);
}
static struct ioc4_submodule ioc4_ide_submodule = {
static struct ioc4_submodule __devinitdata ioc4_ide_submodule = {
.is_name = "IOC4_ide",
.is_owner = THIS_MODULE,
.is_probe = ioc4_ide_attach_one,


@ -1685,6 +1685,7 @@ static int nodemgr_host_thread(void *data)
g = get_hpsb_generation(host);
for (i = 0; i < 4 ; i++) {
msleep_interruptible(63);
try_to_freeze();
if (kthread_should_stop())
goto exit;
@ -1725,6 +1726,7 @@ static int nodemgr_host_thread(void *data)
/* Sleep 3 seconds */
for (i = 3000/200; i; i--) {
msleep_interruptible(200);
try_to_freeze();
if (kthread_should_stop())
goto exit;


@ -233,9 +233,7 @@ static void __exit b1isa_exit(void)
int i;
for (i = 0; i < MAX_CARDS; i++) {
if (!io[i])
break;
if (isa_dev[i].resource[0].start)
b1isa_remove(&isa_dev[i]);
}
unregister_capi_driver(&capi_driver_b1isa);


@ -83,12 +83,12 @@ net_open(struct net_device *dev)
/* Fill in the MAC-level header (if not already set) */
if (!card->mac_addr[0]) {
for (i = 0; i < ETH_ALEN - sizeof(unsigned long); i++)
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = 0xfc;
if ((in_dev = dev->ip_ptr) != NULL) {
struct in_ifaddr *ifa = in_dev->ifa_list;
if (ifa != NULL)
memcpy(dev->dev_addr + (ETH_ALEN - sizeof(unsigned long)), &ifa->ifa_local, sizeof(unsigned long));
memcpy(dev->dev_addr + (ETH_ALEN - sizeof(ifa->ifa_local)), &ifa->ifa_local, sizeof(ifa->ifa_local));
}
} else
memcpy(dev->dev_addr, card->mac_addr, ETH_ALEN);


@ -354,7 +354,7 @@ static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
* @req: the request to prepare
*
* Allocate the necessary i2o_block_request struct and connect it to
* the request. This is needed that we not loose the SG list later on.
* the request. This is needed that we not lose the SG list later on.
*
* Returns BLKPREP_OK on success or BLKPREP_DEFER on failure.
*/


@ -49,7 +49,6 @@ static int i2o_hrt_get(struct i2o_controller *c);
/**
* i2o_msg_get_wait - obtain an I2O message from the IOP
* @c: I2O controller
* @msg: pointer to a I2O message pointer
* @wait: how long to wait until timeout
*
* This function waits up to wait seconds for a message slot to be


@ -254,7 +254,11 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
return 1;
*paddr = pte_pfn(pte) << PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
*pageshift = PAGE_SHIFT;
#endif
return 0;
err:


@ -39,7 +39,7 @@
#define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */
#define OPCODE_BE_4K 0x20 /* Erase 4KiB block */
#define OPCODE_BE_32K 0x52 /* Erase 32KiB block */
#define OPCODE_BE 0xc7 /* Erase whole flash block */
#define OPCODE_CHIP_ERASE 0xc7 /* Erase whole flash chip */
#define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */
#define OPCODE_RDID 0x9f /* Read JEDEC ID */
@ -167,7 +167,7 @@ static int wait_till_ready(struct m25p *flash)
*
* Returns 0 if successful, non-zero otherwise.
*/
static int erase_block(struct m25p *flash)
static int erase_chip(struct m25p *flash)
{
DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB\n",
flash->spi->dev.bus_id, __func__,
@ -181,7 +181,7 @@ static int erase_block(struct m25p *flash)
write_enable(flash);
/* Set up command buffer. */
flash->command[0] = OPCODE_BE;
flash->command[0] = OPCODE_CHIP_ERASE;
spi_write(flash->spi, flash->command, 1);
@ -250,15 +250,18 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
mutex_lock(&flash->lock);
/* REVISIT in some cases we could speed up erasing large regions
* by using OPCODE_SE instead of OPCODE_BE_4K
*/
/* now erase those sectors */
if (len == flash->mtd.size && erase_block(flash)) {
/* whole-chip erase? */
if (len == flash->mtd.size && erase_chip(flash)) {
instr->state = MTD_ERASE_FAILED;
mutex_unlock(&flash->lock);
return -EIO;
/* REVISIT in some cases we could speed up erasing large regions
* by using OPCODE_SE instead of OPCODE_BE_4K. We may have set up
* to use "small sector erase", but that's not always optimal.
*/
/* "sector"-at-a-time erase */
} else {
while (len) {
if (erase_sector(flash, addr)) {
@ -574,11 +577,12 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
for (tmp = 0, info = m25p_data;
tmp < ARRAY_SIZE(m25p_data);
tmp++, info++) {
if (info->jedec_id == jedec)
if (ext_jedec != 0 && info->ext_id != ext_jedec)
if (info->jedec_id == jedec) {
if (info->ext_id != 0 && info->ext_id != ext_jedec)
continue;
return info;
}
}
dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
return NULL;
}


@ -19,7 +19,7 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/concat.h>
#include <asm/io.h>
#include <linux/io.h>
#define MAX_RESOURCES 4
@ -27,7 +27,6 @@ struct physmap_flash_info {
struct mtd_info *mtd[MAX_RESOURCES];
struct mtd_info *cmtd;
struct map_info map[MAX_RESOURCES];
struct resource *res;
#ifdef CONFIG_MTD_PARTITIONS
int nr_parts;
struct mtd_partition *parts;
@ -70,16 +69,7 @@ static int physmap_flash_remove(struct platform_device *dev)
#endif
map_destroy(info->mtd[i]);
}
if (info->map[i].virt != NULL)
iounmap(info->map[i].virt);
}
if (info->res != NULL) {
release_resource(info->res);
kfree(info->res);
}
return 0;
}
@ -101,7 +91,8 @@ static int physmap_flash_probe(struct platform_device *dev)
if (physmap_data == NULL)
return -ENODEV;
info = kzalloc(sizeof(struct physmap_flash_info), GFP_KERNEL);
info = devm_kzalloc(&dev->dev, sizeof(struct physmap_flash_info),
GFP_KERNEL);
if (info == NULL) {
err = -ENOMEM;
goto err_out;
@ -114,10 +105,10 @@ static int physmap_flash_probe(struct platform_device *dev)
(unsigned long long)(dev->resource[i].end - dev->resource[i].start + 1),
(unsigned long long)dev->resource[i].start);
info->res = request_mem_region(dev->resource[i].start,
if (!devm_request_mem_region(&dev->dev,
dev->resource[i].start,
dev->resource[i].end - dev->resource[i].start + 1,
dev->dev.bus_id);
if (info->res == NULL) {
dev->dev.bus_id)) {
dev_err(&dev->dev, "Could not reserve memory region\n");
err = -ENOMEM;
goto err_out;
@ -129,7 +120,8 @@ static int physmap_flash_probe(struct platform_device *dev)
info->map[i].bankwidth = physmap_data->width;
info->map[i].set_vpp = physmap_data->set_vpp;
info->map[i].virt = ioremap(info->map[i].phys, info->map[i].size);
info->map[i].virt = devm_ioremap(&dev->dev, info->map[i].phys,
info->map[i].size);
if (info->map[i].virt == NULL) {
dev_err(&dev->dev, "Failed to ioremap flash region\n");
err = EIO;


@ -163,9 +163,11 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0);
#ifdef CONFIG_MTD_OF_PARTS
if (ret == 0)
ret = of_mtd_parse_partitions(fun->dev, &fun->mtd,
flash_np, &fun->parts);
if (ret == 0) {
ret = of_mtd_parse_partitions(fun->dev, flash_np, &fun->parts);
if (ret < 0)
goto err;
}
#endif
if (ret > 0)
ret = add_mtd_partitions(&fun->mtd, fun->parts, ret);


@ -141,6 +141,7 @@ static int __devinit pasemi_nand_probe(struct of_device *ofdev,
}
lpcctl = pci_resource_start(pdev, 0);
pci_dev_put(pdev);
if (!request_region(lpcctl, 4, driver_name)) {
err = -EBUSY;


@ -269,6 +269,7 @@ static struct pxa3xx_nand_timing stm2GbX16_timing = {
static struct pxa3xx_nand_flash stm2GbX16 = {
.timing = &stm2GbX16_timing,
.cmdset = &largepage_cmdset,
.page_per_block = 64,
.page_size = 2048,
.flash_width = 16,


@ -32,19 +32,18 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <asm/mach/flash.h>
#include <asm/arch/gpmc.h>
#include <asm/arch/onenand.h>
#include <asm/arch/gpio.h>
#include <asm/arch/pm.h>
#include <linux/dma-mapping.h>
#include <asm/dma-mapping.h>
#include <asm/arch/dma.h>
#include <linux/io.h>
#include <asm/arch/board.h>
#include <asm/mach/flash.h>
#include <mach/gpmc.h>
#include <mach/onenand.h>
#include <mach/gpio.h>
#include <mach/pm.h>
#include <mach/dma.h>
#include <mach/board.h>
#define DRIVER_NAME "omap2-onenand"


@ -3144,6 +3144,28 @@ bnx2_has_work(struct bnx2_napi *bnapi)
return 0;
}
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
u32 msi_ctrl;
if (bnx2_has_work(bnapi)) {
msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
return;
if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
~BNX2_PCICFG_MSI_CONTROL_ENABLE);
REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
bnx2_msi(bp->irq_tbl[0].vector, bnapi);
}
}
bp->idle_chk_status_idx = bnapi->last_status_idx;
}
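bnx2_chk_missed_msi() reads as: if NAPI work is pending and the status index has not advanced since the previous timer tick, assume the MSI was lost, pulse the MSI enable bit, and invoke the handler by hand. A standalone model of just the detection heuristic (struct and names are illustrative):

#include <assert.h>
#include <stdint.h>

struct demo_napi {
        uint16_t last_status_idx;       /* index seen by the IRQ handler */
        uint16_t idle_chk_idx;          /* index seen at the previous tick */
};

static int msi_looks_missed(struct demo_napi *n, int has_work)
{
        int missed = has_work && n->last_status_idx == n->idle_chk_idx;

        n->idle_chk_idx = n->last_status_idx;   /* remember for next tick */
        return missed;
}

int main(void)
{
        struct demo_napi n = { .last_status_idx = 7, .idle_chk_idx = 0xffff };

        assert(!msi_looks_missed(&n, 1));       /* index moved since init */
        assert(msi_looks_missed(&n, 1));        /* stuck index + pending work */
        assert(!msi_looks_missed(&n, 0));       /* idle: nothing was missed */
        return 0;
}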
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
struct status_block *sblk = bnapi->status_blk.msi;
@ -3218,14 +3240,15 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
if (unlikely(work_done >= budget))
break;
/* bnapi->last_status_idx is used below to tell the hw how
* much work has been processed, so we must read it before
* checking for more work.
*/
bnapi->last_status_idx = sblk->status_idx;
if (unlikely(work_done >= budget))
break;
rmb();
if (likely(!bnx2_has_work(bnapi))) {
netif_rx_complete(bp->dev, napi);
@ -4570,6 +4593,8 @@ bnx2_init_chip(struct bnx2 *bp)
for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
bp->bnx2_napi[i].last_status_idx = 0;
bp->idle_chk_status_idx = 0xffff;
bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
/* Set up how to generate a link change interrupt. */
@ -5718,6 +5743,10 @@ bnx2_timer(unsigned long data)
if (atomic_read(&bp->intr_sem) != 0)
goto bnx2_restart_timer;
if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
BNX2_FLAG_USING_MSI)
bnx2_chk_missed_msi(bp);
bnx2_send_heart_beat(bp);
bp->stats_blk->stat_FwRxDrop =


@ -378,6 +378,9 @@ struct l2_fhdr {
* pci_config_l definition
* offset: 0000
*/
#define BNX2_PCICFG_MSI_CONTROL 0x00000058
#define BNX2_PCICFG_MSI_CONTROL_ENABLE (1L<<16)
#define BNX2_PCICFG_MISC_CONFIG 0x00000068
#define BNX2_PCICFG_MISC_CONFIG_TARGET_BYTE_SWAP (1L<<2)
#define BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP (1L<<3)
@ -6863,6 +6866,9 @@ struct bnx2 {
u8 num_tx_rings;
u8 num_rx_rings;
u32 idle_chk_status_idx;
};
#define REG_RD(bp, offset) \


@ -568,6 +568,17 @@ static u16 erxrdpt_workaround(u16 next_packet_ptr, u16 start, u16 end)
return erxrdpt;
}
/*
* Calculate wrap around when reading beyond the end of the RX buffer
*/
static u16 rx_packet_start(u16 ptr)
{
if (ptr + RSV_SIZE > RXEND_INIT)
return (ptr + RSV_SIZE) - (RXEND_INIT - RXSTART_INIT + 1);
else
return ptr + RSV_SIZE;
}
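rx_packet_start() skips the RSV_SIZE status bytes that precede each received frame and wraps back to the start of the ring when that step would run past RXEND_INIT. A standalone sketch of the arithmetic with made-up ring bounds:

#include <assert.h>
#include <stdint.h>

/* Illustrative ring bounds; the driver's RXSTART_INIT/RXEND_INIT differ. */
#define RXSTART 0x0000
#define RXEND   0x0fff          /* inclusive, so the ring holds 0x1000 bytes */
#define RSVLEN  6               /* per-packet receive status vector */

static uint16_t demo_rx_packet_start(uint16_t ptr)
{
        if (ptr + RSVLEN > RXEND)
                return (ptr + RSVLEN) - (RXEND - RXSTART + 1);
        return ptr + RSVLEN;
}

int main(void)
{
        assert(demo_rx_packet_start(0x0100) == 0x0106); /* no wrap */
        assert(demo_rx_packet_start(0x0ffc) == 0x0002); /* wraps past the end */
        return 0;
}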
static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
{
u16 erxrdpt;
@ -938,7 +949,8 @@ static void enc28j60_hw_rx(struct net_device *ndev)
skb->dev = ndev;
skb_reserve(skb, NET_IP_ALIGN);
/* copy the packet from the receive buffer */
enc28j60_mem_read(priv, priv->next_pk_ptr + sizeof(rsv),
enc28j60_mem_read(priv,
rx_packet_start(priv->next_pk_ptr),
len, skb_put(skb, len));
if (netif_msg_pktdata(priv))
dump_packet(__func__, skb->len, skb->data);


@ -401,6 +401,8 @@ static int netx_eth_drv_probe(struct platform_device *pdev)
priv->xmac_base = priv->xc->xmac_base;
priv->sram_base = priv->xc->sram_base;
spin_lock_init(&priv->lock);
ret = pfifo_request(PFIFO_MASK(priv->id));
if (ret) {
printk("unable to request PFIFO\n");


@ -3897,6 +3897,7 @@ static int ipw_disassociate(void *data)
if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
return 0;
ipw_send_disassociate(data, 0);
netif_carrier_off(priv->net_dev);
return 1;
}
@ -10190,6 +10191,9 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
u16 remaining_bytes;
int fc;
if (!(priv->status & STATUS_ASSOCIATED))
goto drop;
hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
switch (priv->ieee->iw_mode) {
case IW_MODE_ADHOC:


@ -290,6 +290,9 @@ void iwl_clear_stations_table(struct iwl_priv *priv)
priv->num_stations = 0;
memset(priv->stations, 0, sizeof(priv->stations));
/* clean ucode key table bit map */
priv->ucode_key_table = 0;
spin_unlock_irqrestore(&priv->sta_lock, flags);
}
EXPORT_SYMBOL(iwl_clear_stations_table);


@ -475,7 +475,7 @@ static int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
if (!test_and_set_bit(i, &priv->ucode_key_table))
return i;
return -1;
return WEP_INVALID_OFFSET;
}
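The scan above claims the first clear bit with test_and_set_bit() and now reports a full table as WEP_INVALID_OFFSET, an in-range sentinel, instead of -1, which lets callers that keep the offset in an unsigned field test for "no slot" directly. A single-threaded standalone model of the find-and-claim scan (constants are illustrative):

#include <assert.h>
#include <stdint.h>

#define NUM_SLOTS       8
#define INVALID_SLOT    255u    /* sentinel in the spirit of WEP_INVALID_OFFSET */

static unsigned int claim_slot(uint8_t *bitmap)
{
        unsigned int i;

        for (i = 0; i < NUM_SLOTS; i++) {
                uint8_t mask = 1u << i;

                if (!(*bitmap & mask)) {        /* test ... */
                        *bitmap |= mask;        /* ... and set */
                        return i;
                }
        }
        return INVALID_SLOT;                    /* table full */
}

int main(void)
{
        uint8_t table = 0;

        assert(claim_slot(&table) == 0);
        assert(claim_slot(&table) == 1);
        table = 0xff;                           /* every slot taken */
        assert(claim_slot(&table) == INVALID_SLOT);
        return 0;
}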
int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
@ -620,6 +620,9 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
/* else, we are overriding an existing key => no need to allocate room
* in uCode. */
WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
"no space for new key");
priv->stations[sta_id].sta.key.key_flags = key_flags;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
@ -637,6 +640,7 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
{
unsigned long flags;
__le16 key_flags = 0;
int ret;
key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
@ -664,14 +668,18 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
/* else, we are overriding an existing key => no need to allocate room
* in uCode. */
WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
"no space for new key");
priv->stations[sta_id].sta.key.key_flags = key_flags;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
spin_unlock_irqrestore(&priv->sta_lock, flags);
IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
return iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
return ret;
}
static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
@ -696,6 +704,9 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
/* else, we are overriding an existing key => no need to allocate room
* in uCode. */
WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
"no space for new key");
/* This copy is actually not needed: we get the key with each TX */
memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
@ -734,6 +745,13 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
return 0;
}
if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
IWL_WARNING("Removing wrong key %d 0x%x\n",
keyconf->keyidx, key_flags);
spin_unlock_irqrestore(&priv->sta_lock, flags);
return 0;
}
if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
&priv->ucode_key_table))
IWL_ERROR("index %d not used in uCode key table.\n",


@ -615,7 +615,7 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
struct ieee80211_hdr *tx_hdr;
tx_hdr = (struct ieee80211_hdr *)skb->data;
if (likely(!compare_ether_addr(tx_hdr->addr2, rx_hdr->addr1)))
if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN)))
{
__skb_unlink(skb, q);
tx_status(hw, skb, IEEE80211_TX_STAT_ACK, stats->signal, 1);


@ -16,6 +16,7 @@
#include <linux/pm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/pci-aspm.h>
#include "../pci.h"
@ -161,11 +162,12 @@ static void pcie_check_clock_pm(struct pci_dev *pdev)
*/
static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
{
int pos, child_pos;
int pos, child_pos, i = 0;
u16 reg16 = 0;
struct pci_dev *child_dev;
int same_clock = 1;
unsigned long start_jiffies;
u16 child_regs[8], parent_reg;
/*
* all functions of a slot should have the same Slot Clock
* Configuration, so just check one function
@ -191,16 +193,19 @@ static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
&reg16);
child_regs[i] = reg16;
if (same_clock)
reg16 |= PCI_EXP_LNKCTL_CCC;
else
reg16 &= ~PCI_EXP_LNKCTL_CCC;
pci_write_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
reg16);
i++;
}
/* Configure upstream component */
pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
parent_reg = reg16;
if (same_clock)
reg16 |= PCI_EXP_LNKCTL_CCC;
else
@ -212,12 +217,30 @@ static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
/* Wait for link training end */
while (1) {
/* break out after waiting for 1 second */
start_jiffies = jiffies;
while ((jiffies - start_jiffies) < HZ) {
pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_LT))
break;
cpu_relax();
}
/* training failed -> recover */
if ((jiffies - start_jiffies) >= HZ) {
dev_printk (KERN_ERR, &pdev->dev, "ASPM: Could not configure"
" common clock\n");
i = 0;
list_for_each_entry(child_dev, &pdev->subordinate->devices,
bus_list) {
child_pos = pci_find_capability(child_dev,
PCI_CAP_ID_EXP);
pci_write_config_word(child_dev,
child_pos + PCI_EXP_LNKCTL,
child_regs[i]);
i++;
}
pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, parent_reg);
}
}
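The retraining loop above bounds the wait with (jiffies - start_jiffies) < HZ rather than an absolute deadline. Because the subtraction is unsigned, the elapsed-tick count stays correct even if the jiffies counter wraps between samples; the kernel's time_before()/time_after() helpers encode the same idea. A standalone model of the wrap-safe check:

#include <assert.h>
#include <stdint.h>

#define DEMO_HZ 1000u   /* illustrative tick rate */

/* Elapsed-ticks test in the style of (jiffies - start_jiffies) < HZ. */
static int within_one_second(uint32_t now, uint32_t start)
{
        return (uint32_t)(now - start) < DEMO_HZ;
}

int main(void)
{
        /* Plain case: 500 ticks elapsed. */
        assert(within_one_second(1500u, 1000u));
        /* Counter wrapped: started near UINT32_MAX, 600 ticks elapsed. */
        assert(within_one_second(99u, 0xffffffffu - 500u));
        /* Timed out: 4000 ticks elapsed. */
        assert(!within_one_second(5000u, 1000u));
        return 0;
}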
/*


@ -253,6 +253,7 @@ placeholder:
__func__, pci_domain_nr(parent), parent->number, slot_nr);
out:
kfree(slot_name);
up_write(&pci_bus_sem);
return slot;
err:

Some files were not shown because too many files changed in this diff