Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (72 commits)
  powerpc/pseries: Fix build of topology stuff without CONFIG_NUMA
  powerpc/pseries: Fix VPHN build errors on non-SMP systems
  powerpc/83xx: add mpc8308_p1m DMA controller device-tree node
  powerpc/83xx: add DMA controller to mpc8308 device-tree node
  powerpc/512x: try to free dma descriptors in case of allocation failure
  powerpc/512x: add MPC8308 dma support
  powerpc/512x: fix the hanged dma transfer issue
  powerpc/512x: scatter/gather dma fix
  powerpc/powermac: Make auto-loading of therm_pm72 possible
  of/address: Use propper endianess in get_flags
  powerpc/pci: Use printf extension %pR for struct resource
  powerpc: Remove unnecessary casts of void ptr
  powerpc: Disable VPHN polling during a suspend operation
  powerpc/pseries: Poll VPA for topology changes and update NUMA maps
  powerpc: iommu: Add device name to iommu error printks
  powerpc: Record vma->phys_addr in ioremap()
  powerpc: Update compat_arch_ptrace
  powerpc: Fix PPC_PTRACE_SETHWDEBUG on PPC_BOOK3S
  powerpc/time: printk time stamp init not correct
  powerpc: Minor cleanups for machdep.h
  ...
Commit 5a62f99544
@@ -403,6 +403,10 @@ and is between 256 and 4096 characters. It is defined in the file
bttv.pll= See Documentation/video4linux/bttv/Insmod-options
bttv.tuner= and Documentation/video4linux/bttv/CARDLIST

bulk_remove=off [PPC] This parameter disables the use of the pSeries
    firmware feature for flushing multiple hpte entries
    at a time.

c101= [NET] Moxa C101 synchronous serial card

cachesize= [BUGS=X86-32] Override level 2 CPU cache size detection.

@@ -1490,6 +1494,10 @@ and is between 256 and 4096 characters. It is defined in the file
mtdparts= [MTD]
    See drivers/mtd/cmdlinepart.c.

multitce=off [PPC] This parameter disables the use of the pSeries
    firmware feature for updating multiple TCE entries
    at a time.

onenand.bdry= [HW,MTD] Flex-OneNAND Boundary Configuration

    Format: [die0_boundary][,die0_lock][,die1_boundary][,die1_lock]
@@ -131,7 +131,7 @@ order to avoid the degeneration that had become the ppc32 kernel entry
point and the way a new platform should be added to the kernel. The
legacy iSeries platform breaks those rules as it predates this scheme,
but no new board support will be accepted in the main tree that
doesn't follows them properly. In addition, since the advent of the
doesn't follow them properly. In addition, since the advent of the
arch/powerpc merged architecture for ppc32 and ppc64, new 32-bit
platforms and 32-bit platforms which move into arch/powerpc will be
required to use these rules as well.

@@ -1025,7 +1025,7 @@ dtc source code can be found at

WARNING: This version is still in early development stage; the
resulting device-tree "blobs" have not yet been validated with the
kernel. The current generated bloc lacks a useful reserve map (it will
kernel. The current generated block lacks a useful reserve map (it will
be fixed to generate an empty one, it's up to the bootloader to fill
it up) among others. The error handling needs work, bugs are lurking,
etc...
@@ -0,0 +1,52 @@
PPC4xx Clock Power Management (CPM) node

Required properties:
- compatible : compatible list, currently only "ibm,cpm"
- dcr-access-method : "native"
- dcr-reg : < DCR register range >

Optional properties:
- er-offset : All 4xx SoCs with a CPM controller have
    one of two different order for the CPM
    registers. Some have the CPM registers
    in the following order (ER,FR,SR). The
    others have them in the following order
    (SR,ER,FR). For the second case set
    er-offset = <1>.
- unused-units : specifier consist of one cell. For each
    bit in the cell, the corresponding bit
    in CPM will be set to turn off unused
    devices.
- idle-doze : specifier consist of one cell. For each
    bit in the cell, the corresponding bit
    in CPM will be set to turn off unused
    devices. This is usually just CPM[CPU].
- standby : specifier consist of one cell. For each
    bit in the cell, the corresponding bit
    in CPM will be set on standby and
    restored on resume.
- suspend : specifier consist of one cell. For each
    bit in the cell, the corresponding bit
    in CPM will be set on suspend (mem) and
    restored on resume. Note, for standby
    and suspend the corresponding bits can
    be different or the same. Usually for
    standby only class 2 and 3 units are set.
    However, the interface does not care.
    If they are the same, the additional
    power saving will be seeing if support
    is available to put the DDR in self
    refresh mode and any additional power
    saving techniques for the specific SoC.

Example:
CPM0: cpm {
    compatible = "ibm,cpm";
    dcr-access-method = "native";
    dcr-reg = <0x160 0x003>;
    er-offset = <0>;
    unused-units = <0x00000100>;
    idle-doze = <0x02000000>;
    standby = <0xfeff0000>;
    suspend = <0xfeff791d>;
};
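[Editor's note] For orientation, a consumer of this binding would read the optional cells above with the standard OF property helpers before programming the CPM. The sketch below is illustrative only and is not part of this series: the structure, function names, variable names and defaults are assumptions; only the "ibm,cpm" compatible string and the property names come from the binding text.

/*
 * Illustrative only: fetch the optional CPM cells described in the
 * binding above. Everything except the node/property names is assumed.
 */
#include <linux/of.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>

struct cpm_dt_config {
	u32 unused_units;
	u32 idle_doze;
	u32 standby;
	u32 suspend;
	bool er_offset;		/* true when registers are laid out SR,ER,FR */
};

static u32 cpm_read_prop(struct device_node *np, const char *name, u32 def)
{
	const __be32 *prop = of_get_property(np, name, NULL);

	return prop ? be32_to_cpup(prop) : def;	/* DT cells are big-endian */
}

static int cpm_parse_node(struct cpm_dt_config *cfg)
{
	struct device_node *np = of_find_compatible_node(NULL, NULL, "ibm,cpm");

	if (!np)
		return -ENODEV;

	cfg->er_offset    = cpm_read_prop(np, "er-offset", 0) != 0;
	cfg->unused_units = cpm_read_prop(np, "unused-units", 0);
	cfg->idle_doze    = cpm_read_prop(np, "idle-doze", 0);
	cfg->standby      = cpm_read_prop(np, "standby", 0);
	cfg->suspend      = cpm_read_prop(np, "suspend", 0);

	of_node_put(np);
	return 0;
}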
@@ -20,6 +20,9 @@ config WORD_SIZE
config ARCH_PHYS_ADDR_T_64BIT
    def_bool PPC64 || PHYS_64BIT

config ARCH_DMA_ADDR_T_64BIT
    def_bool ARCH_PHYS_ADDR_T_64BIT

config MMU
    bool
    default y

@@ -209,7 +212,7 @@ config ARCH_HIBERNATION_POSSIBLE
config ARCH_SUSPEND_POSSIBLE
    def_bool y
    depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
        PPC_85xx || PPC_86xx || PPC_PSERIES
        PPC_85xx || PPC_86xx || PPC_PSERIES || 44x || 40x

config PPC_DCR_NATIVE
    bool

@@ -595,13 +598,11 @@ config EXTRA_TARGETS

      If unsure, leave blank

if !44x || BROKEN
config ARCH_WANTS_FREEZER_CONTROL
    def_bool y
    depends on ADB_PMU

source kernel/power/Kconfig
endif

config SECCOMP
    bool "Enable seccomp to safely compute untrusted bytecode"

@@ -682,6 +683,15 @@ config FSL_PMC
      Freescale MPC85xx/MPC86xx power management controller support
      (suspend/resume). For MPC83xx see platforms/83xx/suspend.c

config PPC4xx_CPM
    bool
    default y
    depends on SUSPEND && (44x || 40x)
    help
      PPC4xx Clock Power Management (CPM) support (suspend/resume).
      It also enables support for two different idle states (idle-wait
      and idle-doze).

config 4xx_SOC
    bool

@@ -105,6 +105,15 @@
    dcr-reg = <0x00c 0x002>;
};

CPM0: cpm {
    compatible = "ibm,cpm";
    dcr-access-method = "native";
    dcr-reg = <0x160 0x003>;
    unused-units = <0x00000100>;
    idle-doze = <0x02000000>;
    standby = <0xfeff791d>;
};

L2C0: l2c {
    compatible = "ibm,l2-cache-460ex", "ibm,l2-cache";
    dcr-reg = <0x020 0x008 /* Internal SRAM DCR's */

@@ -270,28 +279,6 @@
    interrupts = <0x1 0x4>;
};

UART2: serial@ef600500 {
    device_type = "serial";
    compatible = "ns16550";
    reg = <0xef600500 0x00000008>;
    virtual-reg = <0xef600500>;
    clock-frequency = <0>; /* Filled in by U-Boot */
    current-speed = <0>; /* Filled in by U-Boot */
    interrupt-parent = <&UIC1>;
    interrupts = <28 0x4>;
};

UART3: serial@ef600600 {
    device_type = "serial";
    compatible = "ns16550";
    reg = <0xef600600 0x00000008>;
    virtual-reg = <0xef600600>;
    clock-frequency = <0>; /* Filled in by U-Boot */
    current-speed = <0>; /* Filled in by U-Boot */
    interrupt-parent = <&UIC1>;
    interrupts = <29 0x4>;
};

IIC0: i2c@ef600700 {
    compatible = "ibm,iic-460ex", "ibm,iic";
    reg = <0xef600700 0x00000014>;
@@ -82,6 +82,15 @@
    interrupt-parent = <&UIC0>;
};

CPM0: cpm {
    compatible = "ibm,cpm";
    dcr-access-method = "native";
    dcr-reg = <0x0b0 0x003>;
    unused-units = <0x00000000>;
    idle-doze = <0x02000000>;
    standby = <0xe3e74800>;
};

plb {
    compatible = "ibm,plb-405ex", "ibm,plb4";
    #address-cells = <1>;
@@ -297,6 +297,14 @@
    interrupt-parent = < &ipic >;
};

dma@2c000 {
    compatible = "fsl,mpc8308-dma", "fsl,mpc5121-dma";
    reg = <0x2c000 0x1800>;
    interrupts = <3 0x8
                  94 0x8>;
    interrupt-parent = < &ipic >;
};

};

pci0: pcie@e0009000 {

@@ -265,6 +265,14 @@
    interrupt-parent = < &ipic >;
};

dma@2c000 {
    compatible = "fsl,mpc8308-dma", "fsl,mpc5121-dma";
    reg = <0x2c000 0x1800>;
    interrupts = <3 0x8
                  94 0x8>;
    interrupt-parent = < &ipic >;
};

};

pci0: pcie@e0009000 {
@@ -12,6 +12,8 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_KILAUEA=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
# CONFIG_WALNUT is not set
CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y

@@ -42,6 +44,9 @@ CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_NDFC=y
CONFIG_PROC_DEVICETREE=y
CONFIG_PM=y
CONFIG_SUSPEND=y
CONFIG_PPC4xx_CPM=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
# CONFIG_MISC_DEVICES is not set

@@ -42,6 +42,9 @@ CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_NDFC=y
CONFIG_PROC_DEVICETREE=y
CONFIG_PM=y
CONFIG_SUSPEND=y
CONFIG_PPC4xx_CPM=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
# CONFIG_MISC_DEVICES is not set
@@ -267,7 +267,16 @@ static __inline__ int fls64(__u64 x)
#include <asm-generic/bitops/fls64.h>
#endif /* __powerpc64__ */

#ifdef CONFIG_PPC64
unsigned int __arch_hweight8(unsigned int w);
unsigned int __arch_hweight16(unsigned int w);
unsigned int __arch_hweight32(unsigned int w);
unsigned long __arch_hweight64(__u64 w);
#include <asm-generic/bitops/const_hweight.h>
#else
#include <asm-generic/bitops/hweight.h>
#endif

#include <asm-generic/bitops/find.h>

/* Little-endian versions */
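[Editor's note] This hunk routes PPC64 through out-of-line population-count helpers while keeping the generic hweight interface. A minimal, purely illustrative aside (not from this series; the function name is hypothetical): callers keep using the generic API.

/* Minimal sketch: kernel code keeps calling the generic helper;
 * on PPC64 it now resolves to the __arch_hweight64() routine above. */
#include <linux/bitops.h>

static unsigned int count_set_units(u64 mask)
{
	return hweight64(mask);		/* number of 1 bits in mask */
}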
@@ -199,6 +199,8 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0080000000000000)
#define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0100000000000000)
#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0200000000000000)
#define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0400000000000000)
#define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0800000000000000)

#ifndef __ASSEMBLY__

@@ -403,21 +405,22 @@ extern const char *powerpc_base_platform;
        CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
        CPU_FTR_MMCRA | CPU_FTR_SMT | \
        CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
        CPU_FTR_PURR | CPU_FTR_STCX_CHECKS_ADDRESS)
        CPU_FTR_PURR | CPU_FTR_STCX_CHECKS_ADDRESS | \
        CPU_FTR_POPCNTB)
#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
        CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
        CPU_FTR_MMCRA | CPU_FTR_SMT | \
        CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
        CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
        CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
        CPU_FTR_STCX_CHECKS_ADDRESS)
        CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB)
#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
        CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
        CPU_FTR_MMCRA | CPU_FTR_SMT | \
        CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
        CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
        CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
        CPU_FTR_STCX_CHECKS_ADDRESS)
        CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
        CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
        CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -61,22 +61,25 @@ static inline cpumask_t cpu_online_cores_map(void)
    return cpu_thread_mask_to_cores(cpu_online_map);
}

static inline int cpu_thread_to_core(int cpu)
{
    return cpu >> threads_shift;
}
#ifdef CONFIG_SMP
int cpu_core_index_of_thread(int cpu);
int cpu_first_thread_of_core(int core);
#else
static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
static inline int cpu_first_thread_of_core(int core) { return core; }
#endif

static inline int cpu_thread_in_core(int cpu)
{
    return cpu & (threads_per_core - 1);
}

static inline int cpu_first_thread_in_core(int cpu)
static inline int cpu_first_thread_sibling(int cpu)
{
    return cpu & ~(threads_per_core - 1);
}

static inline int cpu_last_thread_in_core(int cpu)
static inline int cpu_last_thread_sibling(int cpu)
{
    return cpu | (threads_per_core - 1);
}
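[Editor's note] The helpers above are plain shift/mask arithmetic on the logical CPU number. A stand-alone illustration of what they compute (not kernel code; the threads_per_core value of 4 is an assumption for the example):

/* Illustrative only: same mapping as the helpers above, evaluated in
 * user space with an assumed threads_per_core of 4 (threads_shift 2). */
#include <stdio.h>

int main(void)
{
	const int threads_per_core = 4, threads_shift = 2;
	int cpu = 6;	/* logical CPU id: core 1, thread 2 */

	printf("core index     : %d\n", cpu >> threads_shift);          /* 1 */
	printf("thread in core : %d\n", cpu & (threads_per_core - 1));  /* 2 */
	printf("first sibling  : %d\n", cpu & ~(threads_per_core - 1)); /* 4 */
	printf("last sibling   : %d\n", cpu | (threads_per_core - 1));  /* 7 */
	return 0;
}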
@ -9,6 +9,12 @@
|
|||
struct dma_map_ops;
|
||||
struct device_node;
|
||||
|
||||
/*
|
||||
* Arch extensions to struct device.
|
||||
*
|
||||
* When adding fields, consider macio_add_one_device in
|
||||
* drivers/macintosh/macio_asic.c
|
||||
*/
|
||||
struct dev_archdata {
|
||||
/* DMA operations on that device */
|
||||
struct dma_map_ops *dma_ops;
|
||||
|
|
|
@ -46,6 +46,7 @@
|
|||
#define FW_FEATURE_PS3_LV1 ASM_CONST(0x0000000000800000)
|
||||
#define FW_FEATURE_BEAT ASM_CONST(0x0000000001000000)
|
||||
#define FW_FEATURE_CMO ASM_CONST(0x0000000002000000)
|
||||
#define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000)
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
|
@ -59,7 +60,7 @@ enum {
|
|||
FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
|
||||
FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR |
|
||||
FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
|
||||
FW_FEATURE_CMO,
|
||||
FW_FEATURE_CMO | FW_FEATURE_VPHN,
|
||||
FW_FEATURE_PSERIES_ALWAYS = 0,
|
||||
FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
|
||||
FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
|
||||
|
|
|
@ -232,7 +232,9 @@
|
|||
#define H_GET_EM_PARMS 0x2B8
|
||||
#define H_SET_MPP 0x2D0
|
||||
#define H_GET_MPP 0x2D4
|
||||
#define MAX_HCALL_OPCODE H_GET_MPP
|
||||
#define H_HOME_NODE_ASSOCIATIVITY 0x2EC
|
||||
#define H_BEST_ENERGY 0x2F4
|
||||
#define MAX_HCALL_OPCODE H_BEST_ENERGY
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
|
|
|
@ -62,7 +62,10 @@ struct lppaca {
|
|||
volatile u32 dyn_pir; // Dynamic ProcIdReg value x20-x23
|
||||
u32 dsei_data; // DSEI data x24-x27
|
||||
u64 sprg3; // SPRG3 value x28-x2F
|
||||
u8 reserved3[80]; // Reserved x30-x7F
|
||||
u8 reserved3[40]; // Reserved x30-x57
|
||||
volatile u8 vphn_assoc_counts[8]; // Virtual processor home node
|
||||
// associativity change counters x58-x5F
|
||||
u8 reserved4[32]; // Reserved x60-x7F
|
||||
|
||||
//=============================================================================
|
||||
// CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data
|
||||
|
|
|
@ -27,9 +27,7 @@ struct iommu_table;
|
|||
struct rtc_time;
|
||||
struct file;
|
||||
struct pci_controller;
|
||||
#ifdef CONFIG_KEXEC
|
||||
struct kimage;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
struct smp_ops_t {
|
||||
|
@ -72,7 +70,7 @@ struct machdep_calls {
|
|||
int psize, int ssize);
|
||||
void (*flush_hash_range)(unsigned long number, int local);
|
||||
|
||||
/* special for kexec, to be called in real mode, linar mapping is
|
||||
/* special for kexec, to be called in real mode, linear mapping is
|
||||
* destroyed as well */
|
||||
void (*hpte_clear_all)(void);
|
||||
|
||||
|
@ -324,8 +322,6 @@ extern sys_ctrler_t sys_ctrler;
|
|||
|
||||
#endif /* CONFIG_PPC_PMAC */
|
||||
|
||||
extern void setup_pci_ptrs(void);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
/* Poor default implementations */
|
||||
extern void __devinit smp_generic_give_timebase(void);
|
||||
|
|
|
@ -33,6 +33,9 @@ extern int numa_cpu_lookup_table[];
|
|||
extern cpumask_var_t node_to_cpumask_map[];
|
||||
#ifdef CONFIG_MEMORY_HOTPLUG
|
||||
extern unsigned long max_pfn;
|
||||
u64 memory_hotplug_max(void);
|
||||
#else
|
||||
#define memory_hotplug_max() memblock_end_of_DRAM()
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
@ -42,6 +45,8 @@ extern unsigned long max_pfn;
|
|||
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
|
||||
#define node_end_pfn(nid) (NODE_DATA(nid)->node_end_pfn)
|
||||
|
||||
#else
|
||||
#define memory_hotplug_max() memblock_end_of_DRAM()
|
||||
#endif /* CONFIG_NEED_MULTIPLE_NODES */
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
|
|
@ -10,31 +10,7 @@
|
|||
#ifndef _ASM_POWERPC_NVRAM_H
|
||||
#define _ASM_POWERPC_NVRAM_H
|
||||
|
||||
#include <linux/errno.h>
|
||||
|
||||
#define NVRW_CNT 0x20
|
||||
#define NVRAM_HEADER_LEN 16 /* sizeof(struct nvram_header) */
|
||||
#define NVRAM_BLOCK_LEN 16
|
||||
#define NVRAM_MAX_REQ (2080/NVRAM_BLOCK_LEN)
|
||||
#define NVRAM_MIN_REQ (1056/NVRAM_BLOCK_LEN)
|
||||
|
||||
#define NVRAM_AS0 0x74
|
||||
#define NVRAM_AS1 0x75
|
||||
#define NVRAM_DATA 0x77
|
||||
|
||||
|
||||
/* RTC Offsets */
|
||||
|
||||
#define MOTO_RTC_SECONDS 0x1FF9
|
||||
#define MOTO_RTC_MINUTES 0x1FFA
|
||||
#define MOTO_RTC_HOURS 0x1FFB
|
||||
#define MOTO_RTC_DAY_OF_WEEK 0x1FFC
|
||||
#define MOTO_RTC_DAY_OF_MONTH 0x1FFD
|
||||
#define MOTO_RTC_MONTH 0x1FFE
|
||||
#define MOTO_RTC_YEAR 0x1FFF
|
||||
#define MOTO_RTC_CONTROLA 0x1FF8
|
||||
#define MOTO_RTC_CONTROLB 0x1FF9
|
||||
|
||||
/* Signatures for nvram partitions */
|
||||
#define NVRAM_SIG_SP 0x02 /* support processor */
|
||||
#define NVRAM_SIG_OF 0x50 /* open firmware config */
|
||||
#define NVRAM_SIG_FW 0x51 /* general firmware */
|
||||
|
@ -49,32 +25,19 @@
|
|||
#define NVRAM_SIG_OS 0xa0 /* OS defined */
|
||||
#define NVRAM_SIG_PANIC 0xa1 /* Apple OSX "panic" */
|
||||
|
||||
/* If change this size, then change the size of NVNAME_LEN */
|
||||
struct nvram_header {
|
||||
unsigned char signature;
|
||||
unsigned char checksum;
|
||||
unsigned short length;
|
||||
char name[12];
|
||||
};
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/errno.h>
|
||||
#include <linux/list.h>
|
||||
|
||||
struct nvram_partition {
|
||||
struct list_head partition;
|
||||
struct nvram_header header;
|
||||
unsigned int index;
|
||||
};
|
||||
|
||||
|
||||
#ifdef CONFIG_PPC_PSERIES
|
||||
extern int nvram_write_error_log(char * buff, int length,
|
||||
unsigned int err_type, unsigned int err_seq);
|
||||
extern int nvram_read_error_log(char * buff, int length,
|
||||
unsigned int * err_type, unsigned int *err_seq);
|
||||
extern int nvram_clear_error_log(void);
|
||||
|
||||
extern int pSeries_nvram_init(void);
|
||||
#endif /* CONFIG_PPC_PSERIES */
|
||||
|
||||
#ifdef CONFIG_MMIO_NVRAM
|
||||
extern int mmio_nvram_init(void);
|
||||
|
@ -85,6 +48,13 @@ static inline int mmio_nvram_init(void)
|
|||
}
|
||||
#endif
|
||||
|
||||
extern int __init nvram_scan_partitions(void);
|
||||
extern loff_t nvram_create_partition(const char *name, int sig,
|
||||
int req_size, int min_size);
|
||||
extern int nvram_remove_partition(const char *name, int sig);
|
||||
extern int nvram_get_partition_size(loff_t data_index);
|
||||
extern loff_t nvram_find_partition(const char *name, int sig, int *out_size);
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
/* PowerMac specific nvram stuffs */
|
||||
|
|
|
@ -36,6 +36,8 @@
|
|||
#define PPC_INST_NOP 0x60000000
|
||||
#define PPC_INST_POPCNTB 0x7c0000f4
|
||||
#define PPC_INST_POPCNTB_MASK 0xfc0007fe
|
||||
#define PPC_INST_POPCNTD 0x7c0003f4
|
||||
#define PPC_INST_POPCNTW 0x7c0002f4
|
||||
#define PPC_INST_RFCI 0x4c000066
|
||||
#define PPC_INST_RFDI 0x4c00004e
|
||||
#define PPC_INST_RFMCI 0x4c00004c
|
||||
|
@ -88,6 +90,12 @@
|
|||
__PPC_RB(b) | __PPC_EH(eh))
|
||||
#define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \
|
||||
__PPC_RB(b))
|
||||
#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \
|
||||
__PPC_RA(a) | __PPC_RS(s))
|
||||
#define PPC_POPCNTD(a, s) stringify_in_c(.long PPC_INST_POPCNTD | \
|
||||
__PPC_RA(a) | __PPC_RS(s))
|
||||
#define PPC_POPCNTW(a, s) stringify_in_c(.long PPC_INST_POPCNTW | \
|
||||
__PPC_RA(a) | __PPC_RS(s))
|
||||
#define PPC_RFCI stringify_in_c(.long PPC_INST_RFCI)
|
||||
#define PPC_RFDI stringify_in_c(.long PPC_INST_RFDI)
|
||||
#define PPC_RFMCI stringify_in_c(.long PPC_INST_RFMCI)
|
||||
|
|
|
@ -122,7 +122,6 @@ extern struct task_struct *last_task_used_spe;
|
|||
TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
|
||||
#endif
|
||||
|
||||
#ifdef __KERNEL__
|
||||
#ifdef __powerpc64__
|
||||
|
||||
#define STACK_TOP_USER64 TASK_SIZE_USER64
|
||||
|
@ -139,7 +138,6 @@ extern struct task_struct *last_task_used_spe;
|
|||
#define STACK_TOP_MAX STACK_TOP
|
||||
|
||||
#endif /* __powerpc64__ */
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
typedef struct {
|
||||
unsigned long seg;
|
||||
|
|
|
@ -106,9 +106,22 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
|
|||
int nid)
|
||||
{
|
||||
}
|
||||
|
||||
#endif /* CONFIG_NUMA */
|
||||
|
||||
#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
|
||||
extern int start_topology_update(void);
|
||||
extern int stop_topology_update(void);
|
||||
#else
|
||||
static inline int start_topology_update(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline int stop_topology_update(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
|
||||
|
||||
#include <asm-generic/topology.h>
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
|
|
@ -116,9 +116,7 @@ struct vdso_data {
|
|||
|
||||
#endif /* CONFIG_PPC64 */
|
||||
|
||||
#ifdef __KERNEL__
|
||||
extern struct vdso_data *vdso_data;
|
||||
#endif
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
|
|
|
@ -29,8 +29,10 @@ endif
|
|||
obj-y := cputable.o ptrace.o syscalls.o \
|
||||
irq.o align.o signal_32.o pmc.o vdso.o \
|
||||
init_task.o process.o systbl.o idle.o \
|
||||
signal.o sysfs.o cacheinfo.o
|
||||
obj-y += vdso32/
|
||||
signal.o sysfs.o cacheinfo.o time.o \
|
||||
prom.o traps.o setup-common.o \
|
||||
udbg.o misc.o io.o dma.o \
|
||||
misc_$(CONFIG_WORD_SIZE).o vdso32/
|
||||
obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
|
||||
signal_64.o ptrace32.o \
|
||||
paca.o nvram_64.o firmware.o
|
||||
|
@ -80,9 +82,6 @@ extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
|
|||
extra-$(CONFIG_8xx) := head_8xx.o
|
||||
extra-y += vmlinux.lds
|
||||
|
||||
obj-y += time.o prom.o traps.o setup-common.o \
|
||||
udbg.o misc.o io.o dma.o \
|
||||
misc_$(CONFIG_WORD_SIZE).o
|
||||
obj-$(CONFIG_PPC32) += entry_32.o setup_32.o
|
||||
obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o
|
||||
obj-$(CONFIG_KGDB) += kgdb.o
|
||||
|
|
|
@ -209,7 +209,6 @@ int main(void)
|
|||
DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
|
||||
|
||||
/* Interrupt register frame */
|
||||
DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
|
||||
DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
|
||||
DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
|
||||
#ifdef CONFIG_PPC64
|
||||
|
|
|
@ -457,16 +457,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
|
|||
.dcache_bsize = 128,
|
||||
.num_pmcs = 6,
|
||||
.pmc_type = PPC_PMC_IBM,
|
||||
.cpu_setup = __setup_cpu_power7,
|
||||
.cpu_restore = __restore_cpu_power7,
|
||||
.oprofile_cpu_type = "ppc64/power7",
|
||||
.oprofile_type = PPC_OPROFILE_POWER4,
|
||||
.oprofile_mmcra_sihv = POWER6_MMCRA_SIHV,
|
||||
.oprofile_mmcra_sipr = POWER6_MMCRA_SIPR,
|
||||
.oprofile_mmcra_clear = POWER6_MMCRA_THRM |
|
||||
POWER6_MMCRA_OTHER,
|
||||
.platform = "power7",
|
||||
},
|
||||
{ /* Power7+ */
|
||||
.pvr_mask = 0xffff0000,
|
||||
.pvr_value = 0x004A0000,
|
||||
.cpu_name = "POWER7+ (raw)",
|
||||
.cpu_features = CPU_FTRS_POWER7,
|
||||
.cpu_user_features = COMMON_USER_POWER7,
|
||||
.mmu_features = MMU_FTR_HPTE_TABLE |
|
||||
MMU_FTR_TLBIE_206,
|
||||
.icache_bsize = 128,
|
||||
.dcache_bsize = 128,
|
||||
.num_pmcs = 6,
|
||||
.pmc_type = PPC_PMC_IBM,
|
||||
.oprofile_cpu_type = "ppc64/power7",
|
||||
.oprofile_type = PPC_OPROFILE_POWER4,
|
||||
.platform = "power7+",
|
||||
},
|
||||
{ /* Cell Broadband Engine */
|
||||
.pvr_mask = 0xffff0000,
|
||||
.pvr_value = 0x00700000,
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#include <asm/prom.h>
|
||||
#include <asm/firmware.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/rtas.h>
|
||||
|
||||
#ifdef DEBUG
|
||||
#include <asm/udbg.h>
|
||||
|
@ -141,3 +142,35 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
|
|||
|
||||
return csize;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PPC_RTAS
|
||||
/*
|
||||
* The crashkernel region will almost always overlap the RTAS region, so
|
||||
* we have to be careful when shrinking the crashkernel region.
|
||||
*/
|
||||
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
|
||||
{
|
||||
unsigned long addr;
|
||||
const u32 *basep, *sizep;
|
||||
unsigned int rtas_start = 0, rtas_end = 0;
|
||||
|
||||
basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
|
||||
sizep = of_get_property(rtas.dev, "rtas-size", NULL);
|
||||
|
||||
if (basep && sizep) {
|
||||
rtas_start = *basep;
|
||||
rtas_end = *basep + *sizep;
|
||||
}
|
||||
|
||||
for (addr = begin; addr < end; addr += PAGE_SIZE) {
|
||||
/* Does this page overlap with the RTAS region? */
|
||||
if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start))
|
||||
continue;
|
||||
|
||||
ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
|
||||
init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
|
||||
free_page((unsigned long)__va(addr));
|
||||
totalram_pages++;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -19,7 +19,7 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
|
|||
dma_addr_t *dma_handle, gfp_t flag)
|
||||
{
|
||||
return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
|
||||
dma_handle, device_to_mask(dev), flag,
|
||||
dma_handle, dev->coherent_dma_mask, flag,
|
||||
dev_to_node(dev));
|
||||
}
|
||||
|
||||
|
|
|
@ -31,6 +31,7 @@
|
|||
#include <asm/asm-offsets.h>
|
||||
#include <asm/unistd.h>
|
||||
#include <asm/ftrace.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
#undef SHOW_SYSCALLS
|
||||
#undef SHOW_SYSCALLS_TASK
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
*/
|
||||
|
||||
#include <asm/exception-64s.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
/*
|
||||
* We layout physical memory as follows:
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
#include <asm/thread_info.h>
|
||||
#include <asm/ppc_asm.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
#ifdef CONFIG_VSX
|
||||
#define REST_32FPVSRS(n,c,base) \
|
||||
|
|
|
@ -40,6 +40,7 @@
|
|||
#include <asm/thread_info.h>
|
||||
#include <asm/ppc_asm.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
/* As with the other PowerPC ports, it is expected that when code
|
||||
* execution begins here, the following registers contain valid, yet
|
||||
|
|
|
@ -37,6 +37,7 @@
|
|||
#include <asm/thread_info.h>
|
||||
#include <asm/ppc_asm.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/synch.h>
|
||||
#include "head_booke.h"
|
||||
|
||||
|
|
|
@ -38,6 +38,7 @@
|
|||
#include <asm/page_64.h>
|
||||
#include <asm/irqflags.h>
|
||||
#include <asm/kvm_book3s_asm.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
/* The physical memory is layed out such that the secondary processor
|
||||
* spin code sits at 0x0000...0x00ff. On server, the vectors follow
|
||||
|
@ -96,7 +97,7 @@ __secondary_hold_acknowledge:
|
|||
.llong hvReleaseData-KERNELBASE
|
||||
#endif /* CONFIG_PPC_ISERIES */
|
||||
|
||||
#ifdef CONFIG_CRASH_DUMP
|
||||
#ifdef CONFIG_RELOCATABLE
|
||||
/* This flag is set to 1 by a loader if the kernel should run
|
||||
* at the loaded address instead of the linked address. This
|
||||
* is used by kexec-tools to keep the the kdump kernel in the
|
||||
|
@ -384,12 +385,10 @@ _STATIC(__after_prom_start)
|
|||
/* process relocations for the final address of the kernel */
|
||||
lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */
|
||||
sldi r25,r25,32
|
||||
#ifdef CONFIG_CRASH_DUMP
|
||||
lwz r7,__run_at_load-_stext(r26)
|
||||
cmplwi cr0,r7,1 /* kdump kernel ? - stay where we are */
|
||||
cmplwi cr0,r7,1 /* flagged to stay where we are ? */
|
||||
bne 1f
|
||||
add r25,r25,r26
|
||||
#endif
|
||||
1: mr r3,r25
|
||||
bl .relocate
|
||||
#endif
|
||||
|
|
|
@ -29,6 +29,7 @@
|
|||
#include <asm/thread_info.h>
|
||||
#include <asm/ppc_asm.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
/* Macro to make the code more readable. */
|
||||
#ifdef CONFIG_8xx_CPU6
|
||||
|
|
|
@ -41,6 +41,7 @@
|
|||
#include <asm/ppc_asm.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/cache.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include "head_booke.h"
|
||||
|
||||
/* As with the other PowerPC ports, it is expected that when code
|
||||
|
|
|
@ -311,8 +311,9 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
|
|||
/* Handle failure */
|
||||
if (unlikely(entry == DMA_ERROR_CODE)) {
|
||||
if (printk_ratelimit())
|
||||
printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
|
||||
" npages %lx\n", tbl, vaddr, npages);
|
||||
dev_info(dev, "iommu_alloc failed, tbl %p "
|
||||
"vaddr %lx npages %lu\n", tbl, vaddr,
|
||||
npages);
|
||||
goto failure;
|
||||
}
|
||||
|
||||
|
@ -579,9 +580,9 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
|
|||
attrs);
|
||||
if (dma_handle == DMA_ERROR_CODE) {
|
||||
if (printk_ratelimit()) {
|
||||
printk(KERN_INFO "iommu_alloc failed, "
|
||||
"tbl %p vaddr %p npages %d\n",
|
||||
tbl, vaddr, npages);
|
||||
dev_info(dev, "iommu_alloc failed, tbl %p "
|
||||
"vaddr %p npages %d\n", tbl, vaddr,
|
||||
npages);
|
||||
}
|
||||
} else
|
||||
dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
|
||||
|
@ -627,7 +628,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
|
|||
* the tce tables.
|
||||
*/
|
||||
if (order >= IOMAP_MAX_ORDER) {
|
||||
printk("iommu_alloc_consistent size too large: 0x%lx\n", size);
|
||||
dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
|
||||
size);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
|
|
@ -122,8 +122,3 @@ _GLOBAL(longjmp)
|
|||
mtlr r0
|
||||
mr r3,r4
|
||||
blr
|
||||
|
||||
_GLOBAL(__setup_cpu_power7)
|
||||
_GLOBAL(__restore_cpu_power7)
|
||||
/* place holder */
|
||||
blr
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
#include <asm/processor.h>
|
||||
#include <asm/kexec.h>
|
||||
#include <asm/bug.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
.text
|
||||
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
#include <asm/cputable.h>
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/kexec.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
.text
|
||||
|
||||
|
|
|
@ -34,15 +34,26 @@
|
|||
|
||||
#undef DEBUG_NVRAM
|
||||
|
||||
static struct nvram_partition * nvram_part;
|
||||
static long nvram_error_log_index = -1;
|
||||
static long nvram_error_log_size = 0;
|
||||
#define NVRAM_HEADER_LEN sizeof(struct nvram_header)
|
||||
#define NVRAM_BLOCK_LEN NVRAM_HEADER_LEN
|
||||
|
||||
struct err_log_info {
|
||||
int error_type;
|
||||
unsigned int seq_num;
|
||||
/* If change this size, then change the size of NVNAME_LEN */
|
||||
struct nvram_header {
|
||||
unsigned char signature;
|
||||
unsigned char checksum;
|
||||
unsigned short length;
|
||||
/* Terminating null required only for names < 12 chars. */
|
||||
char name[12];
|
||||
};
|
||||
|
||||
struct nvram_partition {
|
||||
struct list_head partition;
|
||||
struct nvram_header header;
|
||||
unsigned int index;
|
||||
};
|
||||
|
||||
static LIST_HEAD(nvram_partitions);
|
||||
|
||||
static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin)
|
||||
{
|
||||
int size;
|
||||
|
@ -186,14 +197,12 @@ static struct miscdevice nvram_dev = {
|
|||
#ifdef DEBUG_NVRAM
|
||||
static void __init nvram_print_partitions(char * label)
|
||||
{
|
||||
struct list_head * p;
|
||||
struct nvram_partition * tmp_part;
|
||||
|
||||
printk(KERN_WARNING "--------%s---------\n", label);
|
||||
printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n");
|
||||
list_for_each(p, &nvram_part->partition) {
|
||||
tmp_part = list_entry(p, struct nvram_partition, partition);
|
||||
printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%s\n",
|
||||
list_for_each_entry(tmp_part, &nvram_partitions, partition) {
|
||||
printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%12s\n",
|
||||
tmp_part->index, tmp_part->header.signature,
|
||||
tmp_part->header.checksum, tmp_part->header.length,
|
||||
tmp_part->header.name);
|
||||
|
@ -228,95 +237,113 @@ static unsigned char __init nvram_checksum(struct nvram_header *p)
|
|||
return c_sum;
|
||||
}
|
||||
|
||||
static int __init nvram_remove_os_partition(void)
|
||||
/**
|
||||
* nvram_remove_partition - Remove one or more partitions in nvram
|
||||
* @name: name of the partition to remove, or NULL for a
|
||||
* signature only match
|
||||
* @sig: signature of the partition(s) to remove
|
||||
*/
|
||||
|
||||
int __init nvram_remove_partition(const char *name, int sig)
|
||||
{
|
||||
struct list_head *i;
|
||||
struct list_head *j;
|
||||
struct nvram_partition * part;
|
||||
struct nvram_partition * cur_part;
|
||||
struct nvram_partition *part, *prev, *tmp;
|
||||
int rc;
|
||||
|
||||
list_for_each(i, &nvram_part->partition) {
|
||||
part = list_entry(i, struct nvram_partition, partition);
|
||||
if (part->header.signature != NVRAM_SIG_OS)
|
||||
list_for_each_entry(part, &nvram_partitions, partition) {
|
||||
if (part->header.signature != sig)
|
||||
continue;
|
||||
|
||||
/* Make os partition a free partition */
|
||||
if (name && strncmp(name, part->header.name, 12))
|
||||
continue;
|
||||
|
||||
/* Make partition a free partition */
|
||||
part->header.signature = NVRAM_SIG_FREE;
|
||||
sprintf(part->header.name, "wwwwwwwwwwww");
|
||||
strncpy(part->header.name, "wwwwwwwwwwww", 12);
|
||||
part->header.checksum = nvram_checksum(&part->header);
|
||||
|
||||
/* Merge contiguous free partitions backwards */
|
||||
list_for_each_prev(j, &part->partition) {
|
||||
cur_part = list_entry(j, struct nvram_partition, partition);
|
||||
if (cur_part == nvram_part || cur_part->header.signature != NVRAM_SIG_FREE) {
|
||||
break;
|
||||
}
|
||||
|
||||
part->header.length += cur_part->header.length;
|
||||
part->header.checksum = nvram_checksum(&part->header);
|
||||
part->index = cur_part->index;
|
||||
|
||||
list_del(&cur_part->partition);
|
||||
kfree(cur_part);
|
||||
j = &part->partition; /* fixup our loop */
|
||||
}
|
||||
|
||||
/* Merge contiguous free partitions forwards */
|
||||
list_for_each(j, &part->partition) {
|
||||
cur_part = list_entry(j, struct nvram_partition, partition);
|
||||
if (cur_part == nvram_part || cur_part->header.signature != NVRAM_SIG_FREE) {
|
||||
break;
|
||||
}
|
||||
|
||||
part->header.length += cur_part->header.length;
|
||||
part->header.checksum = nvram_checksum(&part->header);
|
||||
|
||||
list_del(&cur_part->partition);
|
||||
kfree(cur_part);
|
||||
j = &part->partition; /* fixup our loop */
|
||||
}
|
||||
|
||||
rc = nvram_write_header(part);
|
||||
if (rc <= 0) {
|
||||
printk(KERN_ERR "nvram_remove_os_partition: nvram_write failed (%d)\n", rc);
|
||||
printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc);
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
|
||||
/* Merge contiguous ones */
|
||||
prev = NULL;
|
||||
list_for_each_entry_safe(part, tmp, &nvram_partitions, partition) {
|
||||
if (part->header.signature != NVRAM_SIG_FREE) {
|
||||
prev = NULL;
|
||||
continue;
|
||||
}
|
||||
if (prev) {
|
||||
prev->header.length += part->header.length;
|
||||
prev->header.checksum = nvram_checksum(&part->header);
|
||||
rc = nvram_write_header(part);
|
||||
if (rc <= 0) {
|
||||
printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc);
|
||||
return rc;
|
||||
}
|
||||
list_del(&part->partition);
|
||||
kfree(part);
|
||||
} else
|
||||
prev = part;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* nvram_create_os_partition
|
||||
/**
|
||||
* nvram_create_partition - Create a partition in nvram
|
||||
* @name: name of the partition to create
|
||||
* @sig: signature of the partition to create
|
||||
* @req_size: size of data to allocate in bytes
|
||||
* @min_size: minimum acceptable size (0 means req_size)
|
||||
*
|
||||
* Create a OS linux partition to buffer error logs.
|
||||
* Will create a partition starting at the first free
|
||||
* space found if space has enough room.
|
||||
* Returns a negative error code or a positive nvram index
|
||||
* of the beginning of the data area of the newly created
|
||||
* partition. If you provided a min_size smaller than req_size
|
||||
* you need to query for the actual size yourself after the
|
||||
* call using nvram_partition_get_size().
|
||||
*/
|
||||
static int __init nvram_create_os_partition(void)
|
||||
loff_t __init nvram_create_partition(const char *name, int sig,
|
||||
int req_size, int min_size)
|
||||
{
|
||||
struct nvram_partition *part;
|
||||
struct nvram_partition *new_part;
|
||||
struct nvram_partition *free_part = NULL;
|
||||
int seq_init[2] = { 0, 0 };
|
||||
static char nv_init_vals[16];
|
||||
loff_t tmp_index;
|
||||
long size = 0;
|
||||
int rc;
|
||||
|
||||
|
||||
/* Convert sizes from bytes to blocks */
|
||||
req_size = _ALIGN_UP(req_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
|
||||
min_size = _ALIGN_UP(min_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
|
||||
|
||||
/* If no minimum size specified, make it the same as the
|
||||
* requested size
|
||||
*/
|
||||
if (min_size == 0)
|
||||
min_size = req_size;
|
||||
if (min_size > req_size)
|
||||
return -EINVAL;
|
||||
|
||||
/* Now add one block to each for the header */
|
||||
req_size += 1;
|
||||
min_size += 1;
|
||||
|
||||
/* Find a free partition that will give us the maximum needed size
|
||||
If can't find one that will give us the minimum size needed */
|
||||
list_for_each_entry(part, &nvram_part->partition, partition) {
|
||||
list_for_each_entry(part, &nvram_partitions, partition) {
|
||||
if (part->header.signature != NVRAM_SIG_FREE)
|
||||
continue;
|
||||
|
||||
if (part->header.length >= NVRAM_MAX_REQ) {
|
||||
size = NVRAM_MAX_REQ;
|
||||
if (part->header.length >= req_size) {
|
||||
size = req_size;
|
||||
free_part = part;
|
||||
break;
|
||||
}
|
||||
if (!size && part->header.length >= NVRAM_MIN_REQ) {
|
||||
size = NVRAM_MIN_REQ;
|
||||
if (part->header.length > size &&
|
||||
part->header.length >= min_size) {
|
||||
size = part->header.length;
|
||||
free_part = part;
|
||||
}
|
||||
}
|
||||
|
@ -326,136 +353,95 @@ static int __init nvram_create_os_partition(void)
|
|||
/* Create our OS partition */
|
||||
new_part = kmalloc(sizeof(*new_part), GFP_KERNEL);
|
||||
if (!new_part) {
|
||||
printk(KERN_ERR "nvram_create_os_partition: kmalloc failed\n");
|
||||
pr_err("nvram_create_os_partition: kmalloc failed\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
new_part->index = free_part->index;
|
||||
new_part->header.signature = NVRAM_SIG_OS;
|
||||
new_part->header.signature = sig;
|
||||
new_part->header.length = size;
|
||||
strcpy(new_part->header.name, "ppc64,linux");
|
||||
strncpy(new_part->header.name, name, 12);
|
||||
new_part->header.checksum = nvram_checksum(&new_part->header);
|
||||
|
||||
rc = nvram_write_header(new_part);
|
||||
if (rc <= 0) {
|
||||
printk(KERN_ERR "nvram_create_os_partition: nvram_write_header "
|
||||
"failed (%d)\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* make sure and initialize to zero the sequence number and the error
|
||||
type logged */
|
||||
tmp_index = new_part->index + NVRAM_HEADER_LEN;
|
||||
rc = ppc_md.nvram_write((char *)&seq_init, sizeof(seq_init), &tmp_index);
|
||||
if (rc <= 0) {
|
||||
printk(KERN_ERR "nvram_create_os_partition: nvram_write "
|
||||
pr_err("nvram_create_os_partition: nvram_write_header "
|
||||
"failed (%d)\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
nvram_error_log_index = new_part->index + NVRAM_HEADER_LEN;
|
||||
nvram_error_log_size = ((part->header.length - 1) *
|
||||
NVRAM_BLOCK_LEN) - sizeof(struct err_log_info);
|
||||
|
||||
list_add_tail(&new_part->partition, &free_part->partition);
|
||||
|
||||
if (free_part->header.length <= size) {
|
||||
/* Adjust or remove the partition we stole the space from */
|
||||
if (free_part->header.length > size) {
|
||||
free_part->index += size * NVRAM_BLOCK_LEN;
|
||||
free_part->header.length -= size;
|
||||
free_part->header.checksum = nvram_checksum(&free_part->header);
|
||||
rc = nvram_write_header(free_part);
|
||||
if (rc <= 0) {
|
||||
pr_err("nvram_create_os_partition: nvram_write_header "
|
||||
"failed (%d)\n", rc);
|
||||
return rc;
|
||||
}
|
||||
} else {
|
||||
list_del(&free_part->partition);
|
||||
kfree(free_part);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Adjust the partition we stole the space from */
|
||||
free_part->index += size * NVRAM_BLOCK_LEN;
|
||||
free_part->header.length -= size;
|
||||
free_part->header.checksum = nvram_checksum(&free_part->header);
|
||||
|
||||
rc = nvram_write_header(free_part);
|
||||
if (rc <= 0) {
|
||||
printk(KERN_ERR "nvram_create_os_partition: nvram_write_header "
|
||||
"failed (%d)\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* nvram_setup_partition
|
||||
*
|
||||
* This will setup the partition we need for buffering the
|
||||
* error logs and cleanup partitions if needed.
|
||||
*
|
||||
* The general strategy is the following:
|
||||
* 1.) If there is ppc64,linux partition large enough then use it.
|
||||
* 2.) If there is not a ppc64,linux partition large enough, search
|
||||
* for a free partition that is large enough.
|
||||
* 3.) If there is not a free partition large enough remove
|
||||
* _all_ OS partitions and consolidate the space.
|
||||
* 4.) Will first try getting a chunk that will satisfy the maximum
|
||||
* error log size (NVRAM_MAX_REQ).
|
||||
* 5.) If the max chunk cannot be allocated then try finding a chunk
|
||||
* that will satisfy the minum needed (NVRAM_MIN_REQ).
|
||||
*/
|
||||
static int __init nvram_setup_partition(void)
|
||||
{
|
||||
struct list_head * p;
|
||||
struct nvram_partition * part;
|
||||
int rc;
|
||||
|
||||
/* For now, we don't do any of this on pmac, until I
|
||||
* have figured out if it's worth killing some unused stuffs
|
||||
* in our nvram, as Apple defined partitions use pretty much
|
||||
* all of the space
|
||||
*/
|
||||
if (machine_is(powermac))
|
||||
return -ENOSPC;
|
||||
|
||||
/* see if we have an OS partition that meets our needs.
|
||||
will try getting the max we need. If not we'll delete
|
||||
partitions and try again. */
|
||||
list_for_each(p, &nvram_part->partition) {
|
||||
part = list_entry(p, struct nvram_partition, partition);
|
||||
if (part->header.signature != NVRAM_SIG_OS)
|
||||
continue;
|
||||
|
||||
if (strcmp(part->header.name, "ppc64,linux"))
|
||||
continue;
|
||||
|
||||
if (part->header.length >= NVRAM_MIN_REQ) {
|
||||
/* found our partition */
|
||||
nvram_error_log_index = part->index + NVRAM_HEADER_LEN;
|
||||
nvram_error_log_size = ((part->header.length - 1) *
|
||||
NVRAM_BLOCK_LEN) - sizeof(struct err_log_info);
|
||||
return 0;
|
||||
/* Clear the new partition */
|
||||
for (tmp_index = new_part->index + NVRAM_HEADER_LEN;
|
||||
tmp_index < ((size - 1) * NVRAM_BLOCK_LEN);
|
||||
tmp_index += NVRAM_BLOCK_LEN) {
|
||||
rc = ppc_md.nvram_write(nv_init_vals, NVRAM_BLOCK_LEN, &tmp_index);
|
||||
if (rc <= 0) {
|
||||
pr_err("nvram_create_partition: nvram_write failed (%d)\n", rc);
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
|
||||
/* try creating a partition with the free space we have */
|
||||
rc = nvram_create_os_partition();
|
||||
if (!rc) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* need to free up some space */
|
||||
rc = nvram_remove_os_partition();
|
||||
if (rc) {
|
||||
return rc;
|
||||
}
|
||||
return new_part->index + NVRAM_HEADER_LEN;
|
||||
}
|
||||
|
||||
/**
|
||||
* nvram_get_partition_size - Get the data size of an nvram partition
|
||||
* @data_index: This is the offset of the start of the data of
|
||||
* the partition. The same value that is returned by
|
||||
* nvram_create_partition().
|
||||
*/
|
||||
int nvram_get_partition_size(loff_t data_index)
|
||||
{
|
||||
struct nvram_partition *part;
|
||||
|
||||
/* create a partition in this new space */
|
||||
rc = nvram_create_os_partition();
|
||||
if (rc) {
|
||||
printk(KERN_ERR "nvram_create_os_partition: Could not find a "
|
||||
"NVRAM partition large enough\n");
|
||||
return rc;
|
||||
list_for_each_entry(part, &nvram_partitions, partition) {
|
||||
if (part->index + NVRAM_HEADER_LEN == data_index)
|
||||
return (part->header.length - 1) * NVRAM_BLOCK_LEN;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
static int __init nvram_scan_partitions(void)
|
||||
/**
|
||||
* nvram_find_partition - Find an nvram partition by signature and name
|
||||
* @name: Name of the partition or NULL for any name
|
||||
* @sig: Signature to test against
|
||||
* @out_size: if non-NULL, returns the size of the data part of the partition
|
||||
*/
|
||||
loff_t nvram_find_partition(const char *name, int sig, int *out_size)
|
||||
{
|
||||
struct nvram_partition *p;
|
||||
|
||||
list_for_each_entry(p, &nvram_partitions, partition) {
|
||||
if (p->header.signature == sig &&
|
||||
(!name || !strncmp(p->header.name, name, 12))) {
|
||||
if (out_size)
|
||||
*out_size = (p->header.length - 1) *
|
||||
NVRAM_BLOCK_LEN;
|
||||
return p->index + NVRAM_HEADER_LEN;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int __init nvram_scan_partitions(void)
|
||||
{
|
||||
loff_t cur_index = 0;
|
||||
struct nvram_header phead;
|
||||
|
@ -465,7 +451,7 @@ static int __init nvram_scan_partitions(void)
|
|||
int total_size;
|
||||
int err;
|
||||
|
||||
if (ppc_md.nvram_size == NULL)
|
||||
if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0)
|
||||
return -ENODEV;
|
||||
total_size = ppc_md.nvram_size();
|
||||
|
||||
|
@ -512,12 +498,16 @@ static int __init nvram_scan_partitions(void)
|
|||
|
||||
memcpy(&tmp_part->header, &phead, NVRAM_HEADER_LEN);
|
||||
tmp_part->index = cur_index;
|
||||
list_add_tail(&tmp_part->partition, &nvram_part->partition);
|
||||
list_add_tail(&tmp_part->partition, &nvram_partitions);
|
||||
|
||||
cur_index += phead.length * NVRAM_BLOCK_LEN;
|
||||
}
|
||||
err = 0;
|
||||
|
||||
#ifdef DEBUG_NVRAM
|
||||
nvram_print_partitions("NVRAM Partitions");
|
||||
#endif
|
||||
|
||||
out:
|
||||
kfree(header);
|
||||
return err;
|
||||
|
@ -525,9 +515,10 @@ static int __init nvram_scan_partitions(void)
|
|||
|
||||
static int __init nvram_init(void)
|
||||
{
|
||||
int error;
|
||||
int rc;
|
||||
|
||||
BUILD_BUG_ON(NVRAM_BLOCK_LEN != 16);
|
||||
|
||||
if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0)
|
||||
return -ENODEV;
|
||||
|
||||
|
@ -537,29 +528,6 @@ static int __init nvram_init(void)
|
|||
return rc;
|
||||
}
|
||||
|
||||
/* initialize our anchor for the nvram partition list */
|
||||
nvram_part = kmalloc(sizeof(struct nvram_partition), GFP_KERNEL);
|
||||
if (!nvram_part) {
|
||||
printk(KERN_ERR "nvram_init: Failed kmalloc\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
INIT_LIST_HEAD(&nvram_part->partition);
|
||||
|
||||
/* Get all the NVRAM partitions */
|
||||
error = nvram_scan_partitions();
|
||||
if (error) {
|
||||
printk(KERN_ERR "nvram_init: Failed nvram_scan_partitions\n");
|
||||
return error;
|
||||
}
|
||||
|
||||
if(nvram_setup_partition())
|
||||
printk(KERN_WARNING "nvram_init: Could not find nvram partition"
|
||||
" for nvram buffered error logging.\n");
|
||||
|
||||
#ifdef DEBUG_NVRAM
|
||||
nvram_print_partitions("NVRAM Partitions");
|
||||
#endif
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -568,135 +536,6 @@ void __exit nvram_cleanup(void)
|
|||
misc_deregister( &nvram_dev );
|
||||
}
|
||||
|
||||
|
||||
#ifdef CONFIG_PPC_PSERIES
|
||||
|
||||
/* nvram_write_error_log
|
||||
*
|
||||
* We need to buffer the error logs into nvram to ensure that we have
|
||||
* the failure information to decode. If we have a severe error there
|
||||
* is no way to guarantee that the OS or the machine is in a state to
|
||||
* get back to user land and write the error to disk. For example if
|
||||
* the SCSI device driver causes a Machine Check by writing to a bad
|
||||
* IO address, there is no way of guaranteeing that the device driver
|
||||
* is in any state that is would also be able to write the error data
|
||||
* captured to disk, thus we buffer it in NVRAM for analysis on the
|
||||
* next boot.
|
||||
*
|
||||
* In NVRAM the partition containing the error log buffer will looks like:
|
||||
* Header (in bytes):
|
||||
* +-----------+----------+--------+------------+------------------+
|
||||
* | signature | checksum | length | name | data |
|
||||
* |0 |1 |2 3|4 15|16 length-1|
|
||||
* +-----------+----------+--------+------------+------------------+
|
||||
*
|
||||
* The 'data' section would look like (in bytes):
|
||||
* +--------------+------------+-----------------------------------+
|
||||
* | event_logged | sequence # | error log |
|
||||
* |0 3|4 7|8 nvram_error_log_size-1|
|
||||
* +--------------+------------+-----------------------------------+
|
||||
*
|
||||
* event_logged: 0 if event has not been logged to syslog, 1 if it has
|
||||
* sequence #: The unique sequence # for each event. (until it wraps)
|
||||
* error log: The error log from event_scan
|
||||
*/
|
||||
int nvram_write_error_log(char * buff, int length,
|
||||
unsigned int err_type, unsigned int error_log_cnt)
|
||||
{
|
||||
int rc;
|
||||
loff_t tmp_index;
|
||||
struct err_log_info info;
|
||||
|
||||
if (nvram_error_log_index == -1) {
|
||||
return -ESPIPE;
|
||||
}
|
||||
|
||||
if (length > nvram_error_log_size) {
|
||||
length = nvram_error_log_size;
|
||||
}
|
||||
|
||||
info.error_type = err_type;
|
||||
info.seq_num = error_log_cnt;
|
||||
|
||||
tmp_index = nvram_error_log_index;
|
||||
|
||||
rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index);
|
||||
if (rc <= 0) {
|
||||
printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = ppc_md.nvram_write(buff, length, &tmp_index);
|
||||
if (rc <= 0) {
|
||||
printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* nvram_read_error_log
|
||||
*
|
||||
* Reads nvram for error log for at most 'length'
|
||||
*/
|
||||
int nvram_read_error_log(char * buff, int length,
|
||||
unsigned int * err_type, unsigned int * error_log_cnt)
|
||||
{
|
||||
int rc;
|
||||
loff_t tmp_index;
|
||||
struct err_log_info info;
|
||||
|
||||
if (nvram_error_log_index == -1)
|
||||
return -1;
|
||||
|
||||
if (length > nvram_error_log_size)
|
||||
length = nvram_error_log_size;
|
||||
|
||||
tmp_index = nvram_error_log_index;
|
||||
|
||||
rc = ppc_md.nvram_read((char *)&info, sizeof(struct err_log_info), &tmp_index);
|
||||
if (rc <= 0) {
|
||||
printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = ppc_md.nvram_read(buff, length, &tmp_index);
|
||||
if (rc <= 0) {
|
||||
printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
*error_log_cnt = info.seq_num;
|
||||
*err_type = info.error_type;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* This doesn't actually zero anything, but it sets the event_logged
|
||||
* word to tell that this event is safely in syslog.
|
||||
*/
|
||||
int nvram_clear_error_log(void)
|
||||
{
|
||||
loff_t tmp_index;
|
||||
int clear_word = ERR_FLAG_ALREADY_LOGGED;
|
||||
int rc;
|
||||
|
||||
if (nvram_error_log_index == -1)
|
||||
return -1;
|
||||
|
||||
tmp_index = nvram_error_log_index;
|
||||
|
||||
rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index);
|
||||
if (rc <= 0) {
|
||||
printk(KERN_ERR "nvram_clear_error_log: Failed nvram_write (%d)\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_PPC_PSERIES */
|
||||
|
||||
module_init(nvram_init);
|
||||
module_exit(nvram_cleanup);
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -193,8 +193,7 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
|
|||
hose->io_resource.start += io_virt_offset;
|
||||
hose->io_resource.end += io_virt_offset;
|
||||
|
||||
pr_debug(" hose->io_resource=0x%016llx...0x%016llx\n",
|
||||
hose->io_resource.start, hose->io_resource.end);
|
||||
pr_debug(" hose->io_resource=%pR\n", &hose->io_resource);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -186,3 +186,10 @@ EXPORT_SYMBOL(__mtdcr);
|
|||
EXPORT_SYMBOL(__mfdcr);
|
||||
#endif
|
||||
EXPORT_SYMBOL(empty_zero_page);
|
||||
|
||||
#ifdef CONFIG_PPC64
|
||||
EXPORT_SYMBOL(__arch_hweight8);
|
||||
EXPORT_SYMBOL(__arch_hweight16);
|
||||
EXPORT_SYMBOL(__arch_hweight32);
|
||||
EXPORT_SYMBOL(__arch_hweight64);
|
||||
#endif
|
||||
|
|
|
@ -11,6 +11,7 @@
|
|||
#include <asm/processor.h>
|
||||
#include <asm/ppc_asm.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
/*
|
||||
* Grab the register values as they are now.
|
||||
|
|
|
@ -1316,6 +1316,10 @@ static int set_dac_range(struct task_struct *child,
|
|||
static long ppc_set_hwdebug(struct task_struct *child,
|
||||
struct ppc_hw_breakpoint *bp_info)
|
||||
{
|
||||
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
|
||||
unsigned long dabr;
|
||||
#endif
|
||||
|
||||
if (bp_info->version != 1)
|
||||
return -ENOTSUPP;
|
||||
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
|
||||
|
@ -1353,11 +1357,10 @@ static long ppc_set_hwdebug(struct task_struct *child,
|
|||
/*
|
||||
* We only support one data breakpoint
|
||||
*/
|
||||
if (((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0) ||
|
||||
((bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0) ||
|
||||
(bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_WRITE) ||
|
||||
(bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) ||
|
||||
(bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
|
||||
if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
|
||||
(bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
|
||||
bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT ||
|
||||
bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
|
||||
return -EINVAL;
|
||||
|
||||
if (child->thread.dabr)
|
||||
|
@ -1366,7 +1369,14 @@ static long ppc_set_hwdebug(struct task_struct *child,
|
|||
if ((unsigned long)bp_info->addr >= TASK_SIZE)
|
||||
return -EIO;
|
||||
|
||||
child->thread.dabr = (unsigned long)bp_info->addr;
|
||||
dabr = (unsigned long)bp_info->addr & ~7UL;
|
||||
dabr |= DABR_TRANSLATION;
|
||||
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
|
||||
dabr |= DABR_DATA_READ;
|
||||
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
|
||||
dabr |= DABR_DATA_WRITE;
|
||||
|
||||
child->thread.dabr = dabr;
|
||||
|
||||
return 1;
|
||||
#endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
|
||||
|
|
|
@@ -280,7 +280,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
/* We only support one DABR and no IABRS at the moment */
if (addr > 0)
break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
ret = put_user(child->thread.dac1, (u32 __user *)data);
#else
ret = put_user(child->thread.dabr, (u32 __user *)data);
#endif
break;
}

@@ -312,6 +316,9 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
case PTRACE_SET_DEBUGREG:
case PTRACE_SYSCALL:
case PTRACE_CONT:
case PPC_PTRACE_GETHWDBGINFO:
case PPC_PTRACE_SETHWDEBUG:
case PPC_PTRACE_DELHWDEBUG:
ret = arch_ptrace(child, request, addr, data);
break;

@@ -41,6 +41,7 @@
#include <asm/atomic.h>
#include <asm/time.h>
#include <asm/mmu.h>
#include <asm/topology.h>

struct rtas_t rtas = {
.lock = __ARCH_SPIN_LOCK_UNLOCKED
@@ -713,6 +714,7 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
int cpu;

slb_set_size(SLB_MIN_SIZE);
stop_topology_update();
printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());

while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
@@ -728,6 +730,7 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
rc = atomic_read(&data->error);

atomic_set(&data->error, rc);
start_topology_update();

if (wake_when_done) {
atomic_set(&data->done, 1);

@@ -437,8 +437,8 @@ static void __init irqstack_early_init(void)
unsigned int i;

/*
 * interrupt stacks must be under 256MB, we cannot afford to take
 * SLB misses on them.
 * Interrupt stacks must be in the first segment since we
 * cannot afford to take SLB misses on them.
 */
for_each_possible_cpu(i) {
softirq_ctx[i] = (struct thread_info *)

@@ -466,7 +466,20 @@ out:
return id;
}

/* Must be called when no change can occur to cpu_present_mask,
/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

/* Must be called when no change can occur to cpu_present_map,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
@@ -514,7 +527,7 @@ int __devinit start_secondary(void *unused)
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
/* Update sibling maps */
base = cpu_first_thread_in_core(cpu);
base = cpu_first_thread_sibling(cpu);
for (i = 0; i < threads_per_core; i++) {
if (cpu_is_offline(base + i))
continue;
@@ -600,7 +613,7 @@ int __cpu_disable(void)
return err;

/* Update sibling maps */
base = cpu_first_thread_in_core(cpu);
base = cpu_first_thread_sibling(cpu);
for (i = 0; i < threads_per_core; i++) {
cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));

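A quick illustration of the cpu/core helpers introduced above, as a hedged stand-alone snippet; threads_shift here is a local stand-in for the kernel's per-machine value (log2 of the SMT thread count).

#include <stdio.h>

static int threads_shift = 2;	/* assume 4 SMT threads per core */

static int cpu_core_index_of_thread(int cpu)  { return cpu >> threads_shift; }
static int cpu_first_thread_of_core(int core) { return core << threads_shift; }

int main(void)
{
	/* With 4-way SMT, logical CPU 11 sits on core 2; core 2 starts at CPU 8. */
	printf("core=%d first_thread=%d\n",
	       cpu_core_index_of_thread(11), cpu_first_thread_of_core(2));
	return 0;
}
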
@@ -155,7 +155,7 @@ EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static unsigned long boot_tb __read_mostly;
static u64 boot_tb __read_mostly;

extern struct timezone sys_tz;
static long timezone_offset;

@@ -5,6 +5,7 @@
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>

/*
 * load_up_altivec(unused, unused, tsk)

@@ -600,6 +600,11 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
vio_cmo_dealloc(viodev, alloc_size);
}

static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
{
return dma_iommu_ops.dma_supported(dev, mask);
}

struct dma_map_ops vio_dma_mapping_ops = {
.alloc_coherent = vio_dma_iommu_alloc_coherent,
.free_coherent = vio_dma_iommu_free_coherent,
@@ -607,6 +612,7 @@ struct dma_map_ops vio_dma_mapping_ops = {
.unmap_sg = vio_dma_iommu_unmap_sg,
.map_page = vio_dma_iommu_map_page,
.unmap_page = vio_dma_iommu_unmap_page,
.dma_supported = vio_dma_iommu_dma_supported,

};

@@ -858,8 +864,7 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)

static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
}

/**
@@ -1244,7 +1249,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
if (firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_set_dma_ops(viodev);
else
viodev->dev.archdata.dma_ops = &dma_iommu_ops;
set_dma_ops(&viodev->dev, &dma_iommu_ops);
set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev));
set_dev_node(&viodev->dev, of_node_to_nid(of_node));

@@ -1252,6 +1257,10 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
viodev->dev.parent = &vio_bus_device.dev;
viodev->dev.bus = &vio_bus_type;
viodev->dev.release = vio_dev_release;
/* needed to ensure proper operation of coherent allocations
 * later, in case driver doesn't set it explicitly */
dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));

/* register with generic device framework */
if (device_register(&viodev->dev)) {

@@ -16,7 +16,7 @@ obj-$(CONFIG_HAS_IOMEM) += devres.o

obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \
memcpy_64.o usercopy_64.o mem_64.o string.o \
checksum_wrappers_64.o
checksum_wrappers_64.o hweight_64.o
obj-$(CONFIG_XMON) += sstep.o ldstfp.o
obj-$(CONFIG_KPROBES) += sstep.o ldstfp.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += sstep.o ldstfp.o

@@ -0,0 +1,110 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2010
 *
 * Author: Anton Blanchard <anton@au.ibm.com>
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>

/* Note: This code relies on -mminimal-toc */

_GLOBAL(__arch_hweight8)
BEGIN_FTR_SECTION
b .__sw_hweight8
nop
nop
FTR_SECTION_ELSE
PPC_POPCNTB(r3,r3)
clrldi r3,r3,64-8
blr
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)

_GLOBAL(__arch_hweight16)
BEGIN_FTR_SECTION
b .__sw_hweight16
nop
nop
nop
nop
FTR_SECTION_ELSE
BEGIN_FTR_SECTION_NESTED(50)
PPC_POPCNTB(r3,r3)
srdi r4,r3,8
add r3,r4,r3
clrldi r3,r3,64-8
blr
FTR_SECTION_ELSE_NESTED(50)
clrlwi r3,r3,16
PPC_POPCNTW(r3,r3)
clrldi r3,r3,64-8
blr
ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 50)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)

_GLOBAL(__arch_hweight32)
BEGIN_FTR_SECTION
b .__sw_hweight32
nop
nop
nop
nop
nop
nop
FTR_SECTION_ELSE
BEGIN_FTR_SECTION_NESTED(51)
PPC_POPCNTB(r3,r3)
srdi r4,r3,16
add r3,r4,r3
srdi r4,r3,8
add r3,r4,r3
clrldi r3,r3,64-8
blr
FTR_SECTION_ELSE_NESTED(51)
PPC_POPCNTW(r3,r3)
clrldi r3,r3,64-8
blr
ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 51)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)

_GLOBAL(__arch_hweight64)
BEGIN_FTR_SECTION
b .__sw_hweight64
nop
nop
nop
nop
nop
nop
nop
nop
FTR_SECTION_ELSE
BEGIN_FTR_SECTION_NESTED(52)
PPC_POPCNTB(r3,r3)
srdi r4,r3,32
add r3,r4,r3
srdi r4,r3,16
add r3,r4,r3
srdi r4,r3,8
add r3,r4,r3
clrldi r3,r3,64-8
blr
FTR_SECTION_ELSE_NESTED(52)
PPC_POPCNTD(r3,r3)
clrldi r3,r3,64-8
blr
ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 52)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)

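The popcntb-based paths above fold per-byte population counts into a single total. For readers who don't speak PowerPC assembly, here is a hedged, self-contained C illustration (not from the kernel) of the same byte-folding idea for the 32-bit case; __builtin_popcount stands in for the popcntb instruction.

#include <stdint.h>

static unsigned int hweight32_by_byte_folding(uint32_t w)
{
	uint64_t x = 0;
	int i;

	/* popcntb produces one population count per byte; emulate that here. */
	for (i = 0; i < 4; i++)
		x |= (uint64_t)__builtin_popcount((w >> (8 * i)) & 0xff) << (8 * i);

	x += x >> 16;		/* srdi r4,r3,16 ; add r3,r4,r3 */
	x += x >> 8;		/* srdi r4,r3,8  ; add r3,r4,r3 */
	return x & 0xff;	/* clrldi r3,r3,64-8 */
}
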
@@ -1070,7 +1070,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
unsigned long access, unsigned long trap)
{
unsigned long vsid;
void *pgdir;
pgd_t *pgdir;
pte_t *ptep;
unsigned long flags;
int rc, ssize, local = 0;

@@ -111,8 +111,8 @@ static unsigned int steal_context_smp(unsigned int id)
 * a core map instead but this will do for now.
 */
for_each_cpu(cpu, mm_cpumask(mm)) {
for (i = cpu_first_thread_in_core(cpu);
i <= cpu_last_thread_in_core(cpu); i++)
for (i = cpu_first_thread_sibling(cpu);
i <= cpu_last_thread_sibling(cpu); i++)
__set_bit(id, stale_map[i]);
cpu = i - 1;
}
@@ -264,14 +264,14 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 */
if (test_bit(id, stale_map[cpu])) {
pr_hardcont(" | stale flush %d [%d..%d]",
id, cpu_first_thread_in_core(cpu),
cpu_last_thread_in_core(cpu));
id, cpu_first_thread_sibling(cpu),
cpu_last_thread_sibling(cpu));

local_flush_tlb_mm(next);

/* XXX This clear should ultimately be part of local_flush_tlb_mm */
for (i = cpu_first_thread_in_core(cpu);
i <= cpu_last_thread_in_core(cpu); i++) {
for (i = cpu_first_thread_sibling(cpu);
i <= cpu_last_thread_sibling(cpu); i++) {
__clear_bit(id, stale_map[i]);
}
}

@@ -20,10 +20,15 @@
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/system.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>

static int numa_enabled = 1;

@@ -163,7 +168,7 @@ static void __init get_node_active_region(unsigned long start_pfn,
work_with_active_regions(nid, get_active_region_work_fn, node_ar);
}

static void __cpuinit map_cpu_to_node(int cpu, int node)
static void map_cpu_to_node(int cpu, int node)
{
numa_cpu_lookup_table[cpu] = node;

@@ -173,7 +178,7 @@ static void __cpuinit map_cpu_to_node(int cpu, int node)
cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#ifdef CONFIG_HOTPLUG_CPU
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
int node = numa_cpu_lookup_table[cpu];
@@ -187,7 +192,7 @@ static void unmap_cpu_from_node(unsigned long cpu)
cpu, node);
}
}
#endif /* CONFIG_HOTPLUG_CPU */
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)

@@ -246,32 +251,41 @@ static void initialize_distance_lookup_table(int nid,
/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int of_node_to_nid_single(struct device_node *device)
static int associativity_to_nid(const unsigned int *associativity)
{
int nid = -1;
const unsigned int *tmp;

if (min_common_depth == -1)
goto out;

tmp = of_get_associativity(device);
if (!tmp)
goto out;

if (tmp[0] >= min_common_depth)
nid = tmp[min_common_depth];
if (associativity[0] >= min_common_depth)
nid = associativity[min_common_depth];

/* POWER4 LPAR uses 0xffff as invalid node */
if (nid == 0xffff || nid >= MAX_NUMNODES)
nid = -1;

if (nid > 0 && tmp[0] >= distance_ref_points_depth)
initialize_distance_lookup_table(nid, tmp);
if (nid > 0 && associativity[0] >= distance_ref_points_depth)
initialize_distance_lookup_table(nid, associativity);

out:
return nid;
}

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
int nid = -1;
const unsigned int *tmp;

tmp = of_get_associativity(device);
if (tmp)
nid = associativity_to_nid(tmp);
return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{

@ -1247,4 +1261,275 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
|
|||
return nid;
|
||||
}
|
||||
|
||||
static u64 hot_add_drconf_memory_max(void)
|
||||
{
|
||||
struct device_node *memory = NULL;
|
||||
unsigned int drconf_cell_cnt = 0;
|
||||
u64 lmb_size = 0;
|
||||
const u32 *dm = 0;
|
||||
|
||||
memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
|
||||
if (memory) {
|
||||
drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
|
||||
lmb_size = of_get_lmb_size(memory);
|
||||
of_node_put(memory);
|
||||
}
|
||||
return lmb_size * drconf_cell_cnt;
|
||||
}
|
||||
|
||||
/*
|
||||
* memory_hotplug_max - return max address of memory that may be added
|
||||
*
|
||||
* This is currently only used on systems that support drconfig memory
|
||||
* hotplug.
|
||||
*/
|
||||
u64 memory_hotplug_max(void)
|
||||
{
|
||||
return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
|
||||
}
|
||||
#endif /* CONFIG_MEMORY_HOTPLUG */
|
||||
|
||||
/* Virtual Processor Home Node (VPHN) support */
|
||||
#ifdef CONFIG_PPC_SPLPAR
|
||||
#define VPHN_NR_CHANGE_CTRS (8)
|
||||
static u8 vphn_cpu_change_counts[NR_CPUS][VPHN_NR_CHANGE_CTRS];
|
||||
static cpumask_t cpu_associativity_changes_mask;
|
||||
static int vphn_enabled;
|
||||
static void set_topology_timer(void);
|
||||
|
||||
/*
|
||||
* Store the current values of the associativity change counters in the
|
||||
* hypervisor.
|
||||
*/
|
||||
static void setup_cpu_associativity_change_counters(void)
|
||||
{
|
||||
int cpu = 0;
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
int i = 0;
|
||||
u8 *counts = vphn_cpu_change_counts[cpu];
|
||||
volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
|
||||
|
||||
for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
|
||||
counts[i] = hypervisor_counts[i];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* The hypervisor maintains a set of 8 associativity change counters in
|
||||
* the VPA of each cpu that correspond to the associativity levels in the
|
||||
* ibm,associativity-reference-points property. When an associativity
|
||||
* level changes, the corresponding counter is incremented.
|
||||
*
|
||||
* Set a bit in cpu_associativity_changes_mask for each cpu whose home
|
||||
* node associativity levels have changed.
|
||||
*
|
||||
* Returns the number of cpus with unhandled associativity changes.
|
||||
*/
|
||||
static int update_cpu_associativity_changes_mask(void)
|
||||
{
|
||||
int cpu = 0, nr_cpus = 0;
|
||||
cpumask_t *changes = &cpu_associativity_changes_mask;
|
||||
|
||||
cpumask_clear(changes);
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
int i, changed = 0;
|
||||
u8 *counts = vphn_cpu_change_counts[cpu];
|
||||
volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
|
||||
|
||||
for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
|
||||
if (hypervisor_counts[i] > counts[i]) {
|
||||
counts[i] = hypervisor_counts[i];
|
||||
changed = 1;
|
||||
}
|
||||
}
|
||||
if (changed) {
|
||||
cpumask_set_cpu(cpu, changes);
|
||||
nr_cpus++;
|
||||
}
|
||||
}
|
||||
|
||||
return nr_cpus;
|
||||
}
|
||||
|
||||
/* 6 64-bit registers unpacked into 12 32-bit associativity values */
|
||||
#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32))
|
||||
|
||||
/*
|
||||
* Convert the associativity domain numbers returned from the hypervisor
|
||||
* to the sequence they would appear in the ibm,associativity property.
|
||||
*/
|
||||
static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
|
||||
{
|
||||
int i = 0;
|
||||
int nr_assoc_doms = 0;
|
||||
const u16 *field = (const u16*) packed;
|
||||
|
||||
#define VPHN_FIELD_UNUSED (0xffff)
|
||||
#define VPHN_FIELD_MSB (0x8000)
|
||||
#define VPHN_FIELD_MASK (~VPHN_FIELD_MSB)
|
||||
|
||||
for (i = 0; i < VPHN_ASSOC_BUFSIZE; i++) {
|
||||
if (*field == VPHN_FIELD_UNUSED) {
|
||||
/* All significant fields processed, and remaining
|
||||
* fields contain the reserved value of all 1's.
|
||||
* Just store them.
|
||||
*/
|
||||
unpacked[i] = *((u32*)field);
|
||||
field += 2;
|
||||
}
|
||||
else if (*field & VPHN_FIELD_MSB) {
|
||||
/* Data is in the lower 15 bits of this field */
|
||||
unpacked[i] = *field & VPHN_FIELD_MASK;
|
||||
field++;
|
||||
nr_assoc_doms++;
|
||||
}
|
||||
else {
|
||||
/* Data is in the lower 15 bits of this field
|
||||
* concatenated with the next 16 bit field
|
||||
*/
|
||||
unpacked[i] = *((u32*)field);
|
||||
field += 2;
|
||||
nr_assoc_doms++;
|
||||
}
|
||||
}
|
||||
|
||||
return nr_assoc_doms;
|
||||
}
|
||||
|
||||
/*
|
||||
* Retrieve the new associativity information for a virtual processor's
|
||||
* home node.
|
||||
*/
|
||||
static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
|
||||
{
|
||||
long rc = 0;
|
||||
long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
|
||||
u64 flags = 1;
|
||||
int hwcpu = get_hard_smp_processor_id(cpu);
|
||||
|
||||
rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
|
||||
vphn_unpack_associativity(retbuf, associativity);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static long vphn_get_associativity(unsigned long cpu,
|
||||
unsigned int *associativity)
|
||||
{
|
||||
long rc = 0;
|
||||
|
||||
rc = hcall_vphn(cpu, associativity);
|
||||
|
||||
switch (rc) {
|
||||
case H_FUNCTION:
|
||||
printk(KERN_INFO
|
||||
"VPHN is not supported. Disabling polling...\n");
|
||||
stop_topology_update();
|
||||
break;
|
||||
case H_HARDWARE:
|
||||
printk(KERN_ERR
|
||||
"hcall_vphn() experienced a hardware fault "
|
||||
"preventing VPHN. Disabling polling...\n");
|
||||
stop_topology_update();
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/*
|
||||
* Update the node maps and sysfs entries for each cpu whose home node
|
||||
* has changed.
|
||||
*/
|
||||
int arch_update_cpu_topology(void)
|
||||
{
|
||||
int cpu = 0, nid = 0, old_nid = 0;
|
||||
unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
|
||||
struct sys_device *sysdev = NULL;
|
||||
|
||||
for_each_cpu_mask(cpu, cpu_associativity_changes_mask) {
|
||||
vphn_get_associativity(cpu, associativity);
|
||||
nid = associativity_to_nid(associativity);
|
||||
|
||||
if (nid < 0 || !node_online(nid))
|
||||
nid = first_online_node;
|
||||
|
||||
old_nid = numa_cpu_lookup_table[cpu];
|
||||
|
||||
/* Disable hotplug while we update the cpu
|
||||
* masks and sysfs.
|
||||
*/
|
||||
get_online_cpus();
|
||||
unregister_cpu_under_node(cpu, old_nid);
|
||||
unmap_cpu_from_node(cpu);
|
||||
map_cpu_to_node(cpu, nid);
|
||||
register_cpu_under_node(cpu, nid);
|
||||
put_online_cpus();
|
||||
|
||||
sysdev = get_cpu_sysdev(cpu);
|
||||
if (sysdev)
|
||||
kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static void topology_work_fn(struct work_struct *work)
|
||||
{
|
||||
rebuild_sched_domains();
|
||||
}
|
||||
static DECLARE_WORK(topology_work, topology_work_fn);
|
||||
|
||||
void topology_schedule_update(void)
|
||||
{
|
||||
schedule_work(&topology_work);
|
||||
}
|
||||
|
||||
static void topology_timer_fn(unsigned long ignored)
|
||||
{
|
||||
if (!vphn_enabled)
|
||||
return;
|
||||
if (update_cpu_associativity_changes_mask() > 0)
|
||||
topology_schedule_update();
|
||||
set_topology_timer();
|
||||
}
|
||||
static struct timer_list topology_timer =
|
||||
TIMER_INITIALIZER(topology_timer_fn, 0, 0);
|
||||
|
||||
static void set_topology_timer(void)
|
||||
{
|
||||
topology_timer.data = 0;
|
||||
topology_timer.expires = jiffies + 60 * HZ;
|
||||
add_timer(&topology_timer);
|
||||
}
|
||||
|
||||
/*
|
||||
* Start polling for VPHN associativity changes.
|
||||
*/
|
||||
int start_topology_update(void)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
if (firmware_has_feature(FW_FEATURE_VPHN)) {
|
||||
vphn_enabled = 1;
|
||||
setup_cpu_associativity_change_counters();
|
||||
init_timer_deferrable(&topology_timer);
|
||||
set_topology_timer();
|
||||
rc = 1;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
__initcall(start_topology_update);
|
||||
|
||||
/*
|
||||
* Disable polling for VPHN associativity changes.
|
||||
*/
|
||||
int stop_topology_update(void)
|
||||
{
|
||||
vphn_enabled = 0;
|
||||
return del_timer_sync(&topology_timer);
|
||||
}
|
||||
#endif /* CONFIG_PPC_SPLPAR */
|
||||
|
|
|
@@ -78,7 +78,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)

/* pgdir take page or two with 4K pages and a page fraction otherwise */
#ifndef CONFIG_PPC_4K_PAGES
ret = (pgd_t *)kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
ret = kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
#else
ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
PGDIR_ORDER - PAGE_SHIFT);

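An aside on why the cast above could be dropped: in C, a void pointer converts implicitly to any object pointer type, so casting the return of kzalloc() adds nothing. A hedged, user-space illustration with calloc() standing in for kzalloc():

#include <stdlib.h>

struct pgd_like { unsigned long entry[4]; };

int main(void)
{
	/* calloc() returns void *, which assigns to struct pgd_like *
	 * without an explicit cast in C (unlike in C++). */
	struct pgd_like *pgdir = calloc(1, sizeof(*pgdir));

	free(pgdir);
	return 0;
}
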
@@ -230,6 +230,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
area = get_vm_area_caller(size, VM_IOREMAP, caller);
if (area == 0)
return NULL;
area->phys_addr = p;
v = (unsigned long) area->addr;
} else {
v = (ioremap_bot -= size);

@@ -223,6 +223,8 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
caller);
if (area == NULL)
return NULL;

area->phys_addr = paligned;
ret = __ioremap_at(paligned, area->addr, size, flags);
if (!ret)
vunmap(area->addr);

@@ -1,4 +1,7 @@
obj-$(CONFIG_44x) := misc_44x.o idle.o
obj-$(CONFIG_44x) += misc_44x.o
ifneq ($(CONFIG_PPC4xx_CPM),y)
obj-$(CONFIG_44x) += idle.o
endif
obj-$(CONFIG_PPC44x_SIMPLE) += ppc44x_simple.o
obj-$(CONFIG_EBONY) += ebony.o
obj-$(CONFIG_SAM440EP) += sam440ep.o

@@ -313,13 +313,14 @@ config OF_RTC
source "arch/powerpc/sysdev/bestcomm/Kconfig"

config MPC8xxx_GPIO
bool "MPC8xxx GPIO support"
depends on PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || FSL_SOC_BOOKE || PPC_86xx
bool "MPC512x/MPC8xxx GPIO support"
depends on PPC_MPC512x || PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || \
FSL_SOC_BOOKE || PPC_86xx
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
help
Say Y here if you're going to use hardware that connects to the
MPC831x/834x/837x/8572/8610 GPIOs.
MPC512x/831x/834x/837x/8572/8610 GPIOs.

config SIMPLE_GPIO
bool "Support for simple, memory-mapped GPIO controllers"

@@ -76,7 +76,7 @@ static void __init celleb_init_direct_mapping(void)

static void celleb_dma_dev_setup(struct device *dev)
{
dev->archdata.dma_ops = get_pci_dma_ops();
set_dma_ops(dev, &dma_direct_ops);
set_dma_offset(dev, celleb_dma_direct_offset);
}

@@ -106,7 +106,6 @@ static struct notifier_block celleb_of_bus_notifier = {
static int __init celleb_init_iommu(void)
{
celleb_init_direct_mapping();
set_pci_dma_ops(&dma_direct_ops);
ppc_md.pci_dma_dev_setup = celleb_pci_dma_dev_setup;
bus_register_notifier(&platform_bus_type, &celleb_of_bus_notifier);

@@ -36,10 +36,9 @@ static int spu_alloc_lscsa_std(struct spu_state *csa)
struct spu_lscsa *lscsa;
unsigned char *p;

lscsa = vmalloc(sizeof(struct spu_lscsa));
lscsa = vzalloc(sizeof(struct spu_lscsa));
if (!lscsa)
return -ENOMEM;
memset(lscsa, 0, sizeof(struct spu_lscsa));
csa->lscsa = lscsa;

/* Set LS pages reserved to allow for user-space mapping. */

@@ -29,6 +29,10 @@

extern spinlock_t rtc_lock;

#define NVRAM_AS0 0x74
#define NVRAM_AS1 0x75
#define NVRAM_DATA 0x77

static int nvram_as1 = NVRAM_AS1;
static int nvram_as0 = NVRAM_AS0;
static int nvram_data = NVRAM_DATA;

@ -1045,71 +1045,9 @@ static const struct file_operations mf_side_proc_fops = {
|
|||
.write = mf_side_proc_write,
|
||||
};
|
||||
|
||||
#if 0
|
||||
static void mf_getSrcHistory(char *buffer, int size)
|
||||
{
|
||||
struct IplTypeReturnStuff return_stuff;
|
||||
struct pending_event *ev = new_pending_event();
|
||||
int rc = 0;
|
||||
char *pages[4];
|
||||
|
||||
pages[0] = kmalloc(4096, GFP_ATOMIC);
|
||||
pages[1] = kmalloc(4096, GFP_ATOMIC);
|
||||
pages[2] = kmalloc(4096, GFP_ATOMIC);
|
||||
pages[3] = kmalloc(4096, GFP_ATOMIC);
|
||||
if ((ev == NULL) || (pages[0] == NULL) || (pages[1] == NULL)
|
||||
|| (pages[2] == NULL) || (pages[3] == NULL))
|
||||
return -ENOMEM;
|
||||
|
||||
return_stuff.xType = 0;
|
||||
return_stuff.xRc = 0;
|
||||
return_stuff.xDone = 0;
|
||||
ev->event.hp_lp_event.xSubtype = 6;
|
||||
ev->event.hp_lp_event.x.xSubtypeData =
|
||||
subtype_data('M', 'F', 'V', 'I');
|
||||
ev->event.data.vsp_cmd.xEvent = &return_stuff;
|
||||
ev->event.data.vsp_cmd.cmd = 4;
|
||||
ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
|
||||
ev->event.data.vsp_cmd.result_code = 0xFF;
|
||||
ev->event.data.vsp_cmd.reserved = 0;
|
||||
ev->event.data.vsp_cmd.sub_data.page[0] = iseries_hv_addr(pages[0]);
|
||||
ev->event.data.vsp_cmd.sub_data.page[1] = iseries_hv_addr(pages[1]);
|
||||
ev->event.data.vsp_cmd.sub_data.page[2] = iseries_hv_addr(pages[2]);
|
||||
ev->event.data.vsp_cmd.sub_data.page[3] = iseries_hv_addr(pages[3]);
|
||||
mb();
|
||||
if (signal_event(ev) != 0)
|
||||
return;
|
||||
|
||||
while (return_stuff.xDone != 1)
|
||||
udelay(10);
|
||||
if (return_stuff.xRc == 0)
|
||||
memcpy(buffer, pages[0], size);
|
||||
kfree(pages[0]);
|
||||
kfree(pages[1]);
|
||||
kfree(pages[2]);
|
||||
kfree(pages[3]);
|
||||
}
|
||||
#endif
|
||||
|
||||
static int mf_src_proc_show(struct seq_file *m, void *v)
|
||||
{
|
||||
#if 0
|
||||
int len;
|
||||
|
||||
mf_getSrcHistory(page, count);
|
||||
len = count;
|
||||
len -= off;
|
||||
if (len < count) {
|
||||
*eof = 1;
|
||||
if (len <= 0)
|
||||
return 0;
|
||||
} else
|
||||
len = count;
|
||||
*start = page + off;
|
||||
return len;
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
static int mf_src_proc_open(struct inode *inode, struct file *file)
|
||||
|
|
|
@ -156,20 +156,12 @@ static void iommu_table_iobmap_setup(void)
|
|||
|
||||
static void pci_dma_bus_setup_pasemi(struct pci_bus *bus)
|
||||
{
|
||||
struct device_node *dn;
|
||||
|
||||
pr_debug("pci_dma_bus_setup, bus %p, bus->self %p\n", bus, bus->self);
|
||||
|
||||
if (!iommu_table_iobmap_inited) {
|
||||
iommu_table_iobmap_inited = 1;
|
||||
iommu_table_iobmap_setup();
|
||||
}
|
||||
|
||||
dn = pci_bus_to_OF_node(bus);
|
||||
|
||||
if (dn)
|
||||
PCI_DN(dn)->iommu_table = &iommu_table_iobmap;
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
@ -192,9 +184,6 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
|
|||
set_iommu_table_base(&dev->dev, &iommu_table_iobmap);
|
||||
}
|
||||
|
||||
static void pci_dma_bus_setup_null(struct pci_bus *b) { }
|
||||
static void pci_dma_dev_setup_null(struct pci_dev *d) { }
|
||||
|
||||
int __init iob_init(struct device_node *dn)
|
||||
{
|
||||
unsigned long tmp;
|
||||
|
@ -251,14 +240,8 @@ void __init iommu_init_early_pasemi(void)
|
|||
iommu_off = of_chosen &&
|
||||
of_get_property(of_chosen, "linux,iommu-off", NULL);
|
||||
#endif
|
||||
if (iommu_off) {
|
||||
/* Direct I/O, IOMMU off */
|
||||
ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_null;
|
||||
ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_null;
|
||||
set_pci_dma_ops(&dma_direct_ops);
|
||||
|
||||
if (iommu_off)
|
||||
return;
|
||||
}
|
||||
|
||||
iob_init(NULL);
|
||||
|
||||
|
|
|
@@ -506,6 +506,15 @@ static int __init pmac_declare_of_platform_devices(void)
of_platform_device_create(np, "smu", NULL);
of_node_put(np);
}
np = of_find_node_by_type(NULL, "fcu");
if (np == NULL) {
/* Some machines have strangely broken device-tree */
np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e");
}
if (np) {
of_platform_device_create(np, "temperature", NULL);
of_node_put(np);
}

return 0;
}

@@ -33,6 +33,16 @@ config PSERIES_MSI
depends on PCI_MSI && EEH
default y

config PSERIES_ENERGY
tristate "pSeries energy management capabilities driver"
depends on PPC_PSERIES
default y
help
Provides interface to platform energy management capabilities
on supported PSERIES platforms.
Provides: /sys/devices/system/cpu/pseries_(de)activation_hint_list
and /sys/devices/system/cpu/cpuN/pseries_(de)activation_hint

config SCANLOG
tristate "Scanlog dump interface"
depends on RTAS_PROC && PPC_PSERIES

@@ -11,6 +11,7 @@ obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o eeh_sysfs.o
obj-$(CONFIG_KEXEC) += kexec.o
obj-$(CONFIG_PCI) += pci.o pci_dlpar.o
obj-$(CONFIG_PSERIES_MSI) += msi.o
obj-$(CONFIG_PSERIES_ENERGY) += pseries_energy.o

obj-$(CONFIG_HOTPLUG_CPU) += hotplug-cpu.o
obj-$(CONFIG_MEMORY_HOTPLUG) += hotplug-memory.o

@@ -55,6 +55,7 @@ firmware_features_table[FIRMWARE_MAX_FEATURES] = {
{FW_FEATURE_XDABR, "hcall-xdabr"},
{FW_FEATURE_MULTITCE, "hcall-multi-tce"},
{FW_FEATURE_SPLPAR, "hcall-splpar"},
{FW_FEATURE_VPHN, "hcall-vphn"},
};

/* Build up the firmware features bitmask using the contents of

@@ -11,6 +11,7 @@
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>

#define STK_PARM(i) (48 + ((i)-3)*8)

@ -140,7 +140,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static DEFINE_PER_CPU(u64 *, tce_page) = NULL;
|
||||
static DEFINE_PER_CPU(u64 *, tce_page);
|
||||
|
||||
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
|
||||
long npages, unsigned long uaddr,
|
||||
|
@ -323,14 +323,13 @@ static void iommu_table_setparms(struct pci_controller *phb,
|
|||
static void iommu_table_setparms_lpar(struct pci_controller *phb,
|
||||
struct device_node *dn,
|
||||
struct iommu_table *tbl,
|
||||
const void *dma_window,
|
||||
int bussubno)
|
||||
const void *dma_window)
|
||||
{
|
||||
unsigned long offset, size;
|
||||
|
||||
tbl->it_busno = bussubno;
|
||||
of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);
|
||||
|
||||
tbl->it_busno = phb->bus->number;
|
||||
tbl->it_base = 0;
|
||||
tbl->it_blocksize = 16;
|
||||
tbl->it_type = TCE_PCI;
|
||||
|
@ -450,14 +449,10 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
|
|||
if (!ppci->iommu_table) {
|
||||
tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
|
||||
ppci->phb->node);
|
||||
iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window,
|
||||
bus->number);
|
||||
iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
|
||||
ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
|
||||
pr_debug(" created table: %p\n", ppci->iommu_table);
|
||||
}
|
||||
|
||||
if (pdn != dn)
|
||||
PCI_DN(dn)->iommu_table = ppci->iommu_table;
|
||||
}
|
||||
|
||||
|
||||
|
@ -533,21 +528,11 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
|
|||
}
|
||||
pr_debug(" parent is %s\n", pdn->full_name);
|
||||
|
||||
/* Check for parent == NULL so we don't try to setup the empty EADS
|
||||
* slots on POWER4 machines.
|
||||
*/
|
||||
if (dma_window == NULL || pdn->parent == NULL) {
|
||||
pr_debug(" no dma window for device, linking to parent\n");
|
||||
set_iommu_table_base(&dev->dev, PCI_DN(pdn)->iommu_table);
|
||||
return;
|
||||
}
|
||||
|
||||
pci = PCI_DN(pdn);
|
||||
if (!pci->iommu_table) {
|
||||
tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
|
||||
pci->phb->node);
|
||||
iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window,
|
||||
pci->phb->bus->number);
|
||||
iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
|
||||
pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
|
||||
pr_debug(" created table: %p\n", pci->iommu_table);
|
||||
} else {
|
||||
|
@ -571,8 +556,7 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti
|
|||
|
||||
switch (action) {
|
||||
case PSERIES_RECONFIG_REMOVE:
|
||||
if (pci && pci->iommu_table &&
|
||||
of_get_property(np, "ibm,dma-window", NULL))
|
||||
if (pci && pci->iommu_table)
|
||||
iommu_free_table(pci->iommu_table, np->full_name);
|
||||
break;
|
||||
default:
|
||||
|
@ -589,13 +573,8 @@ static struct notifier_block iommu_reconfig_nb = {
|
|||
/* These are called very early. */
|
||||
void iommu_init_early_pSeries(void)
|
||||
{
|
||||
if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL)) {
|
||||
/* Direct I/O, IOMMU off */
|
||||
ppc_md.pci_dma_dev_setup = NULL;
|
||||
ppc_md.pci_dma_bus_setup = NULL;
|
||||
set_pci_dma_ops(&dma_direct_ops);
|
||||
if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
|
||||
return;
|
||||
}
|
||||
|
||||
if (firmware_has_feature(FW_FEATURE_LPAR)) {
|
||||
if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
|
||||
|
@@ -622,3 +601,17 @@ void iommu_init_early_pSeries(void)
set_pci_dma_ops(&dma_iommu_ops);
}

static int __init disable_multitce(char *str)
{
if (strcmp(str, "off") == 0 &&
firmware_has_feature(FW_FEATURE_LPAR) &&
firmware_has_feature(FW_FEATURE_MULTITCE)) {
printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
ppc_md.tce_build = tce_build_pSeriesLP;
ppc_md.tce_free = tce_free_pSeriesLP;
powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;
}
return 1;
}

__setup("multitce=", disable_multitce);

@@ -627,6 +627,18 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static int __init disable_bulk_remove(char *str)
{
if (strcmp(str, "off") == 0 &&
firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
printk(KERN_INFO "Disabling BULK_REMOVE firmware feature");
powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
}
return 1;
}

__setup("bulk_remove=", disable_bulk_remove);

void __init hpte_init_lpar(void)
{
ppc_md.hpte_invalidate = pSeries_lpar_hpte_invalidate;

@ -22,11 +22,25 @@
|
|||
#include <asm/prom.h>
|
||||
#include <asm/machdep.h>
|
||||
|
||||
/* Max bytes to read/write in one go */
|
||||
#define NVRW_CNT 0x20
|
||||
|
||||
static unsigned int nvram_size;
|
||||
static int nvram_fetch, nvram_store;
|
||||
static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */
|
||||
static DEFINE_SPINLOCK(nvram_lock);
|
||||
|
||||
static long nvram_error_log_index = -1;
|
||||
static long nvram_error_log_size = 0;
|
||||
|
||||
struct err_log_info {
|
||||
int error_type;
|
||||
unsigned int seq_num;
|
||||
};
|
||||
#define NVRAM_MAX_REQ 2079
|
||||
#define NVRAM_MIN_REQ 1055
|
||||
|
||||
#define NVRAM_LOG_PART_NAME "ibm,rtas-log"
|
||||
|
||||
static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index)
|
||||
{
|
||||
|
@ -119,6 +133,197 @@ static ssize_t pSeries_nvram_get_size(void)
|
|||
return nvram_size ? nvram_size : -ENODEV;
|
||||
}
|
||||
|
||||
|
||||
/* nvram_write_error_log
|
||||
*
|
||||
* We need to buffer the error logs into nvram to ensure that we have
|
||||
* the failure information to decode. If we have a severe error there
|
||||
* is no way to guarantee that the OS or the machine is in a state to
|
||||
* get back to user land and write the error to disk. For example if
|
||||
* the SCSI device driver causes a Machine Check by writing to a bad
|
||||
* IO address, there is no way of guaranteeing that the device driver
|
||||
* is in any state that it would also be able to write the error data
|
||||
* captured to disk, thus we buffer it in NVRAM for analysis on the
|
||||
* next boot.
|
||||
*
|
||||
* In NVRAM the partition containing the error log buffer will look like:
|
||||
* Header (in bytes):
|
||||
* +-----------+----------+--------+------------+------------------+
|
||||
* | signature | checksum | length | name | data |
|
||||
* |0 |1 |2 3|4 15|16 length-1|
|
||||
* +-----------+----------+--------+------------+------------------+
|
||||
*
|
||||
* The 'data' section would look like (in bytes):
|
||||
* +--------------+------------+-----------------------------------+
|
||||
* | event_logged | sequence # | error log |
|
||||
* |0 3|4 7|8 nvram_error_log_size-1|
|
||||
* +--------------+------------+-----------------------------------+
|
||||
*
|
||||
* event_logged: 0 if event has not been logged to syslog, 1 if it has
|
||||
* sequence #: The unique sequence # for each event. (until it wraps)
|
||||
* error log: The error log from event_scan
|
||||
*/
|
||||
int nvram_write_error_log(char * buff, int length,
|
||||
unsigned int err_type, unsigned int error_log_cnt)
|
||||
{
|
||||
int rc;
|
||||
loff_t tmp_index;
|
||||
struct err_log_info info;
|
||||
|
||||
if (nvram_error_log_index == -1) {
|
||||
return -ESPIPE;
|
||||
}
|
||||
|
||||
if (length > nvram_error_log_size) {
|
||||
length = nvram_error_log_size;
|
||||
}
|
||||
|
||||
info.error_type = err_type;
|
||||
info.seq_num = error_log_cnt;
|
||||
|
||||
tmp_index = nvram_error_log_index;
|
||||
|
||||
rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index);
|
||||
if (rc <= 0) {
|
||||
printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = ppc_md.nvram_write(buff, length, &tmp_index);
|
||||
if (rc <= 0) {
|
||||
printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* nvram_read_error_log
|
||||
*
|
||||
* Reads nvram for error log for at most 'length'
|
||||
*/
|
||||
int nvram_read_error_log(char * buff, int length,
|
||||
unsigned int * err_type, unsigned int * error_log_cnt)
|
||||
{
|
||||
int rc;
|
||||
loff_t tmp_index;
|
||||
struct err_log_info info;
|
||||
|
||||
if (nvram_error_log_index == -1)
|
||||
return -1;
|
||||
|
||||
if (length > nvram_error_log_size)
|
||||
length = nvram_error_log_size;
|
||||
|
||||
tmp_index = nvram_error_log_index;
|
||||
|
||||
rc = ppc_md.nvram_read((char *)&info, sizeof(struct err_log_info), &tmp_index);
|
||||
if (rc <= 0) {
|
||||
printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = ppc_md.nvram_read(buff, length, &tmp_index);
|
||||
if (rc <= 0) {
|
||||
printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
*error_log_cnt = info.seq_num;
|
||||
*err_type = info.error_type;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* This doesn't actually zero anything, but it sets the event_logged
|
||||
* word to tell that this event is safely in syslog.
|
||||
*/
|
||||
int nvram_clear_error_log(void)
|
||||
{
|
||||
loff_t tmp_index;
|
||||
int clear_word = ERR_FLAG_ALREADY_LOGGED;
|
||||
int rc;
|
||||
|
||||
if (nvram_error_log_index == -1)
|
||||
return -1;
|
||||
|
||||
tmp_index = nvram_error_log_index;
|
||||
|
||||
rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index);
|
||||
if (rc <= 0) {
|
||||
printk(KERN_ERR "nvram_clear_error_log: Failed nvram_write (%d)\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* pseries_nvram_init_log_partition
|
||||
*
|
||||
* This will setup the partition we need for buffering the
|
||||
* error logs and cleanup partitions if needed.
|
||||
*
|
||||
* The general strategy is the following:
|
||||
* 1.) If there is log partition large enough then use it.
|
||||
* 2.) If there is none large enough, search
|
||||
* for a free partition that is large enough.
|
||||
* 3.) If there is not a free partition large enough remove
|
||||
* _all_ OS partitions and consolidate the space.
|
||||
* 4.) Will first try getting a chunk that will satisfy the maximum
|
||||
* error log size (NVRAM_MAX_REQ).
|
||||
* 5.) If the max chunk cannot be allocated then try finding a chunk
|
||||
* that will satisfy the minimum needed (NVRAM_MIN_REQ).
|
||||
*/
|
||||
static int __init pseries_nvram_init_log_partition(void)
|
||||
{
|
||||
loff_t p;
|
||||
int size;
|
||||
|
||||
/* Scan nvram for partitions */
|
||||
nvram_scan_partitions();
|
||||
|
||||
/* Look for ours */
|
||||
p = nvram_find_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS, &size);
|
||||
|
||||
/* Found one but too small, remove it */
|
||||
if (p && size < NVRAM_MIN_REQ) {
|
||||
pr_info("nvram: Found too small "NVRAM_LOG_PART_NAME" partition"
|
||||
",removing it...");
|
||||
nvram_remove_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS);
|
||||
p = 0;
|
||||
}
|
||||
|
||||
/* Create one if we didn't find */
|
||||
if (!p) {
|
||||
p = nvram_create_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS,
|
||||
NVRAM_MAX_REQ, NVRAM_MIN_REQ);
|
||||
/* No room for it, try to get rid of any OS partition
|
||||
* and try again
|
||||
*/
|
||||
if (p == -ENOSPC) {
|
||||
pr_info("nvram: No room to create "NVRAM_LOG_PART_NAME
|
||||
" partition, deleting all OS partitions...");
|
||||
nvram_remove_partition(NULL, NVRAM_SIG_OS);
|
||||
p = nvram_create_partition(NVRAM_LOG_PART_NAME,
|
||||
NVRAM_SIG_OS, NVRAM_MAX_REQ,
|
||||
NVRAM_MIN_REQ);
|
||||
}
|
||||
}
|
||||
|
||||
if (p <= 0) {
|
||||
pr_err("nvram: Failed to find or create "NVRAM_LOG_PART_NAME
|
||||
" partition, err %d\n", (int)p);
|
||||
return 0;
|
||||
}
|
||||
|
||||
nvram_error_log_index = p;
|
||||
nvram_error_log_size = nvram_get_partition_size(p) -
|
||||
sizeof(struct err_log_info);
|
||||
|
||||
return 0;
|
||||
}
|
||||
machine_arch_initcall(pseries, pseries_nvram_init_log_partition);
|
||||
|
||||
int __init pSeries_nvram_init(void)
|
||||
{
|
||||
struct device_node *nvram;
|
||||
|
|
|
@ -0,0 +1,326 @@
|
|||
/*
|
||||
* POWER platform energy management driver
|
||||
* Copyright (C) 2010 IBM Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This pseries platform device driver provides access to
|
||||
* platform energy management capabilities.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/sysdev.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/of.h>
|
||||
#include <asm/cputhreads.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/hvcall.h>
|
||||
|
||||
|
||||
#define MODULE_VERS "1.0"
|
||||
#define MODULE_NAME "pseries_energy"
|
||||
|
||||
/* Driver flags */
|
||||
|
||||
static int sysfs_entries;
|
||||
|
||||
/* Helper routines */
|
||||
|
||||
/*
|
||||
* Routine to detect firmware support for hcall
|
||||
* return 1 if H_BEST_ENERGY is supported
|
||||
* else return 0
|
||||
*/
|
||||
|
||||
static int check_for_h_best_energy(void)
|
||||
{
|
||||
struct device_node *rtas = NULL;
|
||||
const char *hypertas, *s;
|
||||
int length;
|
||||
int rc = 0;
|
||||
|
||||
rtas = of_find_node_by_path("/rtas");
|
||||
if (!rtas)
|
||||
return 0;
|
||||
|
||||
hypertas = of_get_property(rtas, "ibm,hypertas-functions", &length);
|
||||
if (!hypertas) {
|
||||
of_node_put(rtas);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* hypertas will have list of strings with hcall names */
|
||||
for (s = hypertas; s < hypertas + length; s += strlen(s) + 1) {
|
||||
if (!strncmp("hcall-best-energy-1", s, 19)) {
|
||||
rc = 1; /* Found the string */
|
||||
break;
|
||||
}
|
||||
}
|
||||
of_node_put(rtas);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* Helper Routines to convert between drc_index to cpu numbers */
|
||||
|
||||
static u32 cpu_to_drc_index(int cpu)
|
||||
{
|
||||
struct device_node *dn = NULL;
|
||||
const int *indexes;
|
||||
int i;
|
||||
int rc = 1;
|
||||
u32 ret = 0;
|
||||
|
||||
dn = of_find_node_by_path("/cpus");
|
||||
if (dn == NULL)
|
||||
goto err;
|
||||
indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
|
||||
if (indexes == NULL)
|
||||
goto err_of_node_put;
|
||||
/* Convert logical cpu number to core number */
|
||||
i = cpu_core_index_of_thread(cpu);
|
||||
/*
|
||||
* The first element indexes[0] is the number of drc_indexes
|
||||
* returned in the list. Hence i+1 will get the drc_index
|
||||
* corresponding to core number i.
|
||||
*/
|
||||
WARN_ON(i > indexes[0]);
|
||||
ret = indexes[i + 1];
|
||||
rc = 0;
|
||||
|
||||
err_of_node_put:
|
||||
of_node_put(dn);
|
||||
err:
|
||||
if (rc)
|
||||
printk(KERN_WARNING "cpu_to_drc_index(%d) failed", cpu);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int drc_index_to_cpu(u32 drc_index)
|
||||
{
|
||||
struct device_node *dn = NULL;
|
||||
const int *indexes;
|
||||
int i, cpu = 0;
|
||||
int rc = 1;
|
||||
|
||||
dn = of_find_node_by_path("/cpus");
|
||||
if (dn == NULL)
|
||||
goto err;
|
||||
indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
|
||||
if (indexes == NULL)
|
||||
goto err_of_node_put;
|
||||
/*
|
||||
* First element in the array is the number of drc_indexes
|
||||
* returned. Search through the list to find the matching
|
||||
* drc_index and get the core number
|
||||
*/
|
||||
for (i = 0; i < indexes[0]; i++) {
|
||||
if (indexes[i + 1] == drc_index)
|
||||
break;
|
||||
}
|
||||
/* Convert core number to logical cpu number */
|
||||
cpu = cpu_first_thread_of_core(i);
|
||||
rc = 0;
|
||||
|
||||
err_of_node_put:
|
||||
of_node_put(dn);
|
||||
err:
|
||||
if (rc)
|
||||
printk(KERN_WARNING "drc_index_to_cpu(%d) failed", drc_index);
|
||||
return cpu;
|
||||
}
|
||||
|
||||
/*
|
||||
* pseries hypervisor call H_BEST_ENERGY provides hints to OS on
|
||||
* preferred logical cpus to activate or deactivate for optimized
|
||||
* energy consumption.
|
||||
*/
|
||||
|
||||
#define FLAGS_MODE1 0x004E200000080E01
|
||||
#define FLAGS_MODE2 0x004E200000080401
|
||||
#define FLAGS_ACTIVATE 0x100
|
||||
|
||||
static ssize_t get_best_energy_list(char *page, int activate)
|
||||
{
|
||||
int rc, cnt, i, cpu;
|
||||
unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
|
||||
unsigned long flags = 0;
|
||||
u32 *buf_page;
|
||||
char *s = page;
|
||||
|
||||
buf_page = (u32 *) get_zeroed_page(GFP_KERNEL);
|
||||
if (!buf_page)
|
||||
return -ENOMEM;
|
||||
|
||||
flags = FLAGS_MODE1;
|
||||
if (activate)
|
||||
flags |= FLAGS_ACTIVATE;
|
||||
|
||||
rc = plpar_hcall9(H_BEST_ENERGY, retbuf, flags, 0, __pa(buf_page),
|
||||
0, 0, 0, 0, 0, 0);
|
||||
if (rc != H_SUCCESS) {
|
||||
free_page((unsigned long) buf_page);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
cnt = retbuf[0];
|
||||
for (i = 0; i < cnt; i++) {
|
||||
cpu = drc_index_to_cpu(buf_page[2*i+1]);
|
||||
if ((cpu_online(cpu) && !activate) ||
|
||||
(!cpu_online(cpu) && activate))
|
||||
s += sprintf(s, "%d,", cpu);
|
||||
}
|
||||
if (s > page) { /* Something to show */
|
||||
s--; /* Suppress last comma */
|
||||
s += sprintf(s, "\n");
|
||||
}
|
||||
|
||||
free_page((unsigned long) buf_page);
|
||||
return s-page;
|
||||
}
|
||||
|
||||
static ssize_t get_best_energy_data(struct sys_device *dev,
|
||||
char *page, int activate)
|
||||
{
|
||||
int rc;
|
||||
unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
|
||||
unsigned long flags = 0;
|
||||
|
||||
flags = FLAGS_MODE2;
|
||||
if (activate)
|
||||
flags |= FLAGS_ACTIVATE;
|
||||
|
||||
rc = plpar_hcall9(H_BEST_ENERGY, retbuf, flags,
|
||||
cpu_to_drc_index(dev->id),
|
||||
0, 0, 0, 0, 0, 0, 0);
|
||||
|
||||
if (rc != H_SUCCESS)
|
||||
return -EINVAL;
|
||||
|
||||
return sprintf(page, "%lu\n", retbuf[1] >> 32);
|
||||
}
|
||||
|
||||
/* Wrapper functions */
|
||||
|
||||
static ssize_t cpu_activate_hint_list_show(struct sysdev_class *class,
|
||||
struct sysdev_class_attribute *attr, char *page)
|
||||
{
|
||||
return get_best_energy_list(page, 1);
|
||||
}
|
||||
|
||||
static ssize_t cpu_deactivate_hint_list_show(struct sysdev_class *class,
|
||||
struct sysdev_class_attribute *attr, char *page)
|
||||
{
|
||||
return get_best_energy_list(page, 0);
|
||||
}
|
||||
|
||||
static ssize_t percpu_activate_hint_show(struct sys_device *dev,
|
||||
struct sysdev_attribute *attr, char *page)
|
||||
{
|
||||
return get_best_energy_data(dev, page, 1);
|
||||
}
|
||||
|
||||
static ssize_t percpu_deactivate_hint_show(struct sys_device *dev,
|
||||
struct sysdev_attribute *attr, char *page)
|
||||
{
|
||||
return get_best_energy_data(dev, page, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Create sysfs interface:
|
||||
* /sys/devices/system/cpu/pseries_activate_hint_list
|
||||
* /sys/devices/system/cpu/pseries_deactivate_hint_list
|
||||
* Comma separated list of cpus to activate or deactivate
|
||||
* /sys/devices/system/cpu/cpuN/pseries_activate_hint
|
||||
* /sys/devices/system/cpu/cpuN/pseries_deactivate_hint
|
||||
* Per-cpu value of the hint
|
||||
*/
|
||||
|
||||
struct sysdev_class_attribute attr_cpu_activate_hint_list =
|
||||
_SYSDEV_CLASS_ATTR(pseries_activate_hint_list, 0444,
|
||||
cpu_activate_hint_list_show, NULL);
|
||||
|
||||
struct sysdev_class_attribute attr_cpu_deactivate_hint_list =
|
||||
_SYSDEV_CLASS_ATTR(pseries_deactivate_hint_list, 0444,
|
||||
cpu_deactivate_hint_list_show, NULL);
|
||||
|
||||
struct sysdev_attribute attr_percpu_activate_hint =
|
||||
_SYSDEV_ATTR(pseries_activate_hint, 0444,
|
||||
percpu_activate_hint_show, NULL);
|
||||
|
||||
struct sysdev_attribute attr_percpu_deactivate_hint =
|
||||
_SYSDEV_ATTR(pseries_deactivate_hint, 0444,
|
||||
percpu_deactivate_hint_show, NULL);
|
||||
|
||||
static int __init pseries_energy_init(void)
|
||||
{
|
||||
int cpu, err;
|
||||
struct sys_device *cpu_sys_dev;
|
||||
|
||||
if (!check_for_h_best_energy()) {
|
||||
printk(KERN_INFO "Hypercall H_BEST_ENERGY not supported\n");
|
||||
return 0;
|
||||
}
|
||||
/* Create the sysfs files */
|
||||
err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
|
||||
&attr_cpu_activate_hint_list.attr);
|
||||
if (!err)
|
||||
err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
|
||||
&attr_cpu_deactivate_hint_list.attr);
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
for_each_possible_cpu(cpu) {
|
||||
cpu_sys_dev = get_cpu_sysdev(cpu);
|
||||
err = sysfs_create_file(&cpu_sys_dev->kobj,
|
||||
&attr_percpu_activate_hint.attr);
|
||||
if (err)
|
||||
break;
|
||||
err = sysfs_create_file(&cpu_sys_dev->kobj,
|
||||
&attr_percpu_deactivate_hint.attr);
|
||||
if (err)
|
||||
break;
|
||||
}
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
sysfs_entries = 1; /* Removed entries on cleanup */
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
static void __exit pseries_energy_cleanup(void)
|
||||
{
|
||||
int cpu;
|
||||
struct sys_device *cpu_sys_dev;
|
||||
|
||||
if (!sysfs_entries)
|
||||
return;
|
||||
|
||||
/* Remove the sysfs files */
|
||||
sysfs_remove_file(&cpu_sysdev_class.kset.kobj,
|
||||
&attr_cpu_activate_hint_list.attr);
|
||||
|
||||
sysfs_remove_file(&cpu_sysdev_class.kset.kobj,
|
||||
&attr_cpu_deactivate_hint_list.attr);
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
cpu_sys_dev = get_cpu_sysdev(cpu);
|
||||
sysfs_remove_file(&cpu_sys_dev->kobj,
|
||||
&attr_percpu_activate_hint.attr);
|
||||
sysfs_remove_file(&cpu_sys_dev->kobj,
|
||||
&attr_percpu_deactivate_hint.attr);
|
||||
}
|
||||
}
|
||||
|
||||
module_init(pseries_energy_init);
|
||||
module_exit(pseries_energy_cleanup);
|
||||
MODULE_DESCRIPTION("Driver for pSeries platform energy management");
|
||||
MODULE_AUTHOR("Vaidyanathan Srinivasan");
|
||||
MODULE_LICENSE("GPL");
|
|
@@ -41,6 +41,7 @@ obj-$(CONFIG_OF_RTC) += of_rtc.o
ifeq ($(CONFIG_PCI),y)
obj-$(CONFIG_4xx) += ppc4xx_pci.o
endif
obj-$(CONFIG_PPC4xx_CPM) += ppc4xx_cpm.o
obj-$(CONFIG_PPC4xx_GPIO) += ppc4xx_gpio.o

obj-$(CONFIG_CPM) += cpm_common.o

@@ -312,17 +312,10 @@ static void pci_dma_dev_setup_dart(struct pci_dev *dev)

static void pci_dma_bus_setup_dart(struct pci_bus *bus)
{
struct device_node *dn;

if (!iommu_table_dart_inited) {
iommu_table_dart_inited = 1;
iommu_table_dart_setup();
}

dn = pci_bus_to_OF_node(bus);

if (dn)
PCI_DN(dn)->iommu_table = &iommu_table_dart;
}

static bool dart_device_on_pcie(struct device *dev)
@@ -373,7 +366,7 @@ void __init iommu_init_early_dart(void)
if (dn == NULL) {
dn = of_find_compatible_node(NULL, "dart", "u4-dart");
if (dn == NULL)
goto bail;
return; /* use default direct_dma_ops */
dart_is_u4 = 1;
}

@@ -1,5 +1,5 @@
/*
 * GPIOs on MPC8349/8572/8610 and compatible
 * GPIOs on MPC512x/8349/8572/8610 and compatible
 *
 * Copyright (C) 2008 Peter Korsgaard <jacmet@sunsite.dk>
 *

@ -26,6 +26,7 @@
|
|||
#define GPIO_IER 0x0c
|
||||
#define GPIO_IMR 0x10
|
||||
#define GPIO_ICR 0x14
|
||||
#define GPIO_ICR2 0x18
|
||||
|
||||
struct mpc8xxx_gpio_chip {
|
||||
struct of_mm_gpio_chip mm_gc;
|
||||
|
@ -37,6 +38,7 @@ struct mpc8xxx_gpio_chip {
|
|||
*/
|
||||
u32 data;
|
||||
struct irq_host *irq;
|
||||
void *of_dev_id_data;
|
||||
};
|
||||
|
||||
static inline u32 mpc8xxx_gpio2mask(unsigned int gpio)
|
||||
|
@ -215,6 +217,51 @@ static int mpc8xxx_irq_set_type(unsigned int virq, unsigned int flow_type)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int mpc512x_irq_set_type(unsigned int virq, unsigned int flow_type)
|
||||
{
|
||||
struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
|
||||
struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
|
||||
unsigned long gpio = virq_to_hw(virq);
|
||||
void __iomem *reg;
|
||||
unsigned int shift;
|
||||
unsigned long flags;
|
||||
|
||||
if (gpio < 16) {
|
||||
reg = mm->regs + GPIO_ICR;
|
||||
shift = (15 - gpio) * 2;
|
||||
} else {
|
||||
reg = mm->regs + GPIO_ICR2;
|
||||
shift = (15 - (gpio % 16)) * 2;
|
||||
}
|
||||
|
||||
switch (flow_type) {
|
||||
case IRQ_TYPE_EDGE_FALLING:
|
||||
case IRQ_TYPE_LEVEL_LOW:
|
||||
spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
|
||||
clrsetbits_be32(reg, 3 << shift, 2 << shift);
|
||||
spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
|
||||
break;
|
||||
|
||||
case IRQ_TYPE_EDGE_RISING:
|
||||
case IRQ_TYPE_LEVEL_HIGH:
|
||||
spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
|
||||
clrsetbits_be32(reg, 3 << shift, 1 << shift);
|
||||
spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
|
||||
break;
|
||||
|
||||
case IRQ_TYPE_EDGE_BOTH:
|
||||
spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
|
||||
clrbits32(reg, 3 << shift);
|
||||
spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
|
||||
break;
|
||||
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct irq_chip mpc8xxx_irq_chip = {
|
||||
.name = "mpc8xxx-gpio",
|
||||
.unmask = mpc8xxx_irq_unmask,
|
||||
|
@ -226,6 +273,11 @@ static struct irq_chip mpc8xxx_irq_chip = {
|
|||
static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq,
|
||||
irq_hw_number_t hw)
|
||||
{
|
||||
struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data;
|
||||
|
||||
if (mpc8xxx_gc->of_dev_id_data)
|
||||
mpc8xxx_irq_chip.set_type = mpc8xxx_gc->of_dev_id_data;
|
||||
|
||||
set_irq_chip_data(virq, h->host_data);
|
||||
set_irq_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq);
|
||||
set_irq_type(virq, IRQ_TYPE_NONE);
|
||||
|
@ -253,11 +305,20 @@ static struct irq_host_ops mpc8xxx_gpio_irq_ops = {
|
|||
.xlate = mpc8xxx_gpio_irq_xlate,
|
||||
};
|
||||
|
||||
static struct of_device_id mpc8xxx_gpio_ids[] __initdata = {
|
||||
{ .compatible = "fsl,mpc8349-gpio", },
|
||||
{ .compatible = "fsl,mpc8572-gpio", },
|
||||
{ .compatible = "fsl,mpc8610-gpio", },
|
||||
{ .compatible = "fsl,mpc5121-gpio", .data = mpc512x_irq_set_type, },
|
||||
{}
|
||||
};
|
||||
|
||||
static void __init mpc8xxx_add_controller(struct device_node *np)
|
||||
{
|
||||
struct mpc8xxx_gpio_chip *mpc8xxx_gc;
|
||||
struct of_mm_gpio_chip *mm_gc;
|
||||
struct gpio_chip *gc;
|
||||
const struct of_device_id *id;
|
||||
unsigned hwirq;
|
||||
int ret;
|
||||
|
||||
|
@ -297,6 +358,10 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
|
|||
if (!mpc8xxx_gc->irq)
|
||||
goto skip_irq;
|
||||
|
||||
id = of_match_node(mpc8xxx_gpio_ids, np);
|
||||
if (id)
|
||||
mpc8xxx_gc->of_dev_id_data = id->data;
|
||||
|
||||
mpc8xxx_gc->irq->host_data = mpc8xxx_gc;
|
||||
|
||||
/* ack and mask all irqs */
|
||||
|
@ -321,13 +386,7 @@ static int __init mpc8xxx_add_gpiochips(void)
|
|||
{
|
||||
struct device_node *np;
|
||||
|
||||
for_each_compatible_node(np, NULL, "fsl,mpc8349-gpio")
|
||||
mpc8xxx_add_controller(np);
|
||||
|
||||
for_each_compatible_node(np, NULL, "fsl,mpc8572-gpio")
|
||||
mpc8xxx_add_controller(np);
|
||||
|
||||
for_each_compatible_node(np, NULL, "fsl,mpc8610-gpio")
|
||||
for_each_matching_node(np, mpc8xxx_gpio_ids)
|
||||
mpc8xxx_add_controller(np);
|
||||
|
||||
for_each_compatible_node(np, NULL, "fsl,qoriq-gpio")
|
||||
|
|
|
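The mpc512x_irq_set_type() hunk above packs one 2-bit interrupt-trigger field per GPIO line, sixteen lines per ICR register, with line 0 in the most-significant bits. A standalone C sketch of that addressing follows; only the GPIO_ICR/GPIO_ICR2 offsets and the shift formula come from the diff, while the helper name, main() and the printout are illustrative.

/*
 * Standalone sketch (not kernel code): mirrors how mpc512x_irq_set_type()
 * locates the 2-bit trigger field for a GPIO line.
 */
#include <stdio.h>

#define GPIO_ICR	0x14	/* lines  0..15, 2 bits each */
#define GPIO_ICR2	0x18	/* lines 16..31, 2 bits each */

static void icr_field(unsigned int gpio, unsigned int *reg, unsigned int *shift)
{
	if (gpio < 16) {
		*reg = GPIO_ICR;
		*shift = (15 - gpio) * 2;	/* line 0 sits in the top bits */
	} else {
		*reg = GPIO_ICR2;
		*shift = (15 - (gpio % 16)) * 2;
	}
}

int main(void)
{
	unsigned int gpio, reg, shift;

	for (gpio = 0; gpio < 32; gpio++) {
		icr_field(gpio, &reg, &shift);
		printf("gpio %2u -> reg 0x%02x, bits [%u:%u]\n",
		       gpio, reg, shift + 1, shift);
	}
	return 0;
}
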
@@ -0,0 +1,346 @@
/*
 * PowerPC 4xx Clock and Power Management
 *
 * Copyright (C) 2010, Applied Micro Circuits Corporation
 * Victor Gallardo (vgallardo@apm.com)
 *
 * Based on arch/powerpc/platforms/44x/idle.c:
 * Jerone Young <jyoung5@us.ibm.com>
 * Copyright 2008 IBM Corp.
 *
 * Based on arch/powerpc/sysdev/fsl_pmc.c:
 * Anton Vorontsov <avorontsov@ru.mvista.com>
 * Copyright 2009 MontaVista Software, Inc.
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/of_platform.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>
#include <linux/suspend.h>
#include <asm/dcr.h>
#include <asm/dcr-native.h>
#include <asm/machdep.h>

#define CPM_ER 0
#define CPM_FR 1
#define CPM_SR 2

#define CPM_IDLE_WAIT 0
#define CPM_IDLE_DOZE 1

struct cpm {
	dcr_host_t dcr_host;
	unsigned int dcr_offset[3];
	unsigned int powersave_off;
	unsigned int unused;
	unsigned int idle_doze;
	unsigned int standby;
	unsigned int suspend;
};

static struct cpm cpm;

struct cpm_idle_mode {
	unsigned int enabled;
	const char *name;
};

static struct cpm_idle_mode idle_mode[] = {
	[CPM_IDLE_WAIT] = { 1, "wait" }, /* default */
	[CPM_IDLE_DOZE] = { 0, "doze" },
};

static unsigned int cpm_set(unsigned int cpm_reg, unsigned int mask)
{
	unsigned int value;

	/* CPM controller supports 3 different types of sleep interface
	 * known as class 1, 2 and 3. For class 1 units, they are
	 * unconditionally put to sleep when the corresponding CPM bit is
	 * set. For class 2 and 3 units this is not case; if they can be
	 * put to to sleep, they will. Here we do not verify, we just
	 * set them and expect them to eventually go off when they can.
	 */
	value = dcr_read(cpm.dcr_host, cpm.dcr_offset[cpm_reg]);
	dcr_write(cpm.dcr_host, cpm.dcr_offset[cpm_reg], value | mask);

	/* return old state, to restore later if needed */
	return value;
}

static void cpm_idle_wait(void)
{
	unsigned long msr_save;

	/* save off initial state */
	msr_save = mfmsr();
	/* sync required when CPM0_ER[CPU] is set */
	mb();
	/* set wait state MSR */
	mtmsr(msr_save|MSR_WE|MSR_EE|MSR_CE|MSR_DE);
	isync();
	/* return to initial state */
	mtmsr(msr_save);
	isync();
}

static void cpm_idle_sleep(unsigned int mask)
{
	unsigned int er_save;

	/* update CPM_ER state */
	er_save = cpm_set(CPM_ER, mask);

	/* go to wait state so that CPM0_ER[CPU] can take effect */
	cpm_idle_wait();

	/* restore CPM_ER state */
	dcr_write(cpm.dcr_host, cpm.dcr_offset[CPM_ER], er_save);
}

static void cpm_idle_doze(void)
{
	cpm_idle_sleep(cpm.idle_doze);
}

static void cpm_idle_config(int mode)
{
	int i;

	if (idle_mode[mode].enabled)
		return;

	for (i = 0; i < ARRAY_SIZE(idle_mode); i++)
		idle_mode[i].enabled = 0;

	idle_mode[mode].enabled = 1;
}

static ssize_t cpm_idle_show(struct kobject *kobj,
			     struct kobj_attribute *attr, char *buf)
{
	char *s = buf;
	int i;

	for (i = 0; i < ARRAY_SIZE(idle_mode); i++) {
		if (idle_mode[i].enabled)
			s += sprintf(s, "[%s] ", idle_mode[i].name);
		else
			s += sprintf(s, "%s ", idle_mode[i].name);
	}

	*(s-1) = '\n'; /* convert the last space to a newline */

	return s - buf;
}

static ssize_t cpm_idle_store(struct kobject *kobj,
			      struct kobj_attribute *attr,
			      const char *buf, size_t n)
{
	int i;
	char *p;
	int len;

	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	for (i = 0; i < ARRAY_SIZE(idle_mode); i++) {
		if (strncmp(buf, idle_mode[i].name, len) == 0) {
			cpm_idle_config(i);
			return n;
		}
	}

	return -EINVAL;
}

static struct kobj_attribute cpm_idle_attr =
	__ATTR(idle, 0644, cpm_idle_show, cpm_idle_store);

static void cpm_idle_config_sysfs(void)
{
	struct sys_device *sys_dev;
	unsigned long ret;

	sys_dev = get_cpu_sysdev(0);

	ret = sysfs_create_file(&sys_dev->kobj,
				&cpm_idle_attr.attr);
	if (ret)
		printk(KERN_WARNING
		       "cpm: failed to create idle sysfs entry\n");
}

static void cpm_idle(void)
{
	if (idle_mode[CPM_IDLE_DOZE].enabled)
		cpm_idle_doze();
	else
		cpm_idle_wait();
}

static int cpm_suspend_valid(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
		return !!cpm.standby;
	case PM_SUSPEND_MEM:
		return !!cpm.suspend;
	default:
		return 0;
	}
}

static void cpm_suspend_standby(unsigned int mask)
{
	unsigned long tcr_save;

	/* disable decrement interrupt */
	tcr_save = mfspr(SPRN_TCR);
	mtspr(SPRN_TCR, tcr_save & ~TCR_DIE);

	/* go to sleep state */
	cpm_idle_sleep(mask);

	/* restore decrement interrupt */
	mtspr(SPRN_TCR, tcr_save);
}

static int cpm_suspend_enter(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
		cpm_suspend_standby(cpm.standby);
		break;
	case PM_SUSPEND_MEM:
		cpm_suspend_standby(cpm.suspend);
		break;
	}

	return 0;
}

static struct platform_suspend_ops cpm_suspend_ops = {
	.valid = cpm_suspend_valid,
	.enter = cpm_suspend_enter,
};

static int cpm_get_uint_property(struct device_node *np,
				 const char *name)
{
	int len;
	const unsigned int *prop = of_get_property(np, name, &len);

	if (prop == NULL || len < sizeof(u32))
		return 0;

	return *prop;
}

static int __init cpm_init(void)
{
	struct device_node *np;
	int dcr_base, dcr_len;
	int ret = 0;

	if (!cpm.powersave_off) {
		cpm_idle_config(CPM_IDLE_WAIT);
		ppc_md.power_save = &cpm_idle;
	}

	np = of_find_compatible_node(NULL, NULL, "ibm,cpm");
	if (!np) {
		ret = -EINVAL;
		goto out;
	}

	dcr_base = dcr_resource_start(np, 0);
	dcr_len = dcr_resource_len(np, 0);

	if (dcr_base == 0 || dcr_len == 0) {
		printk(KERN_ERR "cpm: could not parse dcr property for %s\n",
		       np->full_name);
		ret = -EINVAL;
		goto out;
	}

	cpm.dcr_host = dcr_map(np, dcr_base, dcr_len);

	if (!DCR_MAP_OK(cpm.dcr_host)) {
		printk(KERN_ERR "cpm: failed to map dcr property for %s\n",
		       np->full_name);
		ret = -EINVAL;
		goto out;
	}

	/* All 4xx SoCs with a CPM controller have one of two
	 * different order for the CPM registers. Some have the
	 * CPM registers in the following order (ER,FR,SR). The
	 * others have them in the following order (SR,ER,FR).
	 */

	if (cpm_get_uint_property(np, "er-offset") == 0) {
		cpm.dcr_offset[CPM_ER] = 0;
		cpm.dcr_offset[CPM_FR] = 1;
		cpm.dcr_offset[CPM_SR] = 2;
	} else {
		cpm.dcr_offset[CPM_ER] = 1;
		cpm.dcr_offset[CPM_FR] = 2;
		cpm.dcr_offset[CPM_SR] = 0;
	}

	/* Now let's see what IPs to turn off for the following modes */

	cpm.unused = cpm_get_uint_property(np, "unused-units");
	cpm.idle_doze = cpm_get_uint_property(np, "idle-doze");
	cpm.standby = cpm_get_uint_property(np, "standby");
	cpm.suspend = cpm_get_uint_property(np, "suspend");

	/* If some IPs are unused let's turn them off now */

	if (cpm.unused) {
		cpm_set(CPM_ER, cpm.unused);
		cpm_set(CPM_FR, cpm.unused);
	}

	/* Now let's export interfaces */

	if (!cpm.powersave_off && cpm.idle_doze)
		cpm_idle_config_sysfs();

	if (cpm.standby || cpm.suspend)
		suspend_set_ops(&cpm_suspend_ops);
out:
	if (np)
		of_node_put(np);
	return ret;
}

late_initcall(cpm_init);

static int __init cpm_powersave_off(char *arg)
{
	cpm.powersave_off = 1;
	return 0;
}
__setup("powersave=off", cpm_powersave_off);

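The new CPM driver above exposes its idle-mode selection as a sysfs attribute named "idle" on CPU 0 (cpm_idle_config_sysfs()): cpm_idle_show() prints the available modes with the active one in brackets, and cpm_idle_store() switches modes when a name is written. A userspace sketch of driving that attribute follows; the sysfs path is an assumption based on where CPU sysdev attributes normally appear, not something stated in the diff.

/*
 * Userspace sketch of using the "idle" attribute created by
 * cpm_idle_config_sysfs().  The path below is an assumption
 * (kobject of cpu0's sysdev); adjust it for your system.
 */
#include <stdio.h>

#define CPM_IDLE_ATTR "/sys/devices/system/cpu/cpu0/idle"

int main(void)
{
	char modes[64];
	FILE *f;

	/* Show the available modes; the active one is printed in
	 * brackets, e.g. "[wait] doze" (see cpm_idle_show()). */
	f = fopen(CPM_IDLE_ATTR, "r");
	if (!f || !fgets(modes, sizeof(modes), f)) {
		perror(CPM_IDLE_ATTR);
		return 1;
	}
	fclose(f);
	printf("idle modes: %s", modes);

	/* Switch to "doze"; cpm_idle_store() matches the name up to '\n'. */
	f = fopen(CPM_IDLE_ATTR, "w");
	if (!f || fputs("doze\n", f) == EOF) {
		perror(CPM_IDLE_ATTR);
		return 1;
	}
	fclose(f);
	return 0;
}
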
@@ -84,8 +84,8 @@ static int __init tsi108_eth_of_init(void)
		memset(&tsi_eth_data, 0, sizeof(tsi_eth_data));

		ret = of_address_to_resource(np, 0, &r[0]);
		DBG("%s: name:start->end = %s:0x%lx-> 0x%lx\n",
			__func__,r[0].name, r[0].start, r[0].end);
		DBG("%s: name:start->end = %s:%pR\n",
			__func__, r[0].name, &r[0]);
		if (ret)
			goto err;

@@ -93,8 +93,8 @@ static int __init tsi108_eth_of_init(void)
		r[1].start = irq_of_parse_and_map(np, 0);
		r[1].end = irq_of_parse_and_map(np, 0);
		r[1].flags = IORESOURCE_IRQ;
		DBG("%s: name:start->end = %s:0x%lx-> 0x%lx\n",
			__func__,r[1].name, r[1].start, r[1].end);
		DBG("%s: name:start->end = %s:%pR\n",
			__func__, r[1].name, &r[1]);

		tsi_eth_dev =
			platform_device_register_simple("tsi-ethernet", i++, &r[0],

@@ -39,7 +39,7 @@

#include "hvc_console.h"

char hvc_driver_name[] = "hvc_console";
static const char hvc_driver_name[] = "hvc_console";

static struct vio_device_id hvc_driver_table[] __devinitdata = {
	{"serial", "hvterm1"},

@@ -109,7 +109,7 @@ config FSL_DMA

config MPC512X_DMA
	tristate "Freescale MPC512x built-in DMA engine support"
	depends on PPC_MPC512x
	depends on PPC_MPC512x || PPC_MPC831x
	select DMA_ENGINE
	---help---
	  Enable support for the Freescale MPC512x built-in DMA engine.

@@ -1,6 +1,7 @@
/*
 * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver

@@ -70,6 +71,8 @@
#define MPC_DMA_DMAES_SBE (1 << 1)
#define MPC_DMA_DMAES_DBE (1 << 0)

#define MPC_DMA_DMAGPOR_SNOOP_ENABLE (1 << 6)

#define MPC_DMA_TSIZE_1 0x00
#define MPC_DMA_TSIZE_2 0x01
#define MPC_DMA_TSIZE_4 0x02

@@ -104,7 +107,10 @@ struct __attribute__ ((__packed__)) mpc_dma_regs {
	/* 0x30 */
	u32 dmahrsh; /* DMA hw request status high(ch63~32) */
	u32 dmahrsl; /* DMA hardware request status low(ch31~0) */
	u32 dmaihsa; /* DMA interrupt high select AXE(ch63~32) */
	union {
		u32 dmaihsa; /* DMA interrupt high select AXE(ch63~32) */
		u32 dmagpor; /* (General purpose register on MPC8308) */
	};
	u32 dmailsa; /* DMA interrupt low select AXE(ch31~0) */
	/* 0x40 ~ 0xff */
	u32 reserve0[48]; /* Reserved */

@@ -195,7 +201,9 @@ struct mpc_dma {
	struct mpc_dma_regs __iomem *regs;
	struct mpc_dma_tcd __iomem *tcd;
	int irq;
	int irq2;
	uint error_status;
	int is_mpc8308;

	/* Lock for error_status field in this structure */
	spinlock_t error_status_lock;

@@ -252,11 +260,13 @@ static void mpc_dma_execute(struct mpc_dma_chan *mchan)
		prev = mdesc;
	}

	prev->tcd->start = 0;
	prev->tcd->int_maj = 1;

	/* Send first descriptor in chain into hardware */
	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

	if (first != prev)
		mdma->tcd[cid].e_sg = 1;
	out_8(&mdma->regs->dmassrt, cid);
}

@@ -274,6 +284,9 @@ static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)

		spin_lock(&mchan->lock);

		out_8(&mdma->regs->dmacint, ch + off);
		out_8(&mdma->regs->dmacerr, ch + off);

		/* Check error status */
		if (es & (1 << ch))
			list_for_each_entry(mdesc, &mchan->active, node)

@@ -302,36 +315,68 @@ static irqreturn_t mpc_dma_irq(int irq, void *data)
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel */
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
	if (mdma->dma.chancnt > 32) {
		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
					in_be32(&mdma->regs->dmaerrh), 32);
	}
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
					in_be32(&mdma->regs->dmaerrl), 0);

	/* Ack interrupt on all channels */
	out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

	/* Schedule tasklet */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}

/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
/* proccess completed descriptors */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
	struct mpc_dma *mdma = (void *)data;
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	uint es;
	int i;

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			if (desc->callback)
				desc->callback(desc->callback_param);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}

/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
	struct mpc_dma *mdma = (void *)data;
	unsigned long flags;
	uint es;

	spin_lock_irqsave(&mdma->error_status_lock, flags);
	es = mdma->error_status;
	mdma->error_status = 0;

@@ -370,35 +415,7 @@ static void mpc_dma_tasklet(unsigned long data)
			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
	}

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			if (desc->callback)
				desc->callback(desc->callback_param);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
	mpc_dma_process_completed(mdma);
}

/* Submit descriptor to hardware */

@@ -563,6 +580,7 @@ static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
					size_t len, unsigned long flags)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	struct mpc_dma_tcd *tcd;

@@ -577,8 +595,11 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	}
	spin_unlock_irqrestore(&mchan->lock, iflags);

	if (!mdesc)
	if (!mdesc) {
		/* try to free completed descriptors */
		mpc_dma_process_completed(mdma);
		return NULL;
	}

	mdesc->error = 0;
	tcd = mdesc->tcd;

@@ -591,7 +612,8 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		tcd->dsize = MPC_DMA_TSIZE_32;
		tcd->soff = 32;
		tcd->doff = 32;
	} else if (IS_ALIGNED(src | dst | len, 16)) {
	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
		/* MPC8308 doesn't support 16 byte transfers */
		tcd->ssize = MPC_DMA_TSIZE_16;
		tcd->dsize = MPC_DMA_TSIZE_16;
		tcd->soff = 16;

@@ -651,6 +673,15 @@ static int __devinit mpc_dma_probe(struct platform_device *op,
		return -EINVAL;
	}

	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
		mdma->is_mpc8308 = 1;
		mdma->irq2 = irq_of_parse_and_map(dn, 1);
		if (mdma->irq2 == NO_IRQ) {
			dev_err(dev, "Error mapping IRQ!\n");
			return -EINVAL;
		}
	}

	retval = of_address_to_resource(dn, 0, &res);
	if (retval) {
		dev_err(dev, "Error parsing memory region!\n");

@@ -681,11 +712,23 @@ static int __devinit mpc_dma_probe(struct platform_device *op,
		return -EINVAL;
	}

	if (mdma->is_mpc8308) {
		retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0,
				DRV_NAME, mdma);
		if (retval) {
			dev_err(dev, "Error requesting IRQ2!\n");
			return -EINVAL;
		}
	}

	spin_lock_init(&mdma->error_status_lock);

	dma = &mdma->dma;
	dma->dev = dev;
	dma->chancnt = MPC_DMA_CHANNELS;
	if (!mdma->is_mpc8308)
		dma->chancnt = MPC_DMA_CHANNELS;
	else
		dma->chancnt = 16; /* MPC8308 DMA has only 16 channels */
	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
	dma->device_issue_pending = mpc_dma_issue_pending;

@@ -721,26 +764,40 @@ static int __devinit mpc_dma_probe(struct platform_device *op,
	 * - Round-robin group arbitration,
	 * - Round-robin channel arbitration.
	 */
	out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
					MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
	if (!mdma->is_mpc8308) {
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
					MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);

	/* Disable hardware DMA requests */
	out_be32(&mdma->regs->dmaerqh, 0);
	out_be32(&mdma->regs->dmaerql, 0);
		/* Disable hardware DMA requests */
		out_be32(&mdma->regs->dmaerqh, 0);
		out_be32(&mdma->regs->dmaerql, 0);

	/* Disable error interrupts */
	out_be32(&mdma->regs->dmaeeih, 0);
	out_be32(&mdma->regs->dmaeeil, 0);
		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeih, 0);
		out_be32(&mdma->regs->dmaeeil, 0);

	/* Clear interrupts status */
	out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
		/* Clear interrupts status */
		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

	/* Route interrupts to IPIC */
	out_be32(&mdma->regs->dmaihsa, 0);
	out_be32(&mdma->regs->dmailsa, 0);
		/* Route interrupts to IPIC */
		out_be32(&mdma->regs->dmaihsa, 0);
		out_be32(&mdma->regs->dmailsa, 0);
	} else {
		/* MPC8308 has 16 channels and lacks some registers */
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

		/* enable snooping */
		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmaintl, 0xFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
	}

	/* Register DMA engine */
	dev_set_drvdata(dev, mdma);

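In the mpc_dma_prep_memcpy() hunks above, the transfer size is chosen as the largest power of two that src, dst and len are all aligned to, and the MPC8308 variant skips the 16-byte case. A standalone sketch of that selection follows; only the IS_ALIGNED(src | dst | len, ...) test and the MPC8308 restriction come from the diff, while the helper name and the exact size table are illustrative.

/*
 * Standalone sketch of the transfer-size selection used by
 * mpc_dma_prep_memcpy(): the largest power-of-two size that src, dst
 * and len are all aligned to wins, and the MPC8308 variant skips the
 * 16-byte option.  Pure illustration, not driver code.
 */
#include <stdio.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

static unsigned int pick_tsize(unsigned long src, unsigned long dst,
			       unsigned long len, int is_mpc8308)
{
	unsigned long bits = src | dst | len;

	if (IS_ALIGNED(bits, 32))
		return 32;
	if (!is_mpc8308 && IS_ALIGNED(bits, 16))
		return 16;
	if (IS_ALIGNED(bits, 4))
		return 4;
	if (IS_ALIGNED(bits, 2))
		return 2;
	return 1;
}

int main(void)
{
	printf("%u\n", pick_tsize(0x1000, 0x2000, 0x40, 0));	/* 32 */
	printf("%u\n", pick_tsize(0x1010, 0x2020, 0x30, 0));	/* 16 */
	printf("%u\n", pick_tsize(0x1010, 0x2020, 0x30, 1));	/* 4: 8308 skips 16 */
	printf("%u\n", pick_tsize(0x1001, 0x2000, 0x40, 0));	/* 1 */
	return 0;
}
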
@@ -387,11 +387,10 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
	/* Set the DMA ops to the ones from the PCI device, this could be
	 * fishy if we didn't know that on PowerMac it's always direct ops
	 * or iommu ops that will work fine
	 *
	 * To get all the fields, copy all archdata
	 */
	dev->ofdev.dev.archdata.dma_ops =
		chip->lbus.pdev->dev.archdata.dma_ops;
	dev->ofdev.dev.archdata.dma_data =
		chip->lbus.pdev->dev.archdata.dma_data;
	dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata;
#endif /* CONFIG_PCI */

#ifdef DEBUG

@@ -2213,6 +2213,9 @@ static void fcu_lookup_fans(struct device_node *fcu_node)
static int fcu_of_probe(struct platform_device* dev, const struct of_device_id *match)
{
	state = state_detached;
	of_dev = dev;

	dev_info(&dev->dev, "PowerMac G5 Thermal control driver %s\n", VERSION);

	/* Lookup the fans in the device tree */
	fcu_lookup_fans(dev->dev.of_node);

@@ -2235,6 +2238,7 @@ static const struct of_device_id fcu_match[] =
	},
	{},
};
MODULE_DEVICE_TABLE(of, fcu_match);

static struct of_platform_driver fcu_of_platform_driver =
{

@@ -2252,8 +2256,6 @@ static struct of_platform_driver fcu_of_platform_driver =
 */
static int __init therm_pm72_init(void)
{
	struct device_node *np;

	rackmac = of_machine_is_compatible("RackMac3,1");

	if (!of_machine_is_compatible("PowerMac7,2") &&

@@ -2261,34 +2263,12 @@ static int __init therm_pm72_init(void)
	    !rackmac)
		return -ENODEV;

	printk(KERN_INFO "PowerMac G5 Thermal control driver %s\n", VERSION);

	np = of_find_node_by_type(NULL, "fcu");
	if (np == NULL) {
		/* Some machines have strangely broken device-tree */
		np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e");
		if (np == NULL) {
			printk(KERN_ERR "Can't find FCU in device-tree !\n");
			return -ENODEV;
		}
	}
	of_dev = of_platform_device_create(np, "temperature", NULL);
	if (of_dev == NULL) {
		printk(KERN_ERR "Can't register FCU platform device !\n");
		return -ENODEV;
	}

	of_register_platform_driver(&fcu_of_platform_driver);

	return 0;
	return of_register_platform_driver(&fcu_of_platform_driver);
}

static void __exit therm_pm72_exit(void)
{
	of_unregister_platform_driver(&fcu_of_platform_driver);

	if (of_dev)
		of_device_unregister(of_dev);
}

module_init(therm_pm72_init);

@@ -1,6 +1,6 @@
obj-$(CONFIG_PS3_VUART) += ps3-vuart.o
obj-$(CONFIG_PS3_PS3AV) += ps3av_mod.o
ps3av_mod-objs += ps3av.o ps3av_cmd.o
ps3av_mod-y := ps3av.o ps3av_cmd.o
obj-$(CONFIG_PPC_PS3) += sys-manager-core.o
obj-$(CONFIG_PS3_SYS_MANAGER) += ps3-sys-manager.o
obj-$(CONFIG_PS3_STORAGE) += ps3stor_lib.o

@@ -687,7 +687,8 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
#if defined(CONFIG_ATARI)
	address_space = 64;
#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) \
			|| defined(__sparc__) || defined(__mips__)
			|| defined(__sparc__) || defined(__mips__) \
			|| defined(__powerpc__)
	address_space = 128;
#else
#warning Assuming 128 bytes of RTC+NVRAM address space, not 64 bytes.