powerpc: Merge enough to start building in arch/powerpc.

This creates the directory structure under arch/powerpc and a bunch
of Kconfig files.  It does a first-cut merge of arch/powerpc/mm,
arch/powerpc/lib and arch/powerpc/platforms/powermac.  This is enough
to build a 32-bit powermac kernel with ARCH=powerpc.

For now we are getting some unmerged files from arch/ppc/kernel and
arch/ppc/syslib, or arch/ppc64/kernel.  This makes some minor changes
to files in those directories and files outside arch/powerpc.

The boot directory is still not merged.  That's going to be interesting.
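
For reference, building a 32-bit powermac kernel from the merged tree looks
roughly like this (the cross-compiler prefix is illustrative; zImage still
depends on the unmerged boot directory, so only vmlinux is shown):

    make ARCH=powerpc oldconfig
    make ARCH=powerpc CROSS_COMPILE=powerpc-linux- vmlinux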

Signed-off-by: Paul Mackerras <paulus@samba.org>
Paul Mackerras committed 2005-09-26 16:04:21 +10:00
Parent: e5baa396af
Commit: 14cf11af6c
89 changed files with 32423 additions and 25 deletions

861
arch/powerpc/Kconfig Normal file

@@ -0,0 +1,861 @@
# For a description of the syntax of this configuration file,
# see Documentation/kbuild/kconfig-language.txt.
#
mainmenu "Linux/PowerPC Kernel Configuration"
config PPC64
bool "64-bit kernel"
default n
help
This option selects whether a 32-bit or a 64-bit kernel
will be built.
config PPC32
bool
default y if !PPC64
config 64BIT
bool
default y if PPC64
config PPC_MERGE
def_bool y
config MMU
bool
default y
config UID16
bool
config GENERIC_HARDIRQS
bool
default y
config RWSEM_GENERIC_SPINLOCK
bool
config RWSEM_XCHGADD_ALGORITHM
bool
default y
config GENERIC_CALIBRATE_DELAY
bool
default y
config PPC
bool
default y
config EARLY_PRINTK
bool
default y if PPC64
config COMPAT
bool
default y if PPC64
config SYSVIPC_COMPAT
bool
depends on COMPAT && SYSVIPC
default y
# All PPC32s use generic nvram driver through ppc_md
config GENERIC_NVRAM
bool
default y if PPC32
config SCHED_NO_NO_OMIT_FRAME_POINTER
bool
default y
config ARCH_MAY_HAVE_PC_FDC
bool
default y
menu "Processor support"
choice
prompt "Processor Type"
depends on PPC32
default 6xx
config 6xx
bool "6xx/7xx/74xx"
select PPC_FPU
help
There are four families of PowerPC chips supported. The more common
types (601, 603, 604, 740, 750, 7400), the Motorola embedded
versions (821, 823, 850, 855, 860, 52xx, 82xx, 83xx), the AMCC
embedded versions (403 and 405) and the high end 64 bit Power
processors (POWER 3, POWER4, and IBM PPC970 also known as G5).
Unless you are building a kernel for one of the embedded processor
systems, 64 bit IBM RS/6000 or an Apple G5, choose 6xx.
Note that the kernel runs in 32-bit mode even on 64-bit chips.
config PPC_52xx
bool "Freescale 52xx"
config PPC_82xx
bool "Freescale 82xx"
config PPC_83xx
bool "Freescale 83xx"
config 40x
bool "AMCC 40x"
config 44x
bool "AMCC 44x"
config PPC64BRIDGE
select PPC_FPU
bool "POWER3, POWER4 and PPC970 (G5)"
config 8xx
bool "Freescale 8xx"
config E200
bool "Freescale e200"
config E500
bool "Freescale e500"
endchoice
config POWER4_ONLY
bool "Optimize for POWER4"
depends on PPC64 || PPC64BRIDGE
default n
---help---
Cause the compiler to optimize for POWER4/POWER5/PPC970 processors.
The resulting binary will not work on POWER3 or RS64 processors
when compiled with binutils 2.15 or later.
config POWER3
bool
depends on PPC64 || PPC64BRIDGE
default y if !POWER4_ONLY
config POWER4
depends on PPC64 || PPC64BRIDGE
def_bool y
config PPC_FPU
bool
default y if PPC64
config BOOKE
bool
depends on E200 || E500
default y
config FSL_BOOKE
bool
depends on E200 || E500
default y
config PTE_64BIT
bool
depends on 44x || E500
default y if 44x
default y if E500 && PHYS_64BIT
config PHYS_64BIT
bool 'Large physical address support' if E500
depends on 44x || E500
default y if 44x
---help---
This option enables kernel support for larger than 32-bit physical
addresses. This feature is not available on all e500 cores.
If in doubt, say N here.
config ALTIVEC
bool "AltiVec Support"
depends on 6xx || POWER4
---help---
This option enables kernel support for the Altivec extensions to the
PowerPC processor. The kernel currently supports saving and restoring
altivec registers, and turning on the 'altivec enable' bit so user
processes can execute altivec instructions.
This option is only useful if you have a processor that supports
altivec (G4, otherwise known as 74xx series), but does not have
any effect on a non-altivec cpu (it does, however, add code to the
kernel).
If in doubt, say Y here.
config SPE
bool "SPE Support"
depends on E200 || E500
---help---
This option enables kernel support for the Signal Processing
Extensions (SPE) to the PowerPC processor. The kernel currently
supports saving and restoring SPE registers, and turning on the
'spe enable' bit so user processes can execute SPE instructions.
This option is only useful if you have a processor that supports
SPE (e500, otherwise known as 85xx series), but does not have any
effect on a non-spe cpu (it does, however, add code to the kernel).
If in doubt, say Y here.
config PPC_STD_MMU
bool
depends on 6xx || POWER3 || POWER4 || PPC64
default y
config PPC_STD_MMU_32
def_bool y
depends on PPC_STD_MMU && PPC32
config SMP
depends on PPC_STD_MMU
bool "Symmetric multi-processing support"
---help---
This enables support for systems with more than one CPU. If you have
a system with only one CPU, say N. If you have a system with more
than one CPU, say Y. Note that the kernel does not currently
support SMP machines with 603/603e/603ev or PPC750 ("G3") processors
since they have inadequate hardware support for multiprocessor
operation.
If you say N here, the kernel will run on single and multiprocessor
machines, but will use only one CPU of a multiprocessor machine. If
you say Y here, the kernel will run on many, but not all, single-processor machines.
On a single-processor machine, the kernel will run faster if you say
N here.
If you don't know what to do here, say N.
config NR_CPUS
int "Maximum number of CPUs (2-32)"
range 2 128
depends on SMP
default "32" if PPC64
default "4"
config NOT_COHERENT_CACHE
bool
depends on 4xx || 8xx || E200
default y
endmenu
source "init/Kconfig"
menu "Platform support"
depends on PPC64 || 6xx
choice
prompt "Machine type"
default PPC_MULTIPLATFORM
config PPC_MULTIPLATFORM
bool "Generic desktop/server/laptop"
help
Select this option if configuring for an IBM pSeries or
RS/6000 machine, an Apple machine, or a PReP, CHRP,
Maple or Cell-based machine.
config PPC_ISERIES
bool "IBM Legacy iSeries"
depends on PPC64
config EMBEDDED6xx
bool "Embedded 6xx/7xx/7xxx-based board"
depends on PPC32
config APUS
bool "Amiga-APUS"
depends on PPC32 && BROKEN
help
Select APUS if configuring for a PowerUP Amiga.
More information is available at:
<http://linux-apus.sourceforge.net/>.
endchoice
config PPC_PSERIES
depends on PPC_MULTIPLATFORM && PPC64
bool " IBM pSeries & new (POWER5-based) iSeries"
default y
config PPC_CHRP
bool " Common Hardware Reference Platform (CHRP) based machines"
depends on PPC_MULTIPLATFORM && PPC32
default y
config PPC_PMAC
bool " Apple PowerMac based machines"
depends on PPC_MULTIPLATFORM
default y
config PPC_PMAC64
bool
depends on PPC_PMAC && POWER4
default y
config PPC_PREP
bool " PowerPC Reference Platform (PReP) based machines"
depends on PPC_MULTIPLATFORM && PPC32
default y
config PPC_MAPLE
depends on PPC_MULTIPLATFORM && PPC64
bool " Maple 970FX Evaluation Board"
select U3_DART
select MPIC_BROKEN_U3
default n
help
This option enables support for the Maple 970FX Evaluation Board.
For more information, refer to <http://www.970eval.com>
config PPC_BPA
bool " Broadband Processor Architecture"
depends on PPC_MULTIPLATFORM && PPC64
config PPC_OF
bool
depends on PPC_MULTIPLATFORM # for now
default y
config XICS
depends on PPC_PSERIES
bool
default y
config U3_DART
bool
depends on PPC_MULTIPLATFORM && PPC64
default n
config MPIC
depends on PPC_PSERIES || PPC_PMAC || PPC_MAPLE
bool
default y
config MPIC_BROKEN_U3
bool
depends on PPC_MAPLE
default y
config BPA_IIC
depends on PPC_BPA
bool
default y
config IBMVIO
depends on PPC_PSERIES || PPC_ISERIES
bool
default y
source "drivers/cpufreq/Kconfig"
config CPU_FREQ_PMAC
bool "Support for Apple PowerBooks"
depends on CPU_FREQ && ADB_PMU && PPC32
select CPU_FREQ_TABLE
help
This adds support for frequency switching on Apple PowerBooks;
this currently includes some models of iBook & Titanium
PowerBook.
config PPC601_SYNC_FIX
bool "Workarounds for PPC601 bugs"
depends on 6xx && (PPC_PREP || PPC_PMAC)
help
Some versions of the PPC601 (the first PowerPC chip) have bugs which
mean that extra synchronization instructions are required near
certain instructions, typically those that make major changes to the
CPU state. These extra instructions reduce performance slightly.
If you say N here, these extra instructions will not be included,
resulting in a kernel which will run faster but may not run at all
on some systems with the PPC601 chip.
If in doubt, say Y here.
config TAU
bool "Thermal Management Support"
depends on 6xx
help
G3 and G4 processors have an on-chip temperature sensor called the
'Thermal Assist Unit (TAU)', which, in theory, can measure the on-die
temperature within 2-4 degrees Celsius. This option shows the current
on-die temperature in /proc/cpuinfo if the cpu supports it.
Unfortunately, on some chip revisions, this sensor is very inaccurate
and in some cases, does not work at all, so don't assume the cpu
temp is actually what /proc/cpuinfo says it is.
config TAU_INT
bool "Interrupt driven TAU driver (DANGEROUS)"
depends on TAU
---help---
The TAU supports an interrupt driven mode which causes an interrupt
whenever the temperature goes out of range. This is the fastest way
to get notified the temp has exceeded a range. With this option off,
a timer is used to re-check the temperature periodically.
However, on some cpus it appears that the TAU interrupt hardware
is buggy and can cause a situation which would lead to unexplained hard
lockups.
Unless you are extending the TAU driver, or enjoy kernel/hardware
debugging, leave this option off.
config TAU_AVERAGE
bool "Average high and low temp"
depends on TAU
---help---
The TAU hardware can compare the temperature to an upper and lower
bound. The default behavior is to show both the upper and lower
bound in /proc/cpuinfo. If the range is large, the temperature is
either changing a lot, or the TAU hardware is broken (likely on some
G4's). If the range is small (around 4 degrees), the temperature is
relatively stable. If you say Y here, a single temperature value,
halfway between the upper and lower bounds, will be reported in
/proc/cpuinfo.
If in doubt, say N here.
endmenu
source arch/powerpc/platforms/embedded6xx/Kconfig
source arch/powerpc/platforms/4xx/Kconfig
source arch/powerpc/platforms/85xx/Kconfig
source arch/powerpc/platforms/8xx/Kconfig
menu "Kernel options"
config HIGHMEM
bool "High memory support"
depends on PPC32
source kernel/Kconfig.hz
source kernel/Kconfig.preempt
source "fs/Kconfig.binfmt"
# We optimistically allocate largepages from the VM, so make the limit
# large enough (16MB). This badly named config option is actually
# max order + 1
config FORCE_MAX_ZONEORDER
int
depends on PPC64
default "13"
config MATH_EMULATION
bool "Math emulation"
depends on 4xx || 8xx || E200 || E500
---help---
Some PowerPC chips designed for embedded applications do not have
a floating-point unit and therefore do not implement the
floating-point instructions in the PowerPC instruction set. If you
say Y here, the kernel will include code to emulate a floating-point
unit, which will allow programs that use floating-point
instructions to run.
config IOMMU_VMERGE
bool "Enable IOMMU virtual merging (EXPERIMENTAL)"
depends on EXPERIMENTAL && PPC64
default n
help
Cause IO segments sent to a device for DMA to be merged virtually
by the IOMMU when they happen to have been allocated contiguously.
This doesn't add pressure to the IOMMU allocator. However, some
drivers don't support getting large merged segments coming back
from *_map_sg(). Say Y if you know the drivers you are using are
properly handling this case.
config HOTPLUG_CPU
bool "Support for enabling/disabling CPUs"
depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
---help---
Say Y here to be able to disable and re-enable individual
CPUs at runtime on SMP machines.
Say N if you are unsure.
config KEXEC
bool "kexec system call (EXPERIMENTAL)"
depends on PPC_MULTIPLATFORM && EXPERIMENTAL
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
but it is independent of the system firmware. And like a reboot
you can start any kernel with it, not just Linux.
The name comes from the similarity to the exec system call.
It is an ongoing process to be certain the hardware in a machine
is properly shutdown, so do not be surprised if this code does not
initially work for you. It may help to enable device hotplugging
support. As of this writing the exact hardware interface is
strongly in flux, so no good recommendation can be made.
config EMBEDDEDBOOT
bool
depends on 8xx || 8260
default y
config PC_KEYBOARD
bool "PC PS/2 style Keyboard"
depends on 4xx || CPM2
config PPCBUG_NVRAM
bool "Enable reading PPCBUG NVRAM during boot" if PPLUS || LOPEC
default y if PPC_PREP
config IRQ_ALL_CPUS
bool "Distribute interrupts on all CPUs by default"
depends on SMP && !MV64360
help
This option gives the kernel permission to distribute IRQs across
multiple CPUs. Saying N here will route all IRQs to the first
CPU. Generally saying Y is safe, although some problems have been
reported with SMP Power Macintoshes with this option enabled.
source "arch/powerpc/platforms/pseries/Kconfig"
config ARCH_SELECT_MEMORY_MODEL
def_bool y
depends on PPC64
config ARCH_FLATMEM_ENABLE
def_bool y
depends on PPC64 && !NUMA
config ARCH_DISCONTIGMEM_ENABLE
def_bool y
depends on SMP && PPC_PSERIES
config ARCH_DISCONTIGMEM_DEFAULT
def_bool y
depends on ARCH_DISCONTIGMEM_ENABLE
config ARCH_FLATMEM_ENABLE
def_bool y
depends on PPC64
config ARCH_SPARSEMEM_ENABLE
def_bool y
depends on ARCH_DISCONTIGMEM_ENABLE
source "mm/Kconfig"
config HAVE_ARCH_EARLY_PFN_TO_NID
def_bool y
depends on NEED_MULTIPLE_NODES
# Some NUMA nodes have memory ranges that span
# other nodes. Even though a pfn is valid and
# between a node's start and end pfns, it may not
# reside on that node.
#
# This is a relatively temporary hack that should
# be able to go away when sparsemem is fully in
# place
config NODES_SPAN_OTHER_NODES
def_bool y
depends on NEED_MULTIPLE_NODES
config NUMA
bool "NUMA support"
default y if DISCONTIGMEM || SPARSEMEM
config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support"
depends on PPC64 && SMP
default off
help
SMT scheduler support improves the CPU scheduler's decision making
when dealing with POWER5 cpus at a cost of slightly increased
overhead in some places. If unsure say N here.
config PROC_DEVICETREE
bool "Support for Open Firmware device tree in /proc"
depends on PPC_OF && PROC_FS
help
This option adds a device-tree directory under /proc which contains
an image of the device tree that the kernel copies from Open
Firmware. If unsure, say Y here.
source "arch/powerpc/platforms/prep/Kconfig"
config CMDLINE_BOOL
bool "Default bootloader kernel arguments"
depends on !PPC_ISERIES
config CMDLINE
string "Initial kernel command string"
depends on CMDLINE_BOOL
default "console=ttyS0,9600 console=tty0 root=/dev/sda2"
help
On some platforms, there is currently no way for the boot loader to
pass arguments to the kernel. For these platforms, you can supply
some command-line options at build time by entering them here. In
most cases you will need to specify the root device here.
if !44x || BROKEN
source kernel/power/Kconfig
endif
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
depends on PROC_FS
default y
help
This kernel feature is useful for number crunching applications
that may need to compute untrusted bytecode during their
execution. By using pipes or other transports made available to
the process as file descriptors supporting the read/write
syscalls, it's possible to isolate those applications in
their own address space using seccomp. Once seccomp is
enabled via /proc/<pid>/seccomp, it cannot be disabled
and the task is only allowed to execute a few safe syscalls
defined by each seccomp mode.
If unsure, say Y. Only embedded should say N here.
endmenu
config ISA_DMA_API
bool
default y
menu "Bus options"
config ISA
bool "Support for ISA-bus hardware"
depends on PPC_PREP || PPC_CHRP
help
Find out whether you have ISA slots on your motherboard. ISA is the
name of a bus system, i.e. the way the CPU talks to the other stuff
inside your box. If you have an Apple machine, say N here; if you
have an IBM RS/6000 or pSeries machine or a PReP machine, say Y. If
you have an embedded board, consult your board documentation.
config GENERIC_ISA_DMA
bool
depends on PPC64 || POWER4 || 6xx && !CPM2
default y
config EISA
bool
config SBUS
bool
# Yes MCA RS/6000s exist but Linux-PPC does not currently support any
config MCA
bool
config PCI
bool "PCI support" if 40x || CPM2 || 83xx || 85xx || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES)
default y if !40x && !CPM2 && !8xx && !APUS && !83xx && !85xx
default PCI_PERMEDIA if !4xx && !CPM2 && !8xx && APUS
default PCI_QSPAN if !4xx && !CPM2 && 8xx
help
Find out whether your system includes a PCI bus. PCI is the name of
a bus system, i.e. the way the CPU talks to the other stuff inside
your box. If you say Y here, the kernel will include drivers and
infrastructure code to support PCI bus devices.
config PCI_DOMAINS
bool
default PCI
config MPC83xx_PCI2
bool " Support for 2nd PCI host controller"
depends on PCI && MPC834x
default y if MPC834x_SYS
config PCI_QSPAN
bool "QSpan PCI"
depends on !4xx && !CPM2 && 8xx
help
Say Y here if you have a system based on a Motorola 8xx-series
embedded processor with a QSPAN PCI interface, otherwise say N.
config PCI_8260
bool
depends on PCI && 8260
default y
config 8260_PCI9
bool " Enable workaround for MPC826x erratum PCI 9"
depends on PCI_8260 && !ADS8272
default y
choice
prompt " IDMA channel for PCI 9 workaround"
depends on 8260_PCI9
config 8260_PCI9_IDMA1
bool "IDMA1"
config 8260_PCI9_IDMA2
bool "IDMA2"
config 8260_PCI9_IDMA3
bool "IDMA3"
config 8260_PCI9_IDMA4
bool "IDMA4"
endchoice
source "drivers/pci/Kconfig"
source "drivers/pcmcia/Kconfig"
source "drivers/pci/hotplug/Kconfig"
endmenu
menu "Advanced setup"
depends on PPC32
config ADVANCED_OPTIONS
bool "Prompt for advanced kernel configuration options"
help
This option will enable prompting for a variety of advanced kernel
configuration options. These options can cause the kernel to not
work if they are set incorrectly, but can be used to optimize certain
aspects of kernel memory management.
Unless you know what you are doing, say N here.
comment "Default settings for advanced configuration options are used"
depends on !ADVANCED_OPTIONS
config HIGHMEM_START_BOOL
bool "Set high memory pool address"
depends on ADVANCED_OPTIONS && HIGHMEM
help
This option allows you to set the base address of the kernel virtual
area used to map high memory pages. This can be useful in
optimizing the layout of kernel virtual memory.
Say N here unless you know what you are doing.
config HIGHMEM_START
hex "Virtual start address of high memory pool" if HIGHMEM_START_BOOL
default "0xfe000000"
config LOWMEM_SIZE_BOOL
bool "Set maximum low memory"
depends on ADVANCED_OPTIONS
help
This option allows you to set the maximum amount of memory which
will be used as "low memory", that is, memory which the kernel can
access directly, without having to set up a kernel virtual mapping.
This can be useful in optimizing the layout of kernel virtual
memory.
Say N here unless you know what you are doing.
config LOWMEM_SIZE
hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL
default "0x30000000"
config KERNEL_START_BOOL
bool "Set custom kernel base address"
depends on ADVANCED_OPTIONS
help
This option allows you to set the kernel virtual address at which
the kernel will map low memory (the kernel image will be linked at
this address). This can be useful in optimizing the virtual memory
layout of the system.
Say N here unless you know what you are doing.
config KERNEL_START
hex "Virtual address of kernel base" if KERNEL_START_BOOL
default "0xc0000000"
config TASK_SIZE_BOOL
bool "Set custom user task size"
depends on ADVANCED_OPTIONS
help
This option allows you to set the amount of virtual address space
allocated to user tasks. This can be useful in optimizing the
virtual memory layout of the system.
Say N here unless you know what you are doing.
config TASK_SIZE
hex "Size of user task space" if TASK_SIZE_BOOL
default "0x80000000"
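# For orientation, the defaults above give the usual 32-bit layout: user
# space runs from 0 up to TASK_SIZE (0x80000000), low memory is mapped at
# KERNEL_START (0xc0000000) and limited to LOWMEM_SIZE (0x30000000, i.e.
# 768MB), and the high memory pool begins at HIGHMEM_START (0xfe000000).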
config CONSISTENT_START_BOOL
bool "Set custom consistent memory pool address"
depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
help
This option allows you to set the base virtual address
of the consistent memory pool. This pool of virtual
memory is used to make consistent memory allocations.
config CONSISTENT_START
hex "Base virtual address of consistent memory pool" if CONSISTENT_START_BOOL
default "0xff100000" if NOT_COHERENT_CACHE
config CONSISTENT_SIZE_BOOL
bool "Set custom consistent memory pool size"
depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
help
This option allows you to set the size of the
consistent memory pool. This pool of virtual memory
is used to make consistent memory allocations.
config CONSISTENT_SIZE
hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL
default "0x00200000" if NOT_COHERENT_CACHE
config BOOT_LOAD_BOOL
bool "Set the boot link/load address"
depends on ADVANCED_OPTIONS && !PPC_MULTIPLATFORM
help
This option allows you to set the initial load address of the zImage
or zImage.initrd file. This can be useful if you are on a board
which has a small amount of memory.
Say N here unless you know what you are doing.
config BOOT_LOAD
hex "Link/load address for booting" if BOOT_LOAD_BOOL
default "0x00400000" if 40x || 8xx || 8260
default "0x01000000" if 44x
default "0x00800000"
config PIN_TLB
bool "Pinned Kernel TLBs (860 ONLY)"
depends on ADVANCED_OPTIONS && 8xx
endmenu
source "net/Kconfig"
source "drivers/Kconfig"
source "fs/Kconfig"
# XXX source "arch/ppc/8xx_io/Kconfig"
# XXX source "arch/ppc/8260_io/Kconfig"
source "arch/powerpc/platforms/iseries/Kconfig"
source "lib/Kconfig"
source "arch/powerpc/oprofile/Kconfig"
source "arch/powerpc/Kconfig.debug"
source "security/Kconfig"
config KEYS_COMPAT
bool
depends on COMPAT && KEYS
default y
source "crypto/Kconfig"


@@ -0,0 +1,73 @@
menu "Kernel hacking"
source "lib/Kconfig.debug"
config KGDB
bool "Include kgdb kernel debugger"
depends on DEBUG_KERNEL && (BROKEN || PPC_GEN550 || 4xx)
select DEBUG_INFO
help
Include in-kernel hooks for kgdb, the Linux kernel source level
debugger. See <http://kgdb.sourceforge.net/> for more information.
Unless you are intending to debug the kernel, say N here.
choice
prompt "Serial Port"
depends on KGDB
default KGDB_TTYS1
config KGDB_TTYS0
bool "ttyS0"
config KGDB_TTYS1
bool "ttyS1"
config KGDB_TTYS2
bool "ttyS2"
config KGDB_TTYS3
bool "ttyS3"
endchoice
config KGDB_CONSOLE
bool "Enable serial console thru kgdb port"
depends on KGDB && 8xx || CPM2
help
If you enable this, all serial console messages will be sent
over the gdb stub.
If unsure, say N.
config XMON
bool "Include xmon kernel debugger"
depends on DEBUG_KERNEL
help
Include in-kernel hooks for the xmon kernel monitor/debugger.
Unless you are intending to debug the kernel, say N here.
config BDI_SWITCH
bool "Include BDI-2000 user context switcher"
depends on DEBUG_KERNEL
help
Include in-kernel support for the Abatron BDI2000 debugger.
Unless you are intending to debug the kernel with one of these
machines, say N here.
config BOOTX_TEXT
bool "Support for early boot text console (BootX or OpenFirmware only)"
depends PPC_OF
help
Say Y here to see progress messages from the boot firmware in text
mode. Requires either BootX or Open Firmware.
config SERIAL_TEXT_DEBUG
bool "Support for early boot texts over serial port"
depends on 4xx || LOPEC || MV64X60 || PPLUS || PRPMC800 || \
PPC_GEN550 || PPC_MPC52xx
config PPC_OCP
bool
depends on IBM_OCP || XILINX_OCP
default y
endmenu

222
arch/powerpc/Makefile Normal file

@@ -0,0 +1,222 @@
# This file is included by the global makefile so that you can add your own
# architecture-specific flags and dependencies. Remember to have actions
# for "archclean" and "archdep" for cleaning up and making dependencies for
# this architecture.
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1994 by Linus Torvalds
# Changes for PPC by Gary Thomas
# Rewritten by Cort Dougan and Paul Mackerras
#
# This must match PAGE_OFFSET in include/asm-powerpc/page.h.
KERNELLOAD := $(CONFIG_KERNEL_START)
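# (For 32-bit builds, CONFIG_KERNEL_START defaults to 0xc0000000 in the
# "Advanced setup" menu of the Kconfig, matching the usual PAGE_OFFSET.)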
HAS_BIARCH := $(call cc-option-yn, -m32)
ifeq ($(CONFIG_PPC64),y)
SZ := 64
# Set default 32 bits cross compilers for vdso and boot wrapper
CROSS32_COMPILE ?=
CROSS32CC := $(CROSS32_COMPILE)gcc
CROSS32AS := $(CROSS32_COMPILE)as
CROSS32LD := $(CROSS32_COMPILE)ld
CROSS32OBJCOPY := $(CROSS32_COMPILE)objcopy
ifeq ($(HAS_BIARCH),y)
ifeq ($(CROSS32_COMPILE),)
CROSS32CC := $(CC) -m32
CROSS32AS := $(AS) -a32
CROSS32LD := $(LD) -m elf32ppc
CROSS32OBJCOPY := $(OBJCOPY)
endif
endif
export CROSS32CC CROSS32AS CROSS32LD CROSS32OBJCOPY
new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi)
ifeq ($(new_nm),y)
NM := $(NM) --synthetic
endif
else
SZ := 32
endif
ifeq ($(HAS_BIARCH),y)
override AS += -a$(SZ)
override LD += -m elf$(SZ)ppc
override CC += -m$(SZ)
endif
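# Net effect with a biarch toolchain: a CONFIG_PPC64=y build compiles and
# links the kernel with "-m64"/"-m elf64ppc", while CROSS32CC ("$(CC) -m32")
# remains available for the 32-bit vdso and boot wrapper.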
LDFLAGS_vmlinux := -Ttext $(KERNELLOAD) -Bstatic -e $(KERNELLOAD)
# The -Iarch/$(ARCH)/include is temporary while we are merging
CPPFLAGS += -Iarch/$(ARCH) -Iarch/$(ARCH)/include
AFLAGS += -Iarch/$(ARCH)
CFLAGS += -Iarch/$(ARCH) -msoft-float -pipe
ifeq ($(CONFIG_PPC64),y)
CFLAGS += -mminimal-toc -mtraceback=none -mcall-aixdesc
else
CFLAGS += -ffixed-r2 -mmultiple
endif
CPP = $(CC) -E $(CFLAGS)
# Temporary hack until we have migrated to asm-powerpc
LINUXINCLUDE += -Iarch/$(ARCH)/include
CHECKFLAGS += -m$(SZ) -D__powerpc__ -D__powerpc$(SZ)__
ifeq ($(CONFIG_PPC64),y)
GCC_VERSION := $(call cc-version)
GCC_BROKEN_VEC := $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi)
ifeq ($(CONFIG_POWER4_ONLY),y)
ifeq ($(CONFIG_ALTIVEC),y)
ifeq ($(GCC_BROKEN_VEC),y)
CFLAGS += $(call cc-option,-mcpu=970)
else
CFLAGS += $(call cc-option,-mcpu=power4)
endif
else
CFLAGS += $(call cc-option,-mcpu=power4)
endif
else
CFLAGS += $(call cc-option,-mtune=power4)
endif
endif
# Enable unit-at-a-time mode when possible. It shrinks the
# kernel considerably.
CFLAGS += $(call cc-option,-funit-at-a-time)
ifndef CONFIG_FSL_BOOKE
CFLAGS += -mstring
endif
cpu-as-$(CONFIG_PPC64BRIDGE) += -Wa,-mppc64bridge
cpu-as-$(CONFIG_4xx) += -Wa,-m405
cpu-as-$(CONFIG_6xx) += -Wa,-maltivec
cpu-as-$(CONFIG_POWER4) += -Wa,-maltivec
cpu-as-$(CONFIG_E500) += -Wa,-me500
cpu-as-$(CONFIG_E200) += -Wa,-me200
AFLAGS += $(cpu-as-y)
CFLAGS += $(cpu-as-y)
# Default to the common case.
KBUILD_DEFCONFIG := common_defconfig
head-y := arch/powerpc/kernel/head.o
head-$(CONFIG_PPC64) := arch/powerpc/kernel/head_64.o
head-$(CONFIG_8xx) := arch/powerpc/kernel/head_8xx.o
head-$(CONFIG_4xx) := arch/powerpc/kernel/head_4xx.o
head-$(CONFIG_44x) := arch/powerpc/kernel/head_44x.o
head-$(CONFIG_FSL_BOOKE) := arch/powerpc/kernel/head_fsl_booke.o
ifeq ($(CONFIG_PPC32),y)
head-$(CONFIG_6xx) += arch/powerpc/kernel/idle_6xx.o
head-$(CONFIG_POWER4) += arch/powerpc/kernel/idle_power4.o
head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o
endif
core-y += arch/powerpc/kernel/ \
arch/powerpc/mm/ \
arch/powerpc/lib/ \
arch/powerpc/sysdev/
core-$(CONFIG_PPC32) += arch/ppc/kernel/ \
arch/ppc/syslib/
core-$(CONFIG_PPC64) += arch/ppc64/kernel/
core-$(CONFIG_PPC_PMAC) += arch/powerpc/platforms/powermac/
core-$(CONFIG_4xx) += arch/ppc/platforms/4xx/
core-$(CONFIG_83xx) += arch/ppc/platforms/83xx/
core-$(CONFIG_85xx) += arch/ppc/platforms/85xx/
core-$(CONFIG_MATH_EMULATION) += arch/ppc/math-emu/
core-$(CONFIG_XMON) += arch/powerpc/xmon/
core-$(CONFIG_APUS) += arch/ppc/amiga/
drivers-$(CONFIG_8xx) += arch/ppc/8xx_io/
drivers-$(CONFIG_4xx) += arch/ppc/4xx_io/
drivers-$(CONFIG_CPM2) += arch/ppc/8260_io/
drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm
.PHONY: $(BOOT_TARGETS)
all: uImage zImage
CPPFLAGS_vmlinux.lds := -Upowerpc
# All the instructions talk about "make bzImage".
bzImage: zImage
boot := arch/$(ARCH)/boot
$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@
uImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot)/images $(boot)/images/$@
define archhelp
@echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/images/zImage.*)'
@echo ' uImage - Create a bootable image for U-Boot / PPCBoot'
@echo ' install - Install kernel using'
@echo ' (your) ~/bin/installkernel or'
@echo ' (distribution) /sbin/installkernel or'
@echo ' install to $$(INSTALL_PATH) and run lilo'
@echo ' *_defconfig - Select default config from arch/$(ARCH)/ppc/configs'
endef
archclean:
$(Q)$(MAKE) $(clean)=arch/ppc/boot
# Temporary hack until we have migrated to asm-powerpc
$(Q)rm -rf arch/$(ARCH)/include
archprepare: checkbin
# Temporary hack until we have migrated to asm-powerpc
ifeq ($(CONFIG_PPC64),y)
include/asm: arch/$(ARCH)/include/asm
arch/$(ARCH)/include/asm:
$(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
$(Q)ln -fsn $(srctree)/include/asm-ppc64 arch/$(ARCH)/include/asm
else
include/asm: arch/$(ARCH)/include/asm
arch/$(ARCH)/include/asm:
$(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
$(Q)ln -fsn $(srctree)/include/asm-ppc arch/$(ARCH)/include/asm
endif
# Use the file '.tmp_gas_check' for binutils tests, as gas won't output
# to stdout and these checks are run even on install targets.
TOUT := .tmp_gas_check
# Ensure this is binutils 2.12.1 (or 2.12.90.0.7) or later for altivec
# instructions.
# gcc-3.4 and binutils-2.14 are a fatal combination.
GCC_VERSION := $(call cc-version)
checkbin:
@if test "$(GCC_VERSION)" = "0304" ; then \
if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \
echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \
echo 'correctly with gcc-3.4 and your version of binutils.'; \
echo '*** Please upgrade your binutils or downgrade your gcc'; \
false; \
fi ; \
fi
@if ! /bin/echo dssall | $(AS) -many -o $(TOUT) >/dev/null 2>&1 ; then \
echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build ' ; \
echo 'correctly with old versions of binutils.' ; \
echo '*** Please upgrade your binutils to 2.12.1 or newer' ; \
false ; \
fi
CLEAN_FILES += $(TOUT)


@@ -0,0 +1,18 @@
#
# Makefile for the linux kernel.
#
extra-$(CONFIG_PPC_STD_MMU) := head.o
extra-$(CONFIG_PPC64) := head_64.o
extra-$(CONFIG_40x) := head_4xx.o
extra-$(CONFIG_44x) := head_44x.o
extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
extra-$(CONFIG_8xx) := head_8xx.o
extra-$(CONFIG_6xx) += idle_6xx.o
extra-$(CONFIG_POWER4) += idle_power4.o
extra-$(CONFIG_PPC_FPU) += fpu.o
extra-y += vmlinux.lds
obj-y := semaphore.o traps.o process.o
obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o


@@ -0,0 +1,262 @@
/*
* This program is used to generate definitions needed by
* assembly language modules.
*
* We use the technique used in the OSF Mach kernel code:
* generate asm statements containing #defines,
* compile this file to assembler, and then extract the
* #defines from the assembly-language output.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/suspend.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/hardirq.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/rtas.h>
#include <asm/cache.h>
#include <asm/systemcfg.h>
#include <asm/compat.h>
#endif
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define BLANK() asm volatile("\n->" : : )
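/*
 * For example, DEFINE(THREAD, offsetof(struct task_struct, thread)) emits a
 * line of the form "->THREAD <offset> offsetof(struct task_struct, thread)"
 * into the generated assembly; the kbuild asm-offsets rules then turn each
 * "->" marker into a #define in asm-offsets.h for the .S files to use.
 */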
int main(void)
{
/* thread struct on stack */
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
#ifdef CONFIG_PPC32
DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
#endif
#ifdef CONFIG_PPC64
DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
DEFINE(THREAD_SHIFT, THREAD_SHIFT);
#endif
DEFINE(THREAD_SIZE, THREAD_SIZE);
/* task_struct->thread */
DEFINE(THREAD, offsetof(struct task_struct, thread));
DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info));
DEFINE(MM, offsetof(struct task_struct, mm));
DEFINE(PTRACE, offsetof(struct task_struct, ptrace));
DEFINE(KSP, offsetof(struct thread_struct, ksp));
DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall));
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0));
DEFINE(PT_PTRACED, PT_PTRACED);
#endif
#ifdef CONFIG_PPC64
DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid));
#endif
#ifdef CONFIG_ALTIVEC
DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0]));
DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc));
DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr));
DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
#endif /* CONFIG_SPE */
/* Interrupt register frame */
DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
#ifndef CONFIG_PPC64
DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
#else
DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
/* 288 = # of volatile regs, int & fp, for leaf routines */
/* which do not stack a frame. See the PPC64 ABI. */
DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 288);
#endif
/* in fact we only use gpr0 - gpr9 and gpr20 - gpr23 */
DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14]));
DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15]));
DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16]));
DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17]));
DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18]));
DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19]));
DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20]));
DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21]));
DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22]));
DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23]));
DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24]));
DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25]));
DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26]));
DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27]));
DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28]));
DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29]));
DEFINE(GPR30, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30]));
DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31]));
/*
* Note: these symbols include _ because they overlap with special
* register names
*/
DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq));
DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
/* The PowerPC 400-class & Book-E processors have neither the DAR nor the DSISR
* SPRs. Hence, we overload them to hold the similar DEAR and ESR SPRs
* for such processors. For critical interrupts we use them to
* hold SRR0 and SRR1.
*/
DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
DEFINE(TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
DEFINE(CLONE_VM, CLONE_VM);
DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
/* About the CPU features table */
DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec));
DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask));
DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
#ifdef CONFIG_PPC64
DEFINE(MM, offsetof(struct task_struct, mm));
DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size));
DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size));
DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page));
DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
DEFINE(PLATFORM, offsetof(struct systemcfg, platform));
/* paca */
DEFINE(PACA_SIZE, sizeof(struct paca_struct));
DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
#ifdef CONFIG_HUGETLB_PAGE
DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
#endif /* CONFIG_HUGETLB_PAGE */
DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
DEFINE(PACA_EXDSI, offsetof(struct paca_struct, exdsi));
DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca));
DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
/* RTAS */
DEFINE(RTASBASE, offsetof(struct rtas_t, base));
DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));
/* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
/* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
/* systemcfg offsets for use by vdso */
DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct systemcfg, tb_orig_stamp));
DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct systemcfg, tb_ticks_per_sec));
DEFINE(CFG_TB_TO_XS, offsetof(struct systemcfg, tb_to_xs));
DEFINE(CFG_STAMP_XSEC, offsetof(struct systemcfg, stamp_xsec));
DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct systemcfg, tb_update_count));
DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct systemcfg, tz_minuteswest));
DEFINE(CFG_TZ_DSTTIME, offsetof(struct systemcfg, tz_dsttime));
DEFINE(CFG_SYSCALL_MAP32, offsetof(struct systemcfg, syscall_map_32));
DEFINE(CFG_SYSCALL_MAP64, offsetof(struct systemcfg, syscall_map_64));
/* timeval/timezone offsets for use by vdso */
DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec));
DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
#endif
DEFINE(pbe_address, offsetof(struct pbe, address));
DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
DEFINE(pbe_next, offsetof(struct pbe, next));
DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
return 0;
}

133
arch/powerpc/kernel/fpu.S Normal file

@@ -0,0 +1,133 @@
/*
* FPU support code, moved here from head.S so that it can be used
* by chips which use other head-whatever.S files.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
/*
* This task wants to use the FPU now.
* On UP, disable FP for the task which had the FPU previously,
* and save its floating-point registers in its thread_struct.
* Load up this task's FP registers from its thread_struct,
* enable the FPU for the current task and return to the task.
*/
.globl load_up_fpu
load_up_fpu:
mfmsr r5
ori r5,r5,MSR_FP
#ifdef CONFIG_PPC64BRIDGE
clrldi r5,r5,1 /* turn off 64-bit mode */
#endif /* CONFIG_PPC64BRIDGE */
SYNC
MTMSRD(r5) /* enable use of fpu now */
isync
/*
* For SMP, we don't do lazy FPU switching because it just gets too
* horrendously complex, especially when a task switches from one CPU
* to another. Instead we call giveup_fpu in switch_to.
*/
#ifndef CONFIG_SMP
tophys(r6,0) /* get __pa constant */
addis r3,r6,last_task_used_math@ha
lwz r4,last_task_used_math@l(r3)
cmpwi 0,r4,0
beq 1f
add r4,r4,r6
addi r4,r4,THREAD /* want last_task_used_math->thread */
SAVE_32FPRS(0, r4)
mffs fr0
stfd fr0,THREAD_FPSCR-4(r4)
lwz r5,PT_REGS(r4)
add r5,r5,r6
lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
li r10,MSR_FP|MSR_FE0|MSR_FE1
andc r4,r4,r10 /* disable FP for previous task */
stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
/* enable use of FP after return */
mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
lwz r4,THREAD_FPEXC_MODE(r5)
ori r9,r9,MSR_FP /* enable FP for current */
or r9,r9,r4
lfd fr0,THREAD_FPSCR-4(r5)
mtfsf 0xff,fr0
REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
sub r4,r4,r6
stw r4,last_task_used_math@l(r3)
#endif /* CONFIG_SMP */
/* restore registers and return */
/* we haven't used ctr or xer or lr */
b fast_exception_return
/*
* FP unavailable trap from kernel - print a message, but let
* the task use FP in the kernel until it returns to user mode.
*/
.globl KernelFP
KernelFP:
lwz r3,_MSR(r1)
ori r3,r3,MSR_FP
stw r3,_MSR(r1) /* enable use of FP after return */
lis r3,86f@h
ori r3,r3,86f@l
mr r4,r2 /* current */
lwz r5,_NIP(r1)
bl printk
b ret_from_except
86: .string "floating point used in kernel (task=%p, pc=%x)\n"
.align 4,0
/*
* giveup_fpu(tsk)
* Disable FP for the task given as the argument,
* and save the floating-point registers in its thread_struct.
* Enables the FPU for use in the kernel on return.
*/
.globl giveup_fpu
giveup_fpu:
mfmsr r5
ori r5,r5,MSR_FP
SYNC_601
ISYNC_601
MTMSRD(r5) /* enable use of fpu now */
SYNC_601
isync
cmpwi 0,r3,0
beqlr- /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */
lwz r5,PT_REGS(r3)
cmpwi 0,r5,0
SAVE_32FPRS(0, r3)
mffs fr0
stfd fr0,THREAD_FPSCR-4(r3)
beq 1f
lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
li r3,MSR_FP|MSR_FE0|MSR_FE1
andc r4,r4,r3 /* disable FP for previous task */
stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
li r5,0
lis r4,last_task_used_math@ha
stw r5,last_task_used_math@l(r4)
#endif /* CONFIG_SMP */
blr

1545
arch/powerpc/kernel/head.S Normal file

Diff not shown because of its large size.


@@ -0,0 +1,778 @@
/*
* arch/ppc/kernel/head_44x.S
*
* Kernel execution entry point code.
*
* Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
* Initial PowerPC version.
* Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
* Rewritten for PReP
* Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
* Low-level exception handlers, MMU support, and rewrite.
* Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
* PowerPC 8xx modifications.
* Copyright (c) 1998-1999 TiVo, Inc.
* PowerPC 403GCX modifications.
* Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
* PowerPC 403GCX/405GP modifications.
* Copyright 2000 MontaVista Software Inc.
* PPC405 modifications
* PowerPC 403GCX/405GP modifications.
* Author: MontaVista Software, Inc.
* frank_rowand@mvista.com or source@mvista.com
* debbie_chu@mvista.com
* Copyright 2002-2005 MontaVista Software, Inc.
* PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/ibm4xx.h>
#include <asm/ibm44x.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include "head_booke.h"
/* As with the other PowerPC ports, it is expected that when code
* execution begins here, the following registers contain valid, yet
* optional, information:
*
* r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
* r4 - Starting address of the init RAM disk
* r5 - Ending address of the init RAM disk
* r6 - Start of kernel command line string (e.g. "mem=128")
* r7 - End of kernel command line string
*
*/
.text
_GLOBAL(_stext)
_GLOBAL(_start)
/*
* Reserve a word at a fixed location to store the address
* of abatron_pteptrs
*/
nop
/*
* Save parameters we are passed
*/
mr r31,r3
mr r30,r4
mr r29,r5
mr r28,r6
mr r27,r7
li r24,0 /* CPU number */
/*
* Set up the initial MMU state
*
* We are still executing code at the virtual address
* mappings set by the firmware for the base of RAM.
*
* We first invalidate all TLB entries but the one
* we are running from. We then load the KERNELBASE
* mappings so we can begin to use kernel addresses
* natively and so the interrupt vector locations are
* permanently pinned (necessary since Book E
* implementations always have translation enabled).
*
* TODO: Use the known TLB entry we are running from to
* determine which physical region we are located
* in. This can be used to determine where in RAM
* (on a shared CPU system) or PCI memory space
* (on a DRAMless system) we are located.
* For now, we assume a perfect world which means
* we are located at the base of DRAM (physical 0).
*/
/*
* Search TLB for entry that we are currently using.
* Invalidate all entries but the one we are using.
*/
/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
mfspr r3,SPRN_PID /* Get PID */
mfmsr r4 /* Get MSR */
andi. r4,r4,MSR_IS@l /* TS=1? */
beq wmmucr /* If not, leave STS=0 */
oris r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */
wmmucr: mtspr SPRN_MMUCR,r3 /* Put MMUCR */
sync
bl invstr /* Find our address */
invstr: mflr r5 /* Make it accessible */
tlbsx r23,0,r5 /* Find entry we are in */
li r4,0 /* Start at TLB entry 0 */
li r3,0 /* Set PAGEID inval value */
1: cmpw r23,r4 /* Is this our entry? */
beq skpinv /* If so, skip the inval */
tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */
skpinv: addi r4,r4,1 /* Increment */
cmpwi r4,64 /* Are we done? */
bne 1b /* If not, repeat */
isync /* If so, context change */
/*
* Configure and load pinned entry into TLB slot 63.
*/
lis r3,KERNELBASE@h /* Load the kernel virtual address */
ori r3,r3,KERNELBASE@l
/* Kernel is at the base of RAM */
li r4, 0 /* Load the kernel physical address */
/* Load the kernel PID = 0 */
li r0,0
mtspr SPRN_PID,r0
sync
/* Initialize MMUCR */
li r5,0
mtspr SPRN_MMUCR,r5
sync
/* pageid fields */
clrrwi r3,r3,10 /* Mask off the effective page number */
ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
/* xlat fields */
clrrwi r4,r4,10 /* Mask off the real page number */
/* ERPN is 0 for first 4GB page */
/* attrib fields */
/* Added guarded bit to protect against speculative loads/stores */
li r5,0
ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
li r0,63 /* TLB slot 63 */
tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */
tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
/* Force context change */
mfmsr r0
mtspr SPRN_SRR1, r0
lis r0,3f@h
ori r0,r0,3f@l
mtspr SPRN_SRR0,r0
sync
rfi
/* If necessary, invalidate original entry we used */
3: cmpwi r23,63
beq 4f
li r6,0
tlbwe r6,r23,PPC44x_TLB_PAGEID
isync
4:
#ifdef CONFIG_SERIAL_TEXT_DEBUG
/*
* Add temporary UART mapping for early debug.
* We can map UART registers wherever we want as long as they don't
* interfere with other system mappings (e.g. with pinned entries).
* For an example of how we handle this - see ocotea.h. --ebs
*/
/* pageid fields */
lis r3,UART0_IO_BASE@h
ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_4K
/* xlat fields */
lis r4,UART0_PHYS_IO_BASE@h /* RPN depends on SoC */
#ifndef CONFIG_440EP
ori r4,r4,0x0001 /* ERPN is 1 for second 4GB page */
#endif
/* attrib fields */
li r5,0
ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_I | PPC44x_TLB_G)
li r0,0 /* TLB slot 0 */
tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */
tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
/* Force context change */
isync
#endif /* CONFIG_SERIAL_TEXT_DEBUG */
/* Establish the interrupt vector offsets */
SET_IVOR(0, CriticalInput);
SET_IVOR(1, MachineCheck);
SET_IVOR(2, DataStorage);
SET_IVOR(3, InstructionStorage);
SET_IVOR(4, ExternalInput);
SET_IVOR(5, Alignment);
SET_IVOR(6, Program);
SET_IVOR(7, FloatingPointUnavailable);
SET_IVOR(8, SystemCall);
SET_IVOR(9, AuxillaryProcessorUnavailable);
SET_IVOR(10, Decrementer);
SET_IVOR(11, FixedIntervalTimer);
SET_IVOR(12, WatchdogTimer);
SET_IVOR(13, DataTLBError);
SET_IVOR(14, InstructionTLBError);
SET_IVOR(15, Debug);
/* Establish the interrupt vector base */
lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */
mtspr SPRN_IVPR,r4
#ifdef CONFIG_440EP
/* Clear DAPUIB flag in CCR0 (enable APU between CPU and FPU) */
mfspr r2,SPRN_CCR0
lis r3,0xffef
ori r3,r3,0xffff
and r2,r2,r3
mtspr SPRN_CCR0,r2
isync
#endif
/*
* This is where the main kernel code starts.
*/
/* ptr to current */
lis r2,init_task@h
ori r2,r2,init_task@l
/* ptr to current thread */
addi r4,r2,THREAD /* init task's THREAD */
mtspr SPRN_SPRG3,r4
/* stack */
lis r1,init_thread_union@h
ori r1,r1,init_thread_union@l
li r0,0
stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
bl early_init
/*
* Decide what sort of machine this is and initialize the MMU.
*/
mr r3,r31
mr r4,r30
mr r5,r29
mr r6,r28
mr r7,r27
bl machine_init
bl MMU_init
/* Setup PTE pointers for the Abatron bdiGDB */
lis r6, swapper_pg_dir@h
ori r6, r6, swapper_pg_dir@l
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
lis r4, KERNELBASE@h
ori r4, r4, KERNELBASE@l
stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */
stw r6, 0(r5)
/* Let's move on */
lis r4,start_kernel@h
ori r4,r4,start_kernel@l
lis r3,MSR_KERNEL@h
ori r3,r3,MSR_KERNEL@l
mtspr SPRN_SRR0,r4
mtspr SPRN_SRR1,r3
rfi /* change context and jump to start_kernel */
/*
* Interrupt vector entry code
*
* The Book E MMUs are always on so we don't need to handle
* interrupts in real mode as with previous PPC processors. In
* this case we handle interrupts in the kernel virtual address
* space.
*
* Interrupt vectors are dynamically placed relative to the
* interrupt prefix as determined by the address of interrupt_base.
* The interrupt vectors offsets are programmed using the labels
* for each interrupt vector entry.
*
* Interrupt vectors must be aligned on a 16 byte boundary.
* We align on a 32 byte cache line boundary for good measure.
*/
interrupt_base:
/* Critical Input Interrupt */
CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException)
/* Machine Check Interrupt */
#ifdef CONFIG_440A
MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
#else
CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
#endif
/* Data Storage Interrupt */
START_EXCEPTION(DataStorage)
mtspr SPRN_SPRG0, r10 /* Save some working registers */
mtspr SPRN_SPRG1, r11
mtspr SPRN_SPRG4W, r12
mtspr SPRN_SPRG5W, r13
mfcr r11
mtspr SPRN_SPRG7W, r11
/*
* Check if it was a store fault, if not then bail
* because a user tried to access a kernel or
* read-protected page. Otherwise, get the
* offending address and handle it.
*/
mfspr r10, SPRN_ESR
andis. r10, r10, ESR_ST@h
beq 2f
mfspr r10, SPRN_DEAR /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
lis r11, TASK_SIZE@h
cmplw r10, r11
blt+ 3f
lis r11, swapper_pg_dir@h
ori r11, r11, swapper_pg_dir@l
mfspr r12,SPRN_MMUCR
rlwinm r12,r12,0,0,23 /* Clear TID */
b 4f
/* Get the PGD for the current thread */
3:
mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
/* Load PID into MMUCR TID */
mfspr r12,SPRN_MMUCR /* Get MMUCR */
mfspr r13,SPRN_PID /* Get PID */
rlwimi r12,r13,0,24,31 /* Set TID */
4:
mtspr SPRN_MMUCR,r12
rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */
lwzx r11, r12, r11 /* Get pgd/pmd entry */
rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
beq 2f /* Bail if no table */
rlwimi r12, r10, 23, 20, 28 /* Compute pte address */
lwz r11, 4(r12) /* Get pte entry */
andi. r13, r11, _PAGE_RW /* Is it writeable? */
beq 2f /* Bail if not */
/* Update 'changed'.
*/
ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
stw r11, 4(r12) /* Update Linux page table */
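	/* Translate the Linux PTE protection bits into the 44x TLB permission
	 * bits in r13: supervisor read (SR) is always granted, SW/SX follow
	 * _PAGE_RW/_PAGE_HWEXEC, and the user permissions (UR/UW/UX) are only
	 * granted when _PAGE_USER is also set.  The result is merged back into
	 * the low bits of the PTE word in r11 below.
	 */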
li r13, PPC44x_TLB_SR@l /* Set SR */
rlwimi r13, r11, 29, 29, 29 /* SX = _PAGE_HWEXEC */
rlwimi r13, r11, 0, 30, 30 /* SW = _PAGE_RW */
rlwimi r13, r11, 29, 28, 28 /* UR = _PAGE_USER */
rlwimi r12, r11, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */
rlwimi r12, r11, 29, 30, 30 /* (_PAGE_USER>>3)->r12 */
and r12, r12, r11 /* HWEXEC/RW & USER */
rlwimi r13, r12, 0, 26, 26 /* UX = HWEXEC & USER */
rlwimi r13, r12, 3, 27, 27 /* UW = RW & USER */
rlwimi r11,r13,0,26,31 /* Insert static perms */
rlwinm r11,r11,0,20,15 /* Clear U0-U3 */
/* find the TLB index that caused the fault. It has to be here. */
tlbsx r10, 0, r10
tlbwe r11, r10, PPC44x_TLB_ATTRIB /* Write ATTRIB */
/* Done...restore registers and get out of here.
*/
mfspr r11, SPRN_SPRG7R
mtcr r11
mfspr r13, SPRN_SPRG5R
mfspr r12, SPRN_SPRG4R
mfspr r11, SPRN_SPRG1
mfspr r10, SPRN_SPRG0
rfi /* Force context change */
2:
/*
* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
mfspr r11, SPRN_SPRG7R
mtcr r11
mfspr r13, SPRN_SPRG5R
mfspr r12, SPRN_SPRG4R
mfspr r11, SPRN_SPRG1
mfspr r10, SPRN_SPRG0
b data_access
/* Instruction Storage Interrupt */
INSTRUCTION_STORAGE_EXCEPTION
/* External Input Interrupt */
EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
/* Alignment Interrupt */
ALIGNMENT_EXCEPTION
/* Program Interrupt */
PROGRAM_EXCEPTION
/* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
FP_UNAVAILABLE_EXCEPTION
#else
EXCEPTION(0x2010, FloatingPointUnavailable, UnknownException, EXC_XFER_EE)
#endif
/* System Call Interrupt */
START_EXCEPTION(SystemCall)
NORMAL_EXCEPTION_PROLOG
EXC_XFER_EE_LITE(0x0c00, DoSyscall)
/* Auxillary Processor Unavailable Interrupt */
EXCEPTION(0x2020, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE)
/* Decrementer Interrupt */
DECREMENTER_EXCEPTION
	/* Fixed Interval Timer Interrupt */
/* TODO: Add FIT support */
EXCEPTION(0x1010, FixedIntervalTimer, UnknownException, EXC_XFER_EE)
/* Watchdog Timer Interrupt */
/* TODO: Add watchdog support */
#ifdef CONFIG_BOOKE_WDT
CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
#else
CRITICAL_EXCEPTION(0x1020, WatchdogTimer, UnknownException)
#endif
/* Data TLB Error Interrupt */
START_EXCEPTION(DataTLBError)
mtspr SPRN_SPRG0, r10 /* Save some working registers */
mtspr SPRN_SPRG1, r11
mtspr SPRN_SPRG4W, r12
mtspr SPRN_SPRG5W, r13
mfcr r11
mtspr SPRN_SPRG7W, r11
mfspr r10, SPRN_DEAR /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
lis r11, TASK_SIZE@h
cmplw r10, r11
blt+ 3f
lis r11, swapper_pg_dir@h
ori r11, r11, swapper_pg_dir@l
mfspr r12,SPRN_MMUCR
rlwinm r12,r12,0,0,23 /* Clear TID */
b 4f
/* Get the PGD for the current thread */
3:
mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
/* Load PID into MMUCR TID */
mfspr r12,SPRN_MMUCR
mfspr r13,SPRN_PID /* Get PID */
rlwimi r12,r13,0,24,31 /* Set TID */
4:
mtspr SPRN_MMUCR,r12
rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */
lwzx r11, r12, r11 /* Get pgd/pmd entry */
rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
beq 2f /* Bail if no table */
rlwimi r12, r10, 23, 20, 28 /* Compute pte address */
lwz r11, 4(r12) /* Get pte entry */
andi. r13, r11, _PAGE_PRESENT /* Is the page present? */
beq 2f /* Bail if not present */
ori r11, r11, _PAGE_ACCESSED
stw r11, 4(r12)
/* Jump to common tlb load */
b finish_tlb_load
2:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
mfspr r11, SPRN_SPRG7R
mtcr r11
mfspr r13, SPRN_SPRG5R
mfspr r12, SPRN_SPRG4R
mfspr r11, SPRN_SPRG1
mfspr r10, SPRN_SPRG0
b data_access
/* Instruction TLB Error Interrupt */
/*
* Nearly the same as above, except we get our
 * information from different registers and bail out
* to a different point.
*/
START_EXCEPTION(InstructionTLBError)
mtspr SPRN_SPRG0, r10 /* Save some working registers */
mtspr SPRN_SPRG1, r11
mtspr SPRN_SPRG4W, r12
mtspr SPRN_SPRG5W, r13
mfcr r11
mtspr SPRN_SPRG7W, r11
mfspr r10, SPRN_SRR0 /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
lis r11, TASK_SIZE@h
cmplw r10, r11
blt+ 3f
lis r11, swapper_pg_dir@h
ori r11, r11, swapper_pg_dir@l
mfspr r12,SPRN_MMUCR
rlwinm r12,r12,0,0,23 /* Clear TID */
b 4f
/* Get the PGD for the current thread */
3:
mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
/* Load PID into MMUCR TID */
mfspr r12,SPRN_MMUCR
mfspr r13,SPRN_PID /* Get PID */
rlwimi r12,r13,0,24,31 /* Set TID */
4:
mtspr SPRN_MMUCR,r12
rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */
lwzx r11, r12, r11 /* Get pgd/pmd entry */
rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
beq 2f /* Bail if no table */
rlwimi r12, r10, 23, 20, 28 /* Compute pte address */
lwz r11, 4(r12) /* Get pte entry */
andi. r13, r11, _PAGE_PRESENT /* Is the page present? */
beq 2f /* Bail if not present */
ori r11, r11, _PAGE_ACCESSED
stw r11, 4(r12)
/* Jump to common TLB load point */
b finish_tlb_load
2:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
mfspr r11, SPRN_SPRG7R
mtcr r11
mfspr r13, SPRN_SPRG5R
mfspr r12, SPRN_SPRG4R
mfspr r11, SPRN_SPRG1
mfspr r10, SPRN_SPRG0
b InstructionStorage
/* Debug Interrupt */
DEBUG_EXCEPTION
/*
* Local functions
*/
/*
* Data TLB exceptions will bail out to this point
* if they can't resolve the lightweight TLB fault.
*/
data_access:
NORMAL_EXCEPTION_PROLOG
mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
stw r5,_ESR(r11)
mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
EXC_XFER_EE_LITE(0x0300, handle_page_fault)
/*
* Both the instruction and data TLB miss get to this
* point to load the TLB.
* r10 - EA of fault
* r11 - available to use
* r12 - Pointer to the 64-bit PTE
* r13 - available to use
* MMUCR - loaded with proper value when we get here
* Upon exit, we reload everything and RFI.
*/
finish_tlb_load:
/*
* We set execute, because we don't have the granularity to
* properly set this at the page level (Linux problem).
* If shared is set, we cause a zero PID->TID load.
 * Many of these bits are software only.  Bits we don't set
 * here are assumed to already have the appropriate value.
*/
/* Load the next available TLB index */
lis r13, tlb_44x_index@ha
lwz r13, tlb_44x_index@l(r13)
/* Load the TLB high watermark */
lis r11, tlb_44x_hwater@ha
lwz r11, tlb_44x_hwater@l(r11)
/* Increment, rollover, and store TLB index */
addi r13, r13, 1
cmpw 0, r13, r11 /* reserve entries */
ble 7f
li r13, 0
7:
/* Store the next available TLB index */
lis r11, tlb_44x_index@ha
stw r13, tlb_44x_index@l(r11)
lwz r11, 0(r12) /* Get MS word of PTE */
lwz r12, 4(r12) /* Get LS word of PTE */
rlwimi r11, r12, 0, 0 , 19 /* Insert RPN */
tlbwe r11, r13, PPC44x_TLB_XLAT /* Write XLAT */
/*
* Create PAGEID. This is the faulting address,
* page size, and valid flag.
*/
li r11, PPC44x_TLB_VALID | PPC44x_TLB_4K
rlwimi r10, r11, 0, 20, 31 /* Insert valid and page size */
tlbwe r10, r13, PPC44x_TLB_PAGEID /* Write PAGEID */
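	/* As in the DataStorage handler above, translate the Linux PTE bits
	 * into the 44x attribute word: SR is always set, SW/SX come from
	 * _PAGE_RW/_PAGE_HWEXEC, UR from _PAGE_USER, and UX only when both
	 * _PAGE_HWEXEC and _PAGE_USER are set.
	 */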
li r10, PPC44x_TLB_SR@l /* Set SR */
rlwimi r10, r12, 0, 30, 30 /* Set SW = _PAGE_RW */
rlwimi r10, r12, 29, 29, 29 /* SX = _PAGE_HWEXEC */
rlwimi r10, r12, 29, 28, 28 /* UR = _PAGE_USER */
rlwimi r11, r12, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */
and r11, r12, r11 /* HWEXEC & USER */
rlwimi r10, r11, 0, 26, 26 /* UX = HWEXEC & USER */
rlwimi r12, r10, 0, 26, 31 /* Insert static perms */
rlwinm r12, r12, 0, 20, 15 /* Clear U0-U3 */
tlbwe r12, r13, PPC44x_TLB_ATTRIB /* Write ATTRIB */
/* Done...restore registers and get out of here.
*/
mfspr r11, SPRN_SPRG7R
mtcr r11
mfspr r13, SPRN_SPRG5R
mfspr r12, SPRN_SPRG4R
mfspr r11, SPRN_SPRG1
mfspr r10, SPRN_SPRG0
rfi /* Force context change */
/*
* Global functions
*/
/*
* extern void giveup_altivec(struct task_struct *prev)
*
* The 44x core does not have an AltiVec unit.
*/
_GLOBAL(giveup_altivec)
blr
/*
* extern void giveup_fpu(struct task_struct *prev)
*
* The 44x core does not have an FPU.
*/
#ifndef CONFIG_PPC_FPU
_GLOBAL(giveup_fpu)
blr
#endif
/*
* extern void abort(void)
*
* At present, this routine just applies a system reset.
*/
_GLOBAL(abort)
mfspr r13,SPRN_DBCR0
oris r13,r13,DBCR0_RST_SYSTEM@h
mtspr SPRN_DBCR0,r13
_GLOBAL(set_context)
#ifdef CONFIG_BDI_SWITCH
/* Context switch the PTE pointer for the Abatron BDI2000.
* The PGDIR is the second parameter.
*/
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
stw r4, 0x4(r5)
#endif
mtspr SPRN_PID,r3
isync /* Force context change */
blr
/*
* We put a few things here that have to be page-aligned. This stuff
* goes at the beginning of the data segment, which is page-aligned.
*/
.data
_GLOBAL(sdata)
_GLOBAL(empty_zero_page)
.space 4096
/*
* To support >32-bit physical addresses, we use an 8KB pgdir.
*/
_GLOBAL(swapper_pg_dir)
.space 8192
/* Reserved 4k for the critical exception stack & 4k for the machine
* check stack per CPU for kernel mode exceptions */
.section .bss
.align 12
exception_stack_bottom:
.space BOOKE_EXCEPTION_STACK_SIZE
_GLOBAL(exception_stack_top)
/*
* This space gets a copy of optional info passed to us by the bootstrap
* which is used to pass parameters into the kernel like root=/dev/sda1, etc.
*/
_GLOBAL(cmd_line)
.space 512
/*
* Room for two PTE pointers, usually the kernel and current user pointers
* to their respective root page table.
*/
abatron_pteptrs:
.space 8

The diff for this file is not shown because of its large size. Load diff

The diff for this file is not shown because of its large size. Load diff

View file

@ -0,0 +1,860 @@
/*
* arch/ppc/kernel/except_8xx.S
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
* Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
* Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
* Low-level exception handlers and MMU support
* rewritten by Paul Mackerras.
* Copyright (C) 1996 Paul Mackerras.
* MPC8xx modifications by Dan Malek
* Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
*
* This file contains low-level support and setup for PowerPC 8xx
* embedded processors, including trap and interrupt dispatch.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cache.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
/* Macro to make the code more readable. */
#ifdef CONFIG_8xx_CPU6
#define DO_8xx_CPU6(val, reg) \
li reg, val; \
stw reg, 12(r0); \
lwz reg, 12(r0);
#else
#define DO_8xx_CPU6(val, reg)
#endif
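/* A note on the macro above: on parts with the CPU6 erratum, certain SPRs
 * may only be written after a per-SPR command word has been stored to, and
 * read back from, a scratch word in low memory (12(r0)).  DO_8xx_CPU6 emits
 * that store/load of `val' ahead of the mtspr that follows it; on other
 * parts it expands to nothing.
 */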
.text
.globl _stext
_stext:
.text
.globl _start
_start:
/* MPC8xx
* This port was done on an MBX board with an 860. Right now I only
* support an ELF compressed (zImage) boot from EPPC-Bug because the
* code there loads up some registers before calling us:
* r3: ptr to board info data
* r4: initrd_start or if no initrd then 0
* r5: initrd_end - unused if r4 is 0
* r6: Start of command line string
* r7: End of command line string
*
* I decided to use conditional compilation instead of checking PVR and
* adding more processor specific branches around code I don't need.
* Since this is an embedded processor, I also appreciate any memory
* savings I can get.
*
* The MPC8xx does not have any BATs, but it supports large page sizes.
* We first initialize the MMU to support 8M byte pages, then load one
* entry into each of the instruction and data TLBs to map the first
* 8M 1:1. I also mapped an additional I/O space 1:1 so we can get to
* the "internal" processor registers before MMU_init is called.
*
* The TLB code currently contains a major hack. Since I use the condition
* code register, I have to save and restore it. I am out of registers, so
* I just store it in memory location 0 (the TLB handlers are not reentrant).
* To avoid making any decisions, I need to use the "segment" valid bit
* in the first level table, but that would require many changes to the
* Linux page directory/table functions that I don't want to do right now.
*
* I used to use SPRG2 for a temporary register in the TLB handler, but it
* has since been put to other uses. I now use a hack to save a register
* and the CCR at memory location 0.....Someday I'll fix this.....
* -- Dan
*/
.globl __start
__start:
mr r31,r3 /* save parameters */
mr r30,r4
mr r29,r5
mr r28,r6
mr r27,r7
/* We have to turn on the MMU right away so we get cache modes
* set correctly.
*/
bl initial_mmu
/* We now have the lower 8 Meg mapped into TLB entries, and the caches
* ready to work.
*/
turn_on_mmu:
mfmsr r0
ori r0,r0,MSR_DR|MSR_IR
mtspr SPRN_SRR1,r0
lis r0,start_here@h
ori r0,r0,start_here@l
mtspr SPRN_SRR0,r0
SYNC
rfi /* enables MMU */
/*
* Exception entry code. This code runs with address translation
* turned off, i.e. using physical addresses.
* We assume sprg3 has the physical address of the current
* task's thread_struct.
*/
#define EXCEPTION_PROLOG \
mtspr SPRN_SPRG0,r10; \
mtspr SPRN_SPRG1,r11; \
mfcr r10; \
EXCEPTION_PROLOG_1; \
EXCEPTION_PROLOG_2
#define EXCEPTION_PROLOG_1 \
mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \
andi. r11,r11,MSR_PR; \
tophys(r11,r1); /* use tophys(r1) if kernel */ \
beq 1f; \
mfspr r11,SPRN_SPRG3; \
lwz r11,THREAD_INFO-THREAD(r11); \
addi r11,r11,THREAD_SIZE; \
tophys(r11,r11); \
1: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */
#define EXCEPTION_PROLOG_2 \
CLR_TOP32(r11); \
stw r10,_CCR(r11); /* save registers */ \
stw r12,GPR12(r11); \
stw r9,GPR9(r11); \
mfspr r10,SPRN_SPRG0; \
stw r10,GPR10(r11); \
mfspr r12,SPRN_SPRG1; \
stw r12,GPR11(r11); \
mflr r10; \
stw r10,_LINK(r11); \
mfspr r12,SPRN_SRR0; \
mfspr r9,SPRN_SRR1; \
stw r1,GPR1(r11); \
stw r1,0(r11); \
tovirt(r1,r11); /* set new kernel sp */ \
li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
MTMSRD(r10); /* (except for mach check in rtas) */ \
stw r0,GPR0(r11); \
SAVE_4GPRS(3, r11); \
SAVE_2GPRS(7, r11)
/*
* Note: code which follows this uses cr0.eq (set if from kernel),
* r11, r12 (SRR0), and r9 (SRR1).
*
* Note2: once we have set r1 we are in a position to take exceptions
* again, and we could thus set MSR:RI at that point.
*/
/*
* Exception vectors.
*/
#define EXCEPTION(n, label, hdlr, xfer) \
. = n; \
label: \
EXCEPTION_PROLOG; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
xfer(n, hdlr)
#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \
li r10,trap; \
stw r10,TRAP(r11); \
li r10,MSR_KERNEL; \
copyee(r10, r9); \
bl tfer; \
i##n: \
.long hdlr; \
.long ret
#define COPY_EE(d, s) rlwimi d,s,0,16,16
#define NOCOPY(d, s)
#define EXC_XFER_STD(n, hdlr) \
EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \
ret_from_except_full)
#define EXC_XFER_LITE(n, hdlr) \
EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
ret_from_except)
#define EXC_XFER_EE(n, hdlr) \
EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
ret_from_except_full)
#define EXC_XFER_EE_LITE(n, hdlr) \
EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
ret_from_except)
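/* As an illustration of the macros above, the external interrupt entry
 * EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) below expands
 * roughly to:
 *
 *	. = 0x500
 * HardwareInterrupt:
 *	EXCEPTION_PROLOG
 *	addi	r3,r1,STACK_FRAME_OVERHEAD
 *	li	r10,0x501		(the _LITE flavours use trap number n+1)
 *	stw	r10,TRAP(r11)
 *	li	r10,MSR_KERNEL
 *	bl	transfer_to_handler
 * i0x500:
 *	.long	do_IRQ
 *	.long	ret_from_except
 */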
/* System reset */
EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD)
/* Machine check */
. = 0x200
MachineCheck:
EXCEPTION_PROLOG
mfspr r4,SPRN_DAR
stw r4,_DAR(r11)
mfspr r5,SPRN_DSISR
stw r5,_DSISR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_STD(0x200, MachineCheckException)
/* Data access exception.
* This is "never generated" by the MPC8xx. We jump to it for other
* translation errors.
*/
. = 0x300
DataAccess:
EXCEPTION_PROLOG
mfspr r10,SPRN_DSISR
stw r10,_DSISR(r11)
mr r5,r10
mfspr r4,SPRN_DAR
EXC_XFER_EE_LITE(0x300, handle_page_fault)
/* Instruction access exception.
* This is "never generated" by the MPC8xx. We jump to it for other
* translation errors.
*/
. = 0x400
InstructionAccess:
EXCEPTION_PROLOG
mr r4,r12
mr r5,r9
EXC_XFER_EE_LITE(0x400, handle_page_fault)
/* External interrupt */
EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
/* Alignment exception */
. = 0x600
Alignment:
EXCEPTION_PROLOG
mfspr r4,SPRN_DAR
stw r4,_DAR(r11)
mfspr r5,SPRN_DSISR
stw r5,_DSISR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE(0x600, AlignmentException)
/* Program check exception */
EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD)
/* No FPU on MPC8xx. This exception is not supposed to happen.
*/
EXCEPTION(0x800, FPUnavailable, UnknownException, EXC_XFER_STD)
/* Decrementer */
EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE)
EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE)
/* System call */
. = 0xc00
SystemCall:
EXCEPTION_PROLOG
EXC_XFER_EE_LITE(0xc00, DoSyscall)
/* Single step - not used on 601 */
EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD)
EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE)
EXCEPTION(0xf00, Trap_0f, UnknownException, EXC_XFER_EE)
/* On the MPC8xx, this is a software emulation interrupt. It occurs
* for all unimplemented and illegal instructions.
*/
EXCEPTION(0x1000, SoftEmu, SoftwareEmulation, EXC_XFER_STD)
. = 0x1100
/*
* For the MPC8xx, this is a software tablewalk to load the instruction
* TLB. It is modelled after the example in the Motorola manual. The task
* switch loads the M_TWB register with the pointer to the first level table.
* If we discover there is no second level table (value is zero) or if there
* is an invalid pte, we load that into the TLB, which causes another fault
* into the TLB Error interrupt where we can handle such problems.
* We have to use the MD_xxx registers for the tablewalk because the
* equivalent MI_xxx registers only perform the attribute functions.
*/
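/* In outline, the handler below stashes r10 in SPRN_M_TW and CR/r11 (plus
 * r3 on CPU6 parts) in low memory, walks the tables via MD_EPN/M_TWB/MD_TWC
 * (substituting swapper_pg_dir for kernel addresses), marks the pte
 * _PAGE_ACCESSED, fixes up the software-only bits described further down,
 * and writes MI_RPN to load the TLB entry before restoring and rfi'ing.
 */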
InstructionTLBMiss:
#ifdef CONFIG_8xx_CPU6
stw r3, 8(r0)
#endif
DO_8xx_CPU6(0x3f80, r3)
mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
mfcr r10
stw r10, 0(r0)
stw r11, 4(r0)
mfspr r10, SPRN_SRR0 /* Get effective address of fault */
DO_8xx_CPU6(0x3780, r3)
mtspr SPRN_MD_EPN, r10 /* Have to use MD_EPN for walk, MI_EPN can't */
mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
andi. r11, r10, 0x0800 /* Address >= 0x80000000 */
beq 3f
lis r11, swapper_pg_dir@h
ori r11, r11, swapper_pg_dir@l
rlwimi r10, r11, 0, 2, 19
3:
lwz r11, 0(r10) /* Get the level 1 entry */
rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
beq 2f /* If zero, don't try to find a pte */
/* We have a pte table, so load the MI_TWC with the attributes
* for this "segment."
*/
ori r11,r11,1 /* Set valid bit */
DO_8xx_CPU6(0x2b80, r3)
mtspr SPRN_MI_TWC, r11 /* Set segment attributes */
DO_8xx_CPU6(0x3b80, r3)
mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
lwz r10, 0(r11) /* Get the pte */
ori r10, r10, _PAGE_ACCESSED
stw r10, 0(r11)
/* The Linux PTE won't go exactly into the MMU TLB.
* Software indicator bits 21, 22 and 28 must be clear.
* Software indicator bits 24, 25, 26, and 27 must be
* set. All other Linux PTE bits control the behavior
* of the MMU.
*/
2: li r11, 0x00f0
rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
DO_8xx_CPU6(0x2d80, r3)
mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
mfspr r10, SPRN_M_TW /* Restore registers */
lwz r11, 0(r0)
mtcr r11
lwz r11, 4(r0)
#ifdef CONFIG_8xx_CPU6
lwz r3, 8(r0)
#endif
rfi
. = 0x1200
DataStoreTLBMiss:
#ifdef CONFIG_8xx_CPU6
stw r3, 8(r0)
#endif
DO_8xx_CPU6(0x3f80, r3)
mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
mfcr r10
stw r10, 0(r0)
stw r11, 4(r0)
mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
andi. r11, r10, 0x0800
beq 3f
lis r11, swapper_pg_dir@h
ori r11, r11, swapper_pg_dir@l
rlwimi r10, r11, 0, 2, 19
3:
lwz r11, 0(r10) /* Get the level 1 entry */
rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
beq 2f /* If zero, don't try to find a pte */
	/* We have a pte table, so fetch the pte from the table.
*/
ori r11, r11, 1 /* Set valid bit in physical L2 page */
DO_8xx_CPU6(0x3b80, r3)
mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
mfspr r10, SPRN_MD_TWC /* ....and get the pte address */
lwz r10, 0(r10) /* Get the pte */
/* Insert the Guarded flag into the TWC from the Linux PTE.
* It is bit 27 of both the Linux PTE and the TWC (at least
* I got that right :-). It will be better when we can put
* this into the Linux pgd/pmd and load it in the operation
* above.
*/
rlwimi r11, r10, 0, 27, 27
DO_8xx_CPU6(0x3b80, r3)
mtspr SPRN_MD_TWC, r11
mfspr r11, SPRN_MD_TWC /* get the pte address again */
ori r10, r10, _PAGE_ACCESSED
stw r10, 0(r11)
/* The Linux PTE won't go exactly into the MMU TLB.
* Software indicator bits 21, 22 and 28 must be clear.
* Software indicator bits 24, 25, 26, and 27 must be
* set. All other Linux PTE bits control the behavior
* of the MMU.
*/
2: li r11, 0x00f0
rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
DO_8xx_CPU6(0x3d80, r3)
mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
mfspr r10, SPRN_M_TW /* Restore registers */
lwz r11, 0(r0)
mtcr r11
lwz r11, 4(r0)
#ifdef CONFIG_8xx_CPU6
lwz r3, 8(r0)
#endif
rfi
/* This is an instruction TLB error on the MPC8xx. This could be due
* to many reasons, such as executing guarded memory or illegal instruction
* addresses. There is nothing to do but handle a big time error fault.
*/
. = 0x1300
InstructionTLBError:
b InstructionAccess
/* This is the data TLB error on the MPC8xx. This could be due to
* many reasons, including a dirty update to a pte. We can catch that
* one here, but anything else is an error. First, we track down the
 * Linux pte.  If it is valid and write access is allowed, but the
* page dirty bit is not set, we will set it and reload the TLB. For
* any other case, we bail out to a higher level function that can
* handle it.
*/
. = 0x1400
DataTLBError:
#ifdef CONFIG_8xx_CPU6
stw r3, 8(r0)
#endif
DO_8xx_CPU6(0x3f80, r3)
mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
mfcr r10
stw r10, 0(r0)
stw r11, 4(r0)
/* First, make sure this was a store operation.
*/
mfspr r10, SPRN_DSISR
andis. r11, r10, 0x0200 /* If set, indicates store op */
beq 2f
/* The EA of a data TLB miss is automatically stored in the MD_EPN
* register. The EA of a data TLB error is automatically stored in
* the DAR, but not the MD_EPN register. We must copy the 20 most
* significant bits of the EA from the DAR to MD_EPN before we
* start walking the page tables. We also need to copy the CASID
* value from the M_CASID register.
* Addendum: The EA of a data TLB error is _supposed_ to be stored
* in DAR, but it seems that this doesn't happen in some cases, such
* as when the error is due to a dcbi instruction to a page with a
* TLB that doesn't have the changed bit set. In such cases, there
* does not appear to be any way to recover the EA of the error
* since it is neither in DAR nor MD_EPN. As a workaround, the
* _PAGE_HWWRITE bit is set for all kernel data pages when the PTEs
* are initialized in mapin_ram(). This will avoid the problem,
* assuming we only use the dcbi instruction on kernel addresses.
*/
mfspr r10, SPRN_DAR
rlwinm r11, r10, 0, 0, 19
ori r11, r11, MD_EVALID
mfspr r10, SPRN_M_CASID
rlwimi r11, r10, 0, 28, 31
DO_8xx_CPU6(0x3780, r3)
mtspr SPRN_MD_EPN, r11
mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
andi. r11, r10, 0x0800
beq 3f
lis r11, swapper_pg_dir@h
ori r11, r11, swapper_pg_dir@l
rlwimi r10, r11, 0, 2, 19
3:
lwz r11, 0(r10) /* Get the level 1 entry */
rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
beq 2f /* If zero, bail */
/* We have a pte table, so fetch the pte from the table.
*/
ori r11, r11, 1 /* Set valid bit in physical L2 page */
DO_8xx_CPU6(0x3b80, r3)
mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
lwz r10, 0(r11) /* Get the pte */
andi. r11, r10, _PAGE_RW /* Is it writeable? */
beq 2f /* Bail out if not */
/* Update 'changed', among others.
*/
ori r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
mfspr r11, SPRN_MD_TWC /* Get pte address again */
stw r10, 0(r11) /* and update pte in table */
/* The Linux PTE won't go exactly into the MMU TLB.
* Software indicator bits 21, 22 and 28 must be clear.
* Software indicator bits 24, 25, 26, and 27 must be
* set. All other Linux PTE bits control the behavior
* of the MMU.
*/
li r11, 0x00f0
rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
DO_8xx_CPU6(0x3d80, r3)
mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
mfspr r10, SPRN_M_TW /* Restore registers */
lwz r11, 0(r0)
mtcr r11
lwz r11, 4(r0)
#ifdef CONFIG_8xx_CPU6
lwz r3, 8(r0)
#endif
rfi
2:
mfspr r10, SPRN_M_TW /* Restore registers */
lwz r11, 0(r0)
mtcr r11
lwz r11, 4(r0)
#ifdef CONFIG_8xx_CPU6
lwz r3, 8(r0)
#endif
b DataAccess
EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1700, Trap_17, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE)
/* On the MPC8xx, these next four traps are used for development
* support of breakpoints and such. Someday I will get around to
* using them.
*/
EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE)
. = 0x2000
.globl giveup_fpu
giveup_fpu:
blr
/*
* This is where the main kernel code starts.
*/
start_here:
/* ptr to current */
lis r2,init_task@h
ori r2,r2,init_task@l
/* ptr to phys current thread */
tophys(r4,r2)
addi r4,r4,THREAD /* init task's THREAD */
mtspr SPRN_SPRG3,r4
li r3,0
mtspr SPRN_SPRG2,r3 /* 0 => r1 has kernel sp */
/* stack */
lis r1,init_thread_union@ha
addi r1,r1,init_thread_union@l
li r0,0
stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
bl early_init /* We have to do this with MMU on */
/*
* Decide what sort of machine this is and initialize the MMU.
*/
mr r3,r31
mr r4,r30
mr r5,r29
mr r6,r28
mr r7,r27
bl machine_init
bl MMU_init
/*
* Go back to running unmapped so we can load up new values
* and change to using our exception vectors.
* On the 8xx, all we have to do is invalidate the TLB to clear
* the old 8M byte TLB mappings and load the page table base register.
*/
/* The right way to do this would be to track it down through
* init's THREAD like the context switch code does, but this is
* easier......until someone changes init's static structures.
*/
lis r6, swapper_pg_dir@h
ori r6, r6, swapper_pg_dir@l
tophys(r6,r6)
#ifdef CONFIG_8xx_CPU6
lis r4, cpu6_errata_word@h
ori r4, r4, cpu6_errata_word@l
li r3, 0x3980
stw r3, 12(r4)
lwz r3, 12(r4)
#endif
mtspr SPRN_M_TWB, r6
lis r4,2f@h
ori r4,r4,2f@l
tophys(r4,r4)
li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
mtspr SPRN_SRR0,r4
mtspr SPRN_SRR1,r3
rfi
/* Load up the kernel context */
2:
SYNC /* Force all PTE updates to finish */
tlbia /* Clear all TLB entries */
sync /* wait for tlbia/tlbie to finish */
TLBSYNC /* ... on all CPUs */
/* set up the PTE pointers for the Abatron bdiGDB.
*/
tovirt(r6,r6)
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
stw r5, 0xf0(r0) /* Must match your Abatron config file */
tophys(r5,r5)
stw r6, 0(r5)
/* Now turn on the MMU for real! */
li r4,MSR_KERNEL
lis r3,start_kernel@h
ori r3,r3,start_kernel@l
mtspr SPRN_SRR0,r3
mtspr SPRN_SRR1,r4
rfi /* enable MMU and jump to start_kernel */
/* Set up the initial MMU state so we can do the first level of
* kernel initialization. This maps the first 8 MBytes of memory 1:1
* virtual to physical. Also, set the cache mode since that is defined
* by TLB entries and perform any additional mapping (like of the IMMR).
* If configured to pin some TLBs, we pin the first 8 Mbytes of kernel,
* 24 Mbytes of data, and the 8M IMMR space. Anything not covered by
* these mappings is mapped by page tables.
*/
initial_mmu:
tlbia /* Invalidate all TLB entries */
#ifdef CONFIG_PIN_TLB
lis r8, MI_RSV4I@h
ori r8, r8, 0x1c00
#else
li r8, 0
#endif
mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */
#ifdef CONFIG_PIN_TLB
lis r10, (MD_RSV4I | MD_RESETVAL)@h
ori r10, r10, 0x1c00
mr r8, r10
#else
lis r10, MD_RESETVAL@h
#endif
#ifndef CONFIG_8xx_COPYBACK
oris r10, r10, MD_WTDEF@h
#endif
mtspr SPRN_MD_CTR, r10 /* Set data TLB control */
/* Now map the lower 8 Meg into the TLBs. For this quick hack,
* we can load the instruction and data TLB registers with the
* same values.
*/
lis r8, KERNELBASE@h /* Create vaddr for TLB */
ori r8, r8, MI_EVALID /* Mark it valid */
mtspr SPRN_MI_EPN, r8
mtspr SPRN_MD_EPN, r8
li r8, MI_PS8MEG /* Set 8M byte page */
ori r8, r8, MI_SVALID /* Make it valid */
mtspr SPRN_MI_TWC, r8
mtspr SPRN_MD_TWC, r8
li r8, MI_BOOTINIT /* Create RPN for address 0 */
mtspr SPRN_MI_RPN, r8 /* Store TLB entry */
mtspr SPRN_MD_RPN, r8
lis r8, MI_Kp@h /* Set the protection mode */
mtspr SPRN_MI_AP, r8
mtspr SPRN_MD_AP, r8
/* Map another 8 MByte at the IMMR to get the processor
* internal registers (among other things).
*/
#ifdef CONFIG_PIN_TLB
addi r10, r10, 0x0100
mtspr SPRN_MD_CTR, r10
#endif
mfspr r9, 638 /* Get current IMMR */
andis. r9, r9, 0xff80 /* Get 8Mbyte boundary */
mr r8, r9 /* Create vaddr for TLB */
ori r8, r8, MD_EVALID /* Mark it valid */
mtspr SPRN_MD_EPN, r8
li r8, MD_PS8MEG /* Set 8M byte page */
ori r8, r8, MD_SVALID /* Make it valid */
mtspr SPRN_MD_TWC, r8
mr r8, r9 /* Create paddr for TLB */
ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
mtspr SPRN_MD_RPN, r8
#ifdef CONFIG_PIN_TLB
/* Map two more 8M kernel data pages.
*/
addi r10, r10, 0x0100
mtspr SPRN_MD_CTR, r10
lis r8, KERNELBASE@h /* Create vaddr for TLB */
addis r8, r8, 0x0080 /* Add 8M */
ori r8, r8, MI_EVALID /* Mark it valid */
mtspr SPRN_MD_EPN, r8
li r9, MI_PS8MEG /* Set 8M byte page */
ori r9, r9, MI_SVALID /* Make it valid */
mtspr SPRN_MD_TWC, r9
li r11, MI_BOOTINIT /* Create RPN for address 0 */
addis r11, r11, 0x0080 /* Add 8M */
	mtspr	SPRN_MD_RPN, r11
addis r8, r8, 0x0080 /* Add 8M */
mtspr SPRN_MD_EPN, r8
mtspr SPRN_MD_TWC, r9
addis r11, r11, 0x0080 /* Add 8M */
	mtspr	SPRN_MD_RPN, r11
#endif
/* Since the cache is enabled according to the information we
* just loaded into the TLB, invalidate and enable the caches here.
* We should probably check/set other modes....later.
*/
lis r8, IDC_INVALL@h
mtspr SPRN_IC_CST, r8
mtspr SPRN_DC_CST, r8
lis r8, IDC_ENABLE@h
mtspr SPRN_IC_CST, r8
#ifdef CONFIG_8xx_COPYBACK
mtspr SPRN_DC_CST, r8
#else
/* For a debug option, I left this here to easily enable
* the write through cache mode
*/
lis r8, DC_SFWT@h
mtspr SPRN_DC_CST, r8
lis r8, IDC_ENABLE@h
mtspr SPRN_DC_CST, r8
#endif
blr
/*
* Set up to use a given MMU context.
* r3 is context number, r4 is PGD pointer.
*
* We place the physical address of the new task page directory loaded
* into the MMU base register, and set the ASID compare register with
* the new "context."
*/
_GLOBAL(set_context)
#ifdef CONFIG_BDI_SWITCH
/* Context switch the PTE pointer for the Abatron BDI2000.
* The PGDIR is passed as second argument.
*/
lis r5, KERNELBASE@h
lwz r5, 0xf0(r5)
stw r4, 0x4(r5)
#endif
#ifdef CONFIG_8xx_CPU6
lis r6, cpu6_errata_word@h
ori r6, r6, cpu6_errata_word@l
tophys (r4, r4)
li r7, 0x3980
stw r7, 12(r6)
lwz r7, 12(r6)
mtspr SPRN_M_TWB, r4 /* Update MMU base address */
li r7, 0x3380
stw r7, 12(r6)
lwz r7, 12(r6)
mtspr SPRN_M_CASID, r3 /* Update context */
#else
mtspr SPRN_M_CASID,r3 /* Update context */
tophys (r4, r4)
mtspr SPRN_M_TWB, r4 /* and pgd */
#endif
SYNC
blr
#ifdef CONFIG_8xx_CPU6
/* It's here because it is unique to the 8xx.
* It is important we get called with interrupts disabled. I used to
* do that, but it appears that all code that calls this already had
 * interrupts disabled.
*/
.globl set_dec_cpu6
set_dec_cpu6:
lis r7, cpu6_errata_word@h
ori r7, r7, cpu6_errata_word@l
li r4, 0x2c00
stw r4, 8(r7)
lwz r4, 8(r7)
mtspr 22, r3 /* Update Decrementer */
SYNC
blr
#endif
/*
* We put a few things here that have to be page-aligned.
* This stuff goes at the beginning of the data segment,
* which is page-aligned.
*/
.data
.globl sdata
sdata:
.globl empty_zero_page
empty_zero_page:
.space 4096
.globl swapper_pg_dir
swapper_pg_dir:
.space 4096
/*
* This space gets a copy of optional info passed to us by the bootstrap
* Used to pass parameters into the kernel like root=/dev/sda1, etc.
*/
.globl cmd_line
cmd_line:
.space 512
/* Room for two PTE table pointers, usually the kernel and current user
 * pointers to their respective root page table (pgdir).
*/
abatron_pteptrs:
.space 8
#ifdef CONFIG_8xx_CPU6
.globl cpu6_errata_word
cpu6_errata_word:
.space 16
#endif

The diff for this file is not shown because of its large size. Load diff

View file

@ -0,0 +1,233 @@
/*
* This file contains the power_save function for 6xx & 7xxx CPUs
* rewritten in assembler
*
* Warning ! This code assumes that if your machine has a 750fx
* it will have PLL 1 set to low speed mode (used during NAP/DOZE).
 * If this is not the case, some additional changes will have to
* be done to check a runtime var (a bit like powersave-nap)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#undef DEBUG
.text
/*
* Init idle, called at early CPU setup time from head.S for each CPU
 * Make sure no leftover NAP state remains in HID0, and save default
* values for some CPU specific registers. Called with r24
* containing CPU number and r3 reloc offset
*/
_GLOBAL(init_idle_6xx)
BEGIN_FTR_SECTION
mfspr r4,SPRN_HID0
rlwinm r4,r4,0,10,8 /* Clear NAP */
mtspr SPRN_HID0, r4
b 1f
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
blr
1:
slwi r5,r24,2
add r5,r5,r3
BEGIN_FTR_SECTION
mfspr r4,SPRN_MSSCR0
addis r6,r5, nap_save_msscr0@ha
stw r4,nap_save_msscr0@l(r6)
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
BEGIN_FTR_SECTION
mfspr r4,SPRN_HID1
addis r6,r5,nap_save_hid1@ha
stw r4,nap_save_hid1@l(r6)
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
blr
/*
* Here is the power_save_6xx function. This could eventually be
 * split into several functions & change the function pointer
* depending on the various features.
*/
_GLOBAL(ppc6xx_idle)
/* Check if we can nap or doze, put HID0 mask in r3
*/
lis r3, 0
BEGIN_FTR_SECTION
lis r3,HID0_DOZE@h
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
/* We must dynamically check for the NAP feature as it
* can be cleared by CPU init after the fixups are done
*/
lis r4,cur_cpu_spec@ha
lwz r4,cur_cpu_spec@l(r4)
lwz r4,CPU_SPEC_FEATURES(r4)
andi. r0,r4,CPU_FTR_CAN_NAP
beq 1f
/* Now check if user or arch enabled NAP mode */
lis r4,powersave_nap@ha
lwz r4,powersave_nap@l(r4)
cmpwi 0,r4,0
beq 1f
lis r3,HID0_NAP@h
1:
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
cmpwi 0,r3,0
beqlr
/* Clear MSR:EE */
mfmsr r7
rlwinm r0,r7,0,17,15
mtmsr r0
/* Check current_thread_info()->flags */
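	/* (the kernel stack is THREAD_SIZE aligned, so clearing the low bits
	 * of r1 yields the thread_info sitting at the base of the stack)
	 */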
rlwinm r4,r1,0,0,18
lwz r4,TI_FLAGS(r4)
andi. r0,r4,_TIF_NEED_RESCHED
beq 1f
mtmsr r7 /* out of line this ? */
blr
1:
/* Some pre-nap cleanups needed on some CPUs */
andis. r0,r3,HID0_NAP@h
beq 2f
BEGIN_FTR_SECTION
/* Disable L2 prefetch on some 745x and try to ensure
* L2 prefetch engines are idle. As explained by errata
* text, we can't be sure they are, we just hope very hard
 * that this will be enough (sic!). At least I noticed Apple
* doesn't even bother doing the dcbf's here...
*/
mfspr r4,SPRN_MSSCR0
rlwinm r4,r4,0,0,29
sync
mtspr SPRN_MSSCR0,r4
sync
isync
lis r4,KERNELBASE@h
dcbf 0,r4
dcbf 0,r4
dcbf 0,r4
dcbf 0,r4
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
#ifdef DEBUG
lis r6,nap_enter_count@ha
lwz r4,nap_enter_count@l(r6)
addi r4,r4,1
stw r4,nap_enter_count@l(r6)
#endif
2:
BEGIN_FTR_SECTION
/* Go to low speed mode on some 750FX */
lis r4,powersave_lowspeed@ha
lwz r4,powersave_lowspeed@l(r4)
cmpwi 0,r4,0
beq 1f
mfspr r4,SPRN_HID1
oris r4,r4,0x0001
mtspr SPRN_HID1,r4
1:
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
/* Go to NAP or DOZE now */
mfspr r4,SPRN_HID0
lis r5,(HID0_NAP|HID0_SLEEP)@h
BEGIN_FTR_SECTION
oris r5,r5,HID0_DOZE@h
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
andc r4,r4,r5
or r4,r4,r3
BEGIN_FTR_SECTION
oris r4,r4,HID0_DPM@h /* that should be done once for all */
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
mtspr SPRN_HID0,r4
BEGIN_FTR_SECTION
DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	ori	r7,r7,MSR_EE	/* Could be omitted (already set) */
oris r7,r7,MSR_POW@h
sync
isync
mtmsr r7
isync
sync
blr
/*
 * Return from NAP/DOZE mode and restore some CPU specific registers;
* we are called with DR/IR still off and r2 containing physical
* address of current.
*/
_GLOBAL(power_save_6xx_restore)
mfspr r11,SPRN_HID0
rlwinm. r11,r11,0,10,8 /* Clear NAP & copy NAP bit !state to cr1 EQ */
cror 4*cr1+eq,4*cr0+eq,4*cr0+eq
BEGIN_FTR_SECTION
rlwinm r11,r11,0,9,7 /* Clear DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
mtspr SPRN_HID0, r11
#ifdef DEBUG
beq cr1,1f
lis r11,(nap_return_count-KERNELBASE)@ha
lwz r9,nap_return_count@l(r11)
addi r9,r9,1
stw r9,nap_return_count@l(r11)
1:
#endif
rlwinm r9,r1,0,0,18
tophys(r9,r9)
lwz r11,TI_CPU(r9)
slwi r11,r11,2
/* Todo make sure all these are in the same page
* and load r22 (@ha part + CPU offset) only once
*/
BEGIN_FTR_SECTION
beq cr1,1f
addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha
lwz r9,nap_save_msscr0@l(r9)
mtspr SPRN_MSSCR0, r9
sync
isync
1:
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
BEGIN_FTR_SECTION
addis r9,r11,(nap_save_hid1-KERNELBASE)@ha
lwz r9,nap_save_hid1@l(r9)
mtspr SPRN_HID1, r9
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
b transfer_to_handler_cont
.data
_GLOBAL(nap_save_msscr0)
.space 4*NR_CPUS
_GLOBAL(nap_save_hid1)
.space 4*NR_CPUS
_GLOBAL(powersave_nap)
.long 0
_GLOBAL(powersave_lowspeed)
.long 0
#ifdef DEBUG
_GLOBAL(nap_enter_count)
.space 4
_GLOBAL(nap_return_count)
.space 4
#endif

View file

@ -0,0 +1,724 @@
/*
* arch/ppc/kernel/process.c
*
* Derived from "arch/i386/kernel/process.c"
* Copyright (C) 1995 Linus Torvalds
*
* Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
* Paul Mackerras (paulus@cs.anu.edu.au)
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
extern unsigned long _get_SP(void);
#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
EXPORT_SYMBOL(init_mm);
/* this is 8kB-aligned so we can get to the thread_info struct
at the base of it from the stack pointer with 1 integer instruction. */
union thread_union init_thread_union
__attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };
/* initial task structure */
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
/* only used to get secondary processor up */
struct task_struct *current_set[NR_CPUS] = {&init_task, };
/*
 * Make sure the floating-point register state in
 * the thread_struct is up to date for task tsk.
*/
void flush_fp_to_thread(struct task_struct *tsk)
{
if (tsk->thread.regs) {
/*
* We need to disable preemption here because if we didn't,
* another process could get scheduled after the regs->msr
* test but before we have finished saving the FP registers
* to the thread_struct. That process could take over the
* FPU, and then when we get scheduled again we would store
* bogus values for the remaining FP registers.
*/
preempt_disable();
if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
/*
* This should only ever be called for current or
* for a stopped child process. Since we save away
* the FP register state on context switch on SMP,
* there is something wrong if a stopped child appears
* to still have its FP state in the CPU registers.
*/
BUG_ON(tsk != current);
#endif
giveup_fpu(current);
}
preempt_enable();
}
}
void enable_kernel_fp(void)
{
WARN_ON(preemptible());
#ifdef CONFIG_SMP
if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
giveup_fpu(current);
else
giveup_fpu(NULL); /* just enables FP for kernel */
#else
giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);
int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
{
if (!tsk->thread.regs)
return 0;
flush_fp_to_thread(current);
memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
return 1;
}
#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
WARN_ON(preemptible());
#ifdef CONFIG_SMP
if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
giveup_altivec(current);
else
giveup_altivec(NULL); /* just enable AltiVec for kernel - force */
#else
giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);
/*
 * Make sure the VMX/Altivec register state in
 * the thread_struct is up to date for task tsk.
*/
void flush_altivec_to_thread(struct task_struct *tsk)
{
if (tsk->thread.regs) {
preempt_disable();
if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
BUG_ON(tsk != current);
#endif
giveup_altivec(current);
}
preempt_enable();
}
}
int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
{
flush_altivec_to_thread(current);
memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
return 1;
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
void enable_kernel_spe(void)
{
WARN_ON(preemptible());
#ifdef CONFIG_SMP
if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
giveup_spe(current);
else
giveup_spe(NULL); /* just enable SPE for kernel - force */
#else
giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);
void flush_spe_to_thread(struct task_struct *tsk)
{
if (tsk->thread.regs) {
preempt_disable();
if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
BUG_ON(tsk != current);
#endif
giveup_spe(current);
}
preempt_enable();
}
}
int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
{
flush_spe_to_thread(current);
/* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
return 1;
}
#endif /* CONFIG_SPE */
static void set_dabr_spr(unsigned long val)
{
mtspr(SPRN_DABR, val);
}
int set_dabr(unsigned long dabr)
{
int ret = 0;
#ifdef CONFIG_PPC64
if (firmware_has_feature(FW_FEATURE_XDABR)) {
/* We want to catch accesses from kernel and userspace */
unsigned long flags = H_DABRX_KERNEL|H_DABRX_USER;
ret = plpar_set_xdabr(dabr, flags);
} else if (firmware_has_feature(FW_FEATURE_DABR)) {
ret = plpar_set_dabr(dabr);
} else
#endif
set_dabr_spr(dabr);
return ret;
}
static DEFINE_PER_CPU(unsigned long, current_dabr);
struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *new)
{
struct thread_struct *new_thread, *old_thread;
unsigned long flags;
struct task_struct *last;
#ifdef CONFIG_SMP
/* avoid complexity of lazy save/restore of fpu
* by just saving it every time we switch out if
* this task used the fpu during the last quantum.
*
* If it tries to use the fpu again, it'll trap and
* reload its fp regs. So we don't have to do a restore
* every switch, just a save.
* -- Cort
*/
if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
/*
* If the previous thread used altivec in the last quantum
* (thus changing altivec regs) then save them.
* We used to check the VRSAVE register but not all apps
* set it, so we don't rely on it now (and in fact we need
* to save & restore VSCR even if VRSAVE == 0). -- paulus
*
* On SMP we always save/restore altivec regs just to avoid the
* complexity of changing processors.
* -- Cort
*/
if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
giveup_altivec(prev);
	/* Avoid the trap.  On SMP this never happens since
* we don't set last_task_used_altivec -- Cort
*/
if (new->thread.regs && last_task_used_altivec == new)
new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
/*
* If the previous thread used spe in the last quantum
* (thus changing spe regs) then save them.
*
* On SMP we always save/restore spe regs just to avoid the
* complexity of changing processors.
*/
if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
giveup_spe(prev);
	/* Avoid the trap.  On SMP this never happens since
* we don't set last_task_used_spe
*/
if (new->thread.regs && last_task_used_spe == new)
new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */
#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC64 /* for now */
if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) {
set_dabr(new->thread.dabr);
__get_cpu_var(current_dabr) = new->thread.dabr;
}
#endif
new_thread = &new->thread;
old_thread = &current->thread;
local_irq_save(flags);
last = _switch(old_thread, new_thread);
local_irq_restore(flags);
return last;
}
void show_regs(struct pt_regs * regs)
{
int i, trap;
printk("NIP: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx %s\n",
regs->nip, regs->link, regs->gpr[1], regs, regs->trap,
print_tainted());
printk("MSR: %08lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n",
regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0,
regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0,
regs->msr&MSR_IR ? 1 : 0,
regs->msr&MSR_DR ? 1 : 0);
trap = TRAP(regs);
if (trap == 0x300 || trap == 0x600)
printk("DAR: %08lX, DSISR: %08lX\n", regs->dar, regs->dsisr);
printk("TASK = %p[%d] '%s' THREAD: %p\n",
current, current->pid, current->comm, current->thread_info);
printk("Last syscall: %ld ", current->thread.last_syscall);
#ifdef CONFIG_SMP
printk(" CPU: %d", smp_processor_id());
#endif /* CONFIG_SMP */
for (i = 0; i < 32; i++) {
long r;
if ((i % 8) == 0)
printk("\n" KERN_INFO "GPR%02d: ", i);
if (__get_user(r, &regs->gpr[i]))
break;
printk("%08lX ", r);
if (i == 12 && !FULL_REGS(regs))
break;
}
printk("\n");
#ifdef CONFIG_KALLSYMS
/*
 * Look up NIP late so we have the best chance of getting the
* above info out without failing
*/
printk("NIP [%08lx] ", regs->nip);
print_symbol("%s\n", regs->nip);
printk("LR [%08lx] ", regs->link);
print_symbol("%s\n", regs->link);
#endif
show_stack(current, (unsigned long *) regs->gpr[1]);
}
void exit_thread(void)
{
#ifndef CONFIG_SMP
if (last_task_used_math == current)
last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
if (last_task_used_altivec == current)
last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
if (last_task_used_spe == current)
last_task_used_spe = NULL;
#endif
#endif /* CONFIG_SMP */
}
void flush_thread(void)
{
#ifndef CONFIG_SMP
if (last_task_used_math == current)
last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
if (last_task_used_altivec == current)
last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
if (last_task_used_spe == current)
last_task_used_spe = NULL;
#endif
#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC64 /* for now */
if (current->thread.dabr) {
current->thread.dabr = 0;
set_dabr(0);
}
#endif
}
void
release_thread(struct task_struct *t)
{
}
/*
* This gets called before we allocate a new thread and copy
* the current task into it.
*/
void prepare_to_copy(struct task_struct *tsk)
{
flush_fp_to_thread(current);
flush_altivec_to_thread(current);
flush_spe_to_thread(current);
}
/*
* Copy a thread..
*/
int
copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
unsigned long unused,
struct task_struct *p, struct pt_regs *regs)
{
struct pt_regs *childregs, *kregs;
extern void ret_from_fork(void);
unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE;
unsigned long childframe;
CHECK_FULL_REGS(regs);
/* Copy registers */
sp -= sizeof(struct pt_regs);
childregs = (struct pt_regs *) sp;
*childregs = *regs;
if ((childregs->msr & MSR_PR) == 0) {
/* for kernel thread, set `current' and stackptr in new task */
childregs->gpr[1] = sp + sizeof(struct pt_regs);
childregs->gpr[2] = (unsigned long) p;
p->thread.regs = NULL; /* no user register state */
} else {
childregs->gpr[1] = usp;
p->thread.regs = childregs;
if (clone_flags & CLONE_SETTLS)
childregs->gpr[2] = childregs->gpr[6];
}
childregs->gpr[3] = 0; /* Result from fork() */
sp -= STACK_FRAME_OVERHEAD;
childframe = sp;
/*
* The way this works is that at some point in the future
* some task will call _switch to switch to the new task.
* That will pop off the stack frame created below and start
* the new task running at ret_from_fork. The new task will
* do some house keeping and then return from the fork or clone
* system call, using the stack frame created above.
*/
sp -= sizeof(struct pt_regs);
kregs = (struct pt_regs *) sp;
sp -= STACK_FRAME_OVERHEAD;
p->thread.ksp = sp;
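	/*
	 * At this point the child's kernel stack, from the top down, holds:
	 * childregs (the pt_regs image copied above), STACK_FRAME_OVERHEAD,
	 * kregs (of which only nip is used, set to ret_from_fork below), and
	 * a final STACK_FRAME_OVERHEAD that p->thread.ksp points at.
	 */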
kregs->nip = (unsigned long)ret_from_fork;
p->thread.last_syscall = -1;
return 0;
}
/*
* Set up a thread for executing a new program
*/
void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
{
set_fs(USER_DS);
memset(regs->gpr, 0, sizeof(regs->gpr));
regs->ctr = 0;
regs->link = 0;
regs->xer = 0;
regs->ccr = 0;
regs->mq = 0;
regs->nip = nip;
regs->gpr[1] = sp;
regs->msr = MSR_USER;
#ifndef CONFIG_SMP
if (last_task_used_math == current)
last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
if (last_task_used_altivec == current)
last_task_used_altivec = NULL;
#endif
#ifdef CONFIG_SPE
if (last_task_used_spe == current)
last_task_used_spe = NULL;
#endif
#endif /* CONFIG_SMP */
memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
current->thread.fpscr = 0;
#ifdef CONFIG_ALTIVEC
memset(current->thread.vr, 0, sizeof(current->thread.vr));
memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
current->thread.vrsave = 0;
current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
memset(current->thread.evr, 0, sizeof(current->thread.evr));
current->thread.acc = 0;
current->thread.spefscr = 0;
current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}
#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
| PR_FP_EXC_RES | PR_FP_EXC_INV)
int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
struct pt_regs *regs = tsk->thread.regs;
/* This is a bit hairy. If we are an SPE enabled processor
* (have embedded fp) we store the IEEE exception enable flags in
* fpexc_mode. fpexc_mode is also used for setting FP exception
 * mode (async, precise, disabled) for 'Classic' FP. */
if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
tsk->thread.fpexc_mode = val &
(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
#else
return -EINVAL;
#endif
} else {
/* on a CONFIG_SPE this does not hurt us. The bits that
* __pack_fe01 use do not overlap with bits used for
* PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
* on CONFIG_SPE implementations are reserved so writing to
* them does not change anything */
if (val > PR_FP_EXC_PRECISE)
return -EINVAL;
tsk->thread.fpexc_mode = __pack_fe01(val);
if (regs != NULL && (regs->msr & MSR_FP) != 0)
regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
| tsk->thread.fpexc_mode;
}
return 0;
}
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
unsigned int val;
if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
val = tsk->thread.fpexc_mode;
#else
return -EINVAL;
#endif
else
val = __unpack_fe01(tsk->thread.fpexc_mode);
return put_user(val, (unsigned int __user *) adr);
}
int sys_clone(unsigned long clone_flags, unsigned long usp,
int __user *parent_tidp, void __user *child_threadptr,
int __user *child_tidp, int p6,
struct pt_regs *regs)
{
CHECK_FULL_REGS(regs);
if (usp == 0)
usp = regs->gpr[1]; /* stack pointer for child */
return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}
int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
unsigned long p4, unsigned long p5, unsigned long p6,
struct pt_regs *regs)
{
CHECK_FULL_REGS(regs);
return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}
int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
unsigned long p4, unsigned long p5, unsigned long p6,
struct pt_regs *regs)
{
CHECK_FULL_REGS(regs);
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
regs, 0, NULL, NULL);
}
int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
unsigned long a3, unsigned long a4, unsigned long a5,
struct pt_regs *regs)
{
int error;
char * filename;
filename = getname((char __user *) a0);
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
flush_fp_to_thread(current);
flush_altivec_to_thread(current);
flush_spe_to_thread(current);
error = do_execve(filename, (char __user * __user *) a1,
		  (char __user * __user *) a2, regs);
if (error == 0) {
task_lock(current);
current->ptrace &= ~PT_DTRACE;
task_unlock(current);
}
putname(filename);
out:
return error;
}
static int validate_sp(unsigned long sp, struct task_struct *p,
unsigned long nbytes)
{
unsigned long stack_page = (unsigned long)p->thread_info;
if (sp >= stack_page + sizeof(struct thread_struct)
&& sp <= stack_page + THREAD_SIZE - nbytes)
return 1;
#ifdef CONFIG_IRQSTACKS
stack_page = (unsigned long) hardirq_ctx[task_cpu(p)];
if (sp >= stack_page + sizeof(struct thread_struct)
&& sp <= stack_page + THREAD_SIZE - nbytes)
return 1;
stack_page = (unsigned long) softirq_ctx[task_cpu(p)];
if (sp >= stack_page + sizeof(struct thread_struct)
&& sp <= stack_page + THREAD_SIZE - nbytes)
return 1;
#endif
return 0;
}
void dump_stack(void)
{
show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);
void show_stack(struct task_struct *tsk, unsigned long *stack)
{
unsigned long sp, stack_top, prev_sp, ret;
int count = 0;
unsigned long next_exc = 0;
struct pt_regs *regs;
extern char ret_from_except, ret_from_except_full, ret_from_syscall;
sp = (unsigned long) stack;
if (tsk == NULL)
tsk = current;
if (sp == 0) {
if (tsk == current)
asm("mr %0,1" : "=r" (sp));
else
sp = tsk->thread.ksp;
}
prev_sp = (unsigned long) (tsk->thread_info + 1);
stack_top = (unsigned long) tsk->thread_info + THREAD_SIZE;
while (count < 16 && sp > prev_sp && sp < stack_top && (sp & 3) == 0) {
if (count == 0) {
printk("Call trace:");
#ifdef CONFIG_KALLSYMS
printk("\n");
#endif
} else {
if (next_exc) {
ret = next_exc;
next_exc = 0;
} else
ret = *(unsigned long *)(sp + 4);
printk(" [%08lx] ", ret);
#ifdef CONFIG_KALLSYMS
print_symbol("%s", ret);
printk("\n");
#endif
if (ret == (unsigned long) &ret_from_except
|| ret == (unsigned long) &ret_from_except_full
|| ret == (unsigned long) &ret_from_syscall) {
/* sp + 16 points to an exception frame */
regs = (struct pt_regs *) (sp + 16);
if (sp + 16 + sizeof(*regs) <= stack_top)
next_exc = regs->nip;
}
}
++count;
sp = *(unsigned long *)sp;
}
#ifndef CONFIG_KALLSYMS
if (count > 0)
printk("\n");
#endif
}
unsigned long get_wchan(struct task_struct *p)
{
unsigned long ip, sp;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
sp = p->thread.ksp;
if (!validate_sp(sp, p, 16))
return 0;
do {
sp = *(unsigned long *)sp;
if (!validate_sp(sp, p, 16))
return 0;
if (count > 0) {
ip = *(unsigned long *)(sp + 4);
if (!in_sched_functions(ip))
return ip;
}
} while (count++ < 16);
return 0;
}
EXPORT_SYMBOL(get_wchan);
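The stack walkers above rely on the standard 32-bit PowerPC frame layout: the word at 0(sp) is the back chain to the caller's frame and the word at 4(sp) is the saved return address, which is why show_stack() reads *(sp + 4) and follows sp = *sp. A rough C picture of that layout (illustrative only; the real layout comes from the ABI, not from a kernel header):
/* Illustrative shape of one 32-bit PowerPC stack frame. */
struct ppc32_stack_frame_sketch {
	unsigned long back_chain;	/* 0(sp): previous frame's stack pointer */
	unsigned long lr_save;		/* 4(sp): saved return address (LR) */
	/* locals and parameter save area follow */
};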


@ -0,0 +1,135 @@
/*
* PowerPC-specific semaphore code.
*
* Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
* to eliminate the SMP races in the old version between the updates
* of `count' and `waking'. Now we use negative `count' values to
* indicate that some process(es) are waiting for the semaphore.
*/
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <asm/errno.h>
/*
* Atomically update sem->count.
* This does the equivalent of the following:
*
* old_count = sem->count;
* tmp = MAX(old_count, 0) + incr;
* sem->count = tmp;
* return old_count;
*/
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
int old_count, tmp;
__asm__ __volatile__("\n"
"1:	lwarx	%0,0,%3\n"	/* load count and take a reservation */
"	srawi	%1,%0,31\n"	/* tmp = (count < 0) ? -1 : 0 */
"	andc	%1,%0,%1\n"	/* tmp = MAX(count, 0) */
"	add	%1,%1,%4\n"	/* tmp = MAX(count, 0) + incr */
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n"	/* store back if the reservation still holds */
"	bne	1b"		/* otherwise another cpu intervened: retry */
: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
: "r" (&sem->count), "r" (incr), "m" (sem->count)
: "cc");
return old_count;
}
void __up(struct semaphore *sem)
{
/*
* Note that we incremented count in up() before we came here,
* but that was ineffective since the result was <= 0, and
* any negative value of count is equivalent to 0.
* This ends up setting count to 1, unless count is now > 0
* (i.e. because some other cpu has called up() in the meantime),
* in which case we just increment count.
*/
__sem_update_count(sem, 1);
wake_up(&sem->wait);
}
EXPORT_SYMBOL(__up);
/*
* Note that when we come in to __down or __down_interruptible,
* we have already decremented count, but that decrement was
* ineffective since the result was < 0, and any negative value
* of count is equivalent to 0.
* Thus it is only when we decrement count from some value > 0
* that we have actually got the semaphore.
*/
void __sched __down(struct semaphore *sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
add_wait_queue_exclusive(&sem->wait, &wait);
/*
* Try to get the semaphore. If the count is > 0, then we've
* got the semaphore; we decrement count and exit the loop.
* If the count is 0 or negative, we set it to -1, indicating
* that we are asleep, and then sleep.
*/
while (__sem_update_count(sem, -1) <= 0) {
schedule();
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
}
remove_wait_queue(&sem->wait, &wait);
__set_task_state(tsk, TASK_RUNNING);
/*
* If there are any more sleepers, wake one of them up so
* that it can either get the semaphore, or set count to -1
* indicating that there are still processes sleeping.
*/
wake_up(&sem->wait);
}
EXPORT_SYMBOL(__down);
int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
__set_task_state(tsk, TASK_INTERRUPTIBLE);
add_wait_queue_exclusive(&sem->wait, &wait);
while (__sem_update_count(sem, -1) <= 0) {
if (signal_pending(current)) {
/*
* A signal is pending - give up trying.
* Set sem->count to 0 if it is negative,
* since we are no longer sleeping.
*/
__sem_update_count(sem, 0);
retval = -EINTR;
break;
}
schedule();
set_task_state(tsk, TASK_INTERRUPTIBLE);
}
remove_wait_queue(&sem->wait, &wait);
__set_task_state(tsk, TASK_RUNNING);
wake_up(&sem->wait);
return retval;
}
EXPORT_SYMBOL(__down_interruptible);
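For readers less familiar with lwarx/stwcx., the loop in __sem_update_count above is a reservation-based read-modify-write. A rough C equivalent, using a compare-and-swap builtin purely as a stand-in for the reservation (it is not something this file uses), would be:
/* Illustrative C equivalent of __sem_update_count(); the kernel code
 * uses a lwarx/stwcx. reservation, not this builtin. */
static int sem_update_count_sketch(int *count, int incr)
{
	int old, new;

	do {
		old = *count;
		new = (old > 0 ? old : 0) + incr;	/* MAX(old, 0) + incr */
	} while (__sync_val_compare_and_swap(count, old, new) != old);

	return old;
}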

1047
arch/powerpc/kernel/traps.c Normal file

File diff suppressed because it is too large.


@ -0,0 +1,197 @@
#include <linux/config.h>
#include <asm/ppc_asm.h>
#include <asm/processor.h>
/*
* The routines below are in assembler so we can closely control the
* usage of floating-point registers. These routines must be called
* with preempt disabled.
*/
#ifdef CONFIG_PPC32
.data
fpzero:
.long 0
fpone:
.long 0x3f800000 /* 1.0 in single-precision FP */
fphalf:
.long 0x3f000000 /* 0.5 in single-precision FP */
#define LDCONST(fr, name) \
lis r11,name@ha; \
lfs fr,name@l(r11)
#else
.section ".toc","aw"
fpzero:
.tc FD_0_0[TC],0
fpone:
.tc FD_3ff00000_0[TC],0x3ff0000000000000 /* 1.0 */
fphalf:
.tc FD_3fe00000_0[TC],0x3fe0000000000000 /* 0.5 */
#define LDCONST(fr, name) \
lfd fr,name@toc(r2)
#endif
.text
/*
* Internal routine to enable floating point and set FPSCR to 0.
* Don't call it from C; it doesn't use the normal calling convention.
*/
fpenable:
#ifdef CONFIG_PPC32
stwu r1,-64(r1)
#else
stdu r1,-64(r1)
#endif
mfmsr r10
ori r11,r10,MSR_FP
mtmsr r11
isync
stfd fr0,24(r1)
stfd fr1,16(r1)
stfd fr31,8(r1)
LDCONST(fr1, fpzero)
mffs fr31
mtfsf 0xff,fr1
blr
fpdisable:
mtlr r12
mtfsf 0xff,fr31
lfd fr31,8(r1)
lfd fr1,16(r1)
lfd fr0,24(r1)
mtmsr r10
isync
addi r1,r1,64
blr
/*
* Vector add, floating point.
*/
_GLOBAL(vaddfp)
mflr r12
bl fpenable
li r0,4
mtctr r0
li r6,0
1: lfsx fr0,r4,r6
lfsx fr1,r5,r6
fadds fr0,fr0,fr1
stfsx fr0,r3,r6
addi r6,r6,4
bdnz 1b
b fpdisable
/*
* Vector subtract, floating point.
*/
_GLOBAL(vsubfp)
mflr r12
bl fpenable
li r0,4
mtctr r0
li r6,0
1: lfsx fr0,r4,r6
lfsx fr1,r5,r6
fsubs fr0,fr0,fr1
stfsx fr0,r3,r6
addi r6,r6,4
bdnz 1b
b fpdisable
/*
* Vector multiply and add, floating point.
*/
_GLOBAL(vmaddfp)
mflr r12
bl fpenable
stfd fr2,32(r1)
li r0,4
mtctr r0
li r7,0
1: lfsx fr0,r4,r7
lfsx fr1,r5,r7
lfsx fr2,r6,r7
fmadds fr0,fr0,fr2,fr1
stfsx fr0,r3,r7
addi r7,r7,4
bdnz 1b
lfd fr2,32(r1)
b fpdisable
/*
* Vector negative multiply and subtract, floating point.
*/
_GLOBAL(vnmsubfp)
mflr r12
bl fpenable
stfd fr2,32(r1)
li r0,4
mtctr r0
li r7,0
1: lfsx fr0,r4,r7
lfsx fr1,r5,r7
lfsx fr2,r6,r7
fnmsubs fr0,fr0,fr2,fr1
stfsx fr0,r3,r7
addi r7,r7,4
bdnz 1b
lfd fr2,32(r1)
b fpdisable
/*
* Vector reciprocal estimate. We just compute 1.0/x.
* r3 -> destination, r4 -> source.
*/
_GLOBAL(vrefp)
mflr r12
bl fpenable
li r0,4
LDCONST(fr1, fpone)
mtctr r0
li r6,0
1: lfsx fr0,r4,r6
fdivs fr0,fr1,fr0
stfsx fr0,r3,r6
addi r6,r6,4
bdnz 1b
b fpdisable
/*
* Vector reciprocal square-root estimate, floating point.
* We use the frsqrte instruction for the initial estimate followed
* by 2 iterations of Newton-Raphson to get sufficient accuracy.
* r3 -> destination, r4 -> source.
*/
_GLOBAL(vrsqrtefp)
mflr r12
bl fpenable
stfd fr2,32(r1)
stfd fr3,40(r1)
stfd fr4,48(r1)
stfd fr5,56(r1)
li r0,4
LDCONST(fr4, fpone)
LDCONST(fr5, fphalf)
mtctr r0
li r6,0
1: lfsx fr0,r4,r6
frsqrte fr1,fr0 /* r = frsqrte(s) */
fmuls fr3,fr1,fr0 /* r * s */
fmuls fr2,fr1,fr5 /* r * 0.5 */
fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */
fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */
fmuls fr3,fr1,fr0 /* r * s */
fmuls fr2,fr1,fr5 /* r * 0.5 */
fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */
fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */
stfsx fr1,r3,r6
addi r6,r6,4
bdnz 1b
lfd fr5,56(r1)
lfd fr4,48(r1)
lfd fr3,40(r1)
lfd fr2,32(r1)
b fpdisable
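The vrsqrtefp loop above is the usual estimate-then-refine pattern: frsqrte supplies a few correct bits and the two Newton-Raphson steps, r <- r + 0.5*r*(1 - s*r*r), sharpen it to single-precision accuracy. A rough scalar C model of one element (illustrative; the bit-trick initial guess merely stands in for frsqrte, which also gives only a few correct bits):
#include <stdint.h>
#include <string.h>

/* Scalar model of one vrsqrtefp element. */
static float rsqrt_model(float s)
{
	uint32_t bits;
	float r;

	memcpy(&bits, &s, sizeof(bits));
	bits = 0x5f3759df - (bits >> 1);	/* crude 1/sqrt(s) estimate */
	memcpy(&r, &bits, sizeof(r));

	r = r + 0.5f * r * (1.0f - s * r * r);	/* first Newton-Raphson step */
	r = r + 0.5f * r * (1.0f - s * r * r);	/* second Newton-Raphson step */
	return r;
}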


@ -0,0 +1,174 @@
/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
/* sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
/* DWARF debug sections.
Symbols in the DWARF debugging sections are relative to
the beginning of the section so we begin them at 0. */
/* Stabs debugging sections. */
OUTPUT_ARCH(powerpc:common)
jiffies = jiffies_64 + 4;
SECTIONS
{
/* Read-only sections, merged into text segment: */
. = + SIZEOF_HEADERS;
.interp : { *(.interp) }
.hash : { *(.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.rel.text : { *(.rel.text) }
.rela.text : { *(.rela.text) }
.rel.data : { *(.rel.data) }
.rela.data : { *(.rela.data) }
.rel.rodata : { *(.rel.rodata) }
.rela.rodata : { *(.rela.rodata) }
.rel.got : { *(.rel.got) }
.rela.got : { *(.rela.got) }
.rel.ctors : { *(.rel.ctors) }
.rela.ctors : { *(.rela.ctors) }
.rel.dtors : { *(.rel.dtors) }
.rela.dtors : { *(.rela.dtors) }
.rel.bss : { *(.rel.bss) }
.rela.bss : { *(.rela.bss) }
.rel.plt : { *(.rel.plt) }
.rela.plt : { *(.rela.plt) }
/* .init : { *(.init) } =0*/
.plt : { *(.plt) }
.text :
{
*(.text)
. = ALIGN(8); __sched_text_start = .; *(.sched.text) __sched_text_end = .;
. = ALIGN(8); __lock_text_start = .; *(.spinlock.text) __lock_text_end = .;
*(.fixup)
*(.got1)
__got2_start = .;
*(.got2)
__got2_end = .;
}
_etext = .;
PROVIDE (etext = .);
.rodata : AT(ADDR(.rodata) - 0) { *(.rodata) *(.rodata.*) *(__vermagic) } .rodata1 : AT(ADDR(.rodata1) - 0) { *(.rodata1) } .pci_fixup : AT(ADDR(.pci_fixup) - 0) { __start_pci_fixups_early = .; *(.pci_fixup_early) __end_pci_fixups_early = .; __start_pci_fixups_header = .; *(.pci_fixup_header) __end_pci_fixups_header = .; __start_pci_fixups_final = .; *(.pci_fixup_final) __end_pci_fixups_final = .; __start_pci_fixups_enable = .; *(.pci_fixup_enable) __end_pci_fixups_enable = .; } __ksymtab : AT(ADDR(__ksymtab) - 0) { __start___ksymtab = .; *(__ksymtab) __stop___ksymtab = .; } __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - 0) { __start___ksymtab_gpl = .; *(__ksymtab_gpl) __stop___ksymtab_gpl = .; } __kcrctab : AT(ADDR(__kcrctab) - 0) { __start___kcrctab = .; *(__kcrctab) __stop___kcrctab = .; } __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - 0) { __start___kcrctab_gpl = .; *(__kcrctab_gpl) __stop___kcrctab_gpl = .; } __ksymtab_strings : AT(ADDR(__ksymtab_strings) - 0) { *(__ksymtab_strings) } __param : AT(ADDR(__param) - 0) { __start___param = .; *(__param) __stop___param = .; }
.fini : { *(.fini) } =0
.ctors : { *(.ctors) }
.dtors : { *(.dtors) }
.fixup : { *(.fixup) }
__ex_table : {
__start___ex_table = .;
*(__ex_table)
__stop___ex_table = .;
}
__bug_table : {
__start___bug_table = .;
*(__bug_table)
__stop___bug_table = .;
}
/* Read-write section, merged into data segment: */
. = ALIGN(4096);
.data :
{
*(.data)
*(.data1)
*(.sdata)
*(.sdata2)
*(.got.plt) *(.got)
*(.dynamic)
CONSTRUCTORS
}
. = ALIGN(4096);
__nosave_begin = .;
.data_nosave : { *(.data.nosave) }
. = ALIGN(4096);
__nosave_end = .;
. = ALIGN(32);
.data.cacheline_aligned : { *(.data.cacheline_aligned) }
_edata = .;
PROVIDE (edata = .);
. = ALIGN(8192);
.data.init_task : { *(.data.init_task) }
. = ALIGN(4096);
__init_begin = .;
.init.text : {
_sinittext = .;
*(.init.text)
_einittext = .;
}
/* .exit.text is discarded at runtime, not link time,
to deal with references from __bug_table */
.exit.text : { *(.exit.text) }
.init.data : {
*(.init.data);
__vtop_table_begin = .;
*(.vtop_fixup);
__vtop_table_end = .;
__ptov_table_begin = .;
*(.ptov_fixup);
__ptov_table_end = .;
}
. = ALIGN(16);
__setup_start = .;
.init.setup : { *(.init.setup) }
__setup_end = .;
__initcall_start = .;
.initcall.init : {
*(.initcall1.init)
*(.initcall2.init)
*(.initcall3.init)
*(.initcall4.init)
*(.initcall5.init)
*(.initcall6.init)
*(.initcall7.init)
}
__initcall_end = .;
__con_initcall_start = .;
.con_initcall.init : { *(.con_initcall.init) }
__con_initcall_end = .;
.security_initcall.init : AT(ADDR(.security_initcall.init) - 0) { __security_initcall_start = .; *(.security_initcall.init) __security_initcall_end = .; }
__start___ftr_fixup = .;
__ftr_fixup : { *(__ftr_fixup) }
__stop___ftr_fixup = .;
. = ALIGN(32);
__per_cpu_start = .;
.data.percpu : { *(.data.percpu) }
__per_cpu_end = .;
. = ALIGN(4096);
__initramfs_start = .;
.init.ramfs : { *(.init.ramfs) }
__initramfs_end = .;
. = ALIGN(4096);
__init_end = .;
. = ALIGN(4096);
_sextratext = .;
_eextratext = .;
__bss_start = .;
.bss :
{
*(.sbss) *(.scommon)
*(.dynbss)
*(.bss)
*(COMMON)
}
__bss_stop = .;
_end = . ;
PROVIDE (end = .);
/* Sections to be discarded. */
/DISCARD/ : {
*(.exitcall.exit)
*(.exit.data)
}
}


@ -0,0 +1,172 @@
#include <asm-generic/vmlinux.lds.h>
OUTPUT_ARCH(powerpc:common)
jiffies = jiffies_64 + 4;
SECTIONS
{
/* Read-only sections, merged into text segment: */
. = + SIZEOF_HEADERS;
.interp : { *(.interp) }
.hash : { *(.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.rel.text : { *(.rel.text) }
.rela.text : { *(.rela.text) }
.rel.data : { *(.rel.data) }
.rela.data : { *(.rela.data) }
.rel.rodata : { *(.rel.rodata) }
.rela.rodata : { *(.rela.rodata) }
.rel.got : { *(.rel.got) }
.rela.got : { *(.rela.got) }
.rel.ctors : { *(.rel.ctors) }
.rela.ctors : { *(.rela.ctors) }
.rel.dtors : { *(.rel.dtors) }
.rela.dtors : { *(.rela.dtors) }
.rel.bss : { *(.rel.bss) }
.rela.bss : { *(.rela.bss) }
.rel.plt : { *(.rel.plt) }
.rela.plt : { *(.rela.plt) }
/* .init : { *(.init) } =0*/
.plt : { *(.plt) }
.text :
{
*(.text)
SCHED_TEXT
LOCK_TEXT
*(.fixup)
*(.got1)
__got2_start = .;
*(.got2)
__got2_end = .;
}
_etext = .;
PROVIDE (etext = .);
RODATA
.fini : { *(.fini) } =0
.ctors : { *(.ctors) }
.dtors : { *(.dtors) }
.fixup : { *(.fixup) }
__ex_table : {
__start___ex_table = .;
*(__ex_table)
__stop___ex_table = .;
}
__bug_table : {
__start___bug_table = .;
*(__bug_table)
__stop___bug_table = .;
}
/* Read-write section, merged into data segment: */
. = ALIGN(4096);
.data :
{
*(.data)
*(.data1)
*(.sdata)
*(.sdata2)
*(.got.plt) *(.got)
*(.dynamic)
CONSTRUCTORS
}
. = ALIGN(4096);
__nosave_begin = .;
.data_nosave : { *(.data.nosave) }
. = ALIGN(4096);
__nosave_end = .;
. = ALIGN(32);
.data.cacheline_aligned : { *(.data.cacheline_aligned) }
_edata = .;
PROVIDE (edata = .);
. = ALIGN(8192);
.data.init_task : { *(.data.init_task) }
. = ALIGN(4096);
__init_begin = .;
.init.text : {
_sinittext = .;
*(.init.text)
_einittext = .;
}
/* .exit.text is discarded at runtime, not link time,
to deal with references from __bug_table */
.exit.text : { *(.exit.text) }
.init.data : {
*(.init.data);
__vtop_table_begin = .;
*(.vtop_fixup);
__vtop_table_end = .;
__ptov_table_begin = .;
*(.ptov_fixup);
__ptov_table_end = .;
}
. = ALIGN(16);
__setup_start = .;
.init.setup : { *(.init.setup) }
__setup_end = .;
__initcall_start = .;
.initcall.init : {
*(.initcall1.init)
*(.initcall2.init)
*(.initcall3.init)
*(.initcall4.init)
*(.initcall5.init)
*(.initcall6.init)
*(.initcall7.init)
}
__initcall_end = .;
__con_initcall_start = .;
.con_initcall.init : { *(.con_initcall.init) }
__con_initcall_end = .;
SECURITY_INIT
__start___ftr_fixup = .;
__ftr_fixup : { *(__ftr_fixup) }
__stop___ftr_fixup = .;
. = ALIGN(32);
__per_cpu_start = .;
.data.percpu : { *(.data.percpu) }
__per_cpu_end = .;
. = ALIGN(4096);
__initramfs_start = .;
.init.ramfs : { *(.init.ramfs) }
__initramfs_end = .;
. = ALIGN(4096);
__init_end = .;
. = ALIGN(4096);
_sextratext = .;
_eextratext = .;
__bss_start = .;
.bss :
{
*(.sbss) *(.scommon)
*(.dynbss)
*(.bss)
*(COMMON)
}
__bss_stop = .;
_end = . ;
PROVIDE (end = .);
/* Sections to be discarded. */
/DISCARD/ : {
*(.exitcall.exit)
*(.exit.data)
}
}


@ -0,0 +1,9 @@
#
# Makefile for ppc-specific library files.
#
obj-y := strcase.o string.o
obj-$(CONFIG_PPC32) += div64.o copy32.o checksum.o
obj-$(CONFIG_PPC64) += copypage.o copyuser.o memcpy.o usercopy.o \
sstep.o checksum64.o
obj-$(CONFIG_PPC_ISERIES) += e2a.o

225
arch/powerpc/lib/checksum.S Normal file

@ -0,0 +1,225 @@
/*
* This file contains assembly-language implementations
* of IP-style 1's complement checksum routines.
*
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au).
*/
#include <linux/sys.h>
#include <asm/processor.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
.text
/*
* ip_fast_csum(buf, len) -- Optimized for IP header
* len is in words and is always >= 5.
*/
_GLOBAL(ip_fast_csum)
lwz r0,0(r3)
lwzu r5,4(r3)
addic. r4,r4,-2
addc r0,r0,r5
mtctr r4
blelr-
1: lwzu r4,4(r3)
adde r0,r0,r4
bdnz 1b
addze r0,r0 /* add in final carry */
rlwinm r3,r0,16,0,31 /* fold two halves together */
add r3,r0,r3
not r3,r3
srwi r3,r3,16
blr
/*
* Compute checksum of TCP or UDP pseudo-header:
* csum_tcpudp_magic(saddr, daddr, len, proto, sum)
*/
_GLOBAL(csum_tcpudp_magic)
rlwimi r5,r6,16,0,15 /* put proto in upper half of len */
addc r0,r3,r4 /* add 4 32-bit words together */
adde r0,r0,r5
adde r0,r0,r7
addze r0,r0 /* add in final carry */
rlwinm r3,r0,16,0,31 /* fold two halves together */
add r3,r0,r3
not r3,r3
srwi r3,r3,16
blr
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* csum_partial(buff, len, sum)
*/
_GLOBAL(csum_partial)
addic r0,r5,0
subi r3,r3,4
srwi. r6,r4,2
beq 3f /* if we're doing < 4 bytes */
andi. r5,r3,2 /* Align buffer to longword boundary */
beq+ 1f
lhz r5,4(r3) /* do 2 bytes to get aligned */
addi r3,r3,2
subi r4,r4,2
addc r0,r0,r5
srwi. r6,r4,2 /* # words to do */
beq 3f
1: mtctr r6
2: lwzu r5,4(r3) /* the bdnz has zero overhead, so it should */
adde r0,r0,r5 /* be unnecessary to unroll this loop */
bdnz 2b
andi. r4,r4,3
3: cmpwi 0,r4,2
blt+ 4f
lhz r5,4(r3)
addi r3,r3,2
subi r4,r4,2
adde r0,r0,r5
4: cmpwi 0,r4,1
bne+ 5f
lbz r5,4(r3)
slwi r5,r5,8 /* Upper byte of word */
adde r0,r0,r5
5: addze r3,r0 /* add in final carry */
blr
/*
* Computes the checksum of a memory block at src, length len,
* and adds in "sum" (32-bit), while copying the block to dst.
* If an access exception occurs on src or dst, it stores -EFAULT
* to *src_err or *dst_err respectively, and (for an error on
* src) zeroes the rest of dst.
*
* csum_partial_copy_generic(src, dst, len, sum, src_err, dst_err)
*/
_GLOBAL(csum_partial_copy_generic)
addic r0,r6,0
subi r3,r3,4
subi r4,r4,4
srwi. r6,r5,2
beq 3f /* if we're doing < 4 bytes */
andi. r9,r4,2 /* Align dst to longword boundary */
beq+ 1f
81: lhz r6,4(r3) /* do 2 bytes to get aligned */
addi r3,r3,2
subi r5,r5,2
91: sth r6,4(r4)
addi r4,r4,2
addc r0,r0,r6
srwi. r6,r5,2 /* # words to do */
beq 3f
1: srwi. r6,r5,4 /* # groups of 4 words to do */
beq 10f
mtctr r6
71: lwz r6,4(r3)
72: lwz r9,8(r3)
73: lwz r10,12(r3)
74: lwzu r11,16(r3)
adde r0,r0,r6
75: stw r6,4(r4)
adde r0,r0,r9
76: stw r9,8(r4)
adde r0,r0,r10
77: stw r10,12(r4)
adde r0,r0,r11
78: stwu r11,16(r4)
bdnz 71b
10: rlwinm. r6,r5,30,30,31 /* # words left to do */
beq 13f
mtctr r6
82: lwzu r9,4(r3)
92: stwu r9,4(r4)
adde r0,r0,r9
bdnz 82b
13: andi. r5,r5,3
3: cmpwi 0,r5,2
blt+ 4f
83: lhz r6,4(r3)
addi r3,r3,2
subi r5,r5,2
93: sth r6,4(r4)
addi r4,r4,2
adde r0,r0,r6
4: cmpwi 0,r5,1
bne+ 5f
84: lbz r6,4(r3)
94: stb r6,4(r4)
slwi r6,r6,8 /* Upper byte of word */
adde r0,r0,r6
5: addze r3,r0 /* add in final carry */
blr
/* These shouldn't go in the fixup section, since that would
cause the ex_table addresses to get out of order. */
src_error_4:
mfctr r6 /* update # bytes remaining from ctr */
rlwimi r5,r6,4,0,27
b 79f
src_error_1:
li r6,0
subi r5,r5,2
95: sth r6,4(r4)
addi r4,r4,2
79: srwi. r6,r5,2
beq 3f
mtctr r6
src_error_2:
li r6,0
96: stwu r6,4(r4)
bdnz 96b
3: andi. r5,r5,3
beq src_error
src_error_3:
li r6,0
mtctr r5
addi r4,r4,3
97: stbu r6,1(r4)
bdnz 97b
src_error:
cmpwi 0,r7,0
beq 1f
li r6,-EFAULT
stw r6,0(r7)
1: addze r3,r0
blr
dst_error:
cmpwi 0,r8,0
beq 1f
li r6,-EFAULT
stw r6,0(r8)
1: addze r3,r0
blr
.section __ex_table,"a"
.long 81b,src_error_1
.long 91b,dst_error
.long 71b,src_error_4
.long 72b,src_error_4
.long 73b,src_error_4
.long 74b,src_error_4
.long 75b,dst_error
.long 76b,dst_error
.long 77b,dst_error
.long 78b,dst_error
.long 82b,src_error_2
.long 92b,dst_error
.long 83b,src_error_3
.long 93b,dst_error
.long 84b,src_error_3
.long 94b,dst_error
.long 95b,dst_error
.long 96b,dst_error
.long 97b,dst_error
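The rlwinm/add/not/srwi tail shared by ip_fast_csum and csum_tcpudp_magic above folds the 32-bit running sum down to a 16-bit one's-complement checksum. Roughly equivalent C (illustrative, not part of this file):
#include <stdint.h>

/* Fold a 32-bit accumulated sum to the final 16-bit one's-complement
 * checksum; same result as the rlwinm/add/not/srwi sequence. */
static uint16_t csum_fold_sketch(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add the two halves */
	sum = (sum & 0xffff) + (sum >> 16);	/* fold any carry back in */
	return (uint16_t)~sum;
}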


@ -0,0 +1,229 @@
/*
* This file contains assembly-language implementations
* of IP-style 1's complement checksum routines.
*
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au).
*/
#include <linux/sys.h>
#include <asm/processor.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
/*
* ip_fast_csum(r3=buf, r4=len) -- Optimized for IP header
* len is in words and is always >= 5.
*
* In practice len == 5, but this is not guaranteed. So this code does not
* attempt to use doubleword instructions.
*/
_GLOBAL(ip_fast_csum)
lwz r0,0(r3)
lwzu r5,4(r3)
addic. r4,r4,-2
addc r0,r0,r5
mtctr r4
blelr-
1: lwzu r4,4(r3)
adde r0,r0,r4
bdnz 1b
addze r0,r0 /* add in final carry */
rldicl r4,r0,32,0 /* fold two 32-bit halves together */
add r0,r0,r4
srdi r0,r0,32
rlwinm r3,r0,16,0,31 /* fold two halves together */
add r3,r0,r3
not r3,r3
srwi r3,r3,16
blr
/*
* Compute checksum of TCP or UDP pseudo-header:
* csum_tcpudp_magic(r3=saddr, r4=daddr, r5=len, r6=proto, r7=sum)
* No real gain trying to do this specially for 64 bit, but
* the 32 bit addition may spill into the upper bits of
* the doubleword so we still must fold it down from 64.
*/
_GLOBAL(csum_tcpudp_magic)
rlwimi r5,r6,16,0,15 /* put proto in upper half of len */
addc r0,r3,r4 /* add 4 32-bit words together */
adde r0,r0,r5
adde r0,r0,r7
rldicl r4,r0,32,0 /* fold 64 bit value */
add r0,r4,r0
srdi r0,r0,32
rlwinm r3,r0,16,0,31 /* fold two halves together */
add r3,r0,r3
not r3,r3
srwi r3,r3,16
blr
/*
* Computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit).
*
* This code assumes at least halfword alignment, though the length
* can be any number of bytes. The sum is accumulated in r5.
*
* csum_partial(r3=buff, r4=len, r5=sum)
*/
_GLOBAL(csum_partial)
subi r3,r3,8 /* we'll offset by 8 for the loads */
srdi. r6,r4,3 /* divide by 8 for doubleword count */
addic r5,r5,0 /* clear carry */
beq 3f /* if we're doing < 8 bytes */
andi. r0,r3,2 /* aligned on a word boundary already? */
beq+ 1f
lhz r6,8(r3) /* do 2 bytes to get aligned */
addi r3,r3,2
subi r4,r4,2
addc r5,r5,r6
srdi. r6,r4,3 /* recompute number of doublewords */
beq 3f /* any left? */
1: mtctr r6
2: ldu r6,8(r3) /* main sum loop */
adde r5,r5,r6
bdnz 2b
andi. r4,r4,7 /* compute bytes left to sum after doublewords */
3: cmpwi 0,r4,4 /* is at least a full word left? */
blt 4f
lwz r6,8(r3) /* sum this word */
addi r3,r3,4
subi r4,r4,4
adde r5,r5,r6
4: cmpwi 0,r4,2 /* is at least a halfword left? */
blt+ 5f
lhz r6,8(r3) /* sum this halfword */
addi r3,r3,2
subi r4,r4,2
adde r5,r5,r6
5: cmpwi 0,r4,1 /* is at least a byte left? */
bne+ 6f
lbz r6,8(r3) /* sum this byte */
slwi r6,r6,8 /* this byte is assumed to be the upper byte of a halfword */
adde r5,r5,r6
6: addze r5,r5 /* add in final carry */
rldicl r4,r5,32,0 /* fold two 32-bit halves together */
add r3,r4,r5
srdi r3,r3,32
blr
/*
* Computes the checksum of a memory block at src, length len,
* and adds in "sum" (32-bit), while copying the block to dst.
* If an access exception occurs on src or dst, it stores -EFAULT
* to *src_err or *dst_err respectively, and (for an error on
* src) zeroes the rest of dst.
*
* This code needs to be reworked to take advantage of 64 bit sum+copy.
* However, due to tokenring halfword alignment problems this will be very
* tricky. For now we'll leave it until we instrument it somehow.
*
* csum_partial_copy_generic(r3=src, r4=dst, r5=len, r6=sum, r7=src_err, r8=dst_err)
*/
_GLOBAL(csum_partial_copy_generic)
addic r0,r6,0
subi r3,r3,4
subi r4,r4,4
srwi. r6,r5,2
beq 3f /* if we're doing < 4 bytes */
andi. r9,r4,2 /* Align dst to longword boundary */
beq+ 1f
81: lhz r6,4(r3) /* do 2 bytes to get aligned */
addi r3,r3,2
subi r5,r5,2
91: sth r6,4(r4)
addi r4,r4,2
addc r0,r0,r6
srwi. r6,r5,2 /* # words to do */
beq 3f
1: mtctr r6
82: lwzu r6,4(r3) /* the bdnz has zero overhead, so it should */
92: stwu r6,4(r4) /* be unnecessary to unroll this loop */
adde r0,r0,r6
bdnz 82b
andi. r5,r5,3
3: cmpwi 0,r5,2
blt+ 4f
83: lhz r6,4(r3)
addi r3,r3,2
subi r5,r5,2
93: sth r6,4(r4)
addi r4,r4,2
adde r0,r0,r6
4: cmpwi 0,r5,1
bne+ 5f
84: lbz r6,4(r3)
94: stb r6,4(r4)
slwi r6,r6,8 /* Upper byte of word */
adde r0,r0,r6
5: addze r3,r0 /* add in final carry (unlikely with 64-bit regs) */
rldicl r4,r3,32,0 /* fold 64 bit value */
add r3,r4,r3
srdi r3,r3,32
blr
/* These shouldn't go in the fixup section, since that would
cause the ex_table addresses to get out of order. */
.globl src_error_1
src_error_1:
li r6,0
subi r5,r5,2
95: sth r6,4(r4)
addi r4,r4,2
srwi. r6,r5,2
beq 3f
mtctr r6
.globl src_error_2
src_error_2:
li r6,0
96: stwu r6,4(r4)
bdnz 96b
3: andi. r5,r5,3
beq src_error
.globl src_error_3
src_error_3:
li r6,0
mtctr r5
addi r4,r4,3
97: stbu r6,1(r4)
bdnz 97b
.globl src_error
src_error:
cmpdi 0,r7,0
beq 1f
li r6,-EFAULT
stw r6,0(r7)
1: addze r3,r0
blr
.globl dst_error
dst_error:
cmpdi 0,r8,0
beq 1f
li r6,-EFAULT
stw r6,0(r8)
1: addze r3,r0
blr
.section __ex_table,"a"
.align 3
.llong 81b,src_error_1
.llong 91b,dst_error
.llong 82b,src_error_2
.llong 92b,dst_error
.llong 83b,src_error_3
.llong 93b,dst_error
.llong 84b,src_error_3
.llong 94b,dst_error
.llong 95b,dst_error
.llong 96b,dst_error
.llong 97b,dst_error

543
arch/powerpc/lib/copy32.S Normal file

@ -0,0 +1,543 @@
/*
* Memory copy functions for 32-bit PowerPC.
*
* Copyright (C) 1996-2005 Paul Mackerras.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
#define COPY_16_BYTES \
lwz r7,4(r4); \
lwz r8,8(r4); \
lwz r9,12(r4); \
lwzu r10,16(r4); \
stw r7,4(r6); \
stw r8,8(r6); \
stw r9,12(r6); \
stwu r10,16(r6)
#define COPY_16_BYTES_WITHEX(n) \
8 ## n ## 0: \
lwz r7,4(r4); \
8 ## n ## 1: \
lwz r8,8(r4); \
8 ## n ## 2: \
lwz r9,12(r4); \
8 ## n ## 3: \
lwzu r10,16(r4); \
8 ## n ## 4: \
stw r7,4(r6); \
8 ## n ## 5: \
stw r8,8(r6); \
8 ## n ## 6: \
stw r9,12(r6); \
8 ## n ## 7: \
stwu r10,16(r6)
#define COPY_16_BYTES_EXCODE(n) \
9 ## n ## 0: \
addi r5,r5,-(16 * n); \
b 104f; \
9 ## n ## 1: \
addi r5,r5,-(16 * n); \
b 105f; \
.section __ex_table,"a"; \
.align 2; \
.long 8 ## n ## 0b,9 ## n ## 0b; \
.long 8 ## n ## 1b,9 ## n ## 0b; \
.long 8 ## n ## 2b,9 ## n ## 0b; \
.long 8 ## n ## 3b,9 ## n ## 0b; \
.long 8 ## n ## 4b,9 ## n ## 1b; \
.long 8 ## n ## 5b,9 ## n ## 1b; \
.long 8 ## n ## 6b,9 ## n ## 1b; \
.long 8 ## n ## 7b,9 ## n ## 1b; \
.text
.text
.stabs "arch/powerpc/lib/",N_SO,0,0,0f
.stabs "copy32.S",N_SO,0,0,0f
0:
CACHELINE_BYTES = L1_CACHE_LINE_SIZE
LG_CACHELINE_BYTES = LG_L1_CACHE_LINE_SIZE
CACHELINE_MASK = (L1_CACHE_LINE_SIZE-1)
/*
* Use dcbz on the complete cache lines in the destination
* to set them to zero. This requires that the destination
* area is cacheable. -- paulus
*/
_GLOBAL(cacheable_memzero)
mr r5,r4
li r4,0
addi r6,r3,-4
cmplwi 0,r5,4
blt 7f
stwu r4,4(r6)
beqlr
andi. r0,r6,3
add r5,r0,r5
subf r6,r0,r6
clrlwi r7,r6,32-LG_CACHELINE_BYTES
add r8,r7,r5
srwi r9,r8,LG_CACHELINE_BYTES
addic. r9,r9,-1 /* total number of complete cachelines */
ble 2f
xori r0,r7,CACHELINE_MASK & ~3
srwi. r0,r0,2
beq 3f
mtctr r0
4: stwu r4,4(r6)
bdnz 4b
3: mtctr r9
li r7,4
#if !defined(CONFIG_8xx)
10: dcbz r7,r6
#else
10: stw r4, 4(r6)
stw r4, 8(r6)
stw r4, 12(r6)
stw r4, 16(r6)
#if CACHE_LINE_SIZE >= 32
stw r4, 20(r6)
stw r4, 24(r6)
stw r4, 28(r6)
stw r4, 32(r6)
#endif /* CACHE_LINE_SIZE */
#endif
addi r6,r6,CACHELINE_BYTES
bdnz 10b
clrlwi r5,r8,32-LG_CACHELINE_BYTES
addi r5,r5,4
2: srwi r0,r5,2
mtctr r0
bdz 6f
1: stwu r4,4(r6)
bdnz 1b
6: andi. r5,r5,3
7: cmpwi 0,r5,0
beqlr
mtctr r5
addi r6,r6,3
8: stbu r4,1(r6)
bdnz 8b
blr
_GLOBAL(memset)
rlwimi r4,r4,8,16,23
rlwimi r4,r4,16,0,15
addi r6,r3,-4
cmplwi 0,r5,4
blt 7f
stwu r4,4(r6)
beqlr
andi. r0,r6,3
add r5,r0,r5
subf r6,r0,r6
srwi r0,r5,2
mtctr r0
bdz 6f
1: stwu r4,4(r6)
bdnz 1b
6: andi. r5,r5,3
7: cmpwi 0,r5,0
beqlr
mtctr r5
addi r6,r6,3
8: stbu r4,1(r6)
bdnz 8b
blr
/*
* This version uses dcbz on the complete cache lines in the
* destination area to reduce memory traffic. This requires that
* the destination area is cacheable.
* We only use this version if the source and dest don't overlap.
* -- paulus.
*/
_GLOBAL(cacheable_memcpy)
add r7,r3,r5 /* test if the src & dst overlap */
add r8,r4,r5
cmplw 0,r4,r7
cmplw 1,r3,r8
crand 0,0,4 /* cr0.lt &= cr1.lt */
blt memcpy /* if regions overlap */
addi r4,r4,-4
addi r6,r3,-4
neg r0,r3
andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */
beq 58f
cmplw 0,r5,r0 /* is this more than total to do? */
blt 63f /* if not much to do */
andi. r8,r0,3 /* get it word-aligned first */
subf r5,r0,r5
mtctr r8
beq+ 61f
70: lbz r9,4(r4) /* do some bytes */
stb r9,4(r6)
addi r4,r4,1
addi r6,r6,1
bdnz 70b
61: srwi. r0,r0,2
mtctr r0
beq 58f
72: lwzu r9,4(r4) /* do some words */
stwu r9,4(r6)
bdnz 72b
58: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
clrlwi r5,r5,32-LG_CACHELINE_BYTES
li r11,4
mtctr r0
beq 63f
53:
#if !defined(CONFIG_8xx)
dcbz r11,r6
#endif
COPY_16_BYTES
#if L1_CACHE_LINE_SIZE >= 32
COPY_16_BYTES
#if L1_CACHE_LINE_SIZE >= 64
COPY_16_BYTES
COPY_16_BYTES
#if L1_CACHE_LINE_SIZE >= 128
COPY_16_BYTES
COPY_16_BYTES
COPY_16_BYTES
COPY_16_BYTES
#endif
#endif
#endif
bdnz 53b
63: srwi. r0,r5,2
mtctr r0
beq 64f
30: lwzu r0,4(r4)
stwu r0,4(r6)
bdnz 30b
64: andi. r0,r5,3
mtctr r0
beq+ 65f
40: lbz r0,4(r4)
stb r0,4(r6)
addi r4,r4,1
addi r6,r6,1
bdnz 40b
65: blr
_GLOBAL(memmove)
cmplw 0,r3,r4
bgt backwards_memcpy
/* fall through */
_GLOBAL(memcpy)
srwi. r7,r5,3
addi r6,r3,-4
addi r4,r4,-4
beq 2f /* if less than 8 bytes to do */
andi. r0,r6,3 /* get dest word aligned */
mtctr r7
bne 5f
1: lwz r7,4(r4)
lwzu r8,8(r4)
stw r7,4(r6)
stwu r8,8(r6)
bdnz 1b
andi. r5,r5,7
2: cmplwi 0,r5,4
blt 3f
lwzu r0,4(r4)
addi r5,r5,-4
stwu r0,4(r6)
3: cmpwi 0,r5,0
beqlr
mtctr r5
addi r4,r4,3
addi r6,r6,3
4: lbzu r0,1(r4)
stbu r0,1(r6)
bdnz 4b
blr
5: subfic r0,r0,4
mtctr r0
6: lbz r7,4(r4)
addi r4,r4,1
stb r7,4(r6)
addi r6,r6,1
bdnz 6b
subf r5,r0,r5
rlwinm. r7,r5,32-3,3,31
beq 2b
mtctr r7
b 1b
_GLOBAL(backwards_memcpy)
rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */
add r6,r3,r5
add r4,r4,r5
beq 2f
andi. r0,r6,3
mtctr r7
bne 5f
1: lwz r7,-4(r4)
lwzu r8,-8(r4)
stw r7,-4(r6)
stwu r8,-8(r6)
bdnz 1b
andi. r5,r5,7
2: cmplwi 0,r5,4
blt 3f
lwzu r0,-4(r4)
subi r5,r5,4
stwu r0,-4(r6)
3: cmpwi 0,r5,0
beqlr
mtctr r5
4: lbzu r0,-1(r4)
stbu r0,-1(r6)
bdnz 4b
blr
5: mtctr r0
6: lbzu r7,-1(r4)
stbu r7,-1(r6)
bdnz 6b
subf r5,r0,r5
rlwinm. r7,r5,32-3,3,31
beq 2b
mtctr r7
b 1b
_GLOBAL(__copy_tofrom_user)
addi r4,r4,-4
addi r6,r3,-4
neg r0,r3
andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */
beq 58f
cmplw 0,r5,r0 /* is this more than total to do? */
blt 63f /* if not much to do */
andi. r8,r0,3 /* get it word-aligned first */
mtctr r8
beq+ 61f
70: lbz r9,4(r4) /* do some bytes */
71: stb r9,4(r6)
addi r4,r4,1
addi r6,r6,1
bdnz 70b
61: subf r5,r0,r5
srwi. r0,r0,2
mtctr r0
beq 58f
72: lwzu r9,4(r4) /* do some words */
73: stwu r9,4(r6)
bdnz 72b
.section __ex_table,"a"
.align 2
.long 70b,100f
.long 71b,101f
.long 72b,102f
.long 73b,103f
.text
58: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
clrlwi r5,r5,32-LG_CACHELINE_BYTES
li r11,4
beq 63f
#ifdef CONFIG_8xx
/* Don't use prefetch on 8xx */
mtctr r0
li r0,0
53: COPY_16_BYTES_WITHEX(0)
bdnz 53b
#else /* not CONFIG_8xx */
/* Here we decide how far ahead to prefetch the source */
li r3,4
cmpwi r0,1
li r7,0
ble 114f
li r7,1
#if MAX_COPY_PREFETCH > 1
/* Heuristically, for large transfers we prefetch
MAX_COPY_PREFETCH cachelines ahead. For small transfers
we prefetch 1 cacheline ahead. */
cmpwi r0,MAX_COPY_PREFETCH
ble 112f
li r7,MAX_COPY_PREFETCH
112: mtctr r7
111: dcbt r3,r4
addi r3,r3,CACHELINE_BYTES
bdnz 111b
#else
dcbt r3,r4
addi r3,r3,CACHELINE_BYTES
#endif /* MAX_COPY_PREFETCH > 1 */
114: subf r8,r7,r0
mr r0,r7
mtctr r8
53: dcbt r3,r4
54: dcbz r11,r6
.section __ex_table,"a"
.align 2
.long 54b,105f
.text
/* the main body of the cacheline loop */
COPY_16_BYTES_WITHEX(0)
#if L1_CACHE_LINE_SIZE >= 32
COPY_16_BYTES_WITHEX(1)
#if L1_CACHE_LINE_SIZE >= 64
COPY_16_BYTES_WITHEX(2)
COPY_16_BYTES_WITHEX(3)
#if L1_CACHE_LINE_SIZE >= 128
COPY_16_BYTES_WITHEX(4)
COPY_16_BYTES_WITHEX(5)
COPY_16_BYTES_WITHEX(6)
COPY_16_BYTES_WITHEX(7)
#endif
#endif
#endif
bdnz 53b
cmpwi r0,0
li r3,4
li r7,0
bne 114b
#endif /* CONFIG_8xx */
63: srwi. r0,r5,2
mtctr r0
beq 64f
30: lwzu r0,4(r4)
31: stwu r0,4(r6)
bdnz 30b
64: andi. r0,r5,3
mtctr r0
beq+ 65f
40: lbz r0,4(r4)
41: stb r0,4(r6)
addi r4,r4,1
addi r6,r6,1
bdnz 40b
65: li r3,0
blr
/* read fault, initial single-byte copy */
100: li r9,0
b 90f
/* write fault, initial single-byte copy */
101: li r9,1
90: subf r5,r8,r5
li r3,0
b 99f
/* read fault, initial word copy */
102: li r9,0
b 91f
/* write fault, initial word copy */
103: li r9,1
91: li r3,2
b 99f
/*
* this stuff handles faults in the cacheline loop and branches to either
* 104f (if in read part) or 105f (if in write part), after updating r5
*/
COPY_16_BYTES_EXCODE(0)
#if L1_CACHE_LINE_SIZE >= 32
COPY_16_BYTES_EXCODE(1)
#if L1_CACHE_LINE_SIZE >= 64
COPY_16_BYTES_EXCODE(2)
COPY_16_BYTES_EXCODE(3)
#if L1_CACHE_LINE_SIZE >= 128
COPY_16_BYTES_EXCODE(4)
COPY_16_BYTES_EXCODE(5)
COPY_16_BYTES_EXCODE(6)
COPY_16_BYTES_EXCODE(7)
#endif
#endif
#endif
/* read fault in cacheline loop */
104: li r9,0
b 92f
/* fault on dcbz (effectively a write fault) */
/* or write fault in cacheline loop */
105: li r9,1
92: li r3,LG_CACHELINE_BYTES
mfctr r8
add r0,r0,r8
b 106f
/* read fault in final word loop */
108: li r9,0
b 93f
/* write fault in final word loop */
109: li r9,1
93: andi. r5,r5,3
li r3,2
b 99f
/* read fault in final byte loop */
110: li r9,0
b 94f
/* write fault in final byte loop */
111: li r9,1
94: li r5,0
li r3,0
/*
* At this stage the number of bytes not copied is
* r5 + (ctr << r3), and r9 is 0 for read or 1 for write.
*/
99: mfctr r0
106: slw r3,r0,r3
add. r3,r3,r5
beq 120f /* shouldn't happen */
cmpwi 0,r9,0
bne 120f
/* for a read fault, first try to continue the copy one byte at a time */
mtctr r3
130: lbz r0,4(r4)
131: stb r0,4(r6)
addi r4,r4,1
addi r6,r6,1
bdnz 130b
/* then clear out the destination: r3 bytes starting at 4(r6) */
132: mfctr r3
srwi. r0,r3,2
li r9,0
mtctr r0
beq 113f
112: stwu r9,4(r6)
bdnz 112b
113: andi. r0,r3,3
mtctr r0
beq 120f
114: stb r9,4(r6)
addi r6,r6,1
bdnz 114b
120: blr
.section __ex_table,"a"
.align 2
.long 30b,108b
.long 31b,109b
.long 40b,110b
.long 41b,111b
.long 130b,132b
.long 131b,120b
.long 112b,120b
.long 114b,120b
.text
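The .long pairs emitted into the __ex_table section above (both by COPY_16_BYTES_EXCODE and by the explicit fixup stanzas) associate each possibly-faulting user access with its recovery code: if the load or store at the first address takes an access fault, the trap handler resumes at the second address instead of oopsing. Conceptually each entry has this shape on 32-bit PowerPC:
/* Conceptual shape of one __ex_table entry. */
struct exception_table_entry {
	unsigned long insn;	/* address of the faulting load/store */
	unsigned long fixup;	/* address of the recovery code */
};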

121
arch/powerpc/lib/copypage.S Normal file

@ -0,0 +1,121 @@
/*
* arch/ppc64/lib/copypage.S
*
* Copyright (C) 2002 Paul Mackerras, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/processor.h>
#include <asm/ppc_asm.h>
/*
 * copy_page(void *to, void *from): copy one 4096-byte page.
 * The main loop is software-pipelined: loads for the next group of
 * stores are issued while the previous group is being stored, using
 * r20-r31 (saved at the top of the routine) as staging registers.
 */
_GLOBAL(copy_page)
std r31,-8(1)
std r30,-16(1)
std r29,-24(1)
std r28,-32(1)
std r27,-40(1)
std r26,-48(1)
std r25,-56(1)
std r24,-64(1)
std r23,-72(1)
std r22,-80(1)
std r21,-88(1)
std r20,-96(1)
li r5,4096/32 - 1
addi r3,r3,-8
li r12,5
0: addi r5,r5,-24
mtctr r12
ld r22,640(4)
ld r21,512(4)
ld r20,384(4)
ld r11,256(4)
ld r9,128(4)
ld r7,0(4)
ld r25,648(4)
ld r24,520(4)
ld r23,392(4)
ld r10,264(4)
ld r8,136(4)
ldu r6,8(4)
cmpwi r5,24
1: std r22,648(3)
std r21,520(3)
std r20,392(3)
std r11,264(3)
std r9,136(3)
std r7,8(3)
ld r28,648(4)
ld r27,520(4)
ld r26,392(4)
ld r31,264(4)
ld r30,136(4)
ld r29,8(4)
std r25,656(3)
std r24,528(3)
std r23,400(3)
std r10,272(3)
std r8,144(3)
std r6,16(3)
ld r22,656(4)
ld r21,528(4)
ld r20,400(4)
ld r11,272(4)
ld r9,144(4)
ld r7,16(4)
std r28,664(3)
std r27,536(3)
std r26,408(3)
std r31,280(3)
std r30,152(3)
stdu r29,24(3)
ld r25,664(4)
ld r24,536(4)
ld r23,408(4)
ld r10,280(4)
ld r8,152(4)
ldu r6,24(4)
bdnz 1b
std r22,648(3)
std r21,520(3)
std r20,392(3)
std r11,264(3)
std r9,136(3)
std r7,8(3)
addi r4,r4,640
addi r3,r3,648
bge 0b
mtctr r5
ld r7,0(4)
ld r8,8(4)
ldu r9,16(4)
3: ld r10,8(4)
std r7,8(3)
ld r7,16(4)
std r8,16(3)
ld r8,24(4)
std r9,24(3)
ldu r9,32(4)
stdu r10,32(3)
bdnz 3b
4: ld r10,8(4)
std r7,8(3)
std r8,16(3)
std r9,24(3)
std r10,32(3)
9: ld r20,-96(1)
ld r21,-88(1)
ld r22,-80(1)
ld r23,-72(1)
ld r24,-64(1)
ld r25,-56(1)
ld r26,-48(1)
ld r27,-40(1)
ld r28,-32(1)
ld r29,-24(1)
ld r30,-16(1)
ld r31,-8(1)
blr

576
arch/powerpc/lib/copyuser.S Normal file

@ -0,0 +1,576 @@
/*
* arch/ppc64/lib/copyuser.S
*
* Copyright (C) 2002 Paul Mackerras, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/processor.h>
#include <asm/ppc_asm.h>
.align 7
_GLOBAL(__copy_tofrom_user)
/* first check for a whole page copy on a page boundary */
cmpldi cr1,r5,16
cmpdi cr6,r5,4096
or r0,r3,r4
neg r6,r3 /* LS 3 bits = # bytes to 8-byte dest bdry */
andi. r0,r0,4095
std r3,-24(r1)
crand cr0*4+2,cr0*4+2,cr6*4+2
std r4,-16(r1)
std r5,-8(r1)
dcbt 0,r4
beq .Lcopy_page
andi. r6,r6,7
mtcrf 0x01,r5
blt cr1,.Lshort_copy
bne .Ldst_unaligned
.Ldst_aligned:
andi. r0,r4,7
addi r3,r3,-16
bne .Lsrc_unaligned
srdi r7,r5,4
20: ld r9,0(r4)
addi r4,r4,-8
mtctr r7
andi. r5,r5,7
bf cr7*4+0,22f
addi r3,r3,8
addi r4,r4,8
mr r8,r9
blt cr1,72f
21: ld r9,8(r4)
70: std r8,8(r3)
22: ldu r8,16(r4)
71: stdu r9,16(r3)
bdnz 21b
72: std r8,8(r3)
beq+ 3f
addi r3,r3,16
23: ld r9,8(r4)
.Ldo_tail:
bf cr7*4+1,1f
rotldi r9,r9,32
73: stw r9,0(r3)
addi r3,r3,4
1: bf cr7*4+2,2f
rotldi r9,r9,16
74: sth r9,0(r3)
addi r3,r3,2
2: bf cr7*4+3,3f
rotldi r9,r9,8
75: stb r9,0(r3)
3: li r3,0
blr
.Lsrc_unaligned:
srdi r6,r5,3
addi r5,r5,-16
subf r4,r0,r4
srdi r7,r5,4
sldi r10,r0,3
cmpldi cr6,r6,3
andi. r5,r5,7
mtctr r7
subfic r11,r10,64
add r5,r5,r0
bt cr7*4+0,28f
24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */
25: ld r0,8(r4)
sld r6,r9,r10
26: ldu r9,16(r4)
srd r7,r0,r11
sld r8,r0,r10
or r7,r7,r6
blt cr6,79f
27: ld r0,8(r4)
b 2f
28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */
29: ldu r9,8(r4)
sld r8,r0,r10
addi r3,r3,-8
blt cr6,5f
30: ld r0,8(r4)
srd r12,r9,r11
sld r6,r9,r10
31: ldu r9,16(r4)
or r12,r8,r12
srd r7,r0,r11
sld r8,r0,r10
addi r3,r3,16
beq cr6,78f
1: or r7,r7,r6
32: ld r0,8(r4)
76: std r12,8(r3)
2: srd r12,r9,r11
sld r6,r9,r10
33: ldu r9,16(r4)
or r12,r8,r12
77: stdu r7,16(r3)
srd r7,r0,r11
sld r8,r0,r10
bdnz 1b
78: std r12,8(r3)
or r7,r7,r6
79: std r7,16(r3)
5: srd r12,r9,r11
or r12,r8,r12
80: std r12,24(r3)
bne 6f
li r3,0
blr
6: cmpwi cr1,r5,8
addi r3,r3,32
sld r9,r9,r10
ble cr1,.Ldo_tail
34: ld r0,8(r4)
srd r7,r0,r11
or r9,r7,r9
b .Ldo_tail
.Ldst_unaligned:
mtcrf 0x01,r6 /* put #bytes to 8B bdry into cr7 */
subf r5,r6,r5
li r7,0
cmpldi r1,r5,16
bf cr7*4+3,1f
35: lbz r0,0(r4)
81: stb r0,0(r3)
addi r7,r7,1
1: bf cr7*4+2,2f
36: lhzx r0,r7,r4
82: sthx r0,r7,r3
addi r7,r7,2
2: bf cr7*4+1,3f
37: lwzx r0,r7,r4
83: stwx r0,r7,r3
3: mtcrf 0x01,r5
add r4,r6,r4
add r3,r6,r3
b .Ldst_aligned
.Lshort_copy:
bf cr7*4+0,1f
38: lwz r0,0(r4)
39: lwz r9,4(r4)
addi r4,r4,8
84: stw r0,0(r3)
85: stw r9,4(r3)
addi r3,r3,8
1: bf cr7*4+1,2f
40: lwz r0,0(r4)
addi r4,r4,4
86: stw r0,0(r3)
addi r3,r3,4
2: bf cr7*4+2,3f
41: lhz r0,0(r4)
addi r4,r4,2
87: sth r0,0(r3)
addi r3,r3,2
3: bf cr7*4+3,4f
42: lbz r0,0(r4)
88: stb r0,0(r3)
4: li r3,0
blr
/*
* exception handlers follow
* we have to return the number of bytes not copied
* for an exception on a load, we set the rest of the destination to 0
*/
136:
137:
add r3,r3,r7
b 1f
130:
131:
addi r3,r3,8
120:
122:
124:
125:
126:
127:
128:
129:
133:
addi r3,r3,8
121:
132:
addi r3,r3,8
123:
134:
135:
138:
139:
140:
141:
142:
/*
* here we have had a fault on a load and r3 points to the first
* unmodified byte of the destination
*/
1: ld r6,-24(r1)
ld r4,-16(r1)
ld r5,-8(r1)
subf r6,r6,r3
add r4,r4,r6
subf r5,r6,r5 /* #bytes left to go */
/*
* first see if we can copy any more bytes before hitting another exception
*/
mtctr r5
43: lbz r0,0(r4)
addi r4,r4,1
89: stb r0,0(r3)
addi r3,r3,1
bdnz 43b
li r3,0 /* huh? all copied successfully this time? */
blr
/*
* here we have trapped again, need to clear ctr bytes starting at r3
*/
143: mfctr r5
li r0,0
mr r4,r3
mr r3,r5 /* return the number of bytes not copied */
1: andi. r9,r4,7
beq 3f
90: stb r0,0(r4)
addic. r5,r5,-1
addi r4,r4,1
bne 1b
blr
3: cmpldi cr1,r5,8
srdi r9,r5,3
andi. r5,r5,7
blt cr1,93f
mtctr r9
91: std r0,0(r4)
addi r4,r4,8
bdnz 91b
93: beqlr
mtctr r5
92: stb r0,0(r4)
addi r4,r4,1
bdnz 92b
blr
/*
* exception handlers for stores: we just need to work
* out how many bytes weren't copied
*/
182:
183:
add r3,r3,r7
b 1f
180:
addi r3,r3,8
171:
177:
addi r3,r3,8
170:
172:
176:
178:
addi r3,r3,4
185:
addi r3,r3,4
173:
174:
175:
179:
181:
184:
186:
187:
188:
189:
1:
ld r6,-24(r1)
ld r5,-8(r1)
add r6,r6,r5
subf r3,r3,r6 /* #bytes not copied */
190:
191:
192:
blr /* #bytes not copied in r3 */
.section __ex_table,"a"
.align 3
.llong 20b,120b
.llong 21b,121b
.llong 70b,170b
.llong 22b,122b
.llong 71b,171b
.llong 72b,172b
.llong 23b,123b
.llong 73b,173b
.llong 74b,174b
.llong 75b,175b
.llong 24b,124b
.llong 25b,125b
.llong 26b,126b
.llong 27b,127b
.llong 28b,128b
.llong 29b,129b
.llong 30b,130b
.llong 31b,131b
.llong 32b,132b
.llong 76b,176b
.llong 33b,133b
.llong 77b,177b
.llong 78b,178b
.llong 79b,179b
.llong 80b,180b
.llong 34b,134b
.llong 35b,135b
.llong 81b,181b
.llong 36b,136b
.llong 82b,182b
.llong 37b,137b
.llong 83b,183b
.llong 38b,138b
.llong 39b,139b
.llong 84b,184b
.llong 85b,185b
.llong 40b,140b
.llong 86b,186b
.llong 41b,141b
.llong 87b,187b
.llong 42b,142b
.llong 88b,188b
.llong 43b,143b
.llong 89b,189b
.llong 90b,190b
.llong 91b,191b
.llong 92b,192b
.text
/*
* Routine to copy a whole page of data, optimized for POWER4.
* On POWER4 it is more than 50% faster than the simple loop
* above (following the .Ldst_aligned label) but it runs slightly
* slower on POWER3.
*/
.Lcopy_page:
std r31,-32(1)
std r30,-40(1)
std r29,-48(1)
std r28,-56(1)
std r27,-64(1)
std r26,-72(1)
std r25,-80(1)
std r24,-88(1)
std r23,-96(1)
std r22,-104(1)
std r21,-112(1)
std r20,-120(1)
li r5,4096/32 - 1
addi r3,r3,-8
li r0,5
0: addi r5,r5,-24
mtctr r0
20: ld r22,640(4)
21: ld r21,512(4)
22: ld r20,384(4)
23: ld r11,256(4)
24: ld r9,128(4)
25: ld r7,0(4)
26: ld r25,648(4)
27: ld r24,520(4)
28: ld r23,392(4)
29: ld r10,264(4)
30: ld r8,136(4)
31: ldu r6,8(4)
cmpwi r5,24
1:
32: std r22,648(3)
33: std r21,520(3)
34: std r20,392(3)
35: std r11,264(3)
36: std r9,136(3)
37: std r7,8(3)
38: ld r28,648(4)
39: ld r27,520(4)
40: ld r26,392(4)
41: ld r31,264(4)
42: ld r30,136(4)
43: ld r29,8(4)
44: std r25,656(3)
45: std r24,528(3)
46: std r23,400(3)
47: std r10,272(3)
48: std r8,144(3)
49: std r6,16(3)
50: ld r22,656(4)
51: ld r21,528(4)
52: ld r20,400(4)
53: ld r11,272(4)
54: ld r9,144(4)
55: ld r7,16(4)
56: std r28,664(3)
57: std r27,536(3)
58: std r26,408(3)
59: std r31,280(3)
60: std r30,152(3)
61: stdu r29,24(3)
62: ld r25,664(4)
63: ld r24,536(4)
64: ld r23,408(4)
65: ld r10,280(4)
66: ld r8,152(4)
67: ldu r6,24(4)
bdnz 1b
68: std r22,648(3)
69: std r21,520(3)
70: std r20,392(3)
71: std r11,264(3)
72: std r9,136(3)
73: std r7,8(3)
74: addi r4,r4,640
75: addi r3,r3,648
bge 0b
mtctr r5
76: ld r7,0(4)
77: ld r8,8(4)
78: ldu r9,16(4)
3:
79: ld r10,8(4)
80: std r7,8(3)
81: ld r7,16(4)
82: std r8,16(3)
83: ld r8,24(4)
84: std r9,24(3)
85: ldu r9,32(4)
86: stdu r10,32(3)
bdnz 3b
4:
87: ld r10,8(4)
88: std r7,8(3)
89: std r8,16(3)
90: std r9,24(3)
91: std r10,32(3)
9: ld r20,-120(1)
ld r21,-112(1)
ld r22,-104(1)
ld r23,-96(1)
ld r24,-88(1)
ld r25,-80(1)
ld r26,-72(1)
ld r27,-64(1)
ld r28,-56(1)
ld r29,-48(1)
ld r30,-40(1)
ld r31,-32(1)
li r3,0
blr
/*
* on an exception, reset to the beginning and jump back into the
* standard __copy_tofrom_user
*/
100: ld r20,-120(1)
ld r21,-112(1)
ld r22,-104(1)
ld r23,-96(1)
ld r24,-88(1)
ld r25,-80(1)
ld r26,-72(1)
ld r27,-64(1)
ld r28,-56(1)
ld r29,-48(1)
ld r30,-40(1)
ld r31,-32(1)
ld r3,-24(r1)
ld r4,-16(r1)
li r5,4096
b .Ldst_aligned
.section __ex_table,"a"
.align 3
.llong 20b,100b
.llong 21b,100b
.llong 22b,100b
.llong 23b,100b
.llong 24b,100b
.llong 25b,100b
.llong 26b,100b
.llong 27b,100b
.llong 28b,100b
.llong 29b,100b
.llong 30b,100b
.llong 31b,100b
.llong 32b,100b
.llong 33b,100b
.llong 34b,100b
.llong 35b,100b
.llong 36b,100b
.llong 37b,100b
.llong 38b,100b
.llong 39b,100b
.llong 40b,100b
.llong 41b,100b
.llong 42b,100b
.llong 43b,100b
.llong 44b,100b
.llong 45b,100b
.llong 46b,100b
.llong 47b,100b
.llong 48b,100b
.llong 49b,100b
.llong 50b,100b
.llong 51b,100b
.llong 52b,100b
.llong 53b,100b
.llong 54b,100b
.llong 55b,100b
.llong 56b,100b
.llong 57b,100b
.llong 58b,100b
.llong 59b,100b
.llong 60b,100b
.llong 61b,100b
.llong 62b,100b
.llong 63b,100b
.llong 64b,100b
.llong 65b,100b
.llong 66b,100b
.llong 67b,100b
.llong 68b,100b
.llong 69b,100b
.llong 70b,100b
.llong 71b,100b
.llong 72b,100b
.llong 73b,100b
.llong 74b,100b
.llong 75b,100b
.llong 76b,100b
.llong 77b,100b
.llong 78b,100b
.llong 79b,100b
.llong 80b,100b
.llong 81b,100b
.llong 82b,100b
.llong 83b,100b
.llong 84b,100b
.llong 85b,100b
.llong 86b,100b
.llong 87b,100b
.llong 88b,100b
.llong 89b,100b
.llong 90b,100b
.llong 91b,100b
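As the exception-handler comments above state, __copy_tofrom_user() returns the number of bytes it could not copy (0 on complete success), and on a load fault it has already zeroed the uncopied tail of the destination; this is the routine that sits underneath copy_from_user()/copy_to_user() on this architecture. A caller-side sketch (names are placeholders):
#include <linux/errno.h>
#include <asm/uaccess.h>

/* Placeholder caller: copy 'len' bytes from user space, returning
 * -EFAULT if any byte could not be copied. */
static int read_user_buffer(void *kbuf, const char __user *ubuf,
			    unsigned long len)
{
	if (copy_from_user(kbuf, ubuf, len) != 0)
		return -EFAULT;
	return 0;
}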

58
arch/powerpc/lib/div64.S Normal file

@ -0,0 +1,58 @@
/*
* Divide a 64-bit unsigned number by a 32-bit unsigned number.
* This routine assumes that the top 32 bits of the dividend are
* non-zero to start with.
* On entry, r3 points to the dividend, which get overwritten with
* the 64-bit quotient, and r4 contains the divisor.
* On exit, r3 contains the remainder.
*
* Copyright (C) 2002 Paul Mackerras, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/ppc_asm.h>
#include <asm/processor.h>
_GLOBAL(__div64_32)
lwz r5,0(r3) # get the dividend into r5/r6
lwz r6,4(r3)
cmplw r5,r4
li r7,0
li r8,0
blt 1f
divwu r7,r5,r4 # if dividend.hi >= divisor,
mullw r0,r7,r4 # quotient.hi = dividend.hi / divisor
subf. r5,r0,r5 # dividend.hi %= divisor
beq 3f
1: mr r11,r5 # here dividend.hi != 0
andis. r0,r5,0xc000
bne 2f
cntlzw r0,r5 # we are shifting the dividend right
li r10,-1 # to make it < 2^32, and shifting
srw r10,r10,r0 # the divisor right the same amount,
add r9,r4,r10 # rounding up (so the estimate cannot
andc r11,r6,r10 # ever be too large, only too small)
andc r9,r9,r10
or r11,r5,r11
rotlw r9,r9,r0
rotlw r11,r11,r0
divwu r11,r11,r9 # then we divide the shifted quantities
2: mullw r10,r11,r4 # to get an estimate of the quotient,
mulhwu r9,r11,r4 # multiply the estimate by the divisor,
subfc r6,r10,r6 # take the product from the divisor,
add r8,r8,r11 # and add the estimate to the accumulated
subfe. r5,r9,r5 # quotient
bne 1b
3: cmplw r6,r4
blt 4f
divwu r0,r6,r4 # perform the remaining 32-bit division
mullw r10,r0,r4 # and get the remainder
add r8,r8,r0
subf r6,r10,r6
4: stw r7,0(r3) # return the quotient in *r3
stw r8,4(r3)
mr r3,r6 # return the remainder in r3
blr
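In C terms, the contract of __div64_32 above is: *np holds the 64-bit dividend on entry and the 64-bit quotient on exit, and the 32-bit remainder is returned. A reference model (illustrative; the assembly exists precisely to avoid relying on a 64-bit divide):
#include <stdint.h>

/* C model of the __div64_32 contract. */
static uint32_t div64_32_model(uint64_t *np, uint32_t divisor)
{
	uint64_t n = *np;

	*np = n / divisor;		/* 64-bit quotient written back */
	return (uint32_t)(n % divisor);	/* 32-bit remainder returned */
}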

108
arch/powerpc/lib/e2a.c Normal file

@ -0,0 +1,108 @@
/*
* arch/ppc64/lib/e2a.c
*
* EBCDIC to ASCII conversion
*
* This function moved here from arch/ppc64/kernel/viopath.c
*
* (C) Copyright 2000-2004 IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/module.h>
unsigned char e2a(unsigned char x)
{
switch (x) {
case 0xF0:
return '0';
case 0xF1:
return '1';
case 0xF2:
return '2';
case 0xF3:
return '3';
case 0xF4:
return '4';
case 0xF5:
return '5';
case 0xF6:
return '6';
case 0xF7:
return '7';
case 0xF8:
return '8';
case 0xF9:
return '9';
case 0xC1:
return 'A';
case 0xC2:
return 'B';
case 0xC3:
return 'C';
case 0xC4:
return 'D';
case 0xC5:
return 'E';
case 0xC6:
return 'F';
case 0xC7:
return 'G';
case 0xC8:
return 'H';
case 0xC9:
return 'I';
case 0xD1:
return 'J';
case 0xD2:
return 'K';
case 0xD3:
return 'L';
case 0xD4:
return 'M';
case 0xD5:
return 'N';
case 0xD6:
return 'O';
case 0xD7:
return 'P';
case 0xD8:
return 'Q';
case 0xD9:
return 'R';
case 0xE2:
return 'S';
case 0xE3:
return 'T';
case 0xE4:
return 'U';
case 0xE5:
return 'V';
case 0xE6:
return 'W';
case 0xE7:
return 'X';
case 0xE8:
return 'Y';
case 0xE9:
return 'Z';
}
return ' ';
}
EXPORT_SYMBOL(e2a);
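e2a() only maps the EBCDIC digits and upper-case letters; every other byte collapses to a space. A hypothetical caller converting a buffer in place might look like:
extern unsigned char e2a(unsigned char x);

/* Hypothetical helper: convert an EBCDIC buffer to ASCII in place.
 * For example e2a(0xC1) == 'A' and e2a(0xF7) == '7'; anything the
 * switch above does not list becomes ' '. */
static void ebcdic_buf_to_ascii(unsigned char *buf, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		buf[i] = e2a(buf[i]);
}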

172
arch/powerpc/lib/memcpy.S Normal file

@ -0,0 +1,172 @@
/*
* arch/ppc64/lib/memcpy.S
*
* Copyright (C) 2002 Paul Mackerras, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/processor.h>
#include <asm/ppc_asm.h>
.align 7
_GLOBAL(memcpy)
mtcrf 0x01,r5
cmpldi cr1,r5,16
neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry
andi. r6,r6,7
dcbt 0,r4
blt cr1,.Lshort_copy
bne .Ldst_unaligned
.Ldst_aligned:
andi. r0,r4,7
addi r3,r3,-16
bne .Lsrc_unaligned
srdi r7,r5,4
ld r9,0(r4)
addi r4,r4,-8
mtctr r7
andi. r5,r5,7
bf cr7*4+0,2f
addi r3,r3,8
addi r4,r4,8
mr r8,r9
blt cr1,3f
1: ld r9,8(r4)
std r8,8(r3)
2: ldu r8,16(r4)
stdu r9,16(r3)
bdnz 1b
3: std r8,8(r3)
beqlr
addi r3,r3,16
ld r9,8(r4)
.Ldo_tail:
bf cr7*4+1,1f
rotldi r9,r9,32
stw r9,0(r3)
addi r3,r3,4
1: bf cr7*4+2,2f
rotldi r9,r9,16
sth r9,0(r3)
addi r3,r3,2
2: bf cr7*4+3,3f
rotldi r9,r9,8
stb r9,0(r3)
3: blr
.Lsrc_unaligned:
srdi r6,r5,3
addi r5,r5,-16
subf r4,r0,r4
srdi r7,r5,4
sldi r10,r0,3
cmpdi cr6,r6,3
andi. r5,r5,7
mtctr r7
subfic r11,r10,64
add r5,r5,r0
bt cr7*4+0,0f
ld r9,0(r4) # 3+2n loads, 2+2n stores
ld r0,8(r4)
sld r6,r9,r10
ldu r9,16(r4)
srd r7,r0,r11
sld r8,r0,r10
or r7,r7,r6
blt cr6,4f
ld r0,8(r4)
# s1<< in r8, d0=(s0<<|s1>>) in r7, s3 in r0, s2 in r9, nix in r6 & r12
b 2f
0: ld r0,0(r4) # 4+2n loads, 3+2n stores
ldu r9,8(r4)
sld r8,r0,r10
addi r3,r3,-8
blt cr6,5f
ld r0,8(r4)
srd r12,r9,r11
sld r6,r9,r10
ldu r9,16(r4)
or r12,r8,r12
srd r7,r0,r11
sld r8,r0,r10
addi r3,r3,16
beq cr6,3f
# d0=(s0<<|s1>>) in r12, s1<< in r6, s2>> in r7, s2<< in r8, s3 in r9
1: or r7,r7,r6
ld r0,8(r4)
std r12,8(r3)
2: srd r12,r9,r11
sld r6,r9,r10
ldu r9,16(r4)
or r12,r8,r12
stdu r7,16(r3)
srd r7,r0,r11
sld r8,r0,r10
bdnz 1b
3: std r12,8(r3)
or r7,r7,r6
4: std r7,16(r3)
5: srd r12,r9,r11
or r12,r8,r12
std r12,24(r3)
beqlr
cmpwi cr1,r5,8
addi r3,r3,32
sld r9,r9,r10
ble cr1,.Ldo_tail
ld r0,8(r4)
srd r7,r0,r11
or r9,r7,r9
b .Ldo_tail
.Ldst_unaligned:
mtcrf 0x01,r6 # put #bytes to 8B bdry into cr7
subf r5,r6,r5
li r7,0
cmpldi r1,r5,16
bf cr7*4+3,1f
lbz r0,0(r4)
stb r0,0(r3)
addi r7,r7,1
1: bf cr7*4+2,2f
lhzx r0,r7,r4
sthx r0,r7,r3
addi r7,r7,2
2: bf cr7*4+1,3f
lwzx r0,r7,r4
stwx r0,r7,r3
3: mtcrf 0x01,r5
add r4,r6,r4
add r3,r6,r3
b .Ldst_aligned
.Lshort_copy:
bf cr7*4+0,1f
lwz r0,0(r4)
lwz r9,4(r4)
addi r4,r4,8
stw r0,0(r3)
stw r9,4(r3)
addi r3,r3,8
1: bf cr7*4+1,2f
lwz r0,0(r4)
addi r4,r4,4
stw r0,0(r3)
addi r3,r3,4
2: bf cr7*4+2,3f
lhz r0,0(r4)
addi r4,r4,2
sth r0,0(r3)
addi r3,r3,2
3: bf cr7*4+3,4f
lbz r0,0(r4)
stb r0,0(r3)
4: blr
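The .Lsrc_unaligned path above never issues an unaligned load: it reads aligned doublewords and merges adjacent pairs with sld/srd/or so that every store is a full aligned doubleword. A C sketch of that merge for big-endian data (illustrative; byte_off is the source misalignment, 1-7):
#include <stdint.h>
#include <stddef.h>

/* Big-endian shift-and-merge: src_aligned points at the aligned
 * doubleword containing the (misaligned) start of the source, byte_off
 * is the misalignment in bytes (1-7), dst is 8-byte aligned. */
static void copy_merge_sketch(uint64_t *dst, const uint64_t *src_aligned,
			      unsigned int byte_off, size_t ndoubles)
{
	unsigned int ls = byte_off * 8;		/* the sld amount */
	unsigned int rs = 64 - ls;		/* the srd amount */
	uint64_t prev = src_aligned[0];
	size_t i;

	for (i = 0; i < ndoubles; i++) {
		uint64_t next = src_aligned[i + 1];

		dst[i] = (prev << ls) | (next >> rs);	/* one aligned store */
		prev = next;
	}
}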

693
arch/powerpc/lib/rheap.c Normal file

@ -0,0 +1,693 @@
/*
* arch/ppc/syslib/rheap.c
*
* A Remote Heap. Remote means that we don't touch the memory that the
* heap points to. Normal heap implementations use the memory they manage
* to place their list. We cannot do that because the memory we manage may
* have special properties, for example it is uncacheable or of different
* endianness.
*
* Author: Pantelis Antoniou <panto@intracom.gr>
*
* 2004 (c) INTRACOM S.A. Greece. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/rheap.h>
/*
* Fixup a list_head, needed when copying lists. If the pointers fall
* between s and e, apply the delta. This assumes that
* sizeof(struct list_head *) == sizeof(unsigned long *).
*/
static inline void fixup(unsigned long s, unsigned long e, int d,
struct list_head *l)
{
unsigned long *pp;
pp = (unsigned long *)&l->next;
if (*pp >= s && *pp < e)
*pp += d;
pp = (unsigned long *)&l->prev;
if (*pp >= s && *pp < e)
*pp += d;
}
/* Grow the allocated blocks */
static int grow(rh_info_t * info, int max_blocks)
{
rh_block_t *block, *blk;
int i, new_blocks;
int delta;
unsigned long blks, blke;
if (max_blocks <= info->max_blocks)
return -EINVAL;
new_blocks = max_blocks - info->max_blocks;
block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_KERNEL);
if (block == NULL)
return -ENOMEM;
if (info->max_blocks > 0) {
/* copy old block area */
memcpy(block, info->block,
sizeof(rh_block_t) * info->max_blocks);
delta = (char *)block - (char *)info->block;
/* and fixup list pointers */
blks = (unsigned long)info->block;
blke = (unsigned long)(info->block + info->max_blocks);
for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
fixup(blks, blke, delta, &blk->list);
fixup(blks, blke, delta, &info->empty_list);
fixup(blks, blke, delta, &info->free_list);
fixup(blks, blke, delta, &info->taken_list);
/* free the old allocated memory */
if ((info->flags & RHIF_STATIC_BLOCK) == 0)
kfree(info->block);
}
info->block = block;
info->empty_slots += new_blocks;
info->max_blocks = max_blocks;
info->flags &= ~RHIF_STATIC_BLOCK;
/* add all new blocks to the free list; they start right after the old ones */
for (i = 0, blk = block + info->max_blocks - new_blocks; i < new_blocks; i++, blk++)
list_add(&blk->list, &info->empty_list);
return 0;
}
/*
* Ensure at least the required number of empty slots.  If this function
* causes the block area to grow, then all pointers previously kept into
* the block area become invalid!
*/
static int assure_empty(rh_info_t * info, int slots)
{
int max_blocks;
/* This function is not meant to be used to grow uncontrollably */
if (slots >= 4)
return -EINVAL;
/* Enough space */
if (info->empty_slots >= slots)
return 0;
/* Next 16 sized block */
max_blocks = ((info->max_blocks + slots) + 15) & ~15;
return grow(info, max_blocks);
}
static rh_block_t *get_slot(rh_info_t * info)
{
rh_block_t *blk;
/* If no more free slots, and failure to extend. */
/* XXX: You should have called assure_empty before */
if (info->empty_slots == 0) {
printk(KERN_ERR "rh: out of slots; crash is imminent.\n");
return NULL;
}
/* Get empty slot to use */
blk = list_entry(info->empty_list.next, rh_block_t, list);
list_del_init(&blk->list);
info->empty_slots--;
/* Initialize */
blk->start = NULL;
blk->size = 0;
blk->owner = NULL;
return blk;
}
static inline void release_slot(rh_info_t * info, rh_block_t * blk)
{
list_add(&blk->list, &info->empty_list);
info->empty_slots++;
}
static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
{
rh_block_t *blk;
rh_block_t *before;
rh_block_t *after;
rh_block_t *next;
int size;
unsigned long s, e, bs, be;
struct list_head *l;
/* We assume that they are aligned properly */
size = blkn->size;
s = (unsigned long)blkn->start;
e = s + size;
/* Find the blocks immediately before and after the given one
* (if any) */
before = NULL;
after = NULL;
next = NULL;
list_for_each(l, &info->free_list) {
blk = list_entry(l, rh_block_t, list);
bs = (unsigned long)blk->start;
be = bs + blk->size;
if (next == NULL && s >= bs)
next = blk;
if (be == s)
before = blk;
if (e == bs)
after = blk;
/* If both are not null, break now */
if (before != NULL && after != NULL)
break;
}
/* Now check if they are really adjacent */
if (before != NULL && s != (unsigned long)before->start + before->size)
before = NULL;
if (after != NULL && e != (unsigned long)after->start)
after = NULL;
/* No coalescing; list insert and return */
if (before == NULL && after == NULL) {
if (next != NULL)
list_add(&blkn->list, &next->list);
else
list_add(&blkn->list, &info->free_list);
return;
}
/* We don't need it anymore */
release_slot(info, blkn);
/* Grow the before block */
if (before != NULL && after == NULL) {
before->size += size;
return;
}
/* Grow the after block backwards */
if (before == NULL && after != NULL) {
after->start = (int8_t *)after->start - size;
after->size += size;
return;
}
/* Grow the before block, and release the after block */
before->size += size + after->size;
list_del(&after->list);
release_slot(info, after);
}
static void attach_taken_block(rh_info_t * info, rh_block_t * blkn)
{
rh_block_t *blk;
struct list_head *l;
/* Insert into the taken list, keeping it sorted by start address */
list_for_each(l, &info->taken_list) {
blk = list_entry(l, rh_block_t, list);
if (blk->start > blkn->start) {
list_add_tail(&blkn->list, &blk->list);
return;
}
}
list_add_tail(&blkn->list, &info->taken_list);
}
/*
* Create a remote heap dynamically. Note that no memory for the blocks
* is allocated here; it will be allocated upon the first allocation request.
*/
rh_info_t *rh_create(unsigned int alignment)
{
rh_info_t *info;
/* Alignment must be a power of two */
if ((alignment & (alignment - 1)) != 0)
return ERR_PTR(-EINVAL);
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL)
return ERR_PTR(-ENOMEM);
info->alignment = alignment;
/* Initially everything is empty */
info->block = NULL;
info->max_blocks = 0;
info->empty_slots = 0;
info->flags = 0;
INIT_LIST_HEAD(&info->empty_list);
INIT_LIST_HEAD(&info->free_list);
INIT_LIST_HEAD(&info->taken_list);
return info;
}
/*
* Destroy a dynamically created remote heap. Deallocate only if the areas
* are not static
*/
void rh_destroy(rh_info_t * info)
{
if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL)
kfree(info->block);
if ((info->flags & RHIF_STATIC_INFO) == 0)
kfree(info);
}
/*
* Initialize in place a remote heap info block. This is needed to support
* operation very early in the startup of the kernel, when it is not yet safe
* to call kmalloc.
*/
void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
rh_block_t * block)
{
int i;
rh_block_t *blk;
/* Alignment must be a power of two */
if ((alignment & (alignment - 1)) != 0)
return;
info->alignment = alignment;
/* Initially everything is empty */
info->block = block;
info->max_blocks = max_blocks;
info->empty_slots = max_blocks;
info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;
INIT_LIST_HEAD(&info->empty_list);
INIT_LIST_HEAD(&info->free_list);
INIT_LIST_HEAD(&info->taken_list);
/* Add all block descriptors to the empty-slot list */
for (i = 0, blk = block; i < max_blocks; i++, blk++)
list_add(&blk->list, &info->empty_list);
}
/* Attach a free memory region; coalesce it with adjacent free regions */
int rh_attach_region(rh_info_t * info, void *start, int size)
{
rh_block_t *blk;
unsigned long s, e, m;
int r;
/* The region must be aligned */
s = (unsigned long)start;
e = s + size;
m = info->alignment - 1;
/* Round start up */
s = (s + m) & ~m;
/* Round end down */
e = e & ~m;
/* Take final values */
start = (void *)s;
size = (int)(e - s);
/* Grow the blocks, if needed */
r = assure_empty(info, 1);
if (r < 0)
return r;
blk = get_slot(info);
blk->start = start;
blk->size = size;
blk->owner = NULL;
attach_free_block(info, blk);
return 0;
}
/* Detach the given address range, splitting a free block if needed. */
void *rh_detach_region(rh_info_t * info, void *start, int size)
{
struct list_head *l;
rh_block_t *blk, *newblk;
unsigned long s, e, m, bs, be;
/* Validate size */
if (size <= 0)
return ERR_PTR(-EINVAL);
/* The region must be aligned */
s = (unsigned long)start;
e = s + size;
m = info->alignment - 1;
/* Round start up */
s = (s + m) & ~m;
/* Round end down */
e = e & ~m;
if (assure_empty(info, 1) < 0)
return ERR_PTR(-ENOMEM);
blk = NULL;
list_for_each(l, &info->free_list) {
blk = list_entry(l, rh_block_t, list);
/* The range must lie entirely inside one free block */
bs = (unsigned long)blk->start;
be = (unsigned long)blk->start + blk->size;
if (s >= bs && e <= be)
break;
blk = NULL;
}
if (blk == NULL)
return ERR_PTR(-ENOMEM);
/* Perfect fit */
if (bs == s && be == e) {
/* Delete from free list, release slot */
list_del(&blk->list);
release_slot(info, blk);
return (void *)s;
}
/* blk still in free list, with updated start and/or size */
if (bs == s || be == e) {
if (bs == s)
blk->start = (int8_t *)blk->start + size;
blk->size -= size;
} else {
/* The front free fragment */
blk->size = s - bs;
/* the back free fragment */
newblk = get_slot(info);
newblk->start = (void *)e;
newblk->size = be - e;
list_add(&newblk->list, &blk->list);
}
return (void *)s;
}
void *rh_alloc(rh_info_t * info, int size, const char *owner)
{
struct list_head *l;
rh_block_t *blk;
rh_block_t *newblk;
void *start;
/* Validate size */
if (size <= 0)
return ERR_PTR(-EINVAL);
/* Align to configured alignment */
size = (size + (info->alignment - 1)) & ~(info->alignment - 1);
if (assure_empty(info, 1) < 0)
return ERR_PTR(-ENOMEM);
blk = NULL;
list_for_each(l, &info->free_list) {
blk = list_entry(l, rh_block_t, list);
if (size <= blk->size)
break;
blk = NULL;
}
if (blk == NULL)
return ERR_PTR(-ENOMEM);
/* Just fits */
if (blk->size == size) {
/* Move from free list to taken list */
list_del(&blk->list);
blk->owner = owner;
start = blk->start;
attach_taken_block(info, blk);
return start;
}
newblk = get_slot(info);
newblk->start = blk->start;
newblk->size = size;
newblk->owner = owner;
/* blk still in free list, with updated start, size */
blk->start = (int8_t *)blk->start + size;
blk->size -= size;
start = newblk->start;
attach_taken_block(info, newblk);
return start;
}
/* allocate at precisely the given address */
void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
{
struct list_head *l;
rh_block_t *blk, *newblk1, *newblk2;
unsigned long s, e, m, bs, be;
/* Validate size */
if (size <= 0)
return ERR_PTR(-EINVAL);
/* The region must be aligned */
s = (unsigned long)start;
e = s + size;
m = info->alignment - 1;
/* Round start up */
s = (s + m) & ~m;
/* Round end down */
e = e & ~m;
if (assure_empty(info, 2) < 0)
return ERR_PTR(-ENOMEM);
blk = NULL;
list_for_each(l, &info->free_list) {
blk = list_entry(l, rh_block_t, list);
/* The range must lie entirely inside one free block */
bs = (unsigned long)blk->start;
be = (unsigned long)blk->start + blk->size;
if (s >= bs && e <= be)
break;
blk = NULL;	/* keep blk NULL unless a matching block is found */
}
if (blk == NULL)
return ERR_PTR(-ENOMEM);
/* Perfect fit */
if (bs == s && be == e) {
/* Move from free list to taken list */
list_del(&blk->list);
blk->owner = owner;
start = blk->start;
attach_taken_block(info, blk);
return start;
}
/* blk still in free list, with updated start and/or size */
if (bs == s || be == e) {
if (bs == s)
blk->start = (int8_t *)blk->start + size;
blk->size -= size;
} else {
/* The front free fragment */
blk->size = s - bs;
/* The back free fragment */
newblk2 = get_slot(info);
newblk2->start = (void *)e;
newblk2->size = be - e;
list_add(&newblk2->list, &blk->list);
}
newblk1 = get_slot(info);
newblk1->start = (void *)s;
newblk1->size = e - s;
newblk1->owner = owner;
start = newblk1->start;
attach_taken_block(info, newblk1);
return start;
}
int rh_free(rh_info_t * info, void *start)
{
rh_block_t *blk, *blk2;
struct list_head *l;
int size;
/* Linear search for block */
blk = NULL;
list_for_each(l, &info->taken_list) {
blk2 = list_entry(l, rh_block_t, list);
if (start < blk2->start)
break;
blk = blk2;
}
if (blk == NULL || start > (blk->start + blk->size))
return -EINVAL;
/* Remove from taken list */
list_del(&blk->list);
/* Get size of freed block */
size = blk->size;
attach_free_block(info, blk);
return size;
}
int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
{
rh_block_t *blk;
struct list_head *l;
struct list_head *h;
int nr;
switch (what) {
case RHGS_FREE:
h = &info->free_list;
break;
case RHGS_TAKEN:
h = &info->taken_list;
break;
default:
return -EINVAL;
}
/* Linear search for block */
nr = 0;
list_for_each(l, h) {
blk = list_entry(l, rh_block_t, list);
if (stats != NULL && nr < max_stats) {
stats->start = blk->start;
stats->size = blk->size;
stats->owner = blk->owner;
stats++;
}
nr++;
}
return nr;
}
int rh_set_owner(rh_info_t * info, void *start, const char *owner)
{
rh_block_t *blk, *blk2;
struct list_head *l;
int size;
/* Linear search for block */
blk = NULL;
list_for_each(l, &info->taken_list) {
blk2 = list_entry(l, rh_block_t, list);
if (start < blk2->start)
break;
blk = blk2;
}
if (blk == NULL || start > (blk->start + blk->size))
return -EINVAL;
blk->owner = owner;
size = blk->size;
return size;
}
void rh_dump(rh_info_t * info)
{
static rh_stats_t st[32]; /* XXX maximum 32 blocks */
int maxnr;
int i, nr;
maxnr = sizeof(st) / sizeof(st[0]);
printk(KERN_INFO
"info @0x%p (%d slots empty / %d max)\n",
info, info->empty_slots, info->max_blocks);
printk(KERN_INFO " Free:\n");
nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
if (nr > maxnr)
nr = maxnr;
for (i = 0; i < nr; i++)
printk(KERN_INFO
" 0x%p-0x%p (%u)\n",
st[i].start, (int8_t *) st[i].start + st[i].size,
st[i].size);
printk(KERN_INFO "\n");
printk(KERN_INFO " Taken:\n");
nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
if (nr > maxnr)
nr = maxnr;
for (i = 0; i < nr; i++)
printk(KERN_INFO
" 0x%p-0x%p (%u) %s\n",
st[i].start, (int8_t *) st[i].start + st[i].size,
st[i].size, st[i].owner != NULL ? st[i].owner : "");
printk(KERN_INFO "\n");
}
void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
{
printk(KERN_INFO
"blk @0x%p: 0x%p-0x%p (%u)\n",
blk, blk->start, (int8_t *) blk->start + blk->size, blk->size);
}
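/*
 * A minimal usage sketch of the remote-heap API above (hypothetical
 * caller; the region address and sizes are made up, and <linux/err.h>
 * plus the rheap header are assumed to be included).
 */
static void rheap_example(void)
{
	rh_info_t *rh;
	void *p;

	rh = rh_create(16);		/* 16-byte alignment (power of two) */
	if (IS_ERR(rh))
		return;

	/* hand the heap an 8kB region to manage */
	if (rh_attach_region(rh, (void *)0xfe000000, 8192) == 0) {
		p = rh_alloc(rh, 100, "example");	/* rounded up to 112 bytes */
		if (!IS_ERR(p))
			rh_free(rh, p);
	}

	rh_destroy(rh);
}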

141
arch/powerpc/lib/sstep.c Normal file

@ -0,0 +1,141 @@
/*
* Single-step support.
*
* Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <asm/sstep.h>
#include <asm/processor.h>
extern char system_call_common[];
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK 0xffffffff87c0ffff
/*
* Determine whether a conditional branch instruction would branch.
*/
static int branch_taken(unsigned int instr, struct pt_regs *regs)
{
unsigned int bo = (instr >> 21) & 0x1f;
unsigned int bi;
if ((bo & 4) == 0) {
/* decrement counter */
--regs->ctr;
if (((bo >> 1) & 1) ^ (regs->ctr == 0))
return 0;
}
if ((bo & 0x10) == 0) {
/* check bit from CR */
bi = (instr >> 16) & 0x1f;
if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
return 0;
}
return 1;
}
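/*
 * Worked example of the BO decoding above: a plain "bdnz" has BO = 0b10000.
 *   (bo & 4) == 0        -> the CTR is decremented
 *   ((bo >> 1) & 1) == 0 -> branch only while the decremented CTR != 0
 *   (bo & 0x10) != 0     -> the CR bit is not examined at all
 */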
/*
* Emulate instructions that cause a transfer of control.
* Returns 1 if the step was emulated, 0 if not,
* or -1 if the instruction is one that should not be stepped,
* such as an rfid, or a mtmsrd that would clear MSR_RI.
*/
int emulate_step(struct pt_regs *regs, unsigned int instr)
{
unsigned int opcode, rd;
unsigned long int imm;
opcode = instr >> 26;
switch (opcode) {
case 16: /* bc */
imm = (signed short)(instr & 0xfffc);
if ((instr & 2) == 0)
imm += regs->nip;
regs->nip += 4;
if ((regs->msr & MSR_SF) == 0)
regs->nip &= 0xffffffffUL;
if (instr & 1)
regs->link = regs->nip;
if (branch_taken(instr, regs))
regs->nip = imm;
return 1;
case 17: /* sc */
/*
* N.B. this uses knowledge about how the syscall
* entry code works. If that is changed, this will
* need to be changed also.
*/
regs->gpr[9] = regs->gpr[13];
regs->gpr[11] = regs->nip + 4;
regs->gpr[12] = regs->msr & MSR_MASK;
regs->gpr[13] = (unsigned long) get_paca();
regs->nip = (unsigned long) &system_call_common;
regs->msr = MSR_KERNEL;
return 1;
case 18: /* b */
imm = instr & 0x03fffffc;
if (imm & 0x02000000)
imm -= 0x04000000;
if ((instr & 2) == 0)
imm += regs->nip;
if (instr & 1) {
regs->link = regs->nip + 4;
if ((regs->msr & MSR_SF) == 0)
regs->link &= 0xffffffffUL;
}
if ((regs->msr & MSR_SF) == 0)
imm &= 0xffffffffUL;
regs->nip = imm;
return 1;
case 19:
switch (instr & 0x7fe) {
case 0x20: /* bclr */
case 0x420: /* bcctr */
imm = (instr & 0x400)? regs->ctr: regs->link;
regs->nip += 4;
if ((regs->msr & MSR_SF) == 0) {
regs->nip &= 0xffffffffUL;
imm &= 0xffffffffUL;
}
if (instr & 1)
regs->link = regs->nip;
if (branch_taken(instr, regs))
regs->nip = imm;
return 1;
case 0x24: /* rfid, scary */
return -1;
}
case 31:
rd = (instr >> 21) & 0x1f;
switch (instr & 0x7fe) {
case 0xa6: /* mfmsr */
regs->gpr[rd] = regs->msr & MSR_MASK;
regs->nip += 4;
if ((regs->msr & MSR_SF) == 0)
regs->nip &= 0xffffffffUL;
return 1;
case 0x164: /* mtmsrd */
/* only MSR_EE and MSR_RI get changed if bit 15 set */
/* mtmsrd doesn't change MSR_HV and MSR_ME */
imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL;
imm = (regs->msr & MSR_MASK & ~imm)
| (regs->gpr[rd] & imm);
if ((imm & MSR_RI) == 0)
/* can't step mtmsrd that would clear MSR_RI */
return -1;
regs->msr = imm;
regs->nip += 4;
if ((imm & MSR_SF) == 0)
regs->nip &= 0xffffffffUL;
return 1;
}
}
return 0;
}
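/*
 * Hedged sketch of a caller (hypothetical helper): how a trap handler
 * might consume the return convention of emulate_step().
 */
static int try_emulate_step(struct pt_regs *regs, unsigned int instr)
{
	int ret = emulate_step(regs, instr);

	if (ret > 0)
		return 1;	/* emulated; nip (and msr/link/ctr) already updated */
	/*
	 * ret == 0:  not a control-transfer instruction, so the caller must
	 *            single-step it some other way.
	 * ret == -1: must not be stepped at all (rfid, or an mtmsrd that
	 *            would clear MSR_RI).
	 */
	return 0;
}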


@ -0,0 +1,23 @@
#include <linux/ctype.h>
int strcasecmp(const char *s1, const char *s2)
{
int c1, c2;
do {
c1 = tolower(*s1++);
c2 = tolower(*s2++);
} while (c1 == c2 && c1 != 0);
return c1 - c2;
}
int strncasecmp(const char *s1, const char *s2, int n)
{
int c1, c2;
do {
c1 = tolower(*s1++);
c2 = tolower(*s2++);
} while ((--n > 0) && c1 == c2 && c1 != 0);
return c1 - c2;
}

203
arch/powerpc/lib/string.S Normal file

@ -0,0 +1,203 @@
/*
* String handling functions for PowerPC.
*
* Copyright (C) 1996 Paul Mackerras.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
.text
.stabs "arch/powerpc/lib/",N_SO,0,0,0f
.stabs "string.S",N_SO,0,0,0f
0:
.section __ex_table,"a"
#ifdef CONFIG_PPC64
.align 3
#define EXTBL .llong
#else
.align 2
#define EXTBL .long
#endif
.text
_GLOBAL(strcpy)
addi r5,r3,-1
addi r4,r4,-1
1: lbzu r0,1(r4)
cmpwi 0,r0,0
stbu r0,1(r5)
bne 1b
blr
/* This clears out any unused part of the destination buffer,
just as the libc version does. -- paulus */
_GLOBAL(strncpy)
cmpwi 0,r5,0
beqlr
mtctr r5
addi r6,r3,-1
addi r4,r4,-1
1: lbzu r0,1(r4)
cmpwi 0,r0,0
stbu r0,1(r6)
bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */
bnelr /* if we didn't hit a null char, we're done */
mfctr r5
cmpwi 0,r5,0 /* any space left in destination buffer? */
beqlr /* we know r0 == 0 here */
2: stbu r0,1(r6) /* clear it out if so */
bdnz 2b
blr
_GLOBAL(strcat)
addi r5,r3,-1
addi r4,r4,-1
1: lbzu r0,1(r5)
cmpwi 0,r0,0
bne 1b
addi r5,r5,-1
1: lbzu r0,1(r4)
cmpwi 0,r0,0
stbu r0,1(r5)
bne 1b
blr
_GLOBAL(strcmp)
addi r5,r3,-1
addi r4,r4,-1
1: lbzu r3,1(r5)
cmpwi 1,r3,0
lbzu r0,1(r4)
subf. r3,r0,r3
beqlr 1
beq 1b
blr
_GLOBAL(strlen)
addi r4,r3,-1
1: lbzu r0,1(r4)
cmpwi 0,r0,0
bne 1b
subf r3,r3,r4
blr
_GLOBAL(memcmp)
cmpwi 0,r5,0
ble- 2f
mtctr r5
addi r6,r3,-1
addi r4,r4,-1
1: lbzu r3,1(r6)
lbzu r0,1(r4)
subf. r3,r0,r3
bdnzt 2,1b
blr
2: li r3,0
blr
_GLOBAL(memchr)
cmpwi 0,r5,0
ble- 2f
mtctr r5
addi r3,r3,-1
1: lbzu r0,1(r3)
cmpw 0,r0,r4
bdnzf 2,1b
beqlr
2: li r3,0
blr
_GLOBAL(__clear_user)
addi r6,r3,-4
li r3,0
li r5,0
cmplwi 0,r4,4
blt 7f
/* clear a single word */
11: stwu r5,4(r6)
beqlr
/* clear word sized chunks */
andi. r0,r6,3
add r4,r0,r4
subf r6,r0,r6
srwi r0,r4,2
andi. r4,r4,3
mtctr r0
bdz 7f
1: stwu r5,4(r6)
bdnz 1b
/* clear byte sized chunks */
7: cmpwi 0,r4,0
beqlr
mtctr r4
addi r6,r6,3
8: stbu r5,1(r6)
bdnz 8b
blr
90: mr r3,r4
blr
91: mfctr r3
slwi r3,r3,2
add r3,r3,r4
blr
92: mfctr r3
blr
.section __ex_table,"a"
EXTBL 11b,90b
EXTBL 1b,91b
EXTBL 8b,92b
.text
_GLOBAL(__strncpy_from_user)
addi r6,r3,-1
addi r4,r4,-1
cmpwi 0,r5,0
beq 2f
mtctr r5
1: lbzu r0,1(r4)
cmpwi 0,r0,0
stbu r0,1(r6)
bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */
beq 3f
2: addi r6,r6,1
3: subf r3,r3,r6
blr
99: li r3,-EFAULT
blr
.section __ex_table,"a"
EXTBL 1b,99b
.text
/* r3 = str, r4 = len (> 0), r5 = top (highest addr) */
_GLOBAL(__strnlen_user)
addi r7,r3,-1
subf r6,r7,r5 /* top+1 - str */
cmplw 0,r4,r6
bge 0f
mr r6,r4
0: mtctr r6 /* ctr = min(len, top - str) */
1: lbzu r0,1(r7) /* get next byte */
cmpwi 0,r0,0
bdnzf 2,1b /* loop if --ctr != 0 && byte != 0 */
addi r7,r7,1
subf r3,r3,r7 /* number of bytes we have looked at */
beqlr /* return if we found a 0 byte */
cmpw 0,r3,r4 /* did we look at all len bytes? */
blt 99f /* if not, must have hit top */
addi r3,r4,1 /* return len + 1 to indicate no null found */
blr
99: li r3,0 /* bad address, return 0 */
blr
.section __ex_table,"a"
EXTBL 1b,99b


@ -0,0 +1,41 @@
/*
* Functions which are too large to be inlined.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <asm/uaccess.h>
unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (likely(access_ok(VERIFY_READ, from, n)))
n = __copy_from_user(to, from, n);
else
memset(to, 0, n);
return n;
}
unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (likely(access_ok(VERIFY_WRITE, to, n)))
n = __copy_to_user(to, from, n);
return n;
}
unsigned long copy_in_user(void __user *to, const void __user *from,
unsigned long n)
{
might_sleep();
if (likely(access_ok(VERIFY_READ, from, n) &&
access_ok(VERIFY_WRITE, to, n)))
n = __copy_tofrom_user(to, from, n);
return n;
}
EXPORT_SYMBOL(copy_from_user);
EXPORT_SYMBOL(copy_to_user);
EXPORT_SYMBOL(copy_in_user);
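/*
 * Minimal usage sketch (hypothetical caller): as defined above, the
 * return value is the number of bytes that could not be copied, so
 * zero means success.
 */
static int example_fetch(const void __user *ubuf, size_t len)
{
	char kbuf[64];

	if (len > sizeof(kbuf))
		return -EINVAL;
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;
	/* ... use kbuf ... */
	return 0;
}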

120
arch/powerpc/mm/44x_mmu.c Normal file

@ -0,0 +1,120 @@
/*
* Modifications by Matt Porter (mporter@mvista.com) to support
* PPC44x Book E processors.
*
* This file contains the routines for initializing the MMU
* on the 4xx series of chips.
* -- paulus
*
* Derived from arch/ppc/mm/init.c:
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
* Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/bootx.h>
#include <asm/machdep.h>
#include <asm/setup.h>
#include "mmu_decl.h"
extern char etext[], _stext[];
/* Used by the 44x TLB replacement exception handler.
* It just needs to be declared somewhere.
*/
unsigned int tlb_44x_index = 0;
unsigned int tlb_44x_hwater = 62;
/*
* "Pins" a 256MB TLB entry in AS0 for kernel lowmem
*/
static void __init
ppc44x_pin_tlb(int slot, unsigned int virt, unsigned int phys)
{
unsigned long attrib = 0;
__asm__ __volatile__("\
clrrwi %2,%2,10\n\
ori %2,%2,%4\n\
clrrwi %1,%1,10\n\
li %0,0\n\
ori %0,%0,%5\n\
tlbwe %2,%3,%6\n\
tlbwe %1,%3,%7\n\
tlbwe %0,%3,%8"
:
: "r" (attrib), "r" (phys), "r" (virt), "r" (slot),
"i" (PPC44x_TLB_VALID | PPC44x_TLB_256M),
"i" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
"i" (PPC44x_TLB_PAGEID),
"i" (PPC44x_TLB_XLAT),
"i" (PPC44x_TLB_ATTRIB));
}
/*
* MMU_init_hw does the chip-specific initialization of the MMU hardware.
*/
void __init MMU_init_hw(void)
{
flush_instruction_cache();
}
unsigned long __init mmu_mapin_ram(void)
{
unsigned int pinned_tlbs = 1;
int i;
/* Determine number of entries necessary to cover lowmem */
pinned_tlbs = (unsigned int)
(_ALIGN(total_lowmem, PPC44x_PIN_SIZE) >> PPC44x_PIN_SHIFT);
/* Write upper watermark to save location */
tlb_44x_hwater = PPC44x_LOW_SLOT - pinned_tlbs;
/* If necessary, set additional pinned TLBs */
if (pinned_tlbs > 1)
for (i = (PPC44x_LOW_SLOT-(pinned_tlbs-1)); i < PPC44x_LOW_SLOT; i++) {
unsigned int phys_addr = (PPC44x_LOW_SLOT-i) * PPC44x_PIN_SIZE;
ppc44x_pin_tlb(i, phys_addr+PAGE_OFFSET, phys_addr);
}
return total_lowmem;
}
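/*
 * Worked example of the sizing above (hypothetical 512MB of lowmem and
 * the 256MB pin size noted in the ppc44x_pin_tlb() comment):
 *   pinned_tlbs    = _ALIGN(512MB, 256MB) / 256MB = 2
 *   tlb_44x_hwater = PPC44x_LOW_SLOT - 2
 * and the loop pins one additional entry mapping the second 256MB of
 * lowmem at PAGE_OFFSET + 256MB.
 */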

141
arch/powerpc/mm/4xx_mmu.c Normal file

@ -0,0 +1,141 @@
/*
* This file contains the routines for initializing the MMU
* on the 4xx series of chips.
* -- paulus
*
* Derived from arch/ppc/mm/init.c:
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
* Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/bootx.h>
#include <asm/machdep.h>
#include <asm/setup.h>
#include "mmu_decl.h"
extern int __map_without_ltlbs;
/*
* MMU_init_hw does the chip-specific initialization of the MMU hardware.
*/
void __init MMU_init_hw(void)
{
/*
* The Zone Protection Register (ZPR) defines how protection will
* be applied to every page which is a member of a given zone. At
* present, we utilize only two of the 4xx's zones.
* The zone index bits (of ZSEL) in the PTE are used for software
* indicators, except the LSB. For user access, zone 1 is used,
* for kernel access, zone 0 is used. We set all but zone 1
* to zero, allowing only kernel access as indicated in the PTE.
* For zone 1, we set a 01 binary (a value of 10 will not work)
* to allow user access as indicated in the PTE. This also allows
* kernel access as indicated in the PTE.
*/
mtspr(SPRN_ZPR, 0x10000000);
flush_instruction_cache();
/*
* Set up the real-mode cache parameters for the exception vector
* handlers (which are run in real-mode).
*/
mtspr(SPRN_DCWR, 0x00000000); /* All caching is write-back */
/*
* Cache instruction and data space where the exception
* vectors and the kernel live in real-mode.
*/
mtspr(SPRN_DCCR, 0xF0000000); /* 512 MB of data space at 0x0. */
mtspr(SPRN_ICCR, 0xF0000000); /* 512 MB of instr. space at 0x0. */
}
#define LARGE_PAGE_SIZE_16M (1<<24)
#define LARGE_PAGE_SIZE_4M (1<<22)
unsigned long __init mmu_mapin_ram(void)
{
unsigned long v, s;
phys_addr_t p;
v = KERNELBASE;
p = PPC_MEMSTART;
s = 0;
if (__map_without_ltlbs) {
return s;
}
while (s <= (total_lowmem - LARGE_PAGE_SIZE_16M)) {
pmd_t *pmdp;
unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
spin_lock(&init_mm.page_table_lock);
pmdp = pmd_offset(pgd_offset_k(v), v);
pmd_val(*pmdp++) = val;
pmd_val(*pmdp++) = val;
pmd_val(*pmdp++) = val;
pmd_val(*pmdp++) = val;
spin_unlock(&init_mm.page_table_lock);
v += LARGE_PAGE_SIZE_16M;
p += LARGE_PAGE_SIZE_16M;
s += LARGE_PAGE_SIZE_16M;
}
while (s <= (total_lowmem - LARGE_PAGE_SIZE_4M)) {
pmd_t *pmdp;
unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
spin_lock(&init_mm.page_table_lock);
pmdp = pmd_offset(pgd_offset_k(v), v);
pmd_val(*pmdp) = val;
spin_unlock(&init_mm.page_table_lock);
v += LARGE_PAGE_SIZE_4M;
p += LARGE_PAGE_SIZE_4M;
s += LARGE_PAGE_SIZE_4M;
}
return s;
}

12
arch/powerpc/mm/Makefile Normal file

@ -0,0 +1,12 @@
#
# Makefile for the linux ppc-specific parts of the memory manager.
#
obj-y := fault.o mem.o
obj-$(CONFIG_PPC32) += init.o pgtable.o mmu_context.o \
mem_pieces.o tlb.o
obj-$(CONFIG_PPC64) += init64.o pgtable64.o mmu_context64.o
obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu.o hash_32.o
obj-$(CONFIG_40x) += 4xx_mmu.o
obj-$(CONFIG_44x) += 44x_mmu.o
obj-$(CONFIG_FSL_BOOKE) += fsl_booke_mmu.o

391
arch/powerpc/mm/fault.c Normal file

@ -0,0 +1,391 @@
/*
* arch/ppc/mm/fault.c
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Derived from "arch/i386/mm/fault.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* Modified by Cort Dougan and Paul Mackerras.
*
* Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/kdebug.h>
#include <asm/siginfo.h>
/*
* Check whether the instruction at regs->nip is a store using
* an update addressing form which will update r1.
*/
static int store_updates_sp(struct pt_regs *regs)
{
unsigned int inst;
if (get_user(inst, (unsigned int __user *)regs->nip))
return 0;
/* check for 1 in the rA field */
if (((inst >> 16) & 0x1f) != 1)
return 0;
/* check major opcode */
switch (inst >> 26) {
case 37: /* stwu */
case 39: /* stbu */
case 45: /* sthu */
case 53: /* stfsu */
case 55: /* stfdu */
return 1;
case 62: /* std or stdu */
return (inst & 3) == 1;
case 31:
/* check minor opcode */
switch ((inst >> 1) & 0x3ff) {
case 181: /* stdux */
case 183: /* stwux */
case 247: /* stbux */
case 439: /* sthux */
case 695: /* stfsux */
case 759: /* stfdux */
return 1;
}
}
return 0;
}
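/*
 * Worked example: "stwu r1,-16(r1)" encodes as 0x9421fff0, so
 *   inst >> 26          = 37  (major opcode of stwu)
 *   (inst >> 16) & 0x1f = 1   (the rA field is r1)
 * and store_updates_sp() returns 1 for it.
 */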
static void do_dabr(struct pt_regs *regs, unsigned long error_code)
{
siginfo_t info;
if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
11, SIGSEGV) == NOTIFY_STOP)
return;
if (debugger_dabr_match(regs))
return;
/* Clear the DABR */
set_dabr(0);
/* Deliver the signal to userspace */
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_HWBKPT;
info.si_addr = (void __user *)regs->nip;
force_sig_info(SIGTRAP, &info, current);
}
/*
* For 600- and 800-family processors, the error_code parameter is DSISR
* for a data fault, SRR1 for an instruction fault. For 400-family processors
* the error_code parameter is ESR for a data fault, 0 for an instruction
* fault.
* For 64-bit processors, the error_code parameter is
* - DSISR for a non-SLB data access fault,
* - SRR1 & 0x08000000 for a non-SLB instruction access fault
* - 0 any SLB fault.
*
* The return value is 0 if the fault was handled, or the signal
* number if this is a kernel fault that can't be handled here.
*/
int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code)
{
struct vm_area_struct * vma;
struct mm_struct *mm = current->mm;
siginfo_t info;
int code = SEGV_MAPERR;
int is_write = 0;
int trap = TRAP(regs);
int is_exec = trap == 0x400;
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/*
* Fortunately the bit assignments in SRR1 for an instruction
* fault and DSISR for a data fault are mostly the same for the
* bits we are interested in. But there are some bits which
* indicate errors in DSISR but can validly be set in SRR1.
*/
if (trap == 0x400)
error_code &= 0x48200000;
else
is_write = error_code & DSISR_ISSTORE;
#else
is_write = error_code & ESR_DST;
#endif /* CONFIG_4xx || CONFIG_BOOKE */
if (notify_die(DIE_PAGE_FAULT, "page_fault", regs, error_code,
11, SIGSEGV) == NOTIFY_STOP)
return 0;
if (trap == 0x300) {
if (debugger_fault_handler(regs))
return 0;
}
/* On a kernel SLB miss we can only check for a valid exception entry */
if (!user_mode(regs) && (address >= TASK_SIZE))
return SIGSEGV;
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
if (error_code & DSISR_DABRMATCH) {
/* DABR match */
do_dabr(regs, error_code);
return 0;
}
#endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/
if (in_atomic() || mm == NULL) {
if (!user_mode(regs))
return SIGSEGV;
/* in_atomic() in user mode is really bad,
as is current->mm == NULL. */
printk(KERN_EMERG "Page fault in user mode with"
"in_atomic() = %d mm = %p\n", in_atomic(), mm);
printk(KERN_EMERG "NIP = %lx MSR = %lx\n",
regs->nip, regs->msr);
die("Weird page fault", regs, SIGSEGV);
}
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
* kernel and should generate an OOPS. Unfortunately, in the case of an
* erroneous fault occurring in a code path which already holds mmap_sem
* we will deadlock attempting to validate the fault against the
* address space. Luckily the kernel only validly references user
* space from well defined areas of code, which are listed in the
* exceptions table.
*
* As the vast majority of faults will be valid we will only perform
* the source reference check when there is a possibility of a deadlock.
* Attempt to lock the address space, if we cannot we then validate the
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
*/
if (!down_read_trylock(&mm->mmap_sem)) {
if (!user_mode(regs) && !search_exception_tables(regs->nip))
goto bad_area_nosemaphore;
down_read(&mm->mmap_sem);
}
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
/*
* N.B. The POWER/Open ABI allows programs to access up to
* 288 bytes below the stack pointer.
* The kernel signal delivery code writes up to about 1.5kB
* below the stack pointer (r1) before decrementing it.
* The exec code can write slightly over 640kB to the stack
* before setting the user r1. Thus we allow the stack to
* expand to 1MB without further checks.
*/
if (address + 0x100000 < vma->vm_end) {
/* get user regs even if this fault is in kernel mode */
struct pt_regs *uregs = current->thread.regs;
if (uregs == NULL)
goto bad_area;
/*
* A user-mode access to an address a long way below
* the stack pointer is only valid if the instruction
* is one which would update the stack pointer to the
* address accessed if the instruction completed,
* i.e. either stwu rs,n(r1) or stwux rs,r1,rb
* (or the byte, halfword, float or double forms).
*
* If we don't check this then any write to the area
* between the last mapped region and the stack will
* expand the stack rather than segfaulting.
*/
if (address + 2048 < uregs->gpr[1]
&& (!user_mode(regs) || !store_updates_sp(regs)))
goto bad_area;
}
if (expand_stack(vma, address))
goto bad_area;
good_area:
code = SEGV_ACCERR;
#if defined(CONFIG_6xx)
if (error_code & 0x95700000)
/* an error such as lwarx to I/O controller space,
address matching DABR, eciwx, etc. */
goto bad_area;
#endif /* CONFIG_6xx */
#if defined(CONFIG_8xx)
/* The MPC8xx seems to always set 0x80000000, which is
* "undefined". Of those that can be set, this is the only
* one which seems bad.
*/
if (error_code & 0x10000000)
/* Guarded storage error. */
goto bad_area;
#endif /* CONFIG_8xx */
if (is_exec) {
#ifdef CONFIG_PPC64
/* protection fault */
if (error_code & DSISR_PROTFAULT)
goto bad_area;
if (!(vma->vm_flags & VM_EXEC))
goto bad_area;
#endif
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
pte_t *ptep;
/* Since 4xx/Book-E supports per-page execute permission,
* we lazily flush dcache to icache. */
ptep = NULL;
if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
struct page *page = pte_page(*ptep);
if (! test_bit(PG_arch_1, &page->flags)) {
flush_dcache_icache_page(page);
set_bit(PG_arch_1, &page->flags);
}
pte_update(ptep, 0, _PAGE_HWEXEC);
_tlbie(address);
pte_unmap(ptep);
up_read(&mm->mmap_sem);
return 0;
}
if (ptep != NULL)
pte_unmap(ptep);
#endif
/* a write */
} else if (is_write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
/* a read */
} else {
/* protection fault */
if (error_code & 0x08000000)
goto bad_area;
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
survive:
switch (handle_mm_fault(mm, vma, address, is_write)) {
case VM_FAULT_MINOR:
current->min_flt++;
break;
case VM_FAULT_MAJOR:
current->maj_flt++;
break;
case VM_FAULT_SIGBUS:
goto do_sigbus;
case VM_FAULT_OOM:
goto out_of_memory;
default:
BUG();
}
up_read(&mm->mmap_sem);
return 0;
bad_area:
up_read(&mm->mmap_sem);
bad_area_nosemaphore:
/* User mode accesses cause a SIGSEGV */
if (user_mode(regs)) {
_exception(SIGSEGV, regs, code, address);
return 0;
}
if (is_exec && (error_code & DSISR_PROTFAULT)
&& printk_ratelimit())
printk(KERN_CRIT "kernel tried to execute NX-protected"
" page (%lx) - exploit attempt? (uid: %d)\n",
address, current->uid);
return SIGSEGV;
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
up_read(&mm->mmap_sem);
if (current->pid == 1) {
yield();
down_read(&mm->mmap_sem);
goto survive;
}
printk("VM: killing process %s\n", current->comm);
if (user_mode(regs))
do_exit(SIGKILL);
return SIGKILL;
do_sigbus:
up_read(&mm->mmap_sem);
if (user_mode(regs)) {
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void __user *)address;
force_sig_info(SIGBUS, &info, current);
return 0;
}
return SIGBUS;
}
/*
* bad_page_fault is called when we have a bad access from the kernel.
* It is called from the DSI and ISI handlers in head.S and from some
* of the procedures in traps.c.
*/
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
const struct exception_table_entry *entry;
/* Are we prepared to handle this fault? */
if ((entry = search_exception_tables(regs->nip)) != NULL) {
regs->nip = entry->fixup;
return;
}
/* kernel has accessed a bad area */
die("Kernel access of bad area", regs, sig);
}


@ -0,0 +1,237 @@
/*
* Modifications by Kumar Gala (kumar.gala@freescale.com) to support
* E500 Book E processors.
*
* Copyright 2004 Freescale Semiconductor, Inc
*
* This file contains the routines for initializing the MMU
* on the 4xx series of chips.
* -- paulus
*
* Derived from arch/ppc/mm/init.c:
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
* Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/bootx.h>
#include <asm/machdep.h>
#include <asm/setup.h>
extern void loadcam_entry(unsigned int index);
unsigned int tlbcam_index;
unsigned int num_tlbcam_entries;
static unsigned long __cam0, __cam1, __cam2;
extern unsigned long total_lowmem;
extern unsigned long __max_low_memory;
#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE
#define NUM_TLBCAMS (16)
struct tlbcam {
u32 MAS0;
u32 MAS1;
u32 MAS2;
u32 MAS3;
u32 MAS7;
} TLBCAM[NUM_TLBCAMS];
struct tlbcamrange {
unsigned long start;
unsigned long limit;
phys_addr_t phys;
} tlbcam_addrs[NUM_TLBCAMS];
extern unsigned int tlbcam_index;
/*
* Return PA for this VA if it is mapped by a CAM, or 0
*/
unsigned long v_mapped_by_tlbcam(unsigned long va)
{
int b;
for (b = 0; b < tlbcam_index; ++b)
if (va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit)
return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start);
return 0;
}
/*
* Return VA for a given PA or 0 if not mapped
*/
unsigned long p_mapped_by_tlbcam(unsigned long pa)
{
int b;
for (b = 0; b < tlbcam_index; ++b)
if (pa >= tlbcam_addrs[b].phys
&& pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start)
+tlbcam_addrs[b].phys)
return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys);
return 0;
}
/*
* Set up a TLB CAM entry (a pinned, large-page translation).
* The parameters are not checked; in particular size must be a power
* of 4 between 4k and 256M.
*/
void settlbcam(int index, unsigned long virt, phys_addr_t phys,
unsigned int size, int flags, unsigned int pid)
{
unsigned int tsize, lz;
asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size));
tsize = (21 - lz) / 2;
#ifdef CONFIG_SMP
if ((flags & _PAGE_NO_CACHE) == 0)
flags |= _PAGE_COHERENT;
#endif
TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index) | MAS0_NV(index+1);
TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | MAS1_TID(pid);
TLBCAM[index].MAS2 = virt & PAGE_MASK;
TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0;
TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0;
TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0;
TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0;
TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;
TLBCAM[index].MAS3 = (phys & PAGE_MASK) | MAS3_SX | MAS3_SR;
TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_SW : 0);
#ifndef CONFIG_KGDB /* want user access for breakpoints */
if (flags & _PAGE_USER) {
TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
}
#else
TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
#endif
tlbcam_addrs[index].start = virt;
tlbcam_addrs[index].limit = virt + size - 1;
tlbcam_addrs[index].phys = phys;
loadcam_entry(index);
}
void invalidate_tlbcam_entry(int index)
{
TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index);
TLBCAM[index].MAS1 = ~MAS1_VALID;
loadcam_entry(index);
}
void __init cam_mapin_ram(unsigned long cam0, unsigned long cam1,
unsigned long cam2)
{
settlbcam(0, KERNELBASE, PPC_MEMSTART, cam0, _PAGE_KERNEL, 0);
tlbcam_index++;
if (cam1) {
tlbcam_index++;
settlbcam(1, KERNELBASE+cam0, PPC_MEMSTART+cam0, cam1, _PAGE_KERNEL, 0);
}
if (cam2) {
tlbcam_index++;
settlbcam(2, KERNELBASE+cam0+cam1, PPC_MEMSTART+cam0+cam1, cam2, _PAGE_KERNEL, 0);
}
}
/*
* MMU_init_hw does the chip-specific initialization of the MMU hardware.
*/
void __init MMU_init_hw(void)
{
flush_instruction_cache();
}
unsigned long __init mmu_mapin_ram(void)
{
cam_mapin_ram(__cam0, __cam1, __cam2);
return __cam0 + __cam1 + __cam2;
}
void __init
adjust_total_lowmem(void)
{
unsigned long max_low_mem = MAX_LOW_MEM;
unsigned long cam_max = 0x10000000;
unsigned long ram;
/* adjust CAM size to max_low_mem */
if (max_low_mem < cam_max)
cam_max = max_low_mem;
/* adjust lowmem size to max_low_mem */
if (max_low_mem < total_lowmem)
ram = max_low_mem;
else
ram = total_lowmem;
/* Calculate CAM values */
__cam0 = 1UL << 2 * (__ilog2(ram) / 2);
if (__cam0 > cam_max)
__cam0 = cam_max;
ram -= __cam0;
if (ram) {
__cam1 = 1UL << 2 * (__ilog2(ram) / 2);
if (__cam1 > cam_max)
__cam1 = cam_max;
ram -= __cam1;
}
if (ram) {
__cam2 = 1UL << 2 * (__ilog2(ram) / 2);
if (__cam2 > cam_max)
__cam2 = cam_max;
ram -= __cam2;
}
printk(KERN_INFO "Memory CAM mapping: CAM0=%ldMb, CAM1=%ldMb,"
" CAM2=%ldMb residual: %ldMb\n",
__cam0 >> 20, __cam1 >> 20, __cam2 >> 20,
(total_lowmem - __cam0 - __cam1 - __cam2) >> 20);
__max_low_memory = max_low_mem = __cam0 + __cam1 + __cam2;
}
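/*
 * Worked example of the power-of-4 CAM sizing above (hypothetical 192MB
 * of lowmem, cam_max left at its 256MB default):
 *   ram = 192MB: __ilog2 = 27, __cam0 = 1 << (2 * (27 / 2)) = 64MB
 *   ram = 128MB: __cam1 = 64MB
 *   ram =  64MB: __cam2 = 64MB
 * so the three CAM entries cover all 192MB and the printed residual is 0.
 */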

618
arch/powerpc/mm/hash_32.S Normal file

@ -0,0 +1,618 @@
/*
* arch/ppc/kernel/hashtable.S
*
* $Id: hashtable.S,v 1.6 1999/10/08 01:56:15 paulus Exp $
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
* Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
* Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
* Adapted for Power Macintosh by Paul Mackerras.
* Low-level exception handlers and MMU support
* rewritten by Paul Mackerras.
* Copyright (C) 1996 Paul Mackerras.
*
* This file contains low-level assembler routines for managing
* the PowerPC MMU hash table. (PPC 8xx processors don't use a
* hash table, so this file is not used on them.)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#ifdef CONFIG_SMP
.comm mmu_hash_lock,4
#endif /* CONFIG_SMP */
/*
* Sync CPUs with hash_page taking & releasing the hash
* table lock
*/
#ifdef CONFIG_SMP
.text
_GLOBAL(hash_page_sync)
lis r8,mmu_hash_lock@h
ori r8,r8,mmu_hash_lock@l
lis r0,0x0fff
b 10f
11: lwz r6,0(r8)
cmpwi 0,r6,0
bne 11b
10: lwarx r6,0,r8
cmpwi 0,r6,0
bne- 11b
stwcx. r0,0,r8
bne- 10b
isync
eieio
li r0,0
stw r0,0(r8)
blr
#endif
/*
* Load a PTE into the hash table, if possible.
* The address is in r4, and r3 contains an access flag:
* _PAGE_RW (0x400) if a write.
* r9 contains the SRR1 value, from which we use the MSR_PR bit.
* SPRG3 contains the physical address of the current task's thread.
*
* Returns to the caller if the access is illegal or there is no
* mapping for the address. Otherwise it places an appropriate PTE
* in the hash table and returns from the exception.
* Uses r0, r3 - r8, ctr, lr.
*/
.text
_GLOBAL(hash_page)
#ifdef CONFIG_PPC64BRIDGE
mfmsr r0
clrldi r0,r0,1 /* make sure it's in 32-bit mode */
MTMSRD(r0)
isync
#endif
tophys(r7,0) /* gets -KERNELBASE into r7 */
#ifdef CONFIG_SMP
addis r8,r7,mmu_hash_lock@h
ori r8,r8,mmu_hash_lock@l
lis r0,0x0fff
b 10f
11: lwz r6,0(r8)
cmpwi 0,r6,0
bne 11b
10: lwarx r6,0,r8
cmpwi 0,r6,0
bne- 11b
stwcx. r0,0,r8
bne- 10b
isync
#endif
/* Get PTE (linux-style) and check access */
lis r0,KERNELBASE@h /* check if kernel address */
cmplw 0,r4,r0
mfspr r8,SPRN_SPRG3 /* current task's THREAD (phys) */
ori r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
lwz r5,PGDIR(r8) /* virt page-table root */
blt+ 112f /* assume user more likely */
lis r5,swapper_pg_dir@ha /* if kernel address, use */
addi r5,r5,swapper_pg_dir@l /* kernel page table */
rlwimi r3,r9,32-12,29,29 /* MSR_PR -> _PAGE_USER */
112: add r5,r5,r7 /* convert to phys addr */
rlwimi r5,r4,12,20,29 /* insert top 10 bits of address */
lwz r8,0(r5) /* get pmd entry */
rlwinm. r8,r8,0,0,19 /* extract address of pte page */
#ifdef CONFIG_SMP
beq- hash_page_out /* return if no mapping */
#else
/* XXX it seems like the 601 will give a machine fault on the
rfi if its alignment is wrong (bottom 4 bits of address are
8 or 0xc) and we have had a not-taken conditional branch
to the address following the rfi. */
beqlr-
#endif
rlwimi r8,r4,22,20,29 /* insert next 10 bits of address */
rlwinm r0,r3,32-3,24,24 /* _PAGE_RW access -> _PAGE_DIRTY */
ori r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
/*
* Update the linux PTE atomically. We do the lwarx up-front
* because almost always, there won't be a permission violation
* and there won't already be an HPTE, and thus we will have
* to update the PTE to set _PAGE_HASHPTE. -- paulus.
*/
retry:
lwarx r6,0,r8 /* get linux-style pte */
andc. r5,r3,r6 /* check access & ~permission */
#ifdef CONFIG_SMP
bne- hash_page_out /* return if access not permitted */
#else
bnelr-
#endif
or r5,r0,r6 /* set accessed/dirty bits */
stwcx. r5,0,r8 /* attempt to update PTE */
bne- retry /* retry if someone got there first */
mfsrin r3,r4 /* get segment reg for segment */
mfctr r0
stw r0,_CTR(r11)
bl create_hpte /* add the hash table entry */
#ifdef CONFIG_SMP
eieio
addis r8,r7,mmu_hash_lock@ha
li r0,0
stw r0,mmu_hash_lock@l(r8)
#endif
/* Return from the exception */
lwz r5,_CTR(r11)
mtctr r5
lwz r0,GPR0(r11)
lwz r7,GPR7(r11)
lwz r8,GPR8(r11)
b fast_exception_return
#ifdef CONFIG_SMP
hash_page_out:
eieio
addis r8,r7,mmu_hash_lock@ha
li r0,0
stw r0,mmu_hash_lock@l(r8)
blr
#endif /* CONFIG_SMP */
/*
* Add an entry for a particular page to the hash table.
*
* add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
*
* We assume any necessary modifications to the pte (e.g. setting
* the accessed bit) have already been done and that there is actually
* a hash table in use (i.e. we're not on a 603).
*/
_GLOBAL(add_hash_page)
mflr r0
stw r0,4(r1)
/* Convert context and va to VSID */
mulli r3,r3,897*16 /* multiply context by context skew */
rlwinm r0,r4,4,28,31 /* get ESID (top 4 bits of va) */
mulli r0,r0,0x111 /* multiply by ESID skew */
add r3,r3,r0 /* note create_hpte trims to 24 bits */
#ifdef CONFIG_SMP
rlwinm r8,r1,0,0,18 /* use cpu number to make tag */
lwz r8,TI_CPU(r8) /* to go in mmu_hash_lock */
oris r8,r8,12
#endif /* CONFIG_SMP */
/*
* We disable interrupts here, even on UP, because we don't
* want to race with hash_page, and because we want the
* _PAGE_HASHPTE bit to be a reliable indication of whether
* the HPTE exists (or at least whether one did once).
* We also turn off the MMU for data accesses so that we
* can't take a hash table miss (assuming the code is
* covered by a BAT). -- paulus
*/
mfmsr r10
SYNC
rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
rlwinm r0,r0,0,28,26 /* clear MSR_DR */
mtmsr r0
SYNC_601
isync
tophys(r7,0)
#ifdef CONFIG_SMP
addis r9,r7,mmu_hash_lock@ha
addi r9,r9,mmu_hash_lock@l
10: lwarx r0,0,r9 /* take the mmu_hash_lock */
cmpi 0,r0,0
bne- 11f
stwcx. r8,0,r9
beq+ 12f
11: lwz r0,0(r9)
cmpi 0,r0,0
beq 10b
b 11b
12: isync
#endif
/*
* Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
* If _PAGE_HASHPTE was already set, we don't replace the existing
* HPTE, so we just unlock and return.
*/
mr r8,r5
rlwimi r8,r4,22,20,29
1: lwarx r6,0,r8
andi. r0,r6,_PAGE_HASHPTE
bne 9f /* if HASHPTE already set, done */
ori r5,r6,_PAGE_HASHPTE
stwcx. r5,0,r8
bne- 1b
bl create_hpte
9:
#ifdef CONFIG_SMP
eieio
li r0,0
stw r0,0(r9) /* clear mmu_hash_lock */
#endif
/* reenable interrupts and DR */
mtmsr r10
SYNC_601
isync
lwz r0,4(r1)
mtlr r0
blr
/*
* This routine adds a hardware PTE to the hash table.
* It is designed to be called with the MMU either on or off.
* r3 contains the VSID, r4 contains the virtual address,
* r5 contains the linux PTE, r6 contains the old value of the
* linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
* offset to be added to addresses (0 if the MMU is on,
* -KERNELBASE if it is off).
* On SMP, the caller should have the mmu_hash_lock held.
* We assume that the caller has (or will) set the _PAGE_HASHPTE
* bit in the linux PTE in memory. The value passed in r6 should
* be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
* this routine will skip the search for an existing HPTE.
* This procedure modifies r0, r3 - r6, r8, cr0.
* -- paulus.
*
* For speed, 4 of the instructions get patched once the size and
* physical address of the hash table are known. These definitions
* of Hash_base and Hash_bits below are just an example.
*/
Hash_base = 0xc0180000
Hash_bits = 12 /* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)
#ifndef CONFIG_PPC64BRIDGE
/* defines for the PTE format for 32-bit PPCs */
#define PTE_SIZE 8
#define PTEG_SIZE 64
#define LG_PTEG_SIZE 6
#define LDPTEu lwzu
#define STPTE stw
#define CMPPTE cmpw
#define PTE_H 0x40
#define PTE_V 0x80000000
#define TST_V(r) rlwinm. r,r,0,0,0
#define SET_V(r) oris r,r,PTE_V@h
#define CLR_V(r,t) rlwinm r,r,0,1,31
#else
/* defines for the PTE format for 64-bit PPCs */
#define PTE_SIZE 16
#define PTEG_SIZE 128
#define LG_PTEG_SIZE 7
#define LDPTEu ldu
#define STPTE std
#define CMPPTE cmpd
#define PTE_H 2
#define PTE_V 1
#define TST_V(r) andi. r,r,PTE_V
#define SET_V(r) ori r,r,PTE_V
#define CLR_V(r,t) li t,PTE_V; andc r,r,t
#endif /* CONFIG_PPC64BRIDGE */
#define HASH_LEFT 31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT 31-LG_PTEG_SIZE
_GLOBAL(create_hpte)
/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
rlwinm r8,r5,32-10,31,31 /* _PAGE_RW -> PP lsb */
rlwinm r0,r5,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */
and r8,r8,r0 /* writable if _RW & _DIRTY */
rlwimi r5,r5,32-1,30,30 /* _PAGE_USER -> PP msb */
rlwimi r5,r5,32-2,31,31 /* _PAGE_USER -> PP lsb */
ori r8,r8,0xe14 /* clear out reserved bits and M */
andc r8,r5,r8 /* PP = user? (rw&dirty? 2: 3): 0 */
BEGIN_FTR_SECTION
ori r8,r8,_PAGE_COHERENT /* set M (coherence required) */
END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
/* Construct the high word of the PPC-style PTE (r5) */
#ifndef CONFIG_PPC64BRIDGE
rlwinm r5,r3,7,1,24 /* put VSID in 0x7fffff80 bits */
rlwimi r5,r4,10,26,31 /* put in API (abbrev page index) */
#else /* CONFIG_PPC64BRIDGE */
clrlwi r3,r3,8 /* reduce vsid to 24 bits */
sldi r5,r3,12 /* shift vsid into position */
rlwimi r5,r4,16,20,24 /* put in API (abbrev page index) */
#endif /* CONFIG_PPC64BRIDGE */
SET_V(r5) /* set V (valid) bit */
/* Get the address of the primary PTE group in the hash table (r3) */
_GLOBAL(hash_page_patch_A)
addis r0,r7,Hash_base@h /* base address of hash table */
rlwimi r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
rlwinm r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
xor r3,r3,r0 /* make primary hash */
li r0,8 /* PTEs/group */
/*
* Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
* if it is clear, meaning that the HPTE isn't there already...
*/
andi. r6,r6,_PAGE_HASHPTE
beq+ 10f /* no PTE: go look for an empty slot */
tlbie r4
addis r4,r7,htab_hash_searches@ha
lwz r6,htab_hash_searches@l(r4)
addi r6,r6,1 /* count how many searches we do */
stw r6,htab_hash_searches@l(r4)
/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
mtctr r0
addi r4,r3,-PTE_SIZE
1: LDPTEu r6,PTE_SIZE(r4) /* get next PTE */
CMPPTE 0,r6,r5
bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
beq+ found_slot
/* Search the secondary PTEG for a matching PTE */
ori r5,r5,PTE_H /* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_B)
xoris r4,r3,Hash_msk>>16 /* compute secondary hash */
xori r4,r4,(-PTEG_SIZE & 0xffff)
addi r4,r4,-PTE_SIZE
mtctr r0
2: LDPTEu r6,PTE_SIZE(r4)
CMPPTE 0,r6,r5
bdnzf 2,2b
beq+ found_slot
xori r5,r5,PTE_H /* clear H bit again */
/* Search the primary PTEG for an empty slot */
10: mtctr r0
addi r4,r3,-PTE_SIZE /* search primary PTEG */
1: LDPTEu r6,PTE_SIZE(r4) /* get next PTE */
TST_V(r6) /* test valid bit */
bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
beq+ found_empty
/* update counter of times that the primary PTEG is full */
addis r4,r7,primary_pteg_full@ha
lwz r6,primary_pteg_full@l(r4)
addi r6,r6,1
stw r6,primary_pteg_full@l(r4)
/* Search the secondary PTEG for an empty slot */
ori r5,r5,PTE_H /* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_C)
xoris r4,r3,Hash_msk>>16 /* compute secondary hash */
xori r4,r4,(-PTEG_SIZE & 0xffff)
addi r4,r4,-PTE_SIZE
mtctr r0
2: LDPTEu r6,PTE_SIZE(r4)
TST_V(r6)
bdnzf 2,2b
beq+ found_empty
xori r5,r5,PTE_H /* clear H bit again */
/*
* Choose an arbitrary slot in the primary PTEG to overwrite.
* Since both the primary and secondary PTEGs are full, and we
* have no information that the PTEs in the primary PTEG are
* more important or useful than those in the secondary PTEG,
* and we know there is a definite (although small) speed
* advantage to putting the PTE in the primary PTEG, we always
* put the PTE in the primary PTEG.
*/
addis r4,r7,next_slot@ha
lwz r6,next_slot@l(r4)
addi r6,r6,PTE_SIZE
andi. r6,r6,7*PTE_SIZE
stw r6,next_slot@l(r4)
add r4,r3,r6
#ifndef CONFIG_SMP
/* Store PTE in PTEG */
found_empty:
STPTE r5,0(r4)
found_slot:
STPTE r8,PTE_SIZE/2(r4)
#else /* CONFIG_SMP */
/*
* Between the tlbie above and updating the hash table entry below,
* another CPU could read the hash table entry and put it in its TLB.
* There are 3 cases:
* 1. using an empty slot
* 2. updating an earlier entry to change permissions (i.e. enable write)
* 3. taking over the PTE for an unrelated address
*
* In each case it doesn't really matter if the other CPUs have the old
* PTE in their TLB. So we don't need to bother with another tlbie here,
* which is convenient as we've overwritten the register that had the
* address. :-) The tlbie above is mainly to make sure that this CPU comes
* and gets the new PTE from the hash table.
*
* We do however have to make sure that the PTE is never in an invalid
* state with the V bit set.
*/
found_empty:
found_slot:
CLR_V(r5,r0) /* clear V (valid) bit in PTE */
STPTE r5,0(r4)
sync
TLBSYNC
STPTE r8,PTE_SIZE/2(r4) /* put in correct RPN, WIMG, PP bits */
sync
SET_V(r5)
STPTE r5,0(r4) /* finally set V bit in PTE */
#endif /* CONFIG_SMP */
sync /* make sure pte updates get to memory */
blr
.comm next_slot,4
.comm primary_pteg_full,4
.comm htab_hash_searches,4
/*
* Flush the entry for a particular page from the hash table.
*
* flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
* int count)
*
* We assume that there is a hash table in use (Hash != 0).
*/
_GLOBAL(flush_hash_pages)
tophys(r7,0)
/*
* We disable interrupts here, even on UP, because we want
* the _PAGE_HASHPTE bit to be a reliable indication of
* whether the HPTE exists (or at least whether one did once).
* We also turn off the MMU for data accesses so that we
* can't take a hash table miss (assuming the code is
* covered by a BAT). -- paulus
*/
mfmsr r10
SYNC
rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
rlwinm r0,r0,0,28,26 /* clear MSR_DR */
mtmsr r0
SYNC_601
isync
/* First find a PTE in the range that has _PAGE_HASHPTE set */
rlwimi r5,r4,22,20,29
1: lwz r0,0(r5)
cmpwi cr1,r6,1
andi. r0,r0,_PAGE_HASHPTE
bne 2f
ble cr1,19f
addi r4,r4,0x1000
addi r5,r5,4
addi r6,r6,-1
b 1b
/* Convert context and va to VSID */
2: mulli r3,r3,897*16 /* multiply context by context skew */
rlwinm r0,r4,4,28,31 /* get ESID (top 4 bits of va) */
mulli r0,r0,0x111 /* multiply by ESID skew */
add r3,r3,r0 /* note code below trims to 24 bits */
/* Construct the high word of the PPC-style PTE (r11) */
#ifndef CONFIG_PPC64BRIDGE
rlwinm r11,r3,7,1,24 /* put VSID in 0x7fffff80 bits */
rlwimi r11,r4,10,26,31 /* put in API (abbrev page index) */
#else /* CONFIG_PPC64BRIDGE */
clrlwi r3,r3,8 /* reduce vsid to 24 bits */
sldi r11,r3,12 /* shift vsid into position */
rlwimi r11,r4,16,20,24 /* put in API (abbrev page index) */
#endif /* CONFIG_PPC64BRIDGE */
SET_V(r11) /* set V (valid) bit */
#ifdef CONFIG_SMP
addis r9,r7,mmu_hash_lock@ha
addi r9,r9,mmu_hash_lock@l
rlwinm r8,r1,0,0,18
add r8,r8,r7
lwz r8,TI_CPU(r8)
oris r8,r8,9
10: lwarx r0,0,r9
cmpi 0,r0,0
bne- 11f
stwcx. r8,0,r9
beq+ 12f
11: lwz r0,0(r9)
cmpi 0,r0,0
beq 10b
b 11b
12: isync
#endif
/*
* Check the _PAGE_HASHPTE bit in the linux PTE. If it is
* already clear, we're done (for this pte). If not,
* clear it (atomically) and proceed. -- paulus.
*/
33: lwarx r8,0,r5 /* fetch the pte */
andi. r0,r8,_PAGE_HASHPTE
beq 8f /* done if HASHPTE is already clear */
rlwinm r8,r8,0,31,29 /* clear HASHPTE bit */
stwcx. r8,0,r5 /* update the pte */
bne- 33b
/* Get the address of the primary PTE group in the hash table (r3) */
_GLOBAL(flush_hash_patch_A)
addis r8,r7,Hash_base@h /* base address of hash table */
rlwimi r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
rlwinm r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
xor r8,r0,r8 /* make primary hash */
/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
li r0,8 /* PTEs/group */
mtctr r0
addi r12,r8,-PTE_SIZE
1: LDPTEu r0,PTE_SIZE(r12) /* get next PTE */
CMPPTE 0,r0,r11
bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
beq+ 3f
/* Search the secondary PTEG for a matching PTE */
ori r11,r11,PTE_H /* set H (secondary hash) bit */
li r0,8 /* PTEs/group */
_GLOBAL(flush_hash_patch_B)
xoris r12,r8,Hash_msk>>16 /* compute secondary hash */
xori r12,r12,(-PTEG_SIZE & 0xffff)
addi r12,r12,-PTE_SIZE
mtctr r0
2: LDPTEu r0,PTE_SIZE(r12)
CMPPTE 0,r0,r11
bdnzf 2,2b
xori r11,r11,PTE_H /* clear H again */
bne- 4f /* should rarely fail to find it */
3: li r0,0
STPTE r0,0(r12) /* invalidate entry */
4: sync
tlbie r4 /* in hw tlb too */
sync
8: ble cr1,9f /* if all ptes checked */
81: addi r6,r6,-1
addi r5,r5,4 /* advance to next pte */
addi r4,r4,0x1000
lwz r0,0(r5) /* check next pte */
cmpwi cr1,r6,1
andi. r0,r0,_PAGE_HASHPTE
bne 33b
bgt cr1,81b
9:
#ifdef CONFIG_SMP
TLBSYNC
li r0,0
stw r0,0(r9) /* clear mmu_hash_lock */
#endif
19: mtmsr r10
SYNC_601
isync
blr
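/*
 * The context-to-VSID step at label 2 above corresponds roughly to
 * this C expression (the constants are the ones loaded by the mulli
 * instructions; the variable names are illustrative only):
 *
 *	vsid = (context * 897 * 16) + (((va >> 28) & 0xf) * 0x111);
 *
 * i.e. the context is skewed by 897*16 and the 4-bit ESID by 0x111;
 * on PPC64BRIDGE only the low 24 bits of the result are then used.
 */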

581
arch/powerpc/mm/init.c Normal file

@ -0,0 +1,581 @@
/*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
* Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
* PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/bootinfo.h>
#include <asm/prom.h>
#include "mem_pieces.h"
#include "mmu_decl.h"
#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - KERNELBASE))
#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL"
#endif
#endif
#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long total_memory;
unsigned long total_lowmem;
unsigned long ppc_memstart;
unsigned long ppc_memoffset = PAGE_OFFSET;
int mem_init_done;
int init_bootmem_done;
int boot_mapsize;
#ifdef CONFIG_PPC_PMAC
unsigned long agp_special_page;
#endif
extern char _end[];
extern char etext[], _stext[];
extern char __init_begin, __init_end;
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);
#endif
void MMU_init(void);
void set_phys_avail(unsigned long total_ram);
/* XXX should be in current.h -- paulus */
extern struct task_struct *current_set[NR_CPUS];
char *klimit = _end;
struct mem_pieces phys_avail;
struct device_node *memory_node;
/*
* this tells the system to map all of ram with the segregs
* (i.e. page tables) instead of the bats.
* -- Cort
*/
int __map_without_bats;
int __map_without_ltlbs;
/* max amount of RAM to use */
unsigned long __max_memory;
/* max amount of low RAM to map in */
unsigned long __max_low_memory = MAX_LOW_MEM;
/*
* Read in a property describing some pieces of memory.
*/
static int __init get_mem_prop(char *name, struct mem_pieces *mp)
{
struct reg_property *rp;
int i, s;
unsigned int *ip;
int nac = prom_n_addr_cells(memory_node);
int nsc = prom_n_size_cells(memory_node);
ip = (unsigned int *) get_property(memory_node, name, &s);
if (ip == NULL) {
printk(KERN_ERR "error: couldn't get %s property on /memory\n",
name);
return 0;
}
s /= (nsc + nac) * 4;
rp = mp->regions;
for (i = 0; i < s; ++i, ip += nac+nsc) {
if (nac >= 2 && ip[nac-2] != 0)
continue;
rp->address = ip[nac-1];
if (nsc >= 2 && ip[nac+nsc-2] != 0)
rp->size = ~0U;
else
rp->size = ip[nac+nsc-1];
++rp;
}
mp->n_regions = rp - mp->regions;
/* Make sure the pieces are sorted. */
mem_pieces_sort(mp);
mem_pieces_coalesce(mp);
return 1;
}
/*
* Collect information about physical RAM and which pieces are
* already in use from the device tree.
*/
unsigned long __init find_end_of_memory(void)
{
unsigned long a, total;
struct mem_pieces phys_mem;
/*
* Find out where physical memory is, and check that it
* starts at 0 and is contiguous. It seems that RAM is
* always physically contiguous on Power Macintoshes.
*
* Supporting discontiguous physical memory isn't hard,
* it just makes the virtual <-> physical mapping functions
* more complicated (or else you end up wasting space
* in mem_map).
*/
memory_node = find_devices("memory");
if (memory_node == NULL || !get_mem_prop("reg", &phys_mem)
|| phys_mem.n_regions == 0)
panic("No RAM??");
a = phys_mem.regions[0].address;
if (a != 0)
panic("RAM doesn't start at physical address 0");
total = phys_mem.regions[0].size;
if (phys_mem.n_regions > 1) {
printk("RAM starting at 0x%x is not contiguous\n",
phys_mem.regions[1].address);
printk("Using RAM from 0 to 0x%lx\n", total-1);
}
return total;
}
/*
* Check for command-line options that affect what MMU_init will do.
*/
void MMU_setup(void)
{
/* Check for nobats option (used in mapin_ram). */
if (strstr(cmd_line, "nobats")) {
__map_without_bats = 1;
}
if (strstr(cmd_line, "noltlbs")) {
__map_without_ltlbs = 1;
}
/* Look for mem= option on command line */
if (strstr(cmd_line, "mem=")) {
char *p, *q;
unsigned long maxmem = 0;
for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) {
q = p + 4;
if (p > cmd_line && p[-1] != ' ')
continue;
maxmem = simple_strtoul(q, &q, 0);
if (*q == 'k' || *q == 'K') {
maxmem <<= 10;
++q;
} else if (*q == 'm' || *q == 'M') {
maxmem <<= 20;
++q;
}
}
__max_memory = maxmem;
}
}
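/*
 * Example of the mem= parsing above (values illustrative only):
 * a command line containing "mem=64M" leaves maxmem == 64 << 20 and
 * hence __max_memory == 0x4000000; "mem=512k" gives 512 << 10; a bare
 * "mem=0x1000000" is taken as a byte count by simple_strtoul().
 */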
/*
* MMU_init sets up the basic memory mappings for the kernel,
* including both RAM and possibly some I/O regions,
* and sets up the page tables and the MMU hardware ready to go.
*/
void __init MMU_init(void)
{
if (ppc_md.progress)
ppc_md.progress("MMU:enter", 0x111);
/* parse args from command line */
MMU_setup();
/*
* Figure out how much memory we have, how much
* is lowmem, and how much is highmem. If we were
* passed the total memory size from the bootloader,
* just use it.
*/
if (boot_mem_size)
total_memory = boot_mem_size;
else
total_memory = ppc_md.find_end_of_memory();
if (__max_memory && total_memory > __max_memory)
total_memory = __max_memory;
total_lowmem = total_memory;
#ifdef CONFIG_FSL_BOOKE
/* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
* entries, so we need to adjust lowmem to match the amount we can map
* in the fixed entries */
adjust_total_lowmem();
#endif /* CONFIG_FSL_BOOKE */
if (total_lowmem > __max_low_memory) {
total_lowmem = __max_low_memory;
#ifndef CONFIG_HIGHMEM
total_memory = total_lowmem;
#endif /* CONFIG_HIGHMEM */
}
set_phys_avail(total_lowmem);
/* Initialize the MMU hardware */
if (ppc_md.progress)
ppc_md.progress("MMU:hw init", 0x300);
MMU_init_hw();
/* Map in all of RAM starting at KERNELBASE */
if (ppc_md.progress)
ppc_md.progress("MMU:mapin", 0x301);
mapin_ram();
#ifdef CONFIG_HIGHMEM
ioremap_base = PKMAP_BASE;
#else
ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */
#endif /* CONFIG_HIGHMEM */
ioremap_bot = ioremap_base;
/* Map in I/O resources */
if (ppc_md.progress)
ppc_md.progress("MMU:setio", 0x302);
if (ppc_md.setup_io_mappings)
ppc_md.setup_io_mappings();
/* Initialize the context management stuff */
mmu_context_init();
if (ppc_md.progress)
ppc_md.progress("MMU:exit", 0x211);
#ifdef CONFIG_BOOTX_TEXT
/* By default, we are no longer mapped */
boot_text_mapped = 0;
/* Must be done last, or ppc_md.progress will die. */
map_boot_text();
#endif
}
/* This is only called until mem_init is done. */
void __init *early_get_page(void)
{
void *p;
if (init_bootmem_done) {
p = alloc_bootmem_pages(PAGE_SIZE);
} else {
p = mem_pieces_find(PAGE_SIZE, PAGE_SIZE);
}
return p;
}
/* Free up now-unused memory */
static void free_sec(unsigned long start, unsigned long end, const char *name)
{
unsigned long cnt = 0;
while (start < end) {
ClearPageReserved(virt_to_page(start));
set_page_count(virt_to_page(start), 1);
free_page(start);
cnt++;
start += PAGE_SIZE;
}
if (cnt) {
printk(" %ldk %s", cnt << (PAGE_SHIFT - 10), name);
totalram_pages += cnt;
}
}
void free_initmem(void)
{
#define FREESEC(TYPE) \
free_sec((unsigned long)(&__ ## TYPE ## _begin), \
(unsigned long)(&__ ## TYPE ## _end), \
#TYPE);
printk ("Freeing unused kernel memory:");
FREESEC(init);
printk("\n");
ppc_md.progress = NULL;
#undef FREESEC
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
if (start < end)
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
set_page_count(virt_to_page(start), 1);
free_page(start);
totalram_pages++;
}
}
#endif
/*
* Initialize the bootmem system and give it all the memory we
* have available.
*/
void __init do_init_bootmem(void)
{
unsigned long start, size;
int i;
/*
* Find an area to use for the bootmem bitmap.
* We look for the first area which is at least
* 128kB in length (128kB is enough for a bitmap
* for 4GB of memory, using 4kB pages), plus 1 page
* (in case the address isn't page-aligned).
*/
start = 0;
size = 0;
for (i = 0; i < phys_avail.n_regions; ++i) {
unsigned long a = phys_avail.regions[i].address;
unsigned long s = phys_avail.regions[i].size;
if (s <= size)
continue;
start = a;
size = s;
if (s >= 33 * PAGE_SIZE)
break;
}
start = PAGE_ALIGN(start);
min_low_pfn = start >> PAGE_SHIFT;
max_low_pfn = (PPC_MEMSTART + total_lowmem) >> PAGE_SHIFT;
max_pfn = (PPC_MEMSTART + total_memory) >> PAGE_SHIFT;
boot_mapsize = init_bootmem_node(&contig_page_data, min_low_pfn,
PPC_MEMSTART >> PAGE_SHIFT,
max_low_pfn);
/* remove the bootmem bitmap from the available memory */
mem_pieces_remove(&phys_avail, start, boot_mapsize, 1);
/* add everything in phys_avail into the bootmem map */
for (i = 0; i < phys_avail.n_regions; ++i)
free_bootmem(phys_avail.regions[i].address,
phys_avail.regions[i].size);
init_bootmem_done = 1;
}
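/*
 * Sizing check for the comment above: with 4kB pages, 4GB of RAM is
 * 1M pages; at one bit per page the bitmap is 1M/8 = 128kB, i.e.
 * 32 pages, so the "33 * PAGE_SIZE" test allows for the bitmap plus
 * one extra page of alignment slack.
 */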
/*
* paging_init() sets up the page tables - in fact we've already done this.
*/
void __init paging_init(void)
{
unsigned long zones_size[MAX_NR_ZONES], i;
#ifdef CONFIG_HIGHMEM
map_page(PKMAP_BASE, 0, 0); /* XXX gross */
pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
map_page(KMAP_FIX_BEGIN, 0, 0); /* XXX gross */
kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */
/*
* All pages are DMA-able so we put them all in the DMA zone.
*/
zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
for (i = 1; i < MAX_NR_ZONES; i++)
zones_size[i] = 0;
#ifdef CONFIG_HIGHMEM
zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
#endif /* CONFIG_HIGHMEM */
free_area_init(zones_size);
}
void __init mem_init(void)
{
unsigned long addr;
int codepages = 0;
int datapages = 0;
int initpages = 0;
#ifdef CONFIG_HIGHMEM
unsigned long highmem_mapnr;
highmem_mapnr = total_lowmem >> PAGE_SHIFT;
#endif /* CONFIG_HIGHMEM */
max_mapnr = total_memory >> PAGE_SHIFT;
high_memory = (void *) __va(PPC_MEMSTART + total_lowmem);
num_physpages = max_mapnr; /* RAM is assumed contiguous */
totalram_pages += free_all_bootmem();
#ifdef CONFIG_BLK_DEV_INITRD
/* if we are booted from BootX with an initial ramdisk,
make sure the ramdisk pages aren't reserved. */
if (initrd_start) {
for (addr = initrd_start; addr < initrd_end; addr += PAGE_SIZE)
ClearPageReserved(virt_to_page(addr));
}
#endif /* CONFIG_BLK_DEV_INITRD */
#ifdef CONFIG_PPC_OF
/* mark the RTAS pages as reserved */
if ( rtas_data )
for (addr = (ulong)__va(rtas_data);
addr < PAGE_ALIGN((ulong)__va(rtas_data)+rtas_size) ;
addr += PAGE_SIZE)
SetPageReserved(virt_to_page(addr));
#endif
#ifdef CONFIG_PPC_PMAC
if (agp_special_page)
SetPageReserved(virt_to_page(agp_special_page));
#endif
for (addr = PAGE_OFFSET; addr < (unsigned long)high_memory;
addr += PAGE_SIZE) {
if (!PageReserved(virt_to_page(addr)))
continue;
if (addr < (ulong) etext)
codepages++;
else if (addr >= (unsigned long)&__init_begin
&& addr < (unsigned long)&__init_end)
initpages++;
else if (addr < (ulong) klimit)
datapages++;
}
#ifdef CONFIG_HIGHMEM
{
unsigned long pfn;
for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
struct page *page = mem_map + pfn;
ClearPageReserved(page);
set_page_count(page, 1);
__free_page(page);
totalhigh_pages++;
}
totalram_pages += totalhigh_pages;
}
#endif /* CONFIG_HIGHMEM */
printk("Memory: %luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
(unsigned long)nr_free_pages()<< (PAGE_SHIFT-10),
codepages<< (PAGE_SHIFT-10), datapages<< (PAGE_SHIFT-10),
initpages<< (PAGE_SHIFT-10),
(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
#ifdef CONFIG_PPC_PMAC
if (agp_special_page)
printk(KERN_INFO "AGP special page: 0x%08lx\n", agp_special_page);
#endif
mem_init_done = 1;
}
/*
* Set phys_avail to the amount of physical memory,
* less the kernel text/data/bss.
*/
void __init
set_phys_avail(unsigned long total_memory)
{
unsigned long kstart, ksize;
/*
* Initially, available physical memory is equivalent to all
* physical memory.
*/
phys_avail.regions[0].address = PPC_MEMSTART;
phys_avail.regions[0].size = total_memory;
phys_avail.n_regions = 1;
/*
* Map out the kernel text/data/bss from the available physical
* memory.
*/
kstart = __pa(_stext); /* should be 0 */
ksize = PAGE_ALIGN(klimit - _stext);
mem_pieces_remove(&phys_avail, kstart, ksize, 0);
mem_pieces_remove(&phys_avail, 0, 0x4000, 0);
#if defined(CONFIG_BLK_DEV_INITRD)
/* Remove the init RAM disk from the available memory. */
if (initrd_start) {
mem_pieces_remove(&phys_avail, __pa(initrd_start),
initrd_end - initrd_start, 1);
}
#endif /* CONFIG_BLK_DEV_INITRD */
#ifdef CONFIG_PPC_OF
/* remove the RTAS pages from the available memory */
if (rtas_data)
mem_pieces_remove(&phys_avail, rtas_data, rtas_size, 1);
#endif
#ifdef CONFIG_PPC_PMAC
/* Because of some uninorth weirdness, we need a page of
* memory as high as possible (it must be outside of the
* bus address seen as the AGP aperture). It will be used
* by the r128 DRM driver
*
* FIXME: We need to make sure that page doesn't overlap any of the
* above. This could be done by improving mem_pieces_find to be able
* to do a backward search from the end of the list.
*/
if (_machine == _MACH_Pmac && find_devices("uni-north-agp")) {
agp_special_page = (total_memory - PAGE_SIZE);
mem_pieces_remove(&phys_avail, agp_special_page, PAGE_SIZE, 0);
agp_special_page = (unsigned long)__va(agp_special_page);
}
#endif /* CONFIG_PPC_PMAC */
}
/* Mark some memory as reserved by removing it from phys_avail. */
void __init reserve_phys_mem(unsigned long start, unsigned long size)
{
mem_pieces_remove(&phys_avail, start, size, 1);
}

385
arch/powerpc/mm/init64.c Normal file

@ -0,0 +1,385 @@
/*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
* Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* Dave Engebretsen <engebret@us.ibm.com>
* Rework for PPC64 port.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/ppcdebug.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>
#include <asm/imalloc.h>
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif
#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
int mem_init_done;
unsigned long ioremap_bot = IMALLOC_BASE;
static unsigned long phbs_io_bot = PHBS_IO_BASE;
extern pgd_t swapper_pg_dir[];
extern struct task_struct *current_set[NR_CPUS];
unsigned long klimit = (unsigned long)_end;
unsigned long _SDR1=0;
unsigned long _ASR=0;
/* max amount of RAM to use */
unsigned long __max_memory;
/* info on what we think the IO hole is */
unsigned long io_hole_start;
unsigned long io_hole_size;
/*
* Do very early mm setup.
*/
void __init mm_init_ppc64(void)
{
#ifndef CONFIG_PPC_ISERIES
unsigned long i;
#endif
ppc64_boot_msg(0x100, "MM Init");
/* This is the story of the IO hole... please, keep seated,
* unfortunately, we are out of oxygen masks at the moment.
* So we need some rough way to tell where your big IO hole
* is. On pmac, it's between 2G and 4G, on POWER3, it's around
* that area as well, on POWER4 we don't have one, etc...
* We need that as a "hint" when sizing the TCE table on POWER3.
* So far, the simplest way that seems to work well enough for us is
* to just assume that the first discontinuity in our physical
* RAM layout is the IO hole. That may not be correct in the future
* (and isn't on iSeries but then we don't care ;)
*/
#ifndef CONFIG_PPC_ISERIES
for (i = 1; i < lmb.memory.cnt; i++) {
unsigned long base, prevbase, prevsize;
prevbase = lmb.memory.region[i-1].base;
prevsize = lmb.memory.region[i-1].size;
base = lmb.memory.region[i].base;
if (base > (prevbase + prevsize)) {
io_hole_start = prevbase + prevsize;
io_hole_size = base - (prevbase + prevsize);
break;
}
}
#endif /* CONFIG_PPC_ISERIES */
if (io_hole_start)
printk("IO Hole assumed to be %lx -> %lx\n",
io_hole_start, io_hole_start + io_hole_size - 1);
ppc64_boot_msg(0x100, "MM Init Done");
}
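/*
 * Example of the heuristic above (addresses illustrative only): if
 * the lmb regions are [0, 0x80000000) and [0x100000000, 0x180000000),
 * the first discontinuity gives io_hole_start = 0x80000000 and
 * io_hole_size = 0x80000000, i.e. the 2G-4G window typical of pmac.
 */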
void free_initmem(void)
{
unsigned long addr;
addr = (unsigned long)__init_begin;
for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
memset((void *)addr, 0xcc, PAGE_SIZE);
ClearPageReserved(virt_to_page(addr));
set_page_count(virt_to_page(addr), 1);
free_page(addr);
totalram_pages++;
}
printk ("Freeing unused kernel memory: %luk freed\n",
((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
if (start < end)
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
set_page_count(virt_to_page(start), 1);
free_page(start);
totalram_pages++;
}
}
#endif
/*
* Initialize the bootmem system and give it all the memory we
* have available.
*/
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
unsigned long i;
unsigned long start, bootmap_pages;
unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
int boot_mapsize;
/*
* Find an area to use for the bootmem bitmap. Calculate the size of
* bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
* Add 1 additional page in case the address isn't page-aligned.
*/
bootmap_pages = bootmem_bootmap_pages(total_pages);
start = lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
BUG_ON(!start);
boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
max_pfn = max_low_pfn;
/* Add all physical memory to the bootmem map, mark each area
* present.
*/
for (i=0; i < lmb.memory.cnt; i++)
free_bootmem(lmb.memory.region[i].base,
lmb_size_bytes(&lmb.memory, i));
/* reserve the sections we're already using */
for (i=0; i < lmb.reserved.cnt; i++)
reserve_bootmem(lmb.reserved.region[i].base,
lmb_size_bytes(&lmb.reserved, i));
for (i=0; i < lmb.memory.cnt; i++)
memory_present(0, lmb_start_pfn(&lmb.memory, i),
lmb_end_pfn(&lmb.memory, i));
}
/*
* paging_init() sets up the page tables - in fact we've already done this.
*/
void __init paging_init(void)
{
unsigned long zones_size[MAX_NR_ZONES];
unsigned long zholes_size[MAX_NR_ZONES];
unsigned long total_ram = lmb_phys_mem_size();
unsigned long top_of_ram = lmb_end_of_DRAM();
printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
top_of_ram, total_ram);
printk(KERN_INFO "Memory hole size: %ldMB\n",
(top_of_ram - total_ram) >> 20);
/*
* All pages are DMA-able so we put them all in the DMA zone.
*/
memset(zones_size, 0, sizeof(zones_size));
memset(zholes_size, 0, sizeof(zholes_size));
zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
free_area_init_node(0, NODE_DATA(0), zones_size,
__pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
static struct kcore_list kcore_vmem;
static int __init setup_kcore(void)
{
int i;
for (i=0; i < lmb.memory.cnt; i++) {
unsigned long base, size;
struct kcore_list *kcore_mem;
base = lmb.memory.region[i].base;
size = lmb.memory.region[i].size;
/* GFP_ATOMIC to avoid might_sleep warnings during boot */
kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
if (!kcore_mem)
panic("mem_init: kmalloc failed\n");
kclist_add(kcore_mem, __va(base), size);
}
kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
return 0;
}
module_init(setup_kcore);
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
int nid;
#endif
pg_data_t *pgdat;
unsigned long i;
struct page *page;
unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
num_physpages = max_low_pfn; /* RAM is assumed contiguous */
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
#ifdef CONFIG_NEED_MULTIPLE_NODES
for_each_online_node(nid) {
if (NODE_DATA(nid)->node_spanned_pages != 0) {
printk("freeing bootmem node %x\n", nid);
totalram_pages +=
free_all_bootmem_node(NODE_DATA(nid));
}
}
#else
max_mapnr = num_physpages;
totalram_pages += free_all_bootmem();
#endif
for_each_pgdat(pgdat) {
for (i = 0; i < pgdat->node_spanned_pages; i++) {
page = pgdat_page_nr(pgdat, i);
if (PageReserved(page))
reservedpages++;
}
}
codesize = (unsigned long)&_etext - (unsigned long)&_stext;
initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
datasize = (unsigned long)&_edata - (unsigned long)&__init_end;
bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
"%luk reserved, %luk data, %luk bss, %luk init)\n",
(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
datasize >> 10,
bsssize >> 10,
initsize >> 10);
mem_init_done = 1;
/* Initialize the vDSO */
vdso_init();
}
void __iomem * reserve_phb_iospace(unsigned long size)
{
void __iomem *virt_addr;
if (phbs_io_bot >= IMALLOC_BASE)
panic("reserve_phb_iospace(): phb io space overflow\n");
virt_addr = (void __iomem *) phbs_io_bot;
phbs_io_bot += size;
return virt_addr;
}
static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
{
memset(addr, 0, kmem_cache_size(cache));
}
static const int pgtable_cache_size[2] = {
PTE_TABLE_SIZE, PMD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
"pgd_pte_cache", "pud_pmd_cache",
};
kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
void pgtable_cache_init(void)
{
int i;
BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]);
BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]);
BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]);
BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]);
for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
int size = pgtable_cache_size[i];
const char *name = pgtable_cache_name[i];
pgtable_cache[i] = kmem_cache_create(name,
size, size,
SLAB_HWCACHE_ALIGN
| SLAB_MUST_HWCACHE_ALIGN,
zero_ctor,
NULL);
if (! pgtable_cache[i])
panic("pgtable_cache_init(): could not create %s!\n",
name);
}
}
pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
unsigned long size, pgprot_t vma_prot)
{
if (ppc_md.phys_mem_access_prot)
return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);
if (!page_is_ram(addr >> PAGE_SHIFT))
vma_prot = __pgprot(pgprot_val(vma_prot)
| _PAGE_GUARDED | _PAGE_NO_CACHE);
return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

299
arch/powerpc/mm/mem.c Normal file

@ -0,0 +1,299 @@
/*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
* Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
* PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/bootinfo.h>
#include <asm/prom.h>
#include "mem_pieces.h"
#include "mmu_decl.h"
#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE 0 /* XXX for now */
#define CPU_FTR_NOEXECUTE 0
#endif
/*
* This is called by /dev/mem to know if a given address has to
* be mapped non-cacheable or not
*/
int page_is_ram(unsigned long pfn)
{
unsigned long paddr = (pfn << PAGE_SHIFT);
#ifndef CONFIG_PPC64 /* XXX for now */
return paddr < __pa(high_memory);
#else
int i;
for (i=0; i < lmb.memory.cnt; i++) {
unsigned long base;
base = lmb.memory.region[i].base;
if ((paddr >= base) &&
(paddr < (base + lmb.memory.region[i].size))) {
return 1;
}
}
return 0;
#endif
}
EXPORT_SYMBOL(page_is_ram);
pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
unsigned long size, pgprot_t vma_prot)
{
if (ppc_md.phys_mem_access_prot)
return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);
if (!page_is_ram(addr >> PAGE_SHIFT))
vma_prot = __pgprot(pgprot_val(vma_prot)
| _PAGE_GUARDED | _PAGE_NO_CACHE);
return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
void show_mem(void)
{
unsigned long total = 0, reserved = 0;
unsigned long shared = 0, cached = 0;
unsigned long highmem = 0;
struct page *page;
pg_data_t *pgdat;
unsigned long i;
printk("Mem-info:\n");
show_free_areas();
printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
for_each_pgdat(pgdat) {
for (i = 0; i < pgdat->node_spanned_pages; i++) {
page = pgdat_page_nr(pgdat, i);
total++;
if (PageHighMem(page))
highmem++;
if (PageReserved(page))
reserved++;
else if (PageSwapCache(page))
cached++;
else if (page_count(page))
shared += page_count(page) - 1;
}
}
printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
printk("%ld pages of HIGHMEM\n", highmem);
#endif
printk("%ld reserved pages\n", reserved);
printk("%ld pages shared\n", shared);
printk("%ld pages swap cached\n", cached);
}
/*
* This is called when a page has been modified by the kernel.
* It just marks the page as not i-cache clean. We do the i-cache
* flush later when the page is given to a user process, if necessary.
*/
void flush_dcache_page(struct page *page)
{
if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
return;
/* avoid an atomic op if possible */
if (test_bit(PG_arch_1, &page->flags))
clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
__flush_dcache_icache(start);
kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx)
/* On 8xx there is no need to kmap since highmem is not supported */
__flush_dcache_icache(page_address(page));
#else
__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
clear_page(page);
if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
return;
/*
* We shouldn't have to do this, but some versions of glibc
* require it (ld.so assumes zero filled pages are icache clean)
* - Anton
*/
/* avoid an atomic op if possible */
if (test_bit(PG_arch_1, &pg->flags))
clear_bit(PG_arch_1, &pg->flags);
}
EXPORT_SYMBOL(clear_user_page);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
struct page *pg)
{
copy_page(vto, vfrom);
/*
* We should be able to use the following optimisation, however
* there are two problems.
* Firstly a bug in some versions of binutils meant PLT sections
* were not marked executable.
* Secondly the first word in the GOT section is blrl, used
* to establish the GOT address. Until recently the GOT was
* not marked executable.
* - Anton
*/
#if 0
if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
return;
#endif
if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
return;
/* avoid an atomic op if possible */
if (test_bit(PG_arch_1, &pg->flags))
clear_bit(PG_arch_1, &pg->flags);
}
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len)
{
unsigned long maddr;
maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
flush_icache_range(maddr, maddr + len);
kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);
/*
* This is called at the end of handling a user page fault, when the
* fault has been handled by updating a PTE in the linux page tables.
* We use it to preload an HPTE into the hash table corresponding to
* the updated linux PTE.
*
* This must always be called with the mm->page_table_lock held
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
pte_t pte)
{
/* handle i-cache coherency */
unsigned long pfn = pte_pfn(pte);
#ifdef CONFIG_PPC32
pmd_t *pmd;
#else
unsigned long vsid;
void *pgdir;
pte_t *ptep;
int local = 0;
cpumask_t tmp;
unsigned long flags;
#endif
/* handle i-cache coherency */
if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
!cpu_has_feature(CPU_FTR_NOEXECUTE) &&
pfn_valid(pfn)) {
struct page *page = pfn_to_page(pfn);
if (!PageReserved(page)
&& !test_bit(PG_arch_1, &page->flags)) {
if (vma->vm_mm == current->active_mm) {
#ifdef CONFIG_8xx
/* On 8xx, cache control instructions (particularly
* "dcbst" from flush_dcache_icache) fault as write
* operation if there is an unpopulated TLB entry
* for the address in question. To workaround that,
* we invalidate the TLB here, thus avoiding dcbst
* misbehaviour.
*/
_tlbie(address);
#endif
__flush_dcache_icache((void *) address);
} else
flush_dcache_icache_page(page);
set_bit(PG_arch_1, &page->flags);
}
}
#ifdef CONFIG_PPC_STD_MMU
/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
if (!pte_young(pte) || address >= TASK_SIZE)
return;
#ifdef CONFIG_PPC32
if (Hash == 0)
return;
pmd = pmd_offset(pgd_offset(vma->vm_mm, address), address);
if (!pmd_none(*pmd))
add_hash_page(vma->vm_mm->context, address, pmd_val(*pmd));
#else
pgdir = vma->vm_mm->pgd;
if (pgdir == NULL)
return;
ptep = find_linux_pte(pgdir, address);
if (!ptep)
return;
vsid = get_vsid(vma->vm_mm->context.id, address);
local_irq_save(flags);
tmp = cpumask_of_cpu(smp_processor_id());
if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
local = 1;
__hash_page(address, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
0x300, local);
local_irq_restore(flags);
#endif
#endif
}

259
arch/powerpc/mm/mem64.c Normal file

@ -0,0 +1,259 @@
/*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
* Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* Dave Engebretsen <engebret@us.ibm.com>
* Rework for PPC64 port.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/ppcdebug.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>
#include <asm/imalloc.h>
/*
* This is called by /dev/mem to know if a given address has to
* be mapped non-cacheable or not
*/
int page_is_ram(unsigned long pfn)
{
int i;
unsigned long paddr = (pfn << PAGE_SHIFT);
for (i=0; i < lmb.memory.cnt; i++) {
unsigned long base;
base = lmb.memory.region[i].base;
if ((paddr >= base) &&
(paddr < (base + lmb.memory.region[i].size))) {
return 1;
}
}
return 0;
}
EXPORT_SYMBOL(page_is_ram);
pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
unsigned long size, pgprot_t vma_prot)
{
if (ppc_md.phys_mem_access_prot)
return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);
if (!page_is_ram(addr >> PAGE_SHIFT))
vma_prot = __pgprot(pgprot_val(vma_prot)
| _PAGE_GUARDED | _PAGE_NO_CACHE);
return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
void show_mem(void)
{
unsigned long total = 0, reserved = 0;
unsigned long shared = 0, cached = 0;
struct page *page;
pg_data_t *pgdat;
unsigned long i;
printk("Mem-info:\n");
show_free_areas();
printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
for_each_pgdat(pgdat) {
for (i = 0; i < pgdat->node_spanned_pages; i++) {
page = pgdat_page_nr(pgdat, i);
total++;
if (PageReserved(page))
reserved++;
else if (PageSwapCache(page))
cached++;
else if (page_count(page))
shared += page_count(page) - 1;
}
}
printk("%ld pages of RAM\n", total);
printk("%ld reserved pages\n", reserved);
printk("%ld pages shared\n", shared);
printk("%ld pages swap cached\n", cached);
}
/*
* This is called when a page has been modified by the kernel.
* It just marks the page as not i-cache clean. We do the i-cache
* flush later when the page is given to a user process, if necessary.
*/
void flush_dcache_page(struct page *page)
{
if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
return;
/* avoid an atomic op if possible */
if (test_bit(PG_arch_1, &page->flags))
clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
clear_page(page);
if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
return;
/*
* We shouldn't have to do this, but some versions of glibc
* require it (ld.so assumes zero filled pages are icache clean)
* - Anton
*/
/* avoid an atomic op if possible */
if (test_bit(PG_arch_1, &pg->flags))
clear_bit(PG_arch_1, &pg->flags);
}
EXPORT_SYMBOL(clear_user_page);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
struct page *pg)
{
copy_page(vto, vfrom);
/*
* We should be able to use the following optimisation, however
* there are two problems.
* Firstly a bug in some versions of binutils meant PLT sections
* were not marked executable.
* Secondly the first word in the GOT section is blrl, used
* to establish the GOT address. Until recently the GOT was
* not marked executable.
* - Anton
*/
#if 0
if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
return;
#endif
if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
return;
/* avoid an atomic op if possible */
if (test_bit(PG_arch_1, &pg->flags))
clear_bit(PG_arch_1, &pg->flags);
}
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len)
{
unsigned long maddr;
maddr = (unsigned long)page_address(page) + (addr & ~PAGE_MASK);
flush_icache_range(maddr, maddr + len);
}
EXPORT_SYMBOL(flush_icache_user_range);
/*
* This is called at the end of handling a user page fault, when the
* fault has been handled by updating a PTE in the linux page tables.
* We use it to preload an HPTE into the hash table corresponding to
* the updated linux PTE.
*
* This must always be called with the mm->page_table_lock held
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
pte_t pte)
{
unsigned long vsid;
void *pgdir;
pte_t *ptep;
int local = 0;
cpumask_t tmp;
unsigned long flags;
/* handle i-cache coherency */
if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
!cpu_has_feature(CPU_FTR_NOEXECUTE)) {
unsigned long pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
struct page *page = pfn_to_page(pfn);
if (!PageReserved(page)
&& !test_bit(PG_arch_1, &page->flags)) {
__flush_dcache_icache(page_address(page));
set_bit(PG_arch_1, &page->flags);
}
}
}
/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
if (!pte_young(pte))
return;
pgdir = vma->vm_mm->pgd;
if (pgdir == NULL)
return;
ptep = find_linux_pte(pgdir, ea);
if (!ptep)
return;
vsid = get_vsid(vma->vm_mm->context.id, ea);
local_irq_save(flags);
tmp = cpumask_of_cpu(smp_processor_id());
if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
local = 1;
__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
0x300, local);
local_irq_restore(flags);
}

163
arch/powerpc/mm/mem_pieces.c Normal file
@ -0,0 +1,163 @@
/*
* Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
* Changes to accommodate Power Macintoshes.
* Cort Dougan <cort@cs.nmt.edu>
* Rewrites.
* Grant Erickson <grant@lcse.umn.edu>
* General rework and split from mm/init.c.
*
* Module name: mem_pieces.c
*
* Description:
* Routines and data structures for manipulating and representing
* physical memory extents (i.e. address/length pairs).
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <asm/page.h>
#include "mem_pieces.h"
extern struct mem_pieces phys_avail;
static void mem_pieces_print(struct mem_pieces *);
/*
* Scan a region for a piece of a given size with the required alignment.
*/
void __init *
mem_pieces_find(unsigned int size, unsigned int align)
{
int i;
unsigned a, e;
struct mem_pieces *mp = &phys_avail;
for (i = 0; i < mp->n_regions; ++i) {
a = mp->regions[i].address;
e = a + mp->regions[i].size;
a = (a + align - 1) & -align;
if (a + size <= e) {
mem_pieces_remove(mp, a, size, 1);
return (void *) __va(a);
}
}
panic("Couldn't find %u bytes at %u alignment\n", size, align);
return NULL;
}
/*
* Remove some memory from an array of pieces
*/
void __init
mem_pieces_remove(struct mem_pieces *mp, unsigned int start, unsigned int size,
int must_exist)
{
int i, j;
unsigned int end, rs, re;
struct reg_property *rp;
end = start + size;
for (i = 0, rp = mp->regions; i < mp->n_regions; ++i, ++rp) {
if (end > rp->address && start < rp->address + rp->size)
break;
}
if (i >= mp->n_regions) {
if (must_exist)
printk("mem_pieces_remove: [%x,%x) not in any region\n",
start, end);
return;
}
for (; i < mp->n_regions && end > rp->address; ++i, ++rp) {
rs = rp->address;
re = rs + rp->size;
if (must_exist && (start < rs || end > re)) {
printk("mem_pieces_remove: bad overlap [%x,%x) with",
start, end);
mem_pieces_print(mp);
must_exist = 0;
}
if (start > rs) {
rp->size = start - rs;
if (end < re) {
/* need to split this entry */
if (mp->n_regions >= MEM_PIECES_MAX)
panic("eek... mem_pieces overflow");
for (j = mp->n_regions; j > i + 1; --j)
mp->regions[j] = mp->regions[j-1];
++mp->n_regions;
rp[1].address = end;
rp[1].size = re - end;
}
} else {
if (end < re) {
rp->address = end;
rp->size = re - end;
} else {
/* need to delete this entry */
for (j = i; j < mp->n_regions - 1; ++j)
mp->regions[j] = mp->regions[j+1];
--mp->n_regions;
--i;
--rp;
}
}
}
}
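/*
 * Example (values illustrative only): removing [0x4000, 0x8000) from
 * a single region [0, 0x10000) trims it to [0, 0x4000) and inserts a
 * new region [0x8000, 0x10000); removing a range that covers a whole
 * region deletes that entry instead.
 */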
static void __init
mem_pieces_print(struct mem_pieces *mp)
{
int i;
for (i = 0; i < mp->n_regions; ++i)
printk(" [%x, %x)", mp->regions[i].address,
mp->regions[i].address + mp->regions[i].size);
printk("\n");
}
void __init
mem_pieces_sort(struct mem_pieces *mp)
{
unsigned long a, s;
int i, j;
for (i = 1; i < mp->n_regions; ++i) {
a = mp->regions[i].address;
s = mp->regions[i].size;
for (j = i - 1; j >= 0; --j) {
if (a >= mp->regions[j].address)
break;
mp->regions[j+1] = mp->regions[j];
}
mp->regions[j+1].address = a;
mp->regions[j+1].size = s;
}
}
void __init
mem_pieces_coalesce(struct mem_pieces *mp)
{
unsigned long a, s, ns;
int i, j, d;
d = 0;
for (i = 0; i < mp->n_regions; i = j) {
a = mp->regions[i].address;
s = mp->regions[i].size;
for (j = i + 1; j < mp->n_regions
&& mp->regions[j].address - a <= s; ++j) {
ns = mp->regions[j].address + mp->regions[j].size - a;
if (ns > s)
s = ns;
}
mp->regions[d].address = a;
mp->regions[d].size = s;
++d;
}
mp->n_regions = d;
}
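/*
 * Example (values illustrative only): regions [0, 0x1000) and
 * [0x800, 0x2000) overlap, so they coalesce into a single region
 * [0, 0x2000); a later region that starts beyond the merged end is
 * left alone.
 */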

48
arch/powerpc/mm/mem_pieces.h Normal file
@ -0,0 +1,48 @@
/*
* Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
* Changes to accommodate Power Macintoshes.
* Cort Dougan <cort@cs.nmt.edu>
* Rewrites.
* Grant Erickson <grant@lcse.umn.edu>
* General rework and split from mm/init.c.
*
* Module name: mem_pieces.h
*
* Description:
* Routines and data structures for manipulating and representing
* physical memory extents (i.e. address/length pairs).
*
*/
#ifndef __MEM_PIECES_H__
#define __MEM_PIECES_H__
#include <asm/prom.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Type Definitions */
#define MEM_PIECES_MAX 32
struct mem_pieces {
int n_regions;
struct reg_property regions[MEM_PIECES_MAX];
};
/* Function Prototypes */
extern void *mem_pieces_find(unsigned int size, unsigned int align);
extern void mem_pieces_remove(struct mem_pieces *mp, unsigned int start,
unsigned int size, int must_exist);
extern void mem_pieces_coalesce(struct mem_pieces *mp);
extern void mem_pieces_sort(struct mem_pieces *mp);
#ifdef __cplusplus
}
#endif
#endif /* __MEM_PIECES_H__ */


@ -0,0 +1,86 @@
/*
* This file contains the routines for handling the MMU on those
* PowerPC implementations where the MMU substantially follows the
* architecture specification. This includes the 6xx, 7xx, 7xxx,
* 8260, and POWER3 implementations but excludes the 8xx and 4xx.
* -- paulus
*
* Derived from arch/ppc/mm/init.c:
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
* Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
#ifdef FEW_CONTEXTS
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT+1];
void steal_context(void);
#endif /* FEW_CONTEXTS */
/*
* Initialize the context management stuff.
*/
void __init
mmu_context_init(void)
{
/*
* Some processors have too few contexts to reserve one for
* init_mm, and require using context 0 for a normal task.
* Other processors reserve the use of context zero for the kernel.
* This code assumes FIRST_CONTEXT < 32.
*/
context_map[0] = (1 << FIRST_CONTEXT) - 1;
next_mmu_context = FIRST_CONTEXT;
#ifdef FEW_CONTEXTS
atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
#endif /* FEW_CONTEXTS */
}
#ifdef FEW_CONTEXTS
/*
* Steal a context from a task that has one at the moment.
* This is only used on 8xx and 4xx and we presently assume that
* they don't do SMP. If they do then this will have to check
* whether the MM we steal is in use.
* We also assume that this is only used on systems that don't
* use an MMU hash table - this is true for 8xx and 4xx.
* This isn't an LRU system, it just frees up each context in
* turn (sort-of pseudo-random replacement :). This would be the
* place to implement an LRU scheme if anyone was motivated to do it.
* -- paulus
*/
void
steal_context(void)
{
struct mm_struct *mm;
/* free up context `next_mmu_context' */
/* if we shouldn't free context 0, don't... */
if (next_mmu_context < FIRST_CONTEXT)
next_mmu_context = FIRST_CONTEXT;
mm = context_mm[next_mmu_context];
flush_tlb_mm(mm);
destroy_context(mm);
}
#endif /* FEW_CONTEXTS */


@ -0,0 +1,63 @@
/*
* MMU context allocation for 64-bit kernels.
*
* Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <asm/mmu_context.h>
static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDR(mmu_context_idr);
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
int index;
int err;
again:
if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
return -ENOMEM;
spin_lock(&mmu_context_lock);
err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
spin_unlock(&mmu_context_lock);
if (err == -EAGAIN)
goto again;
else if (err)
return err;
if (index > MAX_CONTEXT) {
idr_remove(&mmu_context_idr, index);
return -ENOMEM;
}
mm->context.id = index;
return 0;
}
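/*
 * Note on the allocation loop above: idr_get_new_above() returns
 * -EAGAIN when the nodes preallocated by idr_pre_get() have been
 * consumed, in which case we simply preallocate and retry; ids are
 * handed out starting at 1, so context 0 stays reserved for the
 * kernel.
 */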
void destroy_context(struct mm_struct *mm)
{
spin_lock(&mmu_context_lock);
idr_remove(&mmu_context_idr, mm->context.id);
spin_unlock(&mmu_context_lock);
mm->context.id = NO_CONTEXT;
}

85
arch/powerpc/mm/mmu_decl.h Normal file
@ -0,0 +1,85 @@
/*
* Declarations of procedures and variables shared between files
* in arch/ppc/mm/.
*
* Derived from arch/ppc/mm/init.c:
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
* Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <asm/tlbflush.h>
#include <asm/mmu.h>
extern void mapin_ram(void);
extern int map_page(unsigned long va, phys_addr_t pa, int flags);
extern void setbat(int index, unsigned long virt, unsigned long phys,
unsigned int size, int flags);
extern void reserve_phys_mem(unsigned long start, unsigned long size);
extern void settlbcam(int index, unsigned long virt, phys_addr_t phys,
unsigned int size, int flags, unsigned int pid);
extern void invalidate_tlbcam_entry(int index);
extern int __map_without_bats;
extern unsigned long ioremap_base;
extern unsigned long ioremap_bot;
extern unsigned int rtas_data, rtas_size;
extern unsigned long total_memory;
extern unsigned long total_lowmem;
extern int mem_init_done;
extern PTE *Hash, *Hash_end;
extern unsigned long Hash_size, Hash_mask;
extern unsigned int num_tlbcam_entries;
/* ...and now those things that may be slightly different between processor
* architectures. -- Dan
*/
#if defined(CONFIG_8xx)
#define flush_HPTE(X, va, pg) _tlbie(va)
#define MMU_init_hw() do { } while(0)
#define mmu_mapin_ram() (0UL)
#elif defined(CONFIG_4xx)
#define flush_HPTE(X, va, pg) _tlbie(va)
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);
#elif defined(CONFIG_FSL_BOOKE)
#define flush_HPTE(X, va, pg) _tlbie(va)
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);
extern void adjust_total_lowmem(void);
#else
/* anything except 4xx or 8xx */
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);
/* Be careful....this needs to be updated if we ever encounter 603 SMPs,
* which includes all new 82xx processors. We need tlbie/tlbsync here
* in that case (I think). -- Dan.
*/
static inline void flush_HPTE(unsigned context, unsigned long va,
unsigned long pdval)
{
if ((Hash != 0) &&
cpu_has_feature(CPU_FTR_HPTE_TABLE))
flush_hash_pages(0, va, pdval, 1);
else
_tlbie(va);
}
#endif

470
arch/powerpc/mm/pgtable.c Normal file

@ -0,0 +1,470 @@
/*
* This file contains the routines setting up the linux page tables.
* -- paulus
*
* Derived from arch/ppc/mm/init.c:
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
* Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include "mmu_decl.h"
unsigned long ioremap_base;
unsigned long ioremap_bot;
int io_bat_index;
#if defined(CONFIG_6xx) || defined(CONFIG_POWER3)
#define HAVE_BATS 1
#endif
#if defined(CONFIG_FSL_BOOKE)
#define HAVE_TLBCAM 1
#endif
extern char etext[], _stext[];
#ifdef CONFIG_SMP
extern void hash_page_sync(void);
#endif
#ifdef HAVE_BATS
extern unsigned long v_mapped_by_bats(unsigned long va);
extern unsigned long p_mapped_by_bats(unsigned long pa);
void setbat(int index, unsigned long virt, unsigned long phys,
unsigned int size, int flags);
#else /* !HAVE_BATS */
#define v_mapped_by_bats(x) (0UL)
#define p_mapped_by_bats(x) (0UL)
#endif /* HAVE_BATS */
#ifdef HAVE_TLBCAM
extern unsigned int tlbcam_index;
extern unsigned long v_mapped_by_tlbcam(unsigned long va);
extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
#else /* !HAVE_TLBCAM */
#define v_mapped_by_tlbcam(x) (0UL)
#define p_mapped_by_tlbcam(x) (0UL)
#endif /* HAVE_TLBCAM */
#ifdef CONFIG_PTE_64BIT
/* 44x uses an 8kB pgdir because it has 8-byte Linux PTEs. */
#define PGDIR_ORDER 1
#else
#define PGDIR_ORDER 0
#endif
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *ret;
ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGDIR_ORDER);
return ret;
}
void pgd_free(pgd_t *pgd)
{
free_pages((unsigned long)pgd, PGDIR_ORDER);
}
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
pte_t *pte;
extern int mem_init_done;
extern void *early_get_page(void);
if (mem_init_done) {
pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
} else {
pte = (pte_t *)early_get_page();
if (pte)
clear_page(pte);
}
return pte;
}
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *ptepage;
#ifdef CONFIG_HIGHPTE
int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
#else
int flags = GFP_KERNEL | __GFP_REPEAT;
#endif
ptepage = alloc_pages(flags, 0);
if (ptepage)
clear_highpage(ptepage);
return ptepage;
}
void pte_free_kernel(pte_t *pte)
{
#ifdef CONFIG_SMP
hash_page_sync();
#endif
free_page((unsigned long)pte);
}
void pte_free(struct page *ptepage)
{
#ifdef CONFIG_SMP
hash_page_sync();
#endif
__free_page(ptepage);
}
#ifndef CONFIG_PHYS_64BIT
void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
return __ioremap(addr, size, _PAGE_NO_CACHE);
}
#else /* CONFIG_PHYS_64BIT */
void __iomem *
ioremap64(unsigned long long addr, unsigned long size)
{
return __ioremap(addr, size, _PAGE_NO_CACHE);
}
void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
phys_addr_t addr64 = fixup_bigphys_addr(addr, size);
return ioremap64(addr64, size);
}
#endif /* CONFIG_PHYS_64BIT */
void __iomem *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
{
unsigned long v, i;
phys_addr_t p;
int err;
/*
* Choose an address to map it to.
* Once the vmalloc system is running, we use it.
* Before then, we use space going down from ioremap_base
* (ioremap_bot records where we're up to).
*/
p = addr & PAGE_MASK;
size = PAGE_ALIGN(addr + size) - p;
/*
* If the address lies within the first 16 MB, assume it's in ISA
* memory space
*/
if (p < 16*1024*1024)
p += _ISA_MEM_BASE;
/*
* Don't allow anybody to remap normal RAM that we're using.
* mem_init() sets high_memory so only do the check after that.
*/
if ( mem_init_done && (p < virt_to_phys(high_memory)) )
{
printk("__ioremap(): phys addr "PHYS_FMT" is RAM lr %p\n", p,
__builtin_return_address(0));
return NULL;
}
if (size == 0)
return NULL;
/*
* Is it already mapped? Perhaps overlapped by a previous
* BAT mapping. If the whole area is mapped then we're done,
* otherwise remap it since we want to keep the virt addrs for
* each request contiguous.
*
* We make the assumption here that if the bottom and top
* of the range we want are mapped then it's mapped to the
* same virt address (and this is contiguous).
* -- Cort
*/
if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
goto out;
if ((v = p_mapped_by_tlbcam(p)))
goto out;
if (mem_init_done) {
struct vm_struct *area;
area = get_vm_area(size, VM_IOREMAP);
if (area == 0)
return NULL;
v = (unsigned long) area->addr;
} else {
v = (ioremap_bot -= size);
}
if ((flags & _PAGE_PRESENT) == 0)
flags |= _PAGE_KERNEL;
if (flags & _PAGE_NO_CACHE)
flags |= _PAGE_GUARDED;
/*
* Should check if it is a candidate for a BAT mapping
*/
err = 0;
for (i = 0; i < size && err == 0; i += PAGE_SIZE)
err = map_page(v+i, p+i, flags);
if (err) {
if (mem_init_done)
vunmap((void *)v);
return NULL;
}
out:
return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
void iounmap(volatile void __iomem *addr)
{
/*
* If mapped by BATs then there is nothing to do.
* Calling vfree() generates a benign warning.
*/
if (v_mapped_by_bats((unsigned long)addr)) return;
if (addr > high_memory && (unsigned long) addr < ioremap_bot)
vunmap((void *) (PAGE_MASK & (unsigned long)addr));
}
void __iomem *ioport_map(unsigned long port, unsigned int len)
{
return (void __iomem *) (port + _IO_BASE);
}
void ioport_unmap(void __iomem *addr)
{
/* Nothing to do */
}
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
int
map_page(unsigned long va, phys_addr_t pa, int flags)
{
pmd_t *pd;
pte_t *pg;
int err = -ENOMEM;
spin_lock(&init_mm.page_table_lock);
/* Use upper 10 bits of VA to index the first level map */
pd = pmd_offset(pgd_offset_k(va), va);
/* Use middle 10 bits of VA to index the second-level map */
pg = pte_alloc_kernel(&init_mm, pd, va);
if (pg != 0) {
err = 0;
set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
if (mem_init_done)
flush_HPTE(0, va, pmd_val(*pd));
}
spin_unlock(&init_mm.page_table_lock);
return err;
}
/*
* Map in all of physical memory starting at KERNELBASE.
*/
void __init mapin_ram(void)
{
unsigned long v, p, s, f;
s = mmu_mapin_ram();
v = KERNELBASE + s;
p = PPC_MEMSTART + s;
for (; s < total_lowmem; s += PAGE_SIZE) {
if ((char *) v >= _stext && (char *) v < etext)
f = _PAGE_RAM_TEXT;
else
f = _PAGE_RAM;
map_page(v, p, f);
v += PAGE_SIZE;
p += PAGE_SIZE;
}
}
/* is x a power of 2? */
#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
/* is x a power of 4? */
#define is_power_of_4(x) ((x) != 0 && (((x) & (x-1)) == 0) && (ffs(x) & 1))
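/*
 * A power of 4 is a power of 2 whose single set bit is at an odd (1-based)
 * position: ffs(4) == 3 and ffs(16) == 5 pass the (ffs(x) & 1) test, while
 * ffs(8) == 4 and ffs(32) == 6 are rejected.
 */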
/*
* Set up a mapping for a block of I/O.
* virt, phys, size must all be page-aligned.
* This should only be called before ioremap is called.
*/
void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
unsigned int size, int flags)
{
int i;
if (virt > KERNELBASE && virt < ioremap_bot)
ioremap_bot = ioremap_base = virt;
#ifdef HAVE_BATS
/*
* Use a BAT for this if possible...
*/
if (io_bat_index < 2 && is_power_of_2(size)
&& (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
setbat(io_bat_index, virt, phys, size, flags);
++io_bat_index;
return;
}
#endif /* HAVE_BATS */
#ifdef HAVE_TLBCAM
/*
* Use a CAM for this if possible...
*/
if (tlbcam_index < num_tlbcam_entries && is_power_of_4(size)
&& (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
settlbcam(tlbcam_index, virt, phys, size, flags, 0);
++tlbcam_index;
return;
}
#endif /* HAVE_TLBCAM */
/* No BATs available, put it in the page tables. */
for (i = 0; i < size; i += PAGE_SIZE)
map_page(virt + i, phys + i, flags);
}
/* Scan the real Linux page tables and return a PTE pointer for
* a virtual address in a context.
* Returns true (1) if PTE was found, zero otherwise. The pointer to
* the PTE pointer is unmodified if PTE is not found.
*/
int
get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
int retval = 0;
pgd = pgd_offset(mm, addr & PAGE_MASK);
if (pgd) {
pmd = pmd_offset(pgd, addr & PAGE_MASK);
if (pmd_present(*pmd)) {
pte = pte_offset_map(pmd, addr & PAGE_MASK);
if (pte) {
retval = 1;
*ptep = pte;
/* XXX caller needs to do pte_unmap, yuck */
}
}
}
return(retval);
}
/* Find physical address for this virtual address. Normally used by
* I/O functions, but anyone can call it.
*/
unsigned long iopa(unsigned long addr)
{
unsigned long pa;
/* I don't know why this won't work on PMacs or CHRP. It
* appears there is some bug, or there is some implicit
* mapping done not properly represented by BATs or in page
* tables.......I am actively working on resolving this, but
* can't hold up other stuff. -- Dan
*/
pte_t *pte;
struct mm_struct *mm;
/* Check the BATs */
pa = v_mapped_by_bats(addr);
if (pa)
return pa;
/* Allow mapping of user addresses (within the thread)
* for DMA if necessary.
*/
if (addr < TASK_SIZE)
mm = current->mm;
else
mm = &init_mm;
pa = 0;
if (get_pteptr(mm, addr, &pte)) {
pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
pte_unmap(pte);
}
return(pa);
}
/* This will find the virtual address for a physical one....
* Swiped from APUS, could be dangerous :-).
* This is only a placeholder until I really find a way to make this
* work. -- Dan
*/
unsigned long
mm_ptov (unsigned long paddr)
{
unsigned long ret;
#if 0
if (paddr < 16*1024*1024)
ret = ZTWO_VADDR(paddr);
else {
int i;
for (i = 0; i < kmap_chunk_count;){
unsigned long phys = kmap_chunks[i++];
unsigned long size = kmap_chunks[i++];
unsigned long virt = kmap_chunks[i++];
if (paddr >= phys
&& paddr < (phys + size)){
ret = virt + paddr - phys;
goto exit;
}
}
ret = (unsigned long) __va(paddr);
}
exit:
#ifdef DEBUGPV
printk ("PTOV(%lx)=%lx\n", paddr, ret);
#endif
#else
ret = (unsigned long)paddr + KERNELBASE;
#endif
return ret;
}

357
arch/powerpc/mm/pgtable64.c Normal file

@ -0,0 +1,357 @@
/*
* This file contains ioremap and related functions for 64-bit machines.
*
* Derived from arch/ppc64/mm/init.c
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
* Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* Dave Engebretsen <engebret@us.ibm.com>
* Rework for PPC64 port.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/ppcdebug.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>
#include <asm/imalloc.h>
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif
#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
int mem_init_done;
unsigned long ioremap_bot = IMALLOC_BASE;
static unsigned long phbs_io_bot = PHBS_IO_BASE;
extern pgd_t swapper_pg_dir[];
extern struct task_struct *current_set[NR_CPUS];
unsigned long klimit = (unsigned long)_end;
/* max amount of RAM to use */
unsigned long __max_memory;
/* info on what we think the IO hole is */
unsigned long io_hole_start;
unsigned long io_hole_size;
#ifdef CONFIG_PPC_ISERIES
void __iomem *ioremap(unsigned long addr, unsigned long size)
{
return (void __iomem *)addr;
}
extern void __iomem *__ioremap(unsigned long addr, unsigned long size,
unsigned long flags)
{
return (void __iomem *)addr;
}
void iounmap(volatile void __iomem *addr)
{
return;
}
#else
/*
* map_io_page currently only called by __ioremap
* map_io_page adds an entry to the ioremap page table
* and adds an entry to the HPT, possibly bolting it
*/
static int map_io_page(unsigned long ea, unsigned long pa, int flags)
{
pgd_t *pgdp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
unsigned long vsid;
if (mem_init_done) {
spin_lock(&init_mm.page_table_lock);
pgdp = pgd_offset_k(ea);
pudp = pud_alloc(&init_mm, pgdp, ea);
if (!pudp)
return -ENOMEM;
pmdp = pmd_alloc(&init_mm, pudp, ea);
if (!pmdp)
return -ENOMEM;
ptep = pte_alloc_kernel(&init_mm, pmdp, ea);
if (!ptep)
return -ENOMEM;
set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
__pgprot(flags)));
spin_unlock(&init_mm.page_table_lock);
} else {
unsigned long va, vpn, hash, hpteg;
/*
* If the mm subsystem is not fully up, we cannot create a
* linux page table entry for this mapping. Simply bolt an
* entry in the hardware page table.
*/
vsid = get_kernel_vsid(ea);
va = (vsid << 28) | (ea & 0xFFFFFFF);
vpn = va >> PAGE_SHIFT;
hash = hpt_hash(vpn, 0);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
/* Panic if a pte group is full */
if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT,
HPTE_V_BOLTED,
_PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX)
== -1) {
panic("map_io_page: could not insert mapping");
}
}
return 0;
}
static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
unsigned long ea, unsigned long size,
unsigned long flags)
{
unsigned long i;
if ((flags & _PAGE_PRESENT) == 0)
flags |= pgprot_val(PAGE_KERNEL);
for (i = 0; i < size; i += PAGE_SIZE)
if (map_io_page(ea+i, pa+i, flags))
return NULL;
return (void __iomem *) (ea + (addr & ~PAGE_MASK));
}
void __iomem *
ioremap(unsigned long addr, unsigned long size)
{
return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
}
void __iomem * __ioremap(unsigned long addr, unsigned long size,
unsigned long flags)
{
unsigned long pa, ea;
void __iomem *ret;
/*
* Choose an address to map it to.
* Once the imalloc system is running, we use it.
* Before that, we map using addresses going
* up from ioremap_bot. imalloc will use
* the addresses from ioremap_bot through
* IMALLOC_END
*
*/
pa = addr & PAGE_MASK;
size = PAGE_ALIGN(addr + size) - pa;
if (size == 0)
return NULL;
if (mem_init_done) {
struct vm_struct *area;
area = im_get_free_area(size);
if (area == NULL)
return NULL;
ea = (unsigned long)(area->addr);
ret = __ioremap_com(addr, pa, ea, size, flags);
if (!ret)
im_free(area->addr);
} else {
ea = ioremap_bot;
ret = __ioremap_com(addr, pa, ea, size, flags);
if (ret)
ioremap_bot += size;
}
return ret;
}
#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
int __ioremap_explicit(unsigned long pa, unsigned long ea,
unsigned long size, unsigned long flags)
{
struct vm_struct *area;
void __iomem *ret;
/* For now, require page-aligned values for pa, ea, and size */
if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
!IS_PAGE_ALIGNED(size)) {
printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
return 1;
}
if (!mem_init_done) {
/* Two things to consider in this case:
* 1) No records will be kept (imalloc, etc) that the region
* has been remapped
* 2) It won't be easy to iounmap() the region later (because
* of 1)
*/
;
} else {
area = im_get_area(ea, size,
IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
if (area == NULL) {
/* Expected when PHB-dlpar is in play */
return 1;
}
if (ea != (unsigned long) area->addr) {
printk(KERN_ERR "unexpected addr return from "
"im_get_area\n");
return 1;
}
}
ret = __ioremap_com(pa, pa, ea, size, flags);
if (ret == NULL) {
printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
return 1;
}
if (ret != (void *) ea) {
printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
return 1;
}
return 0;
}
/*
* Unmap an IO region and remove it from imalloc'd list.
* Access to IO memory should be serialized by driver.
* This code is modeled after vmalloc code - unmap_vm_area()
*
* XXX what about calls before mem_init_done (ie python_countermeasures())
*/
void iounmap(volatile void __iomem *token)
{
void *addr;
if (!mem_init_done)
return;
addr = (void *) ((unsigned long __force) token & PAGE_MASK);
im_free(addr);
}
static int iounmap_subset_regions(unsigned long addr, unsigned long size)
{
struct vm_struct *area;
/* Check whether subsets of this region exist */
area = im_get_area(addr, size, IM_REGION_SUPERSET);
if (area == NULL)
return 1;
while (area) {
iounmap((void __iomem *) area->addr);
area = im_get_area(addr, size,
IM_REGION_SUPERSET);
}
return 0;
}
int iounmap_explicit(volatile void __iomem *start, unsigned long size)
{
struct vm_struct *area;
unsigned long addr;
int rc;
addr = (unsigned long __force) start & PAGE_MASK;
/* Verify that the region either exists or is a subset of an existing
* region. In the latter case, split the parent region to create
* the exact region
*/
area = im_get_area(addr, size,
IM_REGION_EXISTS | IM_REGION_SUBSET);
if (area == NULL) {
/* Determine whether subset regions exist. If so, unmap */
rc = iounmap_subset_regions(addr, size);
if (rc) {
printk(KERN_ERR
"%s() cannot unmap nonexistent range 0x%lx\n",
__FUNCTION__, addr);
return 1;
}
} else {
iounmap((void __iomem *) area->addr);
}
/*
* FIXME! This can't be right:
iounmap(area->addr);
* Maybe it should be "iounmap(area);"
*/
return 0;
}
#endif
EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);

296
arch/powerpc/mm/ppc_mmu.c Normal file

@ -0,0 +1,296 @@
/*
* This file contains the routines for handling the MMU on those
* PowerPC implementations where the MMU substantially follows the
* architecture specification. This includes the 6xx, 7xx, 7xxx,
* 8260, and POWER3 implementations but excludes the 8xx and 4xx.
* -- paulus
*
* Derived from arch/ppc/mm/init.c:
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
* Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <asm/prom.h>
#include <asm/mmu.h>
#include <asm/machdep.h>
#include "mmu_decl.h"
#include "mem_pieces.h"
PTE *Hash, *Hash_end;
unsigned long Hash_size, Hash_mask;
unsigned long _SDR1;
union ubat { /* BAT register values to be loaded */
BAT bat;
#ifdef CONFIG_PPC64BRIDGE
u64 word[2];
#else
u32 word[2];
#endif
} BATS[4][2]; /* 4 pairs of IBAT, DBAT */
struct batrange { /* stores address ranges mapped by BATs */
unsigned long start;
unsigned long limit;
unsigned long phys;
} bat_addrs[4];
/*
* Return PA for this VA if it is mapped by a BAT, or 0
*/
unsigned long v_mapped_by_bats(unsigned long va)
{
int b;
for (b = 0; b < 4; ++b)
if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
return bat_addrs[b].phys + (va - bat_addrs[b].start);
return 0;
}
/*
* Return VA for a given PA or 0 if not mapped
*/
unsigned long p_mapped_by_bats(unsigned long pa)
{
int b;
for (b = 0; b < 4; ++b)
if (pa >= bat_addrs[b].phys
&& pa < (bat_addrs[b].limit-bat_addrs[b].start)
+bat_addrs[b].phys)
return bat_addrs[b].start+(pa-bat_addrs[b].phys);
return 0;
}
unsigned long __init mmu_mapin_ram(void)
{
#ifdef CONFIG_POWER4
return 0;
#else
unsigned long tot, bl, done;
unsigned long max_size = (256<<20);
unsigned long align;
if (__map_without_bats)
return 0;
/* Set up BAT2 and if necessary BAT3 to cover RAM. */
/* Make sure we don't map a block larger than the
smallest alignment of the physical address. */
/* alignment of PPC_MEMSTART */
align = ~(PPC_MEMSTART-1) & PPC_MEMSTART;
/* set BAT block size to MIN(max_size, align) */
if (align && align < max_size)
max_size = align;
tot = total_lowmem;
for (bl = 128<<10; bl < max_size; bl <<= 1) {
if (bl * 2 > tot)
break;
}
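/* For example, with 48MB of lowmem the loop above settles on a 32MB
 * block for BAT2; the remainder is picked up by BAT3 or the page tables. */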
setbat(2, KERNELBASE, PPC_MEMSTART, bl, _PAGE_RAM);
done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1;
if ((done < tot) && !bat_addrs[3].limit) {
/* use BAT3 to cover a bit more */
tot -= done;
for (bl = 128<<10; bl < max_size; bl <<= 1)
if (bl * 2 > tot)
break;
setbat(3, KERNELBASE+done, PPC_MEMSTART+done, bl, _PAGE_RAM);
done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1;
}
return done;
#endif
}
/*
* Set up one of the I/D BAT (block address translation) register pairs.
* The parameters are not checked; in particular size must be a power
* of 2 between 128k and 256M.
*/
void __init setbat(int index, unsigned long virt, unsigned long phys,
unsigned int size, int flags)
{
unsigned int bl;
int wimgxpp;
union ubat *bat = BATS[index];
if (((flags & _PAGE_NO_CACHE) == 0) &&
cpu_has_feature(CPU_FTR_NEED_COHERENT))
flags |= _PAGE_COHERENT;
bl = (size >> 17) - 1;
if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
/* 603, 604, etc. */
/* Do DBAT first */
wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
| _PAGE_COHERENT | _PAGE_GUARDED);
wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
bat[1].word[0] = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
bat[1].word[1] = phys | wimgxpp;
#ifndef CONFIG_KGDB /* want user access for breakpoints */
if (flags & _PAGE_USER)
#endif
bat[1].bat.batu.vp = 1;
if (flags & _PAGE_GUARDED) {
/* G bit must be zero in IBATs */
bat[0].word[0] = bat[0].word[1] = 0;
} else {
/* make IBAT same as DBAT */
bat[0] = bat[1];
}
} else {
/* 601 cpu */
if (bl > BL_8M)
bl = BL_8M;
wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
| _PAGE_COHERENT);
wimgxpp |= (flags & _PAGE_RW)?
((flags & _PAGE_USER)? PP_RWRW: PP_RWXX): PP_RXRX;
bat->word[0] = virt | wimgxpp | 4; /* Ks=0, Ku=1 */
bat->word[1] = phys | bl | 0x40; /* V=1 */
}
bat_addrs[index].start = virt;
bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
bat_addrs[index].phys = phys;
}
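/*
 * Example of the encoding used in setbat(): a 256MB block has
 * bl = (0x10000000 >> 17) - 1 = 0x7ff, so bat_addrs[index].limit ends up
 * covering virt .. virt + 256MB - 1.
 */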
/*
* Initialize the hash table and patch the instructions in hashtable.S.
*/
void __init MMU_init_hw(void)
{
unsigned int hmask, mb, mb2;
unsigned int n_hpteg, lg_n_hpteg;
extern unsigned int hash_page_patch_A[];
extern unsigned int hash_page_patch_B[], hash_page_patch_C[];
extern unsigned int hash_page[];
extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[];
if (!cpu_has_feature(CPU_FTR_HPTE_TABLE)) {
/*
* Put a blr (procedure return) instruction at the
* start of hash_page, since we can still get DSI
* exceptions on a 603.
*/
hash_page[0] = 0x4e800020;
flush_icache_range((unsigned long) &hash_page[0],
(unsigned long) &hash_page[1]);
return;
}
if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);
#ifdef CONFIG_PPC64BRIDGE
#define LG_HPTEG_SIZE 7 /* 128 bytes per HPTEG */
#define SDR1_LOW_BITS (lg_n_hpteg - 11)
#define MIN_N_HPTEG 2048 /* min 256kB hash table */
#else
#define LG_HPTEG_SIZE 6 /* 64 bytes per HPTEG */
#define SDR1_LOW_BITS ((n_hpteg - 1) >> 10)
#define MIN_N_HPTEG 1024 /* min 64kB hash table */
#endif
#ifdef CONFIG_POWER4
/* The hash table has already been allocated and initialized
in prom.c */
n_hpteg = Hash_size >> LG_HPTEG_SIZE;
lg_n_hpteg = __ilog2(n_hpteg);
/* Remove the hash table from the available memory */
if (Hash)
reserve_phys_mem(__pa(Hash), Hash_size);
#else /* CONFIG_POWER4 */
/*
* Allow 1 HPTE (1/8 HPTEG) for each page of memory.
* This is less than the recommended amount, but then
* Linux ain't AIX.
*/
n_hpteg = total_memory / (PAGE_SIZE * 8);
if (n_hpteg < MIN_N_HPTEG)
n_hpteg = MIN_N_HPTEG;
lg_n_hpteg = __ilog2(n_hpteg);
if (n_hpteg & (n_hpteg - 1)) {
++lg_n_hpteg; /* round up if not power of 2 */
n_hpteg = 1 << lg_n_hpteg;
}
Hash_size = n_hpteg << LG_HPTEG_SIZE;
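/*
 * For example, 256MB of RAM with 4kB pages gives n_hpteg = 8192, i.e.
 * (with 64-byte HPTEGs) a 512kB hash table.
 */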
/*
* Find some memory for the hash table.
*/
if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
Hash = mem_pieces_find(Hash_size, Hash_size);
cacheable_memzero(Hash, Hash_size);
_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
#endif /* CONFIG_POWER4 */
Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
printk("Total memory = %ldMB; using %ldkB for hash table (at %p)\n",
total_memory >> 20, Hash_size >> 10, Hash);
/*
* Patch up the instructions in hashtable.S:create_hpte
*/
if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345);
Hash_mask = n_hpteg - 1;
hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
mb2 = mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
if (lg_n_hpteg > 16)
mb2 = 16 - LG_HPTEG_SIZE;
hash_page_patch_A[0] = (hash_page_patch_A[0] & ~0xffff)
| ((unsigned int)(Hash) >> 16);
hash_page_patch_A[1] = (hash_page_patch_A[1] & ~0x7c0) | (mb << 6);
hash_page_patch_A[2] = (hash_page_patch_A[2] & ~0x7c0) | (mb2 << 6);
hash_page_patch_B[0] = (hash_page_patch_B[0] & ~0xffff) | hmask;
hash_page_patch_C[0] = (hash_page_patch_C[0] & ~0xffff) | hmask;
/*
* Ensure that the locations we've patched have been written
* out from the data cache and invalidated in the instruction
* cache, on those machines with split caches.
*/
flush_icache_range((unsigned long) &hash_page_patch_A[0],
(unsigned long) &hash_page_patch_C[1]);
/*
* Patch up the instructions in hashtable.S:flush_hash_page
*/
flush_hash_patch_A[0] = (flush_hash_patch_A[0] & ~0xffff)
| ((unsigned int)(Hash) >> 16);
flush_hash_patch_A[1] = (flush_hash_patch_A[1] & ~0x7c0) | (mb << 6);
flush_hash_patch_A[2] = (flush_hash_patch_A[2] & ~0x7c0) | (mb2 << 6);
flush_hash_patch_B[0] = (flush_hash_patch_B[0] & ~0xffff) | hmask;
flush_icache_range((unsigned long) &flush_hash_patch_A[0],
(unsigned long) &flush_hash_patch_B[1]);
if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
}

183
arch/powerpc/mm/tlb.c Normal file

@ -0,0 +1,183 @@
/*
* This file contains the routines for TLB flushing.
* On machines where the MMU uses a hash table to store virtual to
* physical translations, these routines flush entries from the
* hash table also.
* -- paulus
*
* Derived from arch/ppc/mm/init.c:
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
* Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include "mmu_decl.h"
/*
* Called when unmapping pages to flush entries from the TLB/hash table.
*/
void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
{
unsigned long ptephys;
if (Hash != 0) {
ptephys = __pa(ptep) & PAGE_MASK;
flush_hash_pages(mm->context, addr, ptephys, 1);
}
}
/*
* Called by ptep_set_access_flags, must flush on CPUs for which the
* DSI handler can't just "fixup" the TLB on a write fault
*/
void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr)
{
if (Hash != 0)
return;
_tlbie(addr);
}
/*
* Called at the end of a mmu_gather operation to make sure the
* TLB flush is completely done.
*/
void tlb_flush(struct mmu_gather *tlb)
{
if (Hash == 0) {
/*
* 603 needs to flush the whole TLB here since
* it doesn't use a hash table.
*/
_tlbia();
}
}
/*
* TLB flushing:
*
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_kernel_range(start, end) flushes kernel pages
*
* since the hardware hash table functions as an extension of the
* tlb as far as the linux tables are concerned, flush it too.
* -- Cort
*/
/*
* 750 SMP is a Bad Idea because the 750 doesn't broadcast all
* the cache operations on the bus. Hence we need to use an IPI
* to get the other CPU(s) to invalidate their TLBs.
*/
#ifdef CONFIG_SMP_750
#define FINISH_FLUSH smp_send_tlb_invalidate(0)
#else
#define FINISH_FLUSH do { } while (0)
#endif
static void flush_range(struct mm_struct *mm, unsigned long start,
unsigned long end)
{
pmd_t *pmd;
unsigned long pmd_end;
int count;
unsigned int ctx = mm->context;
if (Hash == 0) {
_tlbia();
return;
}
start &= PAGE_MASK;
if (start >= end)
return;
end = (end - 1) | ~PAGE_MASK;
pmd = pmd_offset(pgd_offset(mm, start), start);
for (;;) {
pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
if (pmd_end > end)
pmd_end = end;
if (!pmd_none(*pmd)) {
count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
flush_hash_pages(ctx, start, pmd_val(*pmd), count);
}
if (pmd_end == end)
break;
start = pmd_end + 1;
++pmd;
}
}
/*
* Flush kernel TLB entries in the given range
*/
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
flush_range(&init_mm, start, end);
FINISH_FLUSH;
}
/*
* Flush all the (user) entries for the address space described by mm.
*/
void flush_tlb_mm(struct mm_struct *mm)
{
struct vm_area_struct *mp;
if (Hash == 0) {
_tlbia();
return;
}
for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
FINISH_FLUSH;
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
struct mm_struct *mm;
pmd_t *pmd;
if (Hash == 0) {
_tlbie(vmaddr);
return;
}
mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr);
if (!pmd_none(*pmd))
flush_hash_pages(mm->context, vmaddr, pmd_val(*pmd), 1);
FINISH_FLUSH;
}
/*
* For each address in the range, find the pte for the address
* and check _PAGE_HASHPTE bit; if it is set, find and destroy
* the corresponding HPTE.
*/
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
flush_range(vma->vm_mm, start, end);
FINISH_FLUSH;
}


@ -0,0 +1,280 @@
config 4xx
bool
depends on 40x || 44x
default y
config WANT_EARLY_SERIAL
bool
select SERIAL_8250
default n
menu "AMCC 4xx options"
depends on 4xx
choice
prompt "Machine Type"
depends on 40x
default WALNUT
config BUBINGA
bool "Bubinga"
select WANT_EARLY_SERIAL
help
This option enables support for the IBM 405EP evaluation board.
config CPCI405
bool "CPCI405"
help
This option enables support for the CPCI405 board.
config EP405
bool "EP405/EP405PC"
help
This option enables support for the EP405/EP405PC boards.
config REDWOOD_5
bool "Redwood-5"
help
This option enables support for the IBM STB04 evaluation board.
config REDWOOD_6
bool "Redwood-6"
help
This option enables support for the IBM STBx25xx evaluation board.
config SYCAMORE
bool "Sycamore"
help
This option enables support for the IBM PPC405GPr evaluation board.
config WALNUT
bool "Walnut"
help
This option enables support for the IBM PPC405GP evaluation board.
config XILINX_ML300
bool "Xilinx-ML300"
help
This option enables support for the Xilinx ML300 evaluation board.
endchoice
choice
prompt "Machine Type"
depends on 44x
default EBONY
config BAMBOO
bool "Bamboo"
select WANT_EARLY_SERIAL
help
This option enables support for the IBM PPC440EP evaluation board.
config EBONY
bool "Ebony"
select WANT_EARLY_SERIAL
help
This option enables support for the IBM PPC440GP evaluation board.
config LUAN
bool "Luan"
select WANT_EARLY_SERIAL
help
This option enables support for the IBM PPC440SP evaluation board.
config OCOTEA
bool "Ocotea"
select WANT_EARLY_SERIAL
help
This option enables support for the IBM PPC440GX evaluation board.
endchoice
config EP405PC
bool "EP405PC Support"
depends on EP405
# It's often necessary to know the specific 4xx processor type.
# Fortunately, it is implied (so far) from the board type, so we
# don't need to ask more redundant questions.
config NP405H
bool
depends on ASH
default y
config 440EP
bool
depends on BAMBOO
select PPC_FPU
default y
config 440GP
bool
depends on EBONY
default y
config 440GX
bool
depends on OCOTEA
default y
config 440SP
bool
depends on LUAN
default y
config 440
bool
depends on 440GP || 440SP || 440EP
default y
config 440A
bool
depends on 440GX
default y
config IBM440EP_ERR42
bool
depends on 440EP
default y
# All 405-based cores up until the 405GPR and 405EP have this errata.
config IBM405_ERR77
bool
depends on 40x && !403GCX && !405GPR && !405EP
default y
# All 40x-based cores, up until the 405GPR and 405EP have this errata.
config IBM405_ERR51
bool
depends on 40x && !405GPR && !405EP
default y
config BOOKE
bool
depends on 44x
default y
config IBM_OCP
bool
depends on ASH || BAMBOO || BUBINGA || CPCI405 || EBONY || EP405 || LUAN || OCOTEA || REDWOOD_5 || REDWOOD_6 || SYCAMORE || WALNUT
default y
config XILINX_OCP
bool
depends on XILINX_ML300
default y
config IBM_EMAC4
bool
depends on 440GX || 440SP
default y
config BIOS_FIXUP
bool
depends on BUBINGA || EP405 || SYCAMORE || WALNUT
default y
# OAK doesn't exist, but we want to keep this around for any future 403GCX boards
config 403GCX
bool
depends on OAK
default y
config 405EP
bool
depends on BUBINGA
default y
config 405GP
bool
depends on CPCI405 || EP405 || WALNUT
default y
config 405GPR
bool
depends on SYCAMORE
default y
config VIRTEX_II_PRO
bool
depends on XILINX_ML300
default y
config STB03xxx
bool
depends on REDWOOD_5 || REDWOOD_6
default y
config EMBEDDEDBOOT
bool
depends on EP405 || XILINX_ML300
default y
config IBM_OPENBIOS
bool
depends on ASH || BUBINGA || REDWOOD_5 || REDWOOD_6 || SYCAMORE || WALNUT
default y
config PPC4xx_DMA
bool "PPC4xx DMA controller support"
depends on 4xx
config PPC4xx_EDMA
bool
depends on !STB03xxx && PPC4xx_DMA
default y
config PPC_GEN550
bool
depends on 4xx
default y
choice
prompt "TTYS0 device and default console"
depends on 40x
default UART0_TTYS0
config UART0_TTYS0
bool "UART0"
config UART0_TTYS1
bool "UART1"
endchoice
config SERIAL_SICC
bool "SICC Serial port support"
depends on STB03xxx
config UART1_DFLT_CONSOLE
bool
depends on SERIAL_SICC && UART0_TTYS1
default y
config SERIAL_SICC_CONSOLE
bool
depends on SERIAL_SICC && UART0_TTYS1
default y
endmenu
menu "IBM 40x options"
depends on 40x
config SERIAL_SICC
bool "SICC Serial port"
depends on STB03xxx
config UART1_DFLT_CONSOLE
bool
depends on SERIAL_SICC && UART0_TTYS1
default y
config SERIAL_SICC_CONSOLE
bool
depends on SERIAL_SICC && UART0_TTYS1
default y
endmenu


@ -0,0 +1,86 @@
config 85xx
bool
depends on E500
default y
config PPC_INDIRECT_PCI_BE
bool
depends on 85xx
default y
menu "Freescale 85xx options"
depends on E500
choice
prompt "Machine Type"
depends on 85xx
default MPC8540_ADS
config MPC8540_ADS
bool "Freescale MPC8540 ADS"
help
This option enables support for the MPC 8540 ADS evaluation board.
config MPC8548_CDS
bool "Freescale MPC8548 CDS"
help
This option enables support for the MPC8548 CDS evaluation board.
config MPC8555_CDS
bool "Freescale MPC8555 CDS"
help
This option enables support for the MPC8555 CDS evaluation board.
config MPC8560_ADS
bool "Freescale MPC8560 ADS"
help
This option enables support for the MPC 8560 ADS evaluation board.
config SBC8560
bool "WindRiver PowerQUICC III SBC8560"
help
This option enables support for the WindRiver PowerQUICC III
SBC8560 board.
config STX_GP3
bool "Silicon Turnkey Express GP3"
help
This option enables support for the Silicon Turnkey Express GP3
board.
endchoice
# It's often necessary to know the specific 85xx processor type.
# Fortunately, it is implied (so far) from the board type, so we
# don't need to ask more redundant questions.
config MPC8540
bool
depends on MPC8540_ADS
default y
config MPC8548
bool
depends on MPC8548_CDS
default y
config MPC8555
bool
depends on MPC8555_CDS
default y
config MPC8560
bool
depends on SBC8560 || MPC8560_ADS || STX_GP3
default y
config 85xx_PCI2
bool "Supprt for 2nd PCI host controller"
depends on MPC8555_CDS
default y
config PPC_GEN550
bool
depends on MPC8540 || SBC8560 || MPC8555
default y
endmenu


@ -0,0 +1,352 @@
config FADS
bool
choice
prompt "8xx Machine Type"
depends on 8xx
default RPXLITE
config RPXLITE
bool "RPX-Lite"
---help---
Single-board computers based around the PowerPC MPC8xx chips and
intended for embedded applications. The following types are
supported:
RPX-Lite:
Embedded Planet RPX Lite. PC104 form-factor SBC based on the MPC823.
RPX-Classic:
Embedded Planet RPX Classic Low-fat. Credit-card-size SBC based on
the MPC 860
BSE-IP:
Bright Star Engineering ip-Engine.
TQM823L:
TQM850L:
TQM855L:
TQM860L:
MPC8xx based family of mini modules, half credit card size,
up to 64 MB of RAM, 8 MB Flash, (Fast) Ethernet, 2 x serial ports,
2 x CAN bus interface, ...
Manufacturer: TQ Components, www.tq-group.de
Date of Release: October (?) 1999
End of Life: not yet :-)
URL:
- module: <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>
- starter kit: <http://www.denx.de/PDF/STK8xxLHWM201.pdf>
- images: <http://www.denx.de/embedded-ppc-en.html>
FPS850L:
FingerPrint Sensor System (based on TQM850L)
Manufacturer: IKENDI AG, <http://www.ikendi.com/>
Date of Release: November 1999
End of life: end 2000 ?
URL: see TQM850L
IVMS8:
MPC860 based board used in the "Integrated Voice Mail System",
Small Version (8 voice channels)
Manufacturer: Speech Design, <http://www.speech-design.de/>
Date of Release: December 2000 (?)
End of life: -
URL: <http://www.speech-design.de/>
IVML24:
MPC860 based board used in the "Integrated Voice Mail System",
Large Version (24 voice channels)
Manufacturer: Speech Design, <http://www.speech-design.de/>
Date of Release: March 2001 (?)
End of life: -
URL: <http://www.speech-design.de/>
HERMES:
Hermes-Pro ISDN/LAN router with integrated 8 x hub
Manufacturer: Multidata Gesellschaft fur Datentechnik und Informatik
<http://www.multidata.de/>
Date of Release: 2000 (?)
End of life: -
URL: <http://www.multidata.de/english/products/hpro.htm>
IP860:
VMEBus IP (Industry Pack) carrier board with MPC860
Manufacturer: MicroSys GmbH, <http://www.microsys.de/>
Date of Release: ?
End of life: -
URL: <http://www.microsys.de/html/ip860.html>
PCU_E:
PCU = Peripheral Controller Unit, Extended
Manufacturer: Siemens AG, ICN (Information and Communication Networks)
<http://www.siemens.de/page/1,3771,224315-1-999_2_226207-0,00.html>
Date of Release: April 2001
End of life: August 2001
URL: n. a.
config RPXCLASSIC
bool "RPX-Classic"
help
The RPX-Classic is a single-board computer based on the Motorola
MPC860. It features 16MB of DRAM and a variable amount of flash,
I2C EEPROM, thermal monitoring, a PCMCIA slot, a DIP switch and two
LEDs. Variants with Ethernet ports exist. Say Y here to support it
directly.
config BSEIP
bool "BSE-IP"
help
Say Y here to support the Bright Star Engineering ipEngine SBC.
This is a credit-card-sized device featuring a MPC823 processor,
26MB DRAM, 4MB flash, Ethernet, a 16K-gate FPGA, USB, an LCD/video
controller, and two RS232 ports.
config MPC8XXFADS
bool "FADS"
select FADS
config MPC86XADS
bool "MPC86XADS"
select FADS
help
MPC86x Application Development System by Freescale Semiconductor.
The MPC86xADS is meant to serve as a platform for s/w and h/w
development around the MPC86X processor families.
config MPC885ADS
bool "MPC885ADS"
help
Freescale Semiconductor MPC885 Application Development System (ADS).
Also known as DUET.
The MPC885ADS is meant to serve as a platform for s/w and h/w
development around the MPC885 processor family.
config TQM823L
bool "TQM823L"
help
Say Y here to support the TQM823L, one of an MPC8xx-based family of
mini SBCs (half credit-card size) from TQ Components first released
in late 1999. Technical references are at
<http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
<http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
<http://www.denx.de/embedded-ppc-en.html>.
config TQM850L
bool "TQM850L"
help
Say Y here to support the TQM850L, one of an MPC8xx-based family of
mini SBCs (half credit-card size) from TQ Components first released
in late 1999. Technical references are at
<http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
<http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
<http://www.denx.de/embedded-ppc-en.html>.
config TQM855L
bool "TQM855L"
help
Say Y here to support the TQM855L, one of an MPC8xx-based family of
mini SBCs (half credit-card size) from TQ Components first released
in late 1999. Technical references are at
<http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
<http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
<http://www.denx.de/embedded-ppc-en.html>.
config TQM860L
bool "TQM860L"
help
Say Y here to support the TQM860L, one of an MPC8xx-based family of
mini SBCs (half credit-card size) from TQ Components first released
in late 1999. Technical references are at
<http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
<http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
<http://www.denx.de/embedded-ppc-en.html>.
config FPS850L
bool "FPS850L"
config IVMS8
bool "IVMS8"
help
Say Y here to support the Integrated Voice-Mail Small 8-channel SBC
from Speech Design, released March 2001. The manufacturer's website
is at <http://www.speech-design.de/>.
config IVML24
bool "IVML24"
help
Say Y here to support the Integrated Voice-Mail Large 24-channel SBC
from Speech Design, released March 2001. The manufacturer's website
is at <http://www.speech-design.de/>.
config HERMES_PRO
bool "HERMES"
config IP860
bool "IP860"
config LWMON
bool "LWMON"
config PCU_E
bool "PCU_E"
config CCM
bool "CCM"
config LANTEC
bool "LANTEC"
config MBX
bool "MBX"
help
MBX is a line of Motorola single-board computers based around the
MPC821 and MPC860 processors, and intended for embedded-controller
applications. Say Y here to support these boards directly.
config WINCEPT
bool "WinCept"
help
The Wincept 100/110 is a Motorola single-board computer based on the
MPC821 PowerPC, introduced in 1998 and designed to be used in
thin-client machines. Say Y to support it directly.
endchoice
#
# MPC8xx Communication options
#
menu "MPC8xx CPM Options"
depends on 8xx
config SCC_ENET
bool "CPM SCC Ethernet"
depends on NET_ETHERNET
help
Enable Ethernet support via the Motorola MPC8xx serial
communications controller.
choice
prompt "SCC used for Ethernet"
depends on SCC_ENET
default SCC1_ENET
config SCC1_ENET
bool "SCC1"
help
Use MPC8xx serial communications controller 1 to drive Ethernet
(default).
config SCC2_ENET
bool "SCC2"
help
Use MPC8xx serial communications controller 2 to drive Ethernet.
config SCC3_ENET
bool "SCC3"
help
Use MPC8xx serial communications controller 3 to drive Ethernet.
endchoice
config FEC_ENET
bool "860T FEC Ethernet"
depends on NET_ETHERNET
help
Enable Ethernet support via the Fast Ethernet Controller (FEC) on
the Motorola MPC860T.
config USE_MDIO
bool "Use MDIO for PHY configuration"
depends on FEC_ENET
help
On some boards the hardware configuration of the ethernet PHY can be
used without any software interaction over the MDIO interface, so
all MII code can be omitted. Say N here if unsure or if you don't
need link status reports.
config FEC_AM79C874
bool "Support AMD79C874 PHY"
depends on USE_MDIO
config FEC_LXT970
bool "Support LXT970 PHY"
depends on USE_MDIO
config FEC_LXT971
bool "Support LXT971 PHY"
depends on USE_MDIO
config FEC_QS6612
bool "Support QS6612 PHY"
depends on USE_MDIO
config ENET_BIG_BUFFERS
bool "Use Big CPM Ethernet Buffers"
depends on SCC_ENET || FEC_ENET
help
Allocate large buffers for MPC8xx Ethernet. Increases throughput
and decreases the likelihood of dropped packets, but costs memory.
config HTDMSOUND
bool "Embedded Planet HIOX Audio"
depends on SOUND=y
# This doesn't really belong here, but it is convenient to ask
# 8xx specific questions.
comment "Generic MPC8xx Options"
config 8xx_COPYBACK
bool "Copy-Back Data Cache (else Writethrough)"
help
Saying Y here will cause the cache on an MPC8xx processor to be used
in Copy-Back mode. If you say N here, it is used in Writethrough
mode.
If in doubt, say Y here.
config 8xx_CPU6
bool "CPU6 Silicon Errata (860 Pre Rev. C)"
help
MPC860 CPUs prior to Rev. C have some bugs in the silicon which
require workarounds in Linux (and most other OSes) to work. If you
get a BUG() very early in boot, this might fix the problem. For
more details read the document entitled "MPC860 Family Device Errata
Reference" on Motorola's website. This option also incurs a
performance hit.
If in doubt, say N here.
choice
prompt "Microcode patch selection"
default NO_UCODE_PATCH
help
Help not implemented yet, coming soon.
config NO_UCODE_PATCH
bool "None"
config USB_SOF_UCODE_PATCH
bool "USB SOF patch"
help
Help not implemented yet, coming soon.
config I2C_SPI_UCODE_PATCH
bool "I2C/SPI relocation patch"
help
Help not implemented yet, coming soon.
config I2C_SPI_SMC1_UCODE_PATCH
bool "I2C/SPI/SMC1 relocation patch"
help
Help not implemented yet, coming soon.
endchoice
config UCODE_PATCH
bool
default y
depends on !NO_UCODE_PATCH
endmenu


@ -0,0 +1,130 @@
config AMIGA
bool
depends on APUS
default y
help
This option enables support for the Amiga series of computers.
config ZORRO
bool
depends on APUS
default y
help
This enables support for the Zorro bus in the Amiga. If you have
expansion cards in your Amiga that conform to the Amiga
AutoConfig(tm) specification, say Y, otherwise N. Note that even
expansion cards that do not fit in the Zorro slots but fit in e.g.
the CPU slot may fall in this category, so you have to say Y to let
Linux use these.
config ABSTRACT_CONSOLE
bool
depends on APUS
default y
config APUS_FAST_EXCEPT
bool
depends on APUS
default y
config AMIGA_PCMCIA
bool "Amiga 1200/600 PCMCIA support"
depends on APUS && EXPERIMENTAL
help
Include support in the kernel for pcmcia on Amiga 1200 and Amiga
600. If you intend to use pcmcia cards say Y; otherwise say N.
config AMIGA_BUILTIN_SERIAL
tristate "Amiga builtin serial support"
depends on APUS
help
If you want to use your Amiga's built-in serial port in Linux,
answer Y.
To compile this driver as a module, choose M here.
config GVPIOEXT
tristate "GVP IO-Extender support"
depends on APUS
help
If you want to use a GVP IO-Extender serial card in Linux, say Y.
Otherwise, say N.
config GVPIOEXT_LP
tristate "GVP IO-Extender parallel printer support"
depends on GVPIOEXT
help
Say Y to enable driving a printer from the parallel port on your
GVP IO-Extender card, N otherwise.
config GVPIOEXT_PLIP
tristate "GVP IO-Extender PLIP support"
depends on GVPIOEXT
help
Say Y to enable doing IP over the parallel port on your GVP
IO-Extender card, N otherwise.
config MULTIFACE_III_TTY
tristate "Multiface Card III serial support"
depends on APUS
help
If you want to use a Multiface III card's serial port in Linux,
answer Y.
To compile this driver as a module, choose M here.
config A2232
tristate "Commodore A2232 serial support (EXPERIMENTAL)"
depends on EXPERIMENTAL && APUS
---help---
This option supports the 2232 7-port serial card shipped with the
Amiga 2000 and other Zorro-bus machines, dating from 1989. At
a max of 19,200 bps, the ports are served by a 6551 ACIA UART chip
each, plus a 8520 CIA, and a master 6502 CPU and buffer as well. The
ports were connected with 8 pin DIN connectors on the card bracket,
for which 8 pin to DB25 adapters were supplied. The card also had
jumpers internally to toggle various pinning configurations.
This driver can be built as a module; but then "generic_serial"
will also be built as a module. This has to be loaded before
"ser_a2232". If you want to do this, answer M here.
config WHIPPET_SERIAL
tristate "Hisoft Whippet PCMCIA serial support"
depends on AMIGA_PCMCIA
help
HiSoft has a web page at <http://www.hisoft.co.uk/>, but there
is no listing for the Whippet in their Amiga section.
config APNE
tristate "PCMCIA NE2000 support"
depends on AMIGA_PCMCIA
help
If you have a PCMCIA NE2000 compatible adapter, say Y. Otherwise,
say N.
To compile this driver as a module, choose M here: the
module will be called apne.
config SERIAL_CONSOLE
bool "Support for serial port console"
depends on APUS && (AMIGA_BUILTIN_SERIAL=y || GVPIOEXT=y || MULTIFACE_III_TTY=y)
config HEARTBEAT
bool "Use power LED as a heartbeat"
depends on APUS
help
Use the power-on LED on your machine as a load meter. The exact
behavior is platform-dependent, but normally the flash frequency is
a hyperbolic function of the 5-minute load average.
config PROC_HARDWARE
bool "/proc/hardware support"
depends on APUS
source "drivers/zorro/Kconfig"
config PCI_PERMEDIA
bool "PCI for Permedia2"
depends on !4xx && !8xx && APUS


@ -0,0 +1,313 @@
choice
prompt "Machine Type"
depends on EMBEDDED6xx
config APUS
bool "Amiga-APUS"
depends on BROKEN
help
Select APUS if configuring for a PowerUP Amiga.
More information is available at:
<http://linux-apus.sourceforge.net/>.
config KATANA
bool "Artesyn-Katana"
help
Select KATANA if configuring an Artesyn KATANA 750i or 3750
cPCI board.
config WILLOW
bool "Cogent-Willow"
config CPCI690
bool "Force-CPCI690"
help
Select CPCI690 if configuring a Force CPCI690 cPCI board.
config POWERPMC250
bool "Force-PowerPMC250"
config CHESTNUT
bool "IBM 750FX Eval board or 750GX Eval board"
help
Select CHESTNUT if configuring an IBM 750FX Eval Board or a
IBM 750GX Eval board.
config SPRUCE
bool "IBM-Spruce"
config HDPU
bool "Sky-HDPU"
help
Select HDPU if configuring a Sky Computers Compute Blade.
config HDPU_FEATURES
depends on HDPU
tristate "HDPU-Features"
help
Select to enable HDPU enhanced features.
config EV64260
bool "Marvell-EV64260BP"
help
Select EV64260 if configuring a Marvell (formerly Galileo)
EV64260BP Evaluation platform.
config LOPEC
bool "Motorola-LoPEC"
config MVME5100
bool "Motorola-MVME5100"
config PPLUS
bool "Motorola-PowerPlus"
config PRPMC750
bool "Motorola-PrPMC750"
config PRPMC800
bool "Motorola-PrPMC800"
config SANDPOINT
bool "Motorola-Sandpoint"
help
Select SANDPOINT if configuring for a Motorola Sandpoint X3
(any flavor).
config RADSTONE_PPC7D
bool "Radstone Technology PPC7D board"
config PAL4
bool "SBS-Palomar4"
config GEMINI
bool "Synergy-Gemini"
depends on BROKEN
help
Select Gemini if configuring for a Synergy Microsystems' Gemini
series Single Board Computer. More information is available at:
<http://www.synergymicro.com/PressRel/97_10_15.html>.
config EST8260
bool "EST8260"
---help---
The EST8260 is a single-board computer manufactured by Wind River
Systems, Inc. (formerly Embedded Support Tools Corp.) and based on
the MPC8260. Wind River Systems has a website at
<http://www.windriver.com/>, but the EST8260 cannot be found on it
and has probably been discontinued or rebadged.
config SBC82xx
bool "SBC82xx"
---help---
SBC PowerQUICC II, single-board computer with MPC82xx CPU
Manufacturer: Wind River Systems, Inc.
Date of Release: May 2003
End of Life: -
URL: <http://www.windriver.com/>
config SBS8260
bool "SBS8260"
config RPX8260
bool "RPXSUPER"
config TQM8260
bool "TQM8260"
---help---
MPC8260 based module, little larger than credit card,
up to 128 MB global + 64 MB local RAM, 32 MB Flash,
32 kB EEPROM, 256 kB L2 Cache, 10baseT + 100baseT Ethernet,
2 x serial ports, ...
Manufacturer: TQ Components, www.tq-group.de
Date of Release: June 2001
End of Life: not yet :-)
URL: <http://www.denx.de/PDF/TQM82xx_SPEC_Rev005.pdf>
config ADS8272
bool "ADS8272"
config PQ2FADS
bool "Freescale-PQ2FADS"
help
Select PQ2FADS if you wish to configure for a Freescale
PQ2FADS board (-VR or -ZU).
config LITE5200
bool "Freescale LITE5200 / (IceCube)"
select PPC_MPC52xx
help
Support for the LITE5200 dev board for the MPC5200 from Freescale.
This is for the LITE5200 version 2.0 board. Don't know if it changes
much but it's only been tested on this board version. I think this
board is also known as IceCube.
config MPC834x_SYS
bool "Freescale MPC834x SYS"
help
This option enables support for the MPC 834x SYS evaluation board.
Be aware that PCI buses can only function when the SYS board is
plugged into the PIB (Platform IO Board) from Freescale, which
provides 3 PCI slots. The PIB's PCI initialization is the
bootloader's responsibility.
config EV64360
bool "Marvell-EV64360BP"
help
Select EV64360 if configuring a Marvell EV64360BP Evaluation
platform.
endchoice
config PQ2ADS
bool
depends on ADS8272
default y
config TQM8xxL
bool
depends on 8xx && (TQM823L || TQM850L || FPS850L || TQM855L || TQM860L)
default y
config PPC_MPC52xx
bool
config 8260
bool "CPM2 Support" if WILLOW
depends on 6xx
default y if TQM8260 || RPX8260 || EST8260 || SBS8260 || SBC82xx || PQ2FADS
help
The MPC8260 is a typical embedded CPU made by Motorola. Selecting
this option means that you wish to build a kernel for a machine with
an 8260 class CPU.
config 8272
bool
depends on 6xx
default y if ADS8272
select 8260
help
The MPC8272 CPM has a different internal dpram setup than other CPM2
devices.
config 83xx
bool
default y if MPC834x_SYS
config MPC834x
bool
default y if MPC834x_SYS
config CPM2
bool
depends on 8260 || MPC8560 || MPC8555
default y
help
The CPM2 (Communications Processor Module) is a coprocessor on
embedded CPUs made by Motorola. Selecting this option means that
you wish to build a kernel for a machine with a CPM2 coprocessor
on it (826x, 827x, 8560).
config PPC_GEN550
bool
depends on SANDPOINT || SPRUCE || PPLUS || \
PRPMC750 || PRPMC800 || LOPEC || \
(EV64260 && !SERIAL_MPSC) || CHESTNUT || RADSTONE_PPC7D || \
83xx
default y
config FORCE
bool
depends on 6xx && POWERPMC250
default y
config GT64260
bool
depends on EV64260 || CPCI690
default y
config MV64360 # Really MV64360 & MV64460
bool
depends on CHESTNUT || KATANA || RADSTONE_PPC7D || HDPU || EV64360
default y
config MV64X60
bool
depends on (GT64260 || MV64360)
default y
menu "Set bridge options"
depends on MV64X60
config NOT_COHERENT_CACHE
bool "Turn off Cache Coherency"
default n
help
Some 64x60 bridges lock up when trying to enforce cache coherency.
When this option is selected, cache coherency will be turned off.
Note that this can cause other problems (e.g., stale data being
speculatively loaded via a cached mapping). Use at your own risk.
config MV64X60_BASE
hex "Set bridge base used by firmware"
default "0xf1000000"
help
A firmware can leave the base address of the bridge's registers at
a non-standard location. If so, set this value to reflect the
address of that non-standard location.
config MV64X60_NEW_BASE
hex "Set bridge base used by kernel"
default "0xf1000000"
help
If the current base address of the bridge's registers is not where
you want it, set this value to the address that you want it moved to.
endmenu
config NONMONARCH_SUPPORT
bool "Enable Non-Monarch Support"
depends on PRPMC800
config HARRIER
bool
depends on PRPMC800
default y
config EPIC_SERIAL_MODE
bool
depends on 6xx && (LOPEC || SANDPOINT)
default y
config MPC10X_BRIDGE
bool
depends on POWERPMC250 || LOPEC || SANDPOINT
default y
config MPC10X_OPENPIC
bool
depends on POWERPMC250 || LOPEC || SANDPOINT
default y
config MPC10X_STORE_GATHERING
bool "Enable MPC10x store gathering"
depends on MPC10X_BRIDGE
config SANDPOINT_ENABLE_UART1
bool "Enable DUART mode on Sandpoint"
depends on SANDPOINT
help
If this option is enabled then the MPC824x processor will run
in DUART mode instead of UART mode.
config HARRIER_STORE_GATHERING
bool "Enable Harrier store gathering"
depends on HARRIER
config MVME5100_IPMC761_PRESENT
bool "MVME5100 configured with an IPMC761"
depends on MVME5100
config SPRUCE_BAUD_33M
bool "Spruce baud clock support"
depends on SPRUCE


@ -0,0 +1,31 @@
menu "iSeries device drivers"
depends on PPC_ISERIES
config VIOCONS
tristate "iSeries Virtual Console Support"
config VIODASD
tristate "iSeries Virtual I/O disk support"
help
If you are running on an iSeries system and you want to use
virtual disks created and managed by OS/400, say Y.
config VIOCD
tristate "iSeries Virtual I/O CD support"
help
If you are running Linux on an IBM iSeries system and you want to
read a CD drive owned by OS/400, say Y here.
config VIOTAPE
tristate "iSeries Virtual Tape Support"
help
If you are running Linux on an iSeries system and you want Linux
to read and/or write a tape drive owned by OS/400, say Y here.
endmenu
config VIOPATH
bool
depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH
default y


@ -0,0 +1,9 @@
obj-$(CONFIG_PPC_PMAC) += pmac_pic.o pmac_setup.o pmac_time.o \
pmac_feature.o pmac_pci.o pmac_sleep.o \
pmac_low_i2c.o pmac_cache.o
obj-$(CONFIG_PMAC_BACKLIGHT) += pmac_backlight.o
obj-$(CONFIG_CPU_FREQ_PMAC) += pmac_cpufreq.o
ifeq ($(CONFIG_PPC_PMAC),y)
obj-$(CONFIG_NVRAM) += pmac_nvram.o
obj-$(CONFIG_SMP) += pmac_smp.o
endif


@ -0,0 +1,31 @@
#ifndef __PMAC_H__
#define __PMAC_H__
#include <linux/pci.h>
#include <linux/ide.h>
/*
* Declaration for the various functions exported by the
* pmac_* files. Mostly for use by pmac_setup
*/
extern void pmac_get_boot_time(struct rtc_time *tm);
extern void pmac_get_rtc_time(struct rtc_time *tm);
extern int pmac_set_rtc_time(struct rtc_time *tm);
extern void pmac_read_rtc_time(void);
extern void pmac_calibrate_decr(void);
extern void pmac_pcibios_fixup(void);
extern void pmac_pci_init(void);
extern void pmac_setup_pci_dma(void);
extern void pmac_check_ht_link(void);
extern void pmac_setup_smp(void);
extern unsigned long pmac_ide_get_base(int index);
extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
unsigned long data_port, unsigned long ctrl_port, int *irq);
extern void pmac_nvram_init(void);
#endif /* __PMAC_H__ */


@ -0,0 +1,202 @@
/*
* Miscellaneous procedures for dealing with the PowerMac hardware.
* Contains support for the backlight.
*
* Copyright (C) 2000 Benjamin Herrenschmidt
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/reboot.h>
#include <linux/nvram.h>
#include <linux/console.h>
#include <asm/sections.h>
#include <asm/ptrace.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/nvram.h>
#include <asm/backlight.h>
#include <linux/adb.h>
#include <linux/pmu.h>
static struct backlight_controller *backlighter;
static void* backlighter_data;
static int backlight_autosave;
static int backlight_level = BACKLIGHT_MAX;
static int backlight_enabled = 1;
static int backlight_req_level = -1;
static int backlight_req_enable = -1;
static void backlight_callback(void *);
static DECLARE_WORK(backlight_work, backlight_callback, NULL);
void register_backlight_controller(struct backlight_controller *ctrler,
void *data, char *type)
{
struct device_node* bk_node;
char *prop;
int valid = 0;
/* There's already a matching controller, bail out */
if (backlighter != NULL)
return;
bk_node = find_devices("backlight");
#ifdef CONFIG_ADB_PMU
/* Special case for the old PowerBook since I can't test on it */
backlight_autosave = machine_is_compatible("AAPL,3400/2400")
|| machine_is_compatible("AAPL,3500");
if ((backlight_autosave
|| machine_is_compatible("AAPL,PowerBook1998")
|| machine_is_compatible("PowerBook1,1"))
&& !strcmp(type, "pmu"))
valid = 1;
#endif
if (bk_node) {
prop = get_property(bk_node, "backlight-control", NULL);
if (prop && !strncmp(prop, type, strlen(type)))
valid = 1;
}
if (!valid)
return;
backlighter = ctrler;
backlighter_data = data;
if (bk_node && !backlight_autosave)
prop = get_property(bk_node, "bklt", NULL);
else
prop = NULL;
if (prop) {
backlight_level = ((*prop)+1) >> 1;
if (backlight_level > BACKLIGHT_MAX)
backlight_level = BACKLIGHT_MAX;
}
#ifdef CONFIG_ADB_PMU
if (backlight_autosave) {
struct adb_request req;
pmu_request(&req, NULL, 2, 0xd9, 0);
while (!req.complete)
pmu_poll();
backlight_level = req.reply[0] >> 4;
}
#endif
acquire_console_sem();
if (!backlighter->set_enable(1, backlight_level, data))
backlight_enabled = 1;
release_console_sem();
printk(KERN_INFO "Registered \"%s\" backlight controller, "
"level: %d/15\n", type, backlight_level);
}
EXPORT_SYMBOL(register_backlight_controller);
void unregister_backlight_controller(struct backlight_controller
*ctrler, void *data)
{
/* We keep the current backlight level (for now) */
if (ctrler == backlighter && data == backlighter_data)
backlighter = NULL;
}
EXPORT_SYMBOL(unregister_backlight_controller);
static int __set_backlight_enable(int enable)
{
int rc;
if (!backlighter)
return -ENODEV;
acquire_console_sem();
rc = backlighter->set_enable(enable, backlight_level,
backlighter_data);
if (!rc)
backlight_enabled = enable;
release_console_sem();
return rc;
}
int set_backlight_enable(int enable)
{
if (!backlighter)
return -ENODEV;
backlight_req_enable = enable;
schedule_work(&backlight_work);
return 0;
}
EXPORT_SYMBOL(set_backlight_enable);
int get_backlight_enable(void)
{
if (!backlighter)
return -ENODEV;
return backlight_enabled;
}
EXPORT_SYMBOL(get_backlight_enable);
static int __set_backlight_level(int level)
{
int rc = 0;
if (!backlighter)
return -ENODEV;
if (level < BACKLIGHT_MIN)
level = BACKLIGHT_OFF;
if (level > BACKLIGHT_MAX)
level = BACKLIGHT_MAX;
acquire_console_sem();
if (backlight_enabled)
rc = backlighter->set_level(level, backlighter_data);
if (!rc)
backlight_level = level;
release_console_sem();
if (!rc && !backlight_autosave) {
level <<=1;
if (level & 0x10)
level |= 0x01;
// -- todo: save to property "bklt"
}
return rc;
}
int set_backlight_level(int level)
{
if (!backlighter)
return -ENODEV;
backlight_req_level = level;
schedule_work(&backlight_work);
return 0;
}
EXPORT_SYMBOL(set_backlight_level);
int get_backlight_level(void)
{
if (!backlighter)
return -ENODEV;
return backlight_level;
}
EXPORT_SYMBOL(get_backlight_level);
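/* Worker that applies the latest requested level/enable state. The
 * cmpxchg loop re-runs the body if a new request arrived while we were
 * programming the hardware, and resets the request slots to -1 ("none")
 * once they have been consumed.
 */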
static void backlight_callback(void *dummy)
{
int level, enable;
do {
level = backlight_req_level;
enable = backlight_req_enable;
mb();
if (level >= 0)
__set_backlight_level(level);
if (enable >= 0)
__set_backlight_enable(enable);
} while(cmpxchg(&backlight_req_level, level, -1) != level ||
cmpxchg(&backlight_req_enable, enable, -1) != enable);
}


@ -0,0 +1,359 @@
/*
* This file contains low-level cache management functions
* used for sleep and CPU speed changes on Apple machines.
* (In fact the only thing that is Apple-specific is that we assume
* that we can read from ROM at physical address 0xfff00000.)
*
* Copyright (C) 2004 Paul Mackerras (paulus@samba.org) and
* Benjamin Herrenschmidt (benh@kernel.crashing.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/cputable.h>
/*
* Flush and disable all data caches (dL1, L2, L3). This is used
* when going to sleep, when doing a PMU based cpufreq transition,
* or when "offlining" a CPU on SMP machines. This code is over
* paranoid, but I've had enough issues with various CPU revs and
* bugs that I decided it was worth being over-cautious
*/
_GLOBAL(flush_disable_caches)
#ifndef CONFIG_6xx
blr
#else
BEGIN_FTR_SECTION
b flush_disable_745x
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
BEGIN_FTR_SECTION
b flush_disable_75x
END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
b __flush_disable_L1
/* This is the code for G3 and 74[01]0 */
flush_disable_75x:
mflr r10
/* Turn off EE and DR in MSR */
mfmsr r11
rlwinm r0,r11,0,~MSR_EE
rlwinm r0,r0,0,~MSR_DR
sync
mtmsr r0
isync
/* Stop DST streams */
BEGIN_FTR_SECTION
DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
/* Stop DPM */
mfspr r8,SPRN_HID0 /* Save SPRN_HID0 in r8 */
rlwinm r4,r8,0,12,10 /* Turn off HID0[DPM] */
sync
mtspr SPRN_HID0,r4 /* Disable DPM */
sync
/* Disp-flush L1. We have a weird problem here that I never
* totally figured out. On 750FX, using the ROM for the flush
* results in a non-working flush. We use that workaround for
* now until I finally understand what's going on. --BenH
*/
/* ROM base by default */
lis r4,0xfff0
mfpvr r3
srwi r3,r3,16
cmplwi cr0,r3,0x7000
bne+ 1f
/* RAM base on 750FX */
li r4,0
1: li r4,0x4000
mtctr r4
1: lwz r0,0(r4)
addi r4,r4,32
bdnz 1b
sync
isync
/* Disable / invalidate / enable L1 data */
mfspr r3,SPRN_HID0
rlwinm r3,r3,0,~(HID0_DCE | HID0_ICE)
mtspr SPRN_HID0,r3
sync
isync
ori r3,r3,(HID0_DCE|HID0_DCI|HID0_ICE|HID0_ICFI)
sync
isync
mtspr SPRN_HID0,r3
xori r3,r3,(HID0_DCI|HID0_ICFI)
mtspr SPRN_HID0,r3
sync
/* Get the current enable bit of the L2CR into r4 */
mfspr r5,SPRN_L2CR
/* Set to data-only (pre-745x bit) */
oris r3,r5,L2CR_L2DO@h
b 2f
/* When disabling L2, code must be in L1 */
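/* The forward/backward branch dance below (also used further down)
 * appears to ensure the mtspr and the following sync/isync already sit
 * in the 32-byte-aligned L1 icache line, and that a sync/isync executes
 * before the mtspr takes effect.
 */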
.balign 32
1: mtspr SPRN_L2CR,r3
3: sync
isync
b 1f
2: b 3f
3: sync
isync
b 1b
1: /* disp-flush L2. The interesting thing here is that the L2 can be
* up to 2Mb ... so using the ROM, we'll end up wrapping back to memory
* but that is probably fine. We disp-flush over 4Mb to be safe
*/
lis r4,2
mtctr r4
lis r4,0xfff0
1: lwz r0,0(r4)
addi r4,r4,32
bdnz 1b
sync
isync
lis r4,2
mtctr r4
lis r4,0xfff0
1: dcbf 0,r4
addi r4,r4,32
bdnz 1b
sync
isync
/* now disable L2 */
rlwinm r5,r5,0,~L2CR_L2E
b 2f
/* When disabling L2, code must be in L1 */
.balign 32
1: mtspr SPRN_L2CR,r5
3: sync
isync
b 1f
2: b 3f
3: sync
isync
b 1b
1: sync
isync
/* Invalidate L2. This is pre-745x, we clear the L2I bit ourselves */
oris r4,r5,L2CR_L2I@h
mtspr SPRN_L2CR,r4
sync
isync
/* Wait for the invalidation to complete */
1: mfspr r3,SPRN_L2CR
rlwinm. r0,r3,0,31,31
bne 1b
/* Clear L2I */
xoris r4,r4,L2CR_L2I@h
sync
mtspr SPRN_L2CR,r4
sync
/* now disable the L1 data cache */
mfspr r0,SPRN_HID0
rlwinm r0,r0,0,~(HID0_DCE|HID0_ICE)
mtspr SPRN_HID0,r0
sync
isync
/* Restore HID0[DPM] to whatever it was before */
sync
mfspr r0,SPRN_HID0
rlwimi r0,r8,0,11,11 /* Turn back HID0[DPM] */
mtspr SPRN_HID0,r0
sync
/* restore DR and EE */
sync
mtmsr r11
isync
mtlr r10
blr
/* This code is for 745x processors */
flush_disable_745x:
/* Turn off EE and DR in MSR */
mfmsr r11
rlwinm r0,r11,0,~MSR_EE
rlwinm r0,r0,0,~MSR_DR
sync
mtmsr r0
isync
/* Stop prefetch streams */
DSSALL
sync
/* Disable L2 prefetching */
mfspr r0,SPRN_MSSCR0
rlwinm r0,r0,0,0,29
mtspr SPRN_MSSCR0,r0
sync
isync
lis r4,0
dcbf 0,r4
dcbf 0,r4
dcbf 0,r4
dcbf 0,r4
dcbf 0,r4
dcbf 0,r4
dcbf 0,r4
dcbf 0,r4
/* Due to a bug with the HW flush on some CPU revs, we occasionally
* experience data corruption. I'm adding a displacement flush along
* with a dcbf loop over a few Mb to "help". The problem isn't totally
* fixed by this in theory, but at least, in practice, I couldn't reproduce
* it even with a big hammer...
*/
lis r4,0x0002
mtctr r4
li r4,0
1:
lwz r0,0(r4)
addi r4,r4,32 /* Go to start of next cache line */
bdnz 1b
isync
/* Now, flush the first 4MB of memory */
lis r4,0x0002
mtctr r4
li r4,0
sync
1:
dcbf 0,r4
addi r4,r4,32 /* Go to start of next cache line */
bdnz 1b
/* Flush and disable the L1 data cache */
mfspr r6,SPRN_LDSTCR
lis r3,0xfff0 /* read from ROM for displacement flush */
li r4,0xfe /* start with only way 0 unlocked */
li r5,128 /* 128 lines in each way */
1: mtctr r5
rlwimi r6,r4,0,24,31
mtspr SPRN_LDSTCR,r6
sync
isync
2: lwz r0,0(r3) /* touch each cache line */
addi r3,r3,32
bdnz 2b
rlwinm r4,r4,1,24,30 /* move on to the next way */
ori r4,r4,1
cmpwi r4,0xff /* all done? */
bne 1b
/* now unlock the L1 data cache */
li r4,0
rlwimi r6,r4,0,24,31
sync
mtspr SPRN_LDSTCR,r6
sync
isync
/* Flush the L2 cache using the hardware assist */
mfspr r3,SPRN_L2CR
cmpwi r3,0 /* check if it is enabled first */
bge 4f
oris r0,r3,(L2CR_L2IO_745x|L2CR_L2DO_745x)@h
b 2f
/* When disabling/locking L2, code must be in L1 */
.balign 32
1: mtspr SPRN_L2CR,r0 /* lock the L2 cache */
3: sync
isync
b 1f
2: b 3f
3: sync
isync
b 1b
1: sync
isync
ori r0,r3,L2CR_L2HWF_745x
sync
mtspr SPRN_L2CR,r0 /* set the hardware flush bit */
3: mfspr r0,SPRN_L2CR /* wait for it to go to 0 */
andi. r0,r0,L2CR_L2HWF_745x
bne 3b
sync
rlwinm r3,r3,0,~L2CR_L2E
b 2f
/* When disabling L2, code must be in L1 */
.balign 32
1: mtspr SPRN_L2CR,r3 /* disable the L2 cache */
3: sync
isync
b 1f
2: b 3f
3: sync
isync
b 1b
1: sync
isync
oris r4,r3,L2CR_L2I@h
mtspr SPRN_L2CR,r4
sync
isync
1: mfspr r4,SPRN_L2CR
andis. r0,r4,L2CR_L2I@h
bne 1b
sync
BEGIN_FTR_SECTION
/* Flush the L3 cache using the hardware assist */
4: mfspr r3,SPRN_L3CR
cmpwi r3,0 /* check if it is enabled */
bge 6f
oris r0,r3,L3CR_L3IO@h
ori r0,r0,L3CR_L3DO
sync
mtspr SPRN_L3CR,r0 /* lock the L3 cache */
sync
isync
ori r0,r0,L3CR_L3HWF
sync
mtspr SPRN_L3CR,r0 /* set the hardware flush bit */
5: mfspr r0,SPRN_L3CR /* wait for it to go to zero */
andi. r0,r0,L3CR_L3HWF
bne 5b
rlwinm r3,r3,0,~L3CR_L3E
sync
mtspr SPRN_L3CR,r3 /* disable the L3 cache */
sync
ori r4,r3,L3CR_L3I
mtspr SPRN_L3CR,r4
1: mfspr r4,SPRN_L3CR
andi. r0,r4,L3CR_L3I
bne 1b
sync
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
6: mfspr r0,SPRN_HID0 /* now disable the L1 data cache */
rlwinm r0,r0,0,~HID0_DCE
mtspr SPRN_HID0,r0
sync
isync
mtmsr r11 /* restore DR and EE */
isync
blr
#endif /* CONFIG_6xx */


@ -0,0 +1,728 @@
/*
* arch/ppc/platforms/pmac_cpufreq.c
*
* Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
* Copyright (C) 2004 John Steele Scott <toojays@toojays.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* TODO: Need a big cleanup here. Basically, we need to have different
* cpufreq_driver structures for the different type of HW instead of the
* current mess. We also need to better deal with the detection of the
* type of machine.
*
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/sysdev.h>
#include <linux/i2c.h>
#include <linux/hardirq.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/pmac_feature.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/cputable.h>
#include <asm/time.h>
#include <asm/system.h>
#include <asm/mpic.h>
#include <asm/keylargo.h>
/* WARNING !!! This will cause calibrate_delay() to be called,
* but this is an __init function ! So you MUST go edit
* init/main.c to make it non-init before enabling DEBUG_FREQ
*/
#undef DEBUG_FREQ
/*
* There is a problem with the core cpufreq code on SMP kernels,
* it won't recalculate the Bogomips properly
*/
#ifdef CONFIG_SMP
#warning "WARNING, CPUFREQ not recommended on SMP kernels"
#endif
extern void low_choose_7447a_dfs(int dfs);
extern void low_choose_750fx_pll(int pll);
extern void low_sleep_handler(void);
/*
* Currently, PowerMac cpufreq supports only high & low frequencies
* that are set by the firmware
*/
static unsigned int low_freq;
static unsigned int hi_freq;
static unsigned int cur_freq;
static unsigned int sleep_freq;
/*
* Different models use different mechanisms to switch the frequency
*/
static int (*set_speed_proc)(int low_speed);
static unsigned int (*get_speed_proc)(void);
/*
* Some definitions used by the various speedprocs
*/
static u32 voltage_gpio;
static u32 frequency_gpio;
static u32 slew_done_gpio;
static int no_schedule;
static int has_cpu_l2lve;
static int is_pmu_based;
/* There are only two frequency states for each processor. Values
* are in kHz for the time being.
*/
#define CPUFREQ_HIGH 0
#define CPUFREQ_LOW 1
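/* The frequency values below are placeholders; pmac_cpufreq_setup()
 * fills them in once low_freq and hi_freq have been determined.
 */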
static struct cpufreq_frequency_table pmac_cpu_freqs[] = {
{CPUFREQ_HIGH, 0},
{CPUFREQ_LOW, 0},
{0, CPUFREQ_TABLE_END},
};
static struct freq_attr* pmac_cpu_freqs_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static inline void local_delay(unsigned long ms)
{
if (no_schedule)
mdelay(ms);
else
msleep(ms);
}
static inline void wakeup_decrementer(void)
{
set_dec(tb_ticks_per_jiffy);
/* No currently-supported powerbook has a 601,
* so use get_tbl, not native
*/
last_jiffy_stamp(0) = tb_last_stamp = get_tbl();
}
#ifdef DEBUG_FREQ
static inline void debug_calc_bogomips(void)
{
/* This will cause a recalc of bogomips and display the
* result. We backup/restore the value to avoid affecting the
* core cpufreq framework's own calculation.
*/
extern void calibrate_delay(void);
unsigned long save_lpj = loops_per_jiffy;
calibrate_delay();
loops_per_jiffy = save_lpj;
}
#endif /* DEBUG_FREQ */
/* Switch CPU speed under 750FX CPU control
*/
static int cpu_750fx_cpu_speed(int low_speed)
{
u32 hid2;
if (low_speed == 0) {
/* ramping up, set voltage first */
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
/* Make sure we sleep for at least 1ms */
local_delay(10);
/* tweak L2 for high voltage */
if (has_cpu_l2lve) {
hid2 = mfspr(SPRN_HID2);
hid2 &= ~0x2000;
mtspr(SPRN_HID2, hid2);
}
}
#ifdef CONFIG_6xx
low_choose_750fx_pll(low_speed);
#endif
if (low_speed == 1) {
/* tweak L2 for low voltage */
if (has_cpu_l2lve) {
hid2 = mfspr(SPRN_HID2);
hid2 |= 0x2000;
mtspr(SPRN_HID2, hid2);
}
/* ramping down, set voltage last */
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
local_delay(10);
}
return 0;
}
static unsigned int cpu_750fx_get_cpu_speed(void)
{
if (mfspr(SPRN_HID1) & HID1_PS)
return low_freq;
else
return hi_freq;
}
/* Switch CPU speed using DFS */
static int dfs_set_cpu_speed(int low_speed)
{
if (low_speed == 0) {
/* ramping up, set voltage first */
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
/* Make sure we sleep for at least 1ms */
local_delay(1);
}
/* set frequency */
#ifdef CONFIG_6xx
low_choose_7447a_dfs(low_speed);
#endif
udelay(100);
if (low_speed == 1) {
/* ramping down, set voltage last */
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
local_delay(1);
}
return 0;
}
static unsigned int dfs_get_cpu_speed(void)
{
if (mfspr(SPRN_HID1) & HID1_DFS)
return low_freq;
else
return hi_freq;
}
/* Switch CPU speed using slewing GPIOs
*/
static int gpios_set_cpu_speed(int low_speed)
{
int gpio, timeout = 0;
/* If ramping up, set voltage first */
if (low_speed == 0) {
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
/* Delay is way too big but it's ok, we schedule */
local_delay(10);
}
/* Set frequency */
gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
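/* Bit 0 of the frequency GPIO reflects the current setting (set = high
 * speed, see pmac_cpufreq_init_MacRISC3()); skip the switch if it
 * already matches the request.
 */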
if (low_speed == ((gpio & 0x01) == 0))
goto skip;
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, frequency_gpio,
low_speed ? 0x04 : 0x05);
udelay(200);
do {
if (++timeout > 100)
break;
local_delay(1);
gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, slew_done_gpio, 0);
} while((gpio & 0x02) == 0);
skip:
/* If ramping down, set voltage last */
if (low_speed == 1) {
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
/* Delay is way too big but it's ok, we schedule */
local_delay(10);
}
#ifdef DEBUG_FREQ
debug_calc_bogomips();
#endif
return 0;
}
/* Switch CPU speed under PMU control
*/
static int pmu_set_cpu_speed(int low_speed)
{
struct adb_request req;
unsigned long save_l2cr;
unsigned long save_l3cr;
unsigned int pic_prio;
unsigned long flags;
preempt_disable();
#ifdef DEBUG_FREQ
printk(KERN_DEBUG "HID1, before: %x\n", mfspr(SPRN_HID1));
#endif
pmu_suspend();
/* Disable all interrupt sources on openpic */
pic_prio = mpic_cpu_get_priority();
mpic_cpu_set_priority(0xf);
/* Make sure the decrementer won't interrupt us */
asm volatile("mtdec %0" : : "r" (0x7fffffff));
/* Make sure any pending DEC interrupt occurring while we did
* the above didn't re-enable the DEC */
mb();
asm volatile("mtdec %0" : : "r" (0x7fffffff));
/* We can now disable MSR_EE */
local_irq_save(flags);
/* Giveup the FPU & vec */
enable_kernel_fp();
#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC))
enable_kernel_altivec();
#endif /* CONFIG_ALTIVEC */
/* Save & disable L2 and L3 caches */
save_l3cr = _get_L3CR(); /* (returns -1 if not available) */
save_l2cr = _get_L2CR(); /* (returns -1 if not available) */
/* Send the new speed command. My assumption is that this command
* will cause PLL_CFG[0..3] to be changed next time CPU goes to sleep
*/
pmu_request(&req, NULL, 6, PMU_CPU_SPEED, 'W', 'O', 'O', 'F', low_speed);
while (!req.complete)
pmu_poll();
/* Prepare the northbridge for the speed transition */
pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,1,1);
/* Call low level code to backup CPU state and recover from
* hardware reset
*/
low_sleep_handler();
/* Restore the northbridge */
pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,1,0);
/* Restore L2 cache */
if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0)
_set_L2CR(save_l2cr);
/* Restore L3 cache */
if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0)
_set_L3CR(save_l3cr);
/* Restore userland MMU context */
set_context(current->active_mm->context, current->active_mm->pgd);
#ifdef DEBUG_FREQ
printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1));
#endif
/* Restore low level PMU operations */
pmu_unlock();
/* Restore decrementer */
wakeup_decrementer();
/* Restore interrupts */
mpic_cpu_set_priority(pic_prio);
/* Let interrupts flow again ... */
local_irq_restore(flags);
#ifdef DEBUG_FREQ
debug_calc_bogomips();
#endif
pmu_resume();
preempt_enable();
return 0;
}
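/* Common switch path: issue the cpufreq notifications, disable the L3
 * cache around a transition to low speed (and restore it when going back
 * to high speed), and call the platform-specific set_speed_proc().
 */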
static int do_set_cpu_speed(int speed_mode, int notify)
{
struct cpufreq_freqs freqs;
unsigned long l3cr;
static unsigned long prev_l3cr;
freqs.old = cur_freq;
freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
freqs.cpu = smp_processor_id();
if (freqs.old == freqs.new)
return 0;
if (notify)
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
if (speed_mode == CPUFREQ_LOW &&
cpu_has_feature(CPU_FTR_L3CR)) {
l3cr = _get_L3CR();
if (l3cr & L3CR_L3E) {
prev_l3cr = l3cr;
_set_L3CR(0);
}
}
set_speed_proc(speed_mode == CPUFREQ_LOW);
if (speed_mode == CPUFREQ_HIGH &&
cpu_has_feature(CPU_FTR_L3CR)) {
l3cr = _get_L3CR();
if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr)
_set_L3CR(prev_l3cr);
}
if (notify)
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
return 0;
}
static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
{
return cur_freq;
}
static int pmac_cpufreq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs);
}
static int pmac_cpufreq_target( struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
unsigned int newstate = 0;
if (cpufreq_frequency_table_target(policy, pmac_cpu_freqs,
target_freq, relation, &newstate))
return -EINVAL;
return do_set_cpu_speed(newstate, 1);
}
unsigned int pmac_get_one_cpufreq(int i)
{
/* Supports only one CPU for now */
return (i == 0) ? cur_freq : 0;
}
static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
if (policy->cpu != 0)
return -ENODEV;
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
policy->cur = cur_freq;
cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu);
return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs);
}
static u32 read_gpio(struct device_node *np)
{
u32 *reg = (u32 *)get_property(np, "reg", NULL);
u32 offset;
if (reg == NULL)
return 0;
/* That works for all keylargos but shall be fixed properly
* some day... The problem is that it seems we can't rely
* on the "reg" property of the GPIO nodes, they are either
* relative to the base of KeyLargo or to the base of the
* GPIO space, and the device-tree doesn't help.
*/
offset = *reg;
if (offset < KEYLARGO_GPIO_LEVELS0)
offset += KEYLARGO_GPIO_LEVELS0;
return offset;
}
static int pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg)
{
/* Ok, this could be made a bit smarter, but let's be robust for now. We
* always force a speed change to high speed before sleep, to make sure
* we have appropriate voltage and/or bus speed for the wakeup process,
* and to make sure our loops_per_jiffy is "good enough", that is, will
* not cause too-short delays if we sleep in low speed and wake in high
* speed..
*/
no_schedule = 1;
sleep_freq = cur_freq;
if (cur_freq == low_freq && !is_pmu_based)
do_set_cpu_speed(CPUFREQ_HIGH, 0);
return 0;
}
static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
{
/* If we resume, first check if we have a get() function */
if (get_speed_proc)
cur_freq = get_speed_proc();
else
cur_freq = 0;
/* We don't, hrm... we don't really know our speed here, best
* is that we force a switch to whatever it was, which is
* probably high speed due to our suspend() routine
*/
do_set_cpu_speed(sleep_freq == low_freq ?
CPUFREQ_LOW : CPUFREQ_HIGH, 0);
no_schedule = 0;
return 0;
}
static struct cpufreq_driver pmac_cpufreq_driver = {
.verify = pmac_cpufreq_verify,
.target = pmac_cpufreq_target,
.get = pmac_cpufreq_get_speed,
.init = pmac_cpufreq_cpu_init,
.suspend = pmac_cpufreq_suspend,
.resume = pmac_cpufreq_resume,
.flags = CPUFREQ_PM_NO_WARN,
.attr = pmac_cpu_freqs_attr,
.name = "powermac",
.owner = THIS_MODULE,
};
static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
{
struct device_node *volt_gpio_np = of_find_node_by_name(NULL,
"voltage-gpio");
struct device_node *freq_gpio_np = of_find_node_by_name(NULL,
"frequency-gpio");
struct device_node *slew_done_gpio_np = of_find_node_by_name(NULL,
"slewing-done");
u32 *value;
/*
* Check to see if it's GPIO driven or PMU only
*
* The way we extract the GPIO address is slightly hackish, but it
* works well enough for now. We need to abstract the whole GPIO
* stuff sooner or later anyway
*/
if (volt_gpio_np)
voltage_gpio = read_gpio(volt_gpio_np);
if (freq_gpio_np)
frequency_gpio = read_gpio(freq_gpio_np);
if (slew_done_gpio_np)
slew_done_gpio = read_gpio(slew_done_gpio_np);
/* If we use the frequency GPIOs, calculate the min/max speeds based
* on the bus frequencies
*/
if (frequency_gpio && slew_done_gpio) {
int lenp, rc;
u32 *freqs, *ratio;
freqs = (u32 *)get_property(cpunode, "bus-frequencies", &lenp);
lenp /= sizeof(u32);
if (freqs == NULL || lenp != 2) {
printk(KERN_ERR "cpufreq: bus-frequencies incorrect or missing\n");
return 1;
}
ratio = (u32 *)get_property(cpunode, "processor-to-bus-ratio*2", NULL);
if (ratio == NULL) {
printk(KERN_ERR "cpufreq: processor-to-bus-ratio*2 missing\n");
return 1;
}
/* Get the min/max bus frequencies */
low_freq = min(freqs[0], freqs[1]);
hi_freq = max(freqs[0], freqs[1]);
/* Grrrr.. It _seems_ that the device-tree is lying about the low bus
* frequency: it claims it is around 84MHz on some models while
* it appears to be approx. 101MHz on all. Let's hack around here...
* fortunately, we don't need to be too precise
*/
if (low_freq < 98000000)
low_freq = 101000000;
/* Convert those to CPU core clocks */
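/* (the property holds the ratio times two and the bus clock is
 * presumably in Hz, so dividing by 2000 yields the core clock in kHz)
 */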
low_freq = (low_freq * (*ratio)) / 2000;
hi_freq = (hi_freq * (*ratio)) / 2000;
/* Now that we have the frequencies, we read the GPIO to see what our
* current speed is
*/
rc = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
cur_freq = (rc & 0x01) ? hi_freq : low_freq;
set_speed_proc = gpios_set_cpu_speed;
return 1;
}
/* If we use the PMU, look for the min & max frequencies in the
* device-tree
*/
value = (u32 *)get_property(cpunode, "min-clock-frequency", NULL);
if (!value)
return 1;
low_freq = (*value) / 1000;
/* The PowerBook G4 12" (PowerBook6,1) has an error in the device-tree
* here */
if (low_freq < 100000)
low_freq *= 10;
value = (u32 *)get_property(cpunode, "max-clock-frequency", NULL);
if (!value)
return 1;
hi_freq = (*value) / 1000;
set_speed_proc = pmu_set_cpu_speed;
is_pmu_based = 1;
return 0;
}
static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
{
struct device_node *volt_gpio_np;
if (get_property(cpunode, "dynamic-power-step", NULL) == NULL)
return 1;
volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
if (volt_gpio_np)
voltage_gpio = read_gpio(volt_gpio_np);
if (!voltage_gpio){
printk(KERN_ERR "cpufreq: missing cpu-vcore-select gpio\n");
return 1;
}
/* OF only reports the high frequency */
hi_freq = cur_freq;
low_freq = cur_freq/2;
/* Read actual frequency from CPU */
cur_freq = dfs_get_cpu_speed();
set_speed_proc = dfs_set_cpu_speed;
get_speed_proc = dfs_get_cpu_speed;
return 0;
}
static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
{
struct device_node *volt_gpio_np;
u32 pvr, *value;
if (get_property(cpunode, "dynamic-power-step", NULL) == NULL)
return 1;
hi_freq = cur_freq;
value = (u32 *)get_property(cpunode, "reduced-clock-frequency", NULL);
if (!value)
return 1;
low_freq = (*value) / 1000;
volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
if (volt_gpio_np)
voltage_gpio = read_gpio(volt_gpio_np);
pvr = mfspr(SPRN_PVR);
has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
set_speed_proc = cpu_750fx_cpu_speed;
get_speed_proc = cpu_750fx_get_cpu_speed;
cur_freq = cpu_750fx_get_cpu_speed();
return 0;
}
/* Currently, we support the following machines:
*
* - Titanium PowerBook 1Ghz (PMU based, 667Mhz & 1Ghz)
* - Titanium PowerBook 800 (PMU based, 667Mhz & 800Mhz)
* - Titanium PowerBook 400 (PMU based, 300Mhz & 400Mhz)
* - Titanium PowerBook 500 (PMU based, 300Mhz & 500Mhz)
* - iBook2 500/600 (PMU based, 400Mhz & 500/600Mhz)
* - iBook2 700 (CPU based, 400Mhz & 700Mhz, support low voltage)
* - Recent MacRISC3 laptops
* - All new machines with 7447A CPUs
*/
static int __init pmac_cpufreq_setup(void)
{
struct device_node *cpunode;
u32 *value;
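/* Allow disabling the driver entirely with "nocpufreq" on the kernel
 * command line.
 */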
if (strstr(cmd_line, "nocpufreq"))
return 0;
/* Assume only one CPU */
cpunode = find_type_devices("cpu");
if (!cpunode)
goto out;
/* Get current cpu clock freq */
value = (u32 *)get_property(cpunode, "clock-frequency", NULL);
if (!value)
goto out;
cur_freq = (*value) / 1000;
/* Check for 7447A based MacRISC3 */
if (machine_is_compatible("MacRISC3") &&
get_property(cpunode, "dynamic-power-step", NULL) &&
PVR_VER(mfspr(SPRN_PVR)) == 0x8003) {
pmac_cpufreq_init_7447A(cpunode);
/* Check for other MacRISC3 machines */
} else if (machine_is_compatible("PowerBook3,4") ||
machine_is_compatible("PowerBook3,5") ||
machine_is_compatible("MacRISC3")) {
pmac_cpufreq_init_MacRISC3(cpunode);
/* Else check for iBook2 500/600 */
} else if (machine_is_compatible("PowerBook4,1")) {
hi_freq = cur_freq;
low_freq = 400000;
set_speed_proc = pmu_set_cpu_speed;
is_pmu_based = 1;
}
/* Else check for TiPb 400 & 500 */
else if (machine_is_compatible("PowerBook3,2")) {
/* We only know about the 400 MHz and the 500 MHz model;
* they both have 300 MHz as the low frequency
*/
if (cur_freq < 350000 || cur_freq > 550000)
goto out;
hi_freq = cur_freq;
low_freq = 300000;
set_speed_proc = pmu_set_cpu_speed;
is_pmu_based = 1;
}
/* Else check for 750FX */
else if (PVR_VER(mfspr(SPRN_PVR)) == 0x7000)
pmac_cpufreq_init_750FX(cpunode);
out:
if (set_speed_proc == NULL)
return -ENODEV;
pmac_cpu_freqs[CPUFREQ_LOW].frequency = low_freq;
pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq;
printk(KERN_INFO "Registering PowerMac CPU frequency driver\n");
printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n",
low_freq/1000, hi_freq/1000, cur_freq/1000);
return cpufreq_register_driver(&pmac_cpufreq_driver);
}
module_init(pmac_cpufreq_setup);

Diff for this file not shown because of its size.


@ -0,0 +1,523 @@
/*
* arch/ppc/platforms/pmac_low_i2c.c
*
* Copyright (C) 2003 Ben. Herrenschmidt (benh@kernel.crashing.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* This file contains some low-level i2c access routines that
* need to be used by various bits of the PowerMac platform code
* at times where the real asynchronous & interrupt driven driver
* cannot be used. The API borrows some semantics from the darwin
* driver in order to ease the implementation of the platform
* properties parser
*/
#undef DEBUG
#include <linux/config.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <asm/keylargo.h>
#include <asm/uninorth.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_low_i2c.h>
#define MAX_LOW_I2C_HOST 4
#ifdef DEBUG
#define DBG(x...) do {\
printk(KERN_DEBUG "KW:" x); \
} while(0)
#else
#define DBG(x...)
#endif
struct low_i2c_host;
typedef int (*low_i2c_func_t)(struct low_i2c_host *host, u8 addr, u8 sub, u8 *data, int len);
struct low_i2c_host
{
struct device_node *np; /* OF device node */
struct semaphore mutex; /* Access mutex for use by i2c-keywest */
low_i2c_func_t func; /* Access function */
unsigned int is_open : 1; /* Poor man's access control */
int mode; /* Current mode */
int channel; /* Current channel */
int num_channels; /* Number of channels */
void __iomem *base; /* For keywest-i2c, base address */
int bsteps; /* And register stepping */
int speed; /* And speed */
};
static struct low_i2c_host low_i2c_hosts[MAX_LOW_I2C_HOST];
/* No locking is necessary on allocation, we are running way before
* anything can race with us
*/
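/* Note that passing np == NULL returns the first unused slot; that is
 * how the *_low_i2c_add() routines below allocate a host.
 */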
static struct low_i2c_host *find_low_i2c_host(struct device_node *np)
{
int i;
for (i = 0; i < MAX_LOW_I2C_HOST; i++)
if (low_i2c_hosts[i].np == np)
return &low_i2c_hosts[i];
return NULL;
}
/*
*
* i2c-keywest implementation (UniNorth, U2, U3, Keylargo's)
*
*/
/*
* Keywest i2c definitions borrowed from drivers/i2c/i2c-keywest.h,
* should be moved somewhere in include/asm-ppc/
*/
/* Register indices */
typedef enum {
reg_mode = 0,
reg_control,
reg_status,
reg_isr,
reg_ier,
reg_addr,
reg_subaddr,
reg_data
} reg_t;
/* Mode register */
#define KW_I2C_MODE_100KHZ 0x00
#define KW_I2C_MODE_50KHZ 0x01
#define KW_I2C_MODE_25KHZ 0x02
#define KW_I2C_MODE_DUMB 0x00
#define KW_I2C_MODE_STANDARD 0x04
#define KW_I2C_MODE_STANDARDSUB 0x08
#define KW_I2C_MODE_COMBINED 0x0C
#define KW_I2C_MODE_MODE_MASK 0x0C
#define KW_I2C_MODE_CHAN_MASK 0xF0
/* Control register */
#define KW_I2C_CTL_AAK 0x01
#define KW_I2C_CTL_XADDR 0x02
#define KW_I2C_CTL_STOP 0x04
#define KW_I2C_CTL_START 0x08
/* Status register */
#define KW_I2C_STAT_BUSY 0x01
#define KW_I2C_STAT_LAST_AAK 0x02
#define KW_I2C_STAT_LAST_RW 0x04
#define KW_I2C_STAT_SDA 0x08
#define KW_I2C_STAT_SCL 0x10
/* IER & ISR registers */
#define KW_I2C_IRQ_DATA 0x01
#define KW_I2C_IRQ_ADDR 0x02
#define KW_I2C_IRQ_STOP 0x04
#define KW_I2C_IRQ_START 0x08
#define KW_I2C_IRQ_MASK 0x0F
/* State machine states */
enum {
state_idle,
state_addr,
state_read,
state_write,
state_stop,
state_dead
};
#define WRONG_STATE(name) do {\
printk(KERN_DEBUG "KW: wrong state. Got %s, state: %s (isr: %02x)\n", \
name, __kw_state_names[state], isr); \
} while(0)
static const char *__kw_state_names[] = {
"state_idle",
"state_addr",
"state_read",
"state_write",
"state_stop",
"state_dead"
};
static inline u8 __kw_read_reg(struct low_i2c_host *host, reg_t reg)
{
return readb(host->base + (((unsigned int)reg) << host->bsteps));
}
static inline void __kw_write_reg(struct low_i2c_host *host, reg_t reg, u8 val)
{
writeb(val, host->base + (((unsigned)reg) << host->bsteps));
(void)__kw_read_reg(host, reg_subaddr);
}
#define kw_write_reg(reg, val) __kw_write_reg(host, reg, val)
#define kw_read_reg(reg) __kw_read_reg(host, reg)
/* Don't schedule, the g5 fan controller is too
* timing sensitive
*/
static u8 kw_wait_interrupt(struct low_i2c_host* host)
{
int i, j;
u8 isr;
for (i = 0; i < 100000; i++) {
isr = kw_read_reg(reg_isr) & KW_I2C_IRQ_MASK;
if (isr != 0)
return isr;
/* This code is used with the timebase frozen, we cannot rely
* on udelay ! For now, just use a bogus loop
*/
for (j = 1; j < 10000; j++)
mb();
}
return isr;
}
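/* Polled version of the keywest state machine: given the current state
 * and the ISR bits just read, perform the required register accesses,
 * advance the data pointer/length for reads and writes, and return the
 * next state (state_idle once a stop has completed).
 */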
static int kw_handle_interrupt(struct low_i2c_host *host, int state, int rw, int *rc, u8 **data, int *len, u8 isr)
{
u8 ack;
DBG("kw_handle_interrupt(%s, isr: %x)\n", __kw_state_names[state], isr);
if (isr == 0) {
if (state != state_stop) {
DBG("KW: Timeout !\n");
*rc = -EIO;
goto stop;
}
if (state == state_stop) {
ack = kw_read_reg(reg_status);
if (!(ack & KW_I2C_STAT_BUSY)) {
state = state_idle;
kw_write_reg(reg_ier, 0x00);
}
}
return state;
}
if (isr & KW_I2C_IRQ_ADDR) {
ack = kw_read_reg(reg_status);
if (state != state_addr) {
kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR);
WRONG_STATE("KW_I2C_IRQ_ADDR");
*rc = -EIO;
goto stop;
}
if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
*rc = -ENODEV;
DBG("KW: NAK on address\n");
return state_stop;
} else {
if (rw) {
state = state_read;
if (*len > 1)
kw_write_reg(reg_control, KW_I2C_CTL_AAK);
} else {
state = state_write;
kw_write_reg(reg_data, **data);
(*data)++; (*len)--;
}
}
kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR);
}
if (isr & KW_I2C_IRQ_DATA) {
if (state == state_read) {
**data = kw_read_reg(reg_data);
(*data)++; (*len)--;
kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
if ((*len) == 0)
state = state_stop;
else if ((*len) == 1)
kw_write_reg(reg_control, 0);
} else if (state == state_write) {
ack = kw_read_reg(reg_status);
if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
DBG("KW: nack on data write\n");
*rc = -EIO;
goto stop;
} else if (*len) {
kw_write_reg(reg_data, **data);
(*data)++; (*len)--;
} else {
kw_write_reg(reg_control, KW_I2C_CTL_STOP);
state = state_stop;
*rc = 0;
}
kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
} else {
kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
WRONG_STATE("KW_I2C_IRQ_DATA");
if (state != state_stop) {
*rc = -EIO;
goto stop;
}
}
}
if (isr & KW_I2C_IRQ_STOP) {
kw_write_reg(reg_isr, KW_I2C_IRQ_STOP);
if (state != state_stop) {
WRONG_STATE("KW_I2C_IRQ_STOP");
*rc = -EIO;
}
return state_idle;
}
if (isr & KW_I2C_IRQ_START)
kw_write_reg(reg_isr, KW_I2C_IRQ_START);
return state;
stop:
kw_write_reg(reg_control, KW_I2C_CTL_STOP);
return state_stop;
}
static int keywest_low_i2c_func(struct low_i2c_host *host, u8 addr, u8 subaddr, u8 *data, int len)
{
u8 mode_reg = host->speed;
int state = state_addr;
int rc = 0;
/* Setup mode & subaddress if any */
switch(host->mode) {
case pmac_low_i2c_mode_dumb:
printk(KERN_ERR "low_i2c: Dumb mode not supported !\n");
return -EINVAL;
case pmac_low_i2c_mode_std:
mode_reg |= KW_I2C_MODE_STANDARD;
break;
case pmac_low_i2c_mode_stdsub:
mode_reg |= KW_I2C_MODE_STANDARDSUB;
break;
case pmac_low_i2c_mode_combined:
mode_reg |= KW_I2C_MODE_COMBINED;
break;
}
/* Setup channel & clear pending irqs */
kw_write_reg(reg_isr, kw_read_reg(reg_isr));
kw_write_reg(reg_mode, mode_reg | (host->channel << 4));
kw_write_reg(reg_status, 0);
/* Set up address and r/w bit */
kw_write_reg(reg_addr, addr);
/* Set up the sub address */
if ((mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_STANDARDSUB
|| (mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_COMBINED)
kw_write_reg(reg_subaddr, subaddr);
/* Start sending address & disable interrupt*/
kw_write_reg(reg_ier, 0 /*KW_I2C_IRQ_MASK*/);
kw_write_reg(reg_control, KW_I2C_CTL_XADDR);
/* State machine, to turn into an interrupt handler */
while(state != state_idle) {
u8 isr = kw_wait_interrupt(host);
state = kw_handle_interrupt(host, state, addr & 1, &rc, &data, &len, isr);
}
return rc;
}
static void keywest_low_i2c_add(struct device_node *np)
{
struct low_i2c_host *host = find_low_i2c_host(NULL);
u32 *psteps, *prate, steps, aoffset = 0;
struct device_node *parent;
if (host == NULL) {
printk(KERN_ERR "low_i2c: Can't allocate host for %s\n",
np->full_name);
return;
}
memset(host, 0, sizeof(*host));
init_MUTEX(&host->mutex);
host->np = of_node_get(np);
psteps = (u32 *)get_property(np, "AAPL,address-step", NULL);
steps = psteps ? (*psteps) : 0x10;
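/* bsteps becomes the log2 of the register stride ("AAPL,address-step"),
 * used to space out register indices in __kw_read_reg()/__kw_write_reg().
 */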
for (host->bsteps = 0; (steps & 0x01) == 0; host->bsteps++)
steps >>= 1;
parent = of_get_parent(np);
host->num_channels = 1;
if (parent && parent->name[0] == 'u') {
host->num_channels = 2;
aoffset = 3;
}
/* Select interface rate */
host->speed = KW_I2C_MODE_100KHZ;
prate = (u32 *)get_property(np, "AAPL,i2c-rate", NULL);
if (prate) switch(*prate) {
case 100:
host->speed = KW_I2C_MODE_100KHZ;
break;
case 50:
host->speed = KW_I2C_MODE_50KHZ;
break;
case 25:
host->speed = KW_I2C_MODE_25KHZ;
break;
}
host->mode = pmac_low_i2c_mode_std;
host->base = ioremap(np->addrs[0].address + aoffset,
np->addrs[0].size);
host->func = keywest_low_i2c_func;
}
/*
*
* PMU implementation
*
*/
#ifdef CONFIG_ADB_PMU
static int pmu_low_i2c_func(struct low_i2c_host *host, u8 addr, u8 sub, u8 *data, int len)
{
// TODO
return -ENODEV;
}
static void pmu_low_i2c_add(struct device_node *np)
{
struct low_i2c_host *host = find_low_i2c_host(NULL);
if (host == NULL) {
printk(KERN_ERR "low_i2c: Can't allocate host for %s\n",
np->full_name);
return;
}
memset(host, 0, sizeof(*host));
init_MUTEX(&host->mutex);
host->np = of_node_get(np);
host->num_channels = 3;
host->mode = pmac_low_i2c_mode_std;
host->func = pmu_low_i2c_func;
}
#endif /* CONFIG_ADB_PMU */
void __init pmac_init_low_i2c(void)
{
struct device_node *np;
/* Probe keywest-i2c busses */
np = of_find_compatible_node(NULL, "i2c", "keywest-i2c");
while(np) {
keywest_low_i2c_add(np);
np = of_find_compatible_node(np, "i2c", "keywest-i2c");
}
#ifdef CONFIG_ADB_PMU
/* Probe PMU busses */
np = of_find_node_by_name(NULL, "via-pmu");
if (np)
pmu_low_i2c_add(np);
#endif /* CONFIG_ADB_PMU */
/* TODO: Add CUDA support as well */
}
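/* Exported low-level API. Typical use by platform code is expected to
 * look roughly like this (the addr/direction encoding shown is only an
 * illustration based on the keywest implementation above, where bit 0
 * of addrdir selects a read):
 *
 *	pmac_low_i2c_open(np, channel);
 *	pmac_low_i2c_setmode(np, pmac_low_i2c_mode_combined);
 *	pmac_low_i2c_xfer(np, (addr << 1) | read, subaddr, buf, len);
 *	pmac_low_i2c_close(np);
 */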
int pmac_low_i2c_lock(struct device_node *np)
{
struct low_i2c_host *host = find_low_i2c_host(np);
if (!host)
return -ENODEV;
down(&host->mutex);
return 0;
}
EXPORT_SYMBOL(pmac_low_i2c_lock);
int pmac_low_i2c_unlock(struct device_node *np)
{
struct low_i2c_host *host = find_low_i2c_host(np);
if (!host)
return -ENODEV;
up(&host->mutex);
return 0;
}
EXPORT_SYMBOL(pmac_low_i2c_unlock);
int pmac_low_i2c_open(struct device_node *np, int channel)
{
struct low_i2c_host *host = find_low_i2c_host(np);
if (!host)
return -ENODEV;
if (channel >= host->num_channels)
return -EINVAL;
down(&host->mutex);
host->is_open = 1;
host->channel = channel;
return 0;
}
EXPORT_SYMBOL(pmac_low_i2c_open);
int pmac_low_i2c_close(struct device_node *np)
{
struct low_i2c_host *host = find_low_i2c_host(np);
if (!host)
return -ENODEV;
host->is_open = 0;
up(&host->mutex);
return 0;
}
EXPORT_SYMBOL(pmac_low_i2c_close);
int pmac_low_i2c_setmode(struct device_node *np, int mode)
{
struct low_i2c_host *host = find_low_i2c_host(np);
if (!host)
return -ENODEV;
WARN_ON(!host->is_open);
host->mode = mode;
return 0;
}
EXPORT_SYMBOL(pmac_low_i2c_setmode);
int pmac_low_i2c_xfer(struct device_node *np, u8 addrdir, u8 subaddr, u8 *data, int len)
{
struct low_i2c_host *host = find_low_i2c_host(np);
if (!host)
return -ENODEV;
WARN_ON(!host->is_open);
return host->func(host, addrdir, subaddr, data, len);
}
EXPORT_SYMBOL(pmac_low_i2c_xfer);


@ -0,0 +1,584 @@
/*
* arch/ppc/platforms/pmac_nvram.c
*
* Copyright (C) 2002 Benjamin Herrenschmidt (benh@kernel.crashing.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Todo: - add support for the OF persistent properties
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/nvram.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/bootmem.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/nvram.h>
#define DEBUG
#ifdef DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif
#define NVRAM_SIZE 0x2000 /* 8kB of non-volatile RAM */
#define CORE99_SIGNATURE 0x5a
#define CORE99_ADLER_START 0x14
/* On Core99, nvram is either a sharp, a micron or an AMD flash */
#define SM_FLASH_STATUS_DONE 0x80
#define SM_FLASH_STATUS_ERR 0x38
#define SM_FLASH_CMD_ERASE_CONFIRM 0xd0
#define SM_FLASH_CMD_ERASE_SETUP 0x20
#define SM_FLASH_CMD_RESET 0xff
#define SM_FLASH_CMD_WRITE_SETUP 0x40
#define SM_FLASH_CMD_CLEAR_STATUS 0x50
#define SM_FLASH_CMD_READ_STATUS 0x70
/* CHRP NVRAM header */
struct chrp_header {
u8 signature;
u8 cksum;
u16 len;
char name[12];
u8 data[0];
};
struct core99_header {
struct chrp_header hdr;
u32 adler;
u32 generation;
u32 reserved[2];
};
/*
* Read and write the non-volatile RAM on PowerMacs and CHRP machines.
*/
static int nvram_naddrs;
static volatile unsigned char *nvram_addr;
static volatile unsigned char *nvram_data;
static int nvram_mult, is_core_99;
static int core99_bank = 0;
static int nvram_partitions[3];
static DEFINE_SPINLOCK(nv_lock);
extern int pmac_newworld;
extern int system_running;
static int (*core99_write_bank)(int bank, u8* datas);
static int (*core99_erase_bank)(int bank);
static char *nvram_image;
static unsigned char core99_nvram_read_byte(int addr)
{
if (nvram_image == NULL)
return 0xff;
return nvram_image[addr];
}
static void core99_nvram_write_byte(int addr, unsigned char val)
{
if (nvram_image == NULL)
return;
nvram_image[addr] = val;
}
static unsigned char direct_nvram_read_byte(int addr)
{
return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]);
}
static void direct_nvram_write_byte(int addr, unsigned char val)
{
out_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult], val);
}
static unsigned char indirect_nvram_read_byte(int addr)
{
unsigned char val;
unsigned long flags;
spin_lock_irqsave(&nv_lock, flags);
out_8(nvram_addr, addr >> 5);
val = in_8(&nvram_data[(addr & 0x1f) << 4]);
spin_unlock_irqrestore(&nv_lock, flags);
return val;
}
static void indirect_nvram_write_byte(int addr, unsigned char val)
{
unsigned long flags;
spin_lock_irqsave(&nv_lock, flags);
out_8(nvram_addr, addr >> 5);
out_8(&nvram_data[(addr & 0x1f) << 4], val);
spin_unlock_irqrestore(&nv_lock, flags);
}
#ifdef CONFIG_ADB_PMU
static void pmu_nvram_complete(struct adb_request *req)
{
if (req->arg)
complete((struct completion *)req->arg);
}
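/* In the two accessors below: if the system is fully running we sleep on
 * a completion until the PMU answers; early in boot req.arg stays NULL
 * and we fall back to busy-polling the PMU with pmu_poll().
 */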
static unsigned char pmu_nvram_read_byte(int addr)
{
struct adb_request req;
DECLARE_COMPLETION(req_complete);
req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
if (pmu_request(&req, pmu_nvram_complete, 3, PMU_READ_NVRAM,
(addr >> 8) & 0xff, addr & 0xff))
return 0xff;
if (system_state == SYSTEM_RUNNING)
wait_for_completion(&req_complete);
while (!req.complete)
pmu_poll();
return req.reply[0];
}
static void pmu_nvram_write_byte(int addr, unsigned char val)
{
struct adb_request req;
DECLARE_COMPLETION(req_complete);
req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
if (pmu_request(&req, pmu_nvram_complete, 4, PMU_WRITE_NVRAM,
(addr >> 8) & 0xff, addr & 0xff, val))
return;
if (system_state == SYSTEM_RUNNING)
wait_for_completion(&req_complete);
while (!req.complete)
pmu_poll();
}
#endif /* CONFIG_ADB_PMU */
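/* Checksum over the CHRP partition header: start from the signature,
 * add the length and name bytes, then fold any carries back into the
 * low byte.
 */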
static u8 chrp_checksum(struct chrp_header* hdr)
{
u8 *ptr;
u16 sum = hdr->signature;
for (ptr = (u8 *)&hdr->len; ptr < hdr->data; ptr++)
sum += *ptr;
while (sum > 0xFF)
sum = (sum & 0xFF) + (sum>>8);
return sum;
}
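/* Adler-32 style checksum over the NVRAM image, starting at
 * CORE99_ADLER_START, with periodic reduction modulo 65521 to keep the
 * running sums from overflowing.
 */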
static u32 core99_calc_adler(u8 *buffer)
{
int cnt;
u32 low, high;
buffer += CORE99_ADLER_START;
low = 1;
high = 0;
for (cnt=0; cnt<(NVRAM_SIZE-CORE99_ADLER_START); cnt++) {
if ((cnt % 5000) == 0) {
high %= 65521UL;
high %= 65521UL;
}
low += buffer[cnt];
high += low;
}
low %= 65521UL;
high %= 65521UL;
return (high << 16) | low;
}
static u32 core99_check(u8* datas)
{
struct core99_header* hdr99 = (struct core99_header*)datas;
if (hdr99->hdr.signature != CORE99_SIGNATURE) {
DBG("Invalid signature\n");
return 0;
}
if (hdr99->hdr.cksum != chrp_checksum(&hdr99->hdr)) {
DBG("Invalid checksum\n");
return 0;
}
if (hdr99->adler != core99_calc_adler(datas)) {
DBG("Invalid adler\n");
return 0;
}
return hdr99->generation;
}
static int sm_erase_bank(int bank)
{
int stat, i;
unsigned long timeout;
u8* base = (u8 *)nvram_data + core99_bank*NVRAM_SIZE;
DBG("nvram: Sharp/Micron Erasing bank %d...\n", bank);
out_8(base, SM_FLASH_CMD_ERASE_SETUP);
out_8(base, SM_FLASH_CMD_ERASE_CONFIRM);
timeout = 0;
do {
if (++timeout > 1000000) {
printk(KERN_ERR "nvram: Sharp/Miron flash erase timeout !\n");
break;
}
out_8(base, SM_FLASH_CMD_READ_STATUS);
stat = in_8(base);
} while (!(stat & SM_FLASH_STATUS_DONE));
out_8(base, SM_FLASH_CMD_CLEAR_STATUS);
out_8(base, SM_FLASH_CMD_RESET);
for (i=0; i<NVRAM_SIZE; i++)
if (base[i] != 0xff) {
printk(KERN_ERR "nvram: Sharp/Micron flash erase failed !\n");
return -ENXIO;
}
return 0;
}
static int sm_write_bank(int bank, u8* datas)
{
int i, stat = 0;
unsigned long timeout;
u8* base = (u8 *)nvram_data + core99_bank*NVRAM_SIZE;
DBG("nvram: Sharp/Micron Writing bank %d...\n", bank);
for (i=0; i<NVRAM_SIZE; i++) {
out_8(base+i, SM_FLASH_CMD_WRITE_SETUP);
udelay(1);
out_8(base+i, datas[i]);
timeout = 0;
do {
if (++timeout > 1000000) {
printk(KERN_ERR "nvram: Sharp/Micron flash write timeout !\n");
break;
}
out_8(base, SM_FLASH_CMD_READ_STATUS);
stat = in_8(base);
} while (!(stat & SM_FLASH_STATUS_DONE));
if (!(stat & SM_FLASH_STATUS_DONE))
break;
}
out_8(base, SM_FLASH_CMD_CLEAR_STATUS);
out_8(base, SM_FLASH_CMD_RESET);
for (i=0; i<NVRAM_SIZE; i++)
if (base[i] != datas[i]) {
printk(KERN_ERR "nvram: Sharp/Micron flash write failed !\n");
return -ENXIO;
}
return 0;
}
static int amd_erase_bank(int bank)
{
int i, stat = 0;
unsigned long timeout;
u8* base = (u8 *)nvram_data + core99_bank*NVRAM_SIZE;
DBG("nvram: AMD Erasing bank %d...\n", bank);
/* Unlock 1 */
out_8(base+0x555, 0xaa);
udelay(1);
/* Unlock 2 */
out_8(base+0x2aa, 0x55);
udelay(1);
/* Sector-Erase */
out_8(base+0x555, 0x80);
udelay(1);
out_8(base+0x555, 0xaa);
udelay(1);
out_8(base+0x2aa, 0x55);
udelay(1);
out_8(base, 0x30);
udelay(1);
timeout = 0;
do {
if (++timeout > 1000000) {
printk(KERN_ERR "nvram: AMD flash erase timeout !\n");
break;
}
stat = in_8(base) ^ in_8(base);
} while (stat != 0);
/* Reset */
out_8(base, 0xf0);
udelay(1);
for (i=0; i<NVRAM_SIZE; i++)
if (base[i] != 0xff) {
printk(KERN_ERR "nvram: AMD flash erase failed !\n");
return -ENXIO;
}
return 0;
}
static int amd_write_bank(int bank, u8* datas)
{
int i, stat = 0;
unsigned long timeout;
u8* base = (u8 *)nvram_data + core99_bank*NVRAM_SIZE;
DBG("nvram: AMD Writing bank %d...\n", bank);
for (i=0; i<NVRAM_SIZE; i++) {
/* Unlock 1 */
out_8(base+0x555, 0xaa);
udelay(1);
/* Unlock 2 */
out_8(base+0x2aa, 0x55);
udelay(1);
/* Write single word */
out_8(base+0x555, 0xa0);
udelay(1);
out_8(base+i, datas[i]);
timeout = 0;
do {
if (++timeout > 1000000) {
printk(KERN_ERR "nvram: AMD flash write timeout !\n");
break;
}
stat = in_8(base) ^ in_8(base);
} while (stat != 0);
if (stat != 0)
break;
}
/* Reset */
out_8(base, 0xf0);
udelay(1);
for (i=0; i<NVRAM_SIZE; i++)
if (base[i] != datas[i]) {
printk(KERN_ERR "nvram: AMD flash write failed !\n");
return -ENXIO;
}
return 0;
}
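/* On NewWorld machines, walk the CHRP partition headers in NVRAM to
 * locate the OF ("common"), XPRAM and name-registry partitions; older
 * machines use fixed offsets.
 */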
static void __init lookup_partitions(void)
{
u8 buffer[17];
int i, offset;
struct chrp_header* hdr;
if (pmac_newworld) {
nvram_partitions[pmac_nvram_OF] = -1;
nvram_partitions[pmac_nvram_XPRAM] = -1;
nvram_partitions[pmac_nvram_NR] = -1;
hdr = (struct chrp_header *)buffer;
offset = 0;
buffer[16] = 0;
do {
for (i=0;i<16;i++)
buffer[i] = nvram_read_byte(offset+i);
if (!strcmp(hdr->name, "common"))
nvram_partitions[pmac_nvram_OF] = offset + 0x10;
if (!strcmp(hdr->name, "APL,MacOS75")) {
nvram_partitions[pmac_nvram_XPRAM] = offset + 0x10;
nvram_partitions[pmac_nvram_NR] = offset + 0x110;
}
offset += (hdr->len * 0x10);
} while(offset < NVRAM_SIZE);
} else {
nvram_partitions[pmac_nvram_OF] = 0x1800;
nvram_partitions[pmac_nvram_XPRAM] = 0x1300;
nvram_partitions[pmac_nvram_NR] = 0x1400;
}
DBG("nvram: OF partition at 0x%x\n", nvram_partitions[pmac_nvram_OF]);
DBG("nvram: XP partition at 0x%x\n", nvram_partitions[pmac_nvram_XPRAM]);
DBG("nvram: NR partition at 0x%x\n", nvram_partitions[pmac_nvram_NR]);
}
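/* Write the in-memory Core99 image back to flash: bump the generation
 * counter, recompute the header checksum and adler, then erase and
 * program the bank that is currently inactive.
 */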
static void core99_nvram_sync(void)
{
struct core99_header* hdr99;
unsigned long flags;
if (!is_core_99 || !nvram_data || !nvram_image)
return;
spin_lock_irqsave(&nv_lock, flags);
if (!memcmp(nvram_image, (u8*)nvram_data + core99_bank*NVRAM_SIZE,
NVRAM_SIZE))
goto bail;
DBG("Updating nvram...\n");
hdr99 = (struct core99_header*)nvram_image;
hdr99->generation++;
hdr99->hdr.signature = CORE99_SIGNATURE;
hdr99->hdr.cksum = chrp_checksum(&hdr99->hdr);
hdr99->adler = core99_calc_adler(nvram_image);
core99_bank = core99_bank ? 0 : 1;
if (core99_erase_bank)
if (core99_erase_bank(core99_bank)) {
printk("nvram: Error erasing bank %d\n", core99_bank);
goto bail;
}
if (core99_write_bank)
if (core99_write_bank(core99_bank, nvram_image))
printk("nvram: Error writing bank %d\n", core99_bank);
bail:
spin_unlock_irqrestore(&nv_lock, flags);
#ifdef DEBUG
mdelay(2000);
#endif
}
void __init pmac_nvram_init(void)
{
struct device_node *dp;
nvram_naddrs = 0;
dp = find_devices("nvram");
if (dp == NULL) {
printk(KERN_ERR "Can't find NVRAM device\n");
return;
}
nvram_naddrs = dp->n_addrs;
is_core_99 = device_is_compatible(dp, "nvram,flash");
if (is_core_99) {
int i;
u32 gen_bank0, gen_bank1;
if (nvram_naddrs < 1) {
printk(KERN_ERR "nvram: no address\n");
return;
}
nvram_image = alloc_bootmem(NVRAM_SIZE);
if (nvram_image == NULL) {
printk(KERN_ERR "nvram: can't allocate ram image\n");
return;
}
nvram_data = ioremap(dp->addrs[0].address, NVRAM_SIZE*2);
nvram_naddrs = 1; /* Make sure we get the correct case */
DBG("nvram: Checking bank 0...\n");
gen_bank0 = core99_check((u8 *)nvram_data);
gen_bank1 = core99_check((u8 *)nvram_data + NVRAM_SIZE);
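/* Use whichever bank carries the newer generation as the active image */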
core99_bank = (gen_bank0 < gen_bank1) ? 1 : 0;
DBG("nvram: gen0=%d, gen1=%d\n", gen_bank0, gen_bank1);
DBG("nvram: Active bank is: %d\n", core99_bank);
for (i=0; i<NVRAM_SIZE; i++)
nvram_image[i] = nvram_data[i + core99_bank*NVRAM_SIZE];
ppc_md.nvram_read_val = core99_nvram_read_byte;
ppc_md.nvram_write_val = core99_nvram_write_byte;
ppc_md.nvram_sync = core99_nvram_sync;
/*
* Maybe we could be smarter here, though making an exclusive list
* of known flash chips is a bit nasty as older OF didn't provide us
* with a useful "compatible" entry. A solution would be to really
* identify the chip using flash ID commands and base ourselves on
* a list of known chip IDs
*/
if (device_is_compatible(dp, "amd-0137")) {
core99_erase_bank = amd_erase_bank;
core99_write_bank = amd_write_bank;
} else {
core99_erase_bank = sm_erase_bank;
core99_write_bank = sm_write_bank;
}
} else if (_machine == _MACH_chrp && nvram_naddrs == 1) {
nvram_data = ioremap(dp->addrs[0].address + isa_mem_base,
dp->addrs[0].size);
nvram_mult = 1;
ppc_md.nvram_read_val = direct_nvram_read_byte;
ppc_md.nvram_write_val = direct_nvram_write_byte;
} else if (nvram_naddrs == 1) {
nvram_data = ioremap(dp->addrs[0].address, dp->addrs[0].size);
nvram_mult = (dp->addrs[0].size + NVRAM_SIZE - 1) / NVRAM_SIZE;
ppc_md.nvram_read_val = direct_nvram_read_byte;
ppc_md.nvram_write_val = direct_nvram_write_byte;
} else if (nvram_naddrs == 2) {
nvram_addr = ioremap(dp->addrs[0].address, dp->addrs[0].size);
nvram_data = ioremap(dp->addrs[1].address, dp->addrs[1].size);
ppc_md.nvram_read_val = indirect_nvram_read_byte;
ppc_md.nvram_write_val = indirect_nvram_write_byte;
} else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) {
#ifdef CONFIG_ADB_PMU
nvram_naddrs = -1;
ppc_md.nvram_read_val = pmu_nvram_read_byte;
ppc_md.nvram_write_val = pmu_nvram_write_byte;
#endif /* CONFIG_ADB_PMU */
} else {
printk(KERN_ERR "Don't know how to access NVRAM with %d addresses\n",
nvram_naddrs);
}
lookup_partitions();
}
int pmac_get_partition(int partition)
{
return nvram_partitions[partition];
}
u8 pmac_xpram_read(int xpaddr)
{
int offset = nvram_partitions[pmac_nvram_XPRAM];
if (offset < 0)
return 0xff;
return ppc_md.nvram_read_val(xpaddr + offset);
}
void pmac_xpram_write(int xpaddr, u8 data)
{
int offset = nvram_partitions[pmac_nvram_XPRAM];
if (offset < 0)
return;
ppc_md.nvram_write_val(xpaddr + offset, data);
}
EXPORT_SYMBOL(pmac_get_partition);
EXPORT_SYMBOL(pmac_xpram_read);
EXPORT_SYMBOL(pmac_xpram_write);

Diff for this file not shown because of its size.


@ -0,0 +1,655 @@
/*
* Support for the interrupt controllers found on Power Macintosh,
* currently Apple's "Grand Central" interrupt controller in all
* its incarnations. OpenPIC support used on newer machines is
* in a separate file
*
* Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
*
* Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sysdev.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/time.h>
#include <asm/open_pic.h>
#include <asm/xmon.h>
#include <asm/pmac_feature.h>
#include <asm/mpic.h>
#include "pmac_pic.h"
/*
* XXX this should be in xmon.h, but putting it there means xmon.h
* has to include <linux/interrupt.h> (to get irqreturn_t), which
* causes all sorts of problems. -- paulus
*/
extern irqreturn_t xmon_irq(int, void *, struct pt_regs *);
struct pmac_irq_hw {
unsigned int event;
unsigned int enable;
unsigned int ack;
unsigned int level;
};
/* Default addresses */
static volatile struct pmac_irq_hw *pmac_irq_hw[4] = {
(struct pmac_irq_hw *) 0xf3000020,
(struct pmac_irq_hw *) 0xf3000010,
(struct pmac_irq_hw *) 0xf4000020,
(struct pmac_irq_hw *) 0xf4000010,
};
#define GC_LEVEL_MASK 0x3ff00000
#define OHARE_LEVEL_MASK 0x1ff00000
#define HEATHROW_LEVEL_MASK 0x1ff00000
static int max_irqs;
static int max_real_irqs;
static u32 level_mask[4];
static DEFINE_SPINLOCK(pmac_pic_lock);
#define GATWICK_IRQ_POOL_SIZE 10
static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE];
/*
* Mark an irq as "lost". This is only used on the pmac
* since it can lose interrupts (see pmac_set_irq_mask).
* -- Cort
*/
void
__set_lost(unsigned long irq_nr, int nokick)
{
if (!test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
atomic_inc(&ppc_n_lost_interrupts);
if (!nokick)
set_dec(1);
}
}
static void
pmac_mask_and_ack_irq(unsigned int irq_nr)
{
unsigned long bit = 1UL << (irq_nr & 0x1f);
int i = irq_nr >> 5;
unsigned long flags;
if ((unsigned)irq_nr >= max_irqs)
return;
clear_bit(irq_nr, ppc_cached_irq_mask);
if (test_and_clear_bit(irq_nr, ppc_lost_interrupts))
atomic_dec(&ppc_n_lost_interrupts);
spin_lock_irqsave(&pmac_pic_lock, flags);
out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
out_le32(&pmac_irq_hw[i]->ack, bit);
do {
/* make sure ack gets to controller before we enable
interrupts */
mb();
} while((in_le32(&pmac_irq_hw[i]->enable) & bit)
!= (ppc_cached_irq_mask[i] & bit));
spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
static void pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
{
unsigned long bit = 1UL << (irq_nr & 0x1f);
int i = irq_nr >> 5;
unsigned long flags;
if ((unsigned)irq_nr >= max_irqs)
return;
spin_lock_irqsave(&pmac_pic_lock, flags);
/* enable unmasked interrupts */
out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
do {
/* make sure mask gets to controller before we
return to user */
mb();
} while((in_le32(&pmac_irq_hw[i]->enable) & bit)
!= (ppc_cached_irq_mask[i] & bit));
/*
* Unfortunately, setting the bit in the enable register
* when the device interrupt is already on *doesn't* set
* the bit in the flag register or request another interrupt.
*/
if (bit & ppc_cached_irq_mask[i] & in_le32(&pmac_irq_hw[i]->level))
__set_lost((ulong)irq_nr, nokicklost);
spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
/* When an irq gets requested for the first client, if it's an
* edge interrupt, we clear any previous one on the controller
*/
static unsigned int pmac_startup_irq(unsigned int irq_nr)
{
unsigned long bit = 1UL << (irq_nr & 0x1f);
int i = irq_nr >> 5;
if ((irq_desc[irq_nr].status & IRQ_LEVEL) == 0)
out_le32(&pmac_irq_hw[i]->ack, bit);
set_bit(irq_nr, ppc_cached_irq_mask);
pmac_set_irq_mask(irq_nr, 0);
return 0;
}
static void pmac_mask_irq(unsigned int irq_nr)
{
clear_bit(irq_nr, ppc_cached_irq_mask);
pmac_set_irq_mask(irq_nr, 0);
mb();
}
static void pmac_unmask_irq(unsigned int irq_nr)
{
set_bit(irq_nr, ppc_cached_irq_mask);
pmac_set_irq_mask(irq_nr, 0);
}
static void pmac_end_irq(unsigned int irq_nr)
{
if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))
&& irq_desc[irq_nr].action) {
set_bit(irq_nr, ppc_cached_irq_mask);
pmac_set_irq_mask(irq_nr, 1);
}
}
struct hw_interrupt_type pmac_pic = {
.typename = " PMAC-PIC ",
.startup = pmac_startup_irq,
.enable = pmac_unmask_irq,
.disable = pmac_mask_irq,
.ack = pmac_mask_and_ack_irq,
.end = pmac_end_irq,
};
struct hw_interrupt_type gatwick_pic = {
.typename = " GATWICK ",
.startup = pmac_startup_irq,
.enable = pmac_unmask_irq,
.disable = pmac_mask_irq,
.ack = pmac_mask_and_ack_irq,
.end = pmac_end_irq,
};
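/* Cascade handler for the secondary (Gatwick) controller: scan its
 * event/level registers for a pending source and dispatch it through
 * __do_IRQ().
 */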
static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs)
{
int irq, bits;
for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) {
int i = irq >> 5;
bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
/* We must read level interrupts from the level register */
bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]);
bits &= ppc_cached_irq_mask[i];
if (bits == 0)
continue;
irq += __ilog2(bits);
__do_IRQ(irq, regs);
return IRQ_HANDLED;
}
printk("gatwick irq not from gatwick pic\n");
return IRQ_NONE;
}
int
pmac_get_irq(struct pt_regs *regs)
{
int irq;
unsigned long bits = 0;
#ifdef CONFIG_SMP
void psurge_smp_message_recv(struct pt_regs *);
/* IPI's are a hack on the powersurge -- Cort */
if ( smp_processor_id() != 0 ) {
psurge_smp_message_recv(regs);
return -2; /* ignore, already handled */
}
#endif /* CONFIG_SMP */
for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
int i = irq >> 5;
bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
/* We must read level interrupts from the level register */
bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]);
bits &= ppc_cached_irq_mask[i];
if (bits == 0)
continue;
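/* __ilog2 returns the index of the highest set bit, so the
 * highest-numbered pending source in this bank is taken first */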
irq += __ilog2(bits);
break;
}
return irq;
}
/* This routine will fix some missing interrupt values in the device tree
* on the gatwick mac-io controller used by some PowerBooks
*/
static void __init
pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base)
{
struct device_node *node;
int count;
memset(gatwick_int_pool, 0, sizeof(gatwick_int_pool));
node = gw->child;
count = 0;
while(node)
{
/* Fix SCC */
if (strcasecmp(node->name, "escc") == 0)
if (node->child) {
if (node->child->n_intrs < 3) {
node->child->intrs = &gatwick_int_pool[count];
count += 3;
}
node->child->n_intrs = 3;
node->child->intrs[0].line = 15+irq_base;
node->child->intrs[1].line = 4+irq_base;
node->child->intrs[2].line = 5+irq_base;
printk(KERN_INFO "irq: fixed SCC on second controller (%d,%d,%d)\n",
node->child->intrs[0].line,
node->child->intrs[1].line,
node->child->intrs[2].line);
}
/* Fix media-bay & left SWIM */
if (strcasecmp(node->name, "media-bay") == 0) {
struct device_node* ya_node;
if (node->n_intrs == 0)
node->intrs = &gatwick_int_pool[count++];
node->n_intrs = 1;
node->intrs[0].line = 29+irq_base;
printk(KERN_INFO "irq: fixed media-bay on second controller (%d)\n",
node->intrs[0].line);
ya_node = node->child;
while(ya_node)
{
if (strcasecmp(ya_node->name, "floppy") == 0) {
if (ya_node->n_intrs < 2) {
ya_node->intrs = &gatwick_int_pool[count];
count += 2;
}
ya_node->n_intrs = 2;
ya_node->intrs[0].line = 19+irq_base;
ya_node->intrs[1].line = 1+irq_base;
printk(KERN_INFO "irq: fixed floppy on second controller (%d,%d)\n",
ya_node->intrs[0].line, ya_node->intrs[1].line);
}
if (strcasecmp(ya_node->name, "ata4") == 0) {
if (ya_node->n_intrs < 2) {
ya_node->intrs = &gatwick_int_pool[count];
count += 2;
}
ya_node->n_intrs = 2;
ya_node->intrs[0].line = 14+irq_base;
ya_node->intrs[1].line = 3+irq_base;
printk(KERN_INFO "irq: fixed ide on second controller (%d,%d)\n",
ya_node->intrs[0].line, ya_node->intrs[1].line);
}
ya_node = ya_node->sibling;
}
}
node = node->sibling;
}
if (count > 10) {
printk("WARNING !! Gatwick interrupt pool overflow\n");
printk(" GATWICK_IRQ_POOL_SIZE = %d\n", GATWICK_IRQ_POOL_SIZE);
printk(" requested = %d\n", count);
}
}
/*
* The PowerBook 3400/2400/3500 can have a combo ethernet/modem
* card which includes an ohare chip that acts as a second interrupt
* controller. If we find this second ohare, set it up and fix the
* interrupt value in the device tree for the ethernet chip.
*/
static int __init enable_second_ohare(void)
{
unsigned char bus, devfn;
unsigned short cmd;
unsigned long addr;
struct device_node *irqctrler = find_devices("pci106b,7");
struct device_node *ether;
if (irqctrler == NULL || irqctrler->n_addrs <= 0)
return -1;
addr = (unsigned long) ioremap(irqctrler->addrs[0].address, 0x40);
pmac_irq_hw[1] = (volatile struct pmac_irq_hw *)(addr + 0x20);
max_irqs = 64;
if (pci_device_from_OF_node(irqctrler, &bus, &devfn) == 0) {
struct pci_controller* hose = pci_find_hose_for_OF_device(irqctrler);
if (!hose)
printk(KERN_ERR "Can't find PCI hose for OHare2 !\n");
else {
early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd);
cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
cmd &= ~PCI_COMMAND_IO;
early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd);
}
}
/* Fix interrupt for the modem/ethernet combo controller. The number
in the device tree (27) is bogus (correct for the ethernet-only
board but not the combo ethernet/modem board).
The real interrupt is 28 on the second controller -> 28+32 = 60.
*/
ether = find_devices("pci1011,14");
if (ether && ether->n_intrs > 0) {
ether->intrs[0].line = 60;
printk(KERN_INFO "irq: Fixed ethernet IRQ to %d\n",
ether->intrs[0].line);
}
/* Return the interrupt number of the cascade */
return irqctrler->intrs[0].line;
}
static int pmac_u3_cascade(struct pt_regs *regs, void *data)
{
return mpic_get_one_irq((struct mpic *)data, regs);
}
#ifdef CONFIG_XMON
static struct irqaction xmon_action = {
.handler = xmon_irq,
.flags = 0,
.mask = CPU_MASK_NONE,
.name = "NMI - XMON"
};
#endif
static struct irqaction gatwick_cascade_action = {
.handler = gatwick_action,
.flags = SA_INTERRUPT,
.mask = CPU_MASK_NONE,
.name = "cascade",
};
void __init pmac_pic_init(void)
{
int i;
struct device_node *irqctrler = NULL;
struct device_node *irqctrler2 = NULL;
struct device_node *np;
unsigned long addr;
int irq_cascade = -1;
struct mpic *mpic1, *mpic2;
/* We first try to detect Apple's new Core99 chipset, since mac-io
* is quite different on those machines and contains an IBM MPIC2.
*/
np = find_type_devices("open-pic");
while (np) {
if (np->parent && !strcmp(np->parent->name, "u3"))
irqctrler2 = np;
else
irqctrler = np;
np = np->next;
}
if (irqctrler != NULL && irqctrler->n_addrs > 0) {
unsigned char senses[128];
printk(KERN_INFO "PowerMac using OpenPIC irq controller at 0x%08x\n",
(unsigned int)irqctrler->addrs[0].address);
prom_get_irq_senses(senses, 0, 128);
mpic1 = mpic_alloc(irqctrler->addrs[0].address,
MPIC_PRIMARY | MPIC_WANTS_RESET,
0, 0, 128, 256, senses, 128, " K2-MPIC ");
BUG_ON(mpic1 == NULL);
mpic_init(mpic1);
if (irqctrler2 != NULL && irqctrler2->n_intrs > 0 &&
irqctrler2->n_addrs > 0) {
printk(KERN_INFO "Slave OpenPIC at 0x%08x hooked on IRQ %d\n",
(u32)irqctrler2->addrs[0].address,
irqctrler2->intrs[0].line);
pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler2, 0, 0);
prom_get_irq_senses(senses, 128, 128 + 128);
/* We don't need to set MPIC_BROKEN_U3 here since we don't have
* hypertransport interrupts routed to it
*/
mpic2 = mpic_alloc(irqctrler2->addrs[0].address,
MPIC_BIG_ENDIAN | MPIC_WANTS_RESET,
0, 128, 128, 0, senses, 128, " U3-MPIC ");
BUG_ON(mpic2 == NULL);
mpic_init(mpic2);
mpic_setup_cascade(irqctrler2->intrs[0].line,
pmac_u3_cascade, mpic2);
}
}
/* Get the level/edge settings; assume that if it's not
 * a Grand Central or an OHare, then it's a Heathrow
 * (or Paddington).
 */
if (find_devices("gc"))
level_mask[0] = GC_LEVEL_MASK;
else if (find_devices("ohare")) {
level_mask[0] = OHARE_LEVEL_MASK;
/* We might have a second cascaded ohare */
level_mask[1] = OHARE_LEVEL_MASK;
} else {
level_mask[0] = HEATHROW_LEVEL_MASK;
level_mask[1] = 0;
/* We might have a second cascaded heathrow */
level_mask[2] = HEATHROW_LEVEL_MASK;
level_mask[3] = 0;
}
/*
* G3 powermacs and 1999 G3 PowerBooks have 64 interrupts,
* 1998 G3 Series PowerBooks have 128,
* other powermacs have 32.
* The combo ethernet/modem card for the Powerstar powerbooks
* (2400/3400/3500, ohare based) has a second ohare chip
* effectively making a total of 64.
*/
max_irqs = max_real_irqs = 32;
irqctrler = find_devices("mac-io");
if (irqctrler)
{
max_real_irqs = 64;
if (irqctrler->next)
max_irqs = 128;
else
max_irqs = 64;
}
for ( i = 0; i < max_real_irqs ; i++ )
irq_desc[i].handler = &pmac_pic;
/* get addresses of first controller */
if (irqctrler) {
if (irqctrler->n_addrs > 0) {
addr = (unsigned long)
ioremap(irqctrler->addrs[0].address, 0x40);
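/* The register banks are laid out in descending order: bank 0
 * (irqs 0-31) at offset 0x20 and bank 1 (irqs 32-63) at 0x10,
 * hence the (2 - i) indexing below. */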
for (i = 0; i < 2; ++i)
pmac_irq_hw[i] = (volatile struct pmac_irq_hw*)
(addr + (2 - i) * 0x10);
}
/* get addresses of second controller */
irqctrler = irqctrler->next;
if (irqctrler && irqctrler->n_addrs > 0) {
addr = (unsigned long)
ioremap(irqctrler->addrs[0].address, 0x40);
for (i = 2; i < 4; ++i)
pmac_irq_hw[i] = (volatile struct pmac_irq_hw*)
(addr + (4 - i) * 0x10);
irq_cascade = irqctrler->intrs[0].line;
if (device_is_compatible(irqctrler, "gatwick"))
pmac_fix_gatwick_interrupts(irqctrler, max_real_irqs);
}
} else {
/* older powermacs have a GC (grand central) or ohare at
f3000000, with interrupt control registers at f3000020. */
addr = (unsigned long) ioremap(0xf3000000, 0x40);
pmac_irq_hw[0] = (volatile struct pmac_irq_hw *) (addr + 0x20);
}
/* PowerBooks 3400 and 3500 can have a second controller in a second
ohare chip, on the combo ethernet/modem card */
if (machine_is_compatible("AAPL,3400/2400")
|| machine_is_compatible("AAPL,3500"))
irq_cascade = enable_second_ohare();
/* disable all interrupts in all controllers */
for (i = 0; i * 32 < max_irqs; ++i)
out_le32(&pmac_irq_hw[i]->enable, 0);
/* mark level interrupts */
for (i = 0; i < max_irqs; i++)
if (level_mask[i >> 5] & (1UL << (i & 0x1f)))
irq_desc[i].status = IRQ_LEVEL;
/* get interrupt line of secondary interrupt controller */
if (irq_cascade >= 0) {
printk(KERN_INFO "irq: secondary controller on irq %d\n",
(int)irq_cascade);
for ( i = max_real_irqs ; i < max_irqs ; i++ )
irq_desc[i].handler = &gatwick_pic;
setup_irq(irq_cascade, &gatwick_cascade_action);
}
printk("System has %d possible interrupts\n", max_irqs);
if (max_irqs != max_real_irqs)
printk(KERN_DEBUG "%d interrupts on main controller\n",
max_real_irqs);
#ifdef CONFIG_XMON
setup_irq(20, &xmon_action);
#endif /* CONFIG_XMON */
}
#ifdef CONFIG_PM
/*
* These procedures are used in implementing sleep on the powerbooks.
* sleep_save_intrs() saves the states of all interrupt enables
* and disables all interrupts except for the nominated one.
* sleep_restore_intrs() restores the states of all interrupt enables.
*/
unsigned long sleep_save_mask[2];
/* This used to be passed by the PMU driver but that link got
* broken with the new driver model. We use this tweak for now...
*/
static int pmacpic_find_viaint(void)
{
int viaint = -1;
#ifdef CONFIG_ADB_PMU
struct device_node *np;
if (pmu_get_model() != PMU_OHARE_BASED)
goto not_found;
np = of_find_node_by_name(NULL, "via-pmu");
if (np == NULL)
goto not_found;
viaint = np->intrs[0].line;
#endif /* CONFIG_ADB_PMU */
not_found:
return viaint;
}
static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state)
{
int viaint = pmacpic_find_viaint();
sleep_save_mask[0] = ppc_cached_irq_mask[0];
sleep_save_mask[1] = ppc_cached_irq_mask[1];
ppc_cached_irq_mask[0] = 0;
ppc_cached_irq_mask[1] = 0;
if (viaint > 0)
set_bit(viaint, ppc_cached_irq_mask);
out_le32(&pmac_irq_hw[0]->enable, ppc_cached_irq_mask[0]);
if (max_real_irqs > 32)
out_le32(&pmac_irq_hw[1]->enable, ppc_cached_irq_mask[1]);
(void)in_le32(&pmac_irq_hw[0]->event);
/* make sure mask gets to controller before we return to caller */
mb();
(void)in_le32(&pmac_irq_hw[0]->enable);
return 0;
}
static int pmacpic_resume(struct sys_device *sysdev)
{
int i;
out_le32(&pmac_irq_hw[0]->enable, 0);
if (max_real_irqs > 32)
out_le32(&pmac_irq_hw[1]->enable, 0);
mb();
for (i = 0; i < max_real_irqs; ++i)
if (test_bit(i, sleep_save_mask))
pmac_unmask_irq(i);
return 0;
}
#endif /* CONFIG_PM */
static struct sysdev_class pmacpic_sysclass = {
set_kset_name("pmac_pic"),
};
static struct sys_device device_pmacpic = {
.id = 0,
.cls = &pmacpic_sysclass,
};
static struct sysdev_driver driver_pmacpic = {
#ifdef CONFIG_PM
.suspend = &pmacpic_suspend,
.resume = &pmacpic_resume,
#endif /* CONFIG_PM */
};
static int __init init_pmacpic_sysfs(void)
{
if (max_irqs == 0)
return -ENODEV;
printk(KERN_DEBUG "Registering pmac pic with sysfs...\n");
sysdev_class_register(&pmacpic_sysclass);
sysdev_register(&device_pmacpic);
sysdev_driver_register(&pmacpic_sysclass, &driver_pmacpic);
return 0;
}
subsys_initcall(init_pmacpic_sysfs);


@ -0,0 +1,11 @@
#ifndef __PPC_PLATFORMS_PMAC_PIC_H
#define __PPC_PLATFORMS_PMAC_PIC_H
#include <linux/irq.h>
extern struct hw_interrupt_type pmac_pic;
void pmac_pic_init(void);
int pmac_get_irq(struct pt_regs *regs);
#endif /* __PPC_PLATFORMS_PMAC_PIC_H */


@ -0,0 +1,662 @@
/*
* arch/ppc/platforms/setup.c
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Adapted for Power Macintosh by Paul Mackerras
* Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
*
* Derived from "arch/alpha/kernel/setup.c"
* Copyright (C) 1995 Linus Torvalds
*
* Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
/*
* bootup setup stuff..
*/
#include <linux/config.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/major.h>
#include <linux/initrd.h>
#include <linux/vt_kern.h>
#include <linux/console.h>
#include <linux/ide.h>
#include <linux/pci.h>
#include <linux/adb.h>
#include <linux/cuda.h>
#include <linux/pmu.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/bitops.h>
#include <linux/suspend.h>
#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/ohare.h>
#include <asm/mediabay.h>
#include <asm/machdep.h>
#include <asm/dma.h>
#include <asm/bootx.h>
#include <asm/cputable.h>
#include <asm/btext.h>
#include <asm/pmac_feature.h>
#include <asm/time.h>
#include <asm/of_device.h>
#include <asm/mmu_context.h>
#include "pmac_pic.h"
#undef SHOW_GATWICK_IRQS
extern long pmac_time_init(void);
extern unsigned long pmac_get_rtc_time(void);
extern int pmac_set_rtc_time(unsigned long nowtime);
extern void pmac_read_rtc_time(void);
extern void pmac_calibrate_decr(void);
extern void pmac_pcibios_fixup(void);
extern void pmac_find_bridges(void);
extern unsigned long pmac_ide_get_base(int index);
extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
unsigned long data_port, unsigned long ctrl_port, int *irq);
extern void pmac_nvram_update(void);
extern unsigned char pmac_nvram_read_byte(int addr);
extern void pmac_nvram_write_byte(int addr, unsigned char val);
extern int pmac_pci_enable_device_hook(struct pci_dev *dev, int initial);
extern void pmac_pcibios_after_init(void);
extern int of_show_percpuinfo(struct seq_file *m, int i);
unsigned char drive_info;
int ppc_override_l2cr = 0;
int ppc_override_l2cr_value;
int has_l2cache = 0;
static int current_root_goodness = -1;
extern int pmac_newworld;
#define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */
extern void zs_kgdb_hook(int tty_num);
static void ohare_init(void);
#ifdef CONFIG_BOOTX_TEXT
static void pmac_progress(char *s, unsigned short hex);
#endif
sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN;
#ifdef CONFIG_SMP
extern struct smp_ops_t psurge_smp_ops;
extern struct smp_ops_t core99_smp_ops;
#endif /* CONFIG_SMP */
static int
pmac_show_cpuinfo(struct seq_file *m)
{
struct device_node *np;
char *pp;
int plen;
int mbmodel = pmac_call_feature(PMAC_FTR_GET_MB_INFO,
NULL, PMAC_MB_INFO_MODEL, 0);
unsigned int mbflags = (unsigned int)pmac_call_feature(PMAC_FTR_GET_MB_INFO,
NULL, PMAC_MB_INFO_FLAGS, 0);
char* mbname;
if (pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_NAME, (int)&mbname) != 0)
mbname = "Unknown";
/* find motherboard type */
seq_printf(m, "machine\t\t: ");
np = find_devices("device-tree");
if (np != NULL) {
pp = (char *) get_property(np, "model", NULL);
if (pp != NULL)
seq_printf(m, "%s\n", pp);
else
seq_printf(m, "PowerMac\n");
pp = (char *) get_property(np, "compatible", &plen);
if (pp != NULL) {
seq_printf(m, "motherboard\t:");
while (plen > 0) {
int l = strlen(pp) + 1;
seq_printf(m, " %s", pp);
plen -= l;
pp += l;
}
seq_printf(m, "\n");
}
} else
seq_printf(m, "PowerMac\n");
/* print parsed model */
seq_printf(m, "detected as\t: %d (%s)\n", mbmodel, mbname);
seq_printf(m, "pmac flags\t: %08x\n", mbflags);
/* find l2 cache info */
np = find_devices("l2-cache");
if (np == 0)
np = find_type_devices("cache");
if (np != 0) {
unsigned int *ic = (unsigned int *)
get_property(np, "i-cache-size", NULL);
unsigned int *dc = (unsigned int *)
get_property(np, "d-cache-size", NULL);
seq_printf(m, "L2 cache\t:");
has_l2cache = 1;
if (get_property(np, "cache-unified", NULL) != 0 && dc) {
seq_printf(m, " %dK unified", *dc / 1024);
} else {
if (ic)
seq_printf(m, " %dK instruction", *ic / 1024);
if (dc)
seq_printf(m, "%s %dK data",
(ic? " +": ""), *dc / 1024);
}
pp = get_property(np, "ram-type", NULL);
if (pp)
seq_printf(m, " %s", pp);
seq_printf(m, "\n");
}
/* find ram info */
np = find_devices("memory");
if (np != 0) {
int n;
struct reg_property *reg = (struct reg_property *)
get_property(np, "reg", &n);
if (reg != 0) {
unsigned long total = 0;
for (n /= sizeof(struct reg_property); n > 0; --n)
total += (reg++)->size;
seq_printf(m, "memory\t\t: %luMB\n", total >> 20);
}
}
/* Checks "l2cr-value" property in the registry */
np = find_devices("cpus");
if (np == 0)
np = find_type_devices("cpu");
if (np != 0) {
unsigned int *l2cr = (unsigned int *)
get_property(np, "l2cr-value", NULL);
if (l2cr != 0) {
seq_printf(m, "l2cr override\t: 0x%x\n", *l2cr);
}
}
/* Indicate newworld/oldworld */
seq_printf(m, "pmac-generation\t: %s\n",
pmac_newworld ? "NewWorld" : "OldWorld");
return 0;
}
static int
pmac_show_percpuinfo(struct seq_file *m, int i)
{
#ifdef CONFIG_CPU_FREQ_PMAC
extern unsigned int pmac_get_one_cpufreq(int i);
unsigned int freq = pmac_get_one_cpufreq(i);
if (freq != 0) {
seq_printf(m, "clock\t\t: %dMHz\n", freq/1000);
return 0;
}
#endif /* CONFIG_CPU_FREQ_PMAC */
return of_show_percpuinfo(m, i);
}
static volatile u32 *sysctrl_regs;
void __init
pmac_setup_arch(void)
{
struct device_node *cpu;
int *fp;
unsigned long pvr;
pvr = PVR_VER(mfspr(SPRN_PVR));
/* Set loops_per_jiffy to a half-way reasonable value,
for use until calibrate_delay gets called. */
cpu = find_type_devices("cpu");
if (cpu != 0) {
fp = (int *) get_property(cpu, "clock-frequency", NULL);
if (fp != 0) {
if (pvr == 4 || pvr >= 8)
/* 604, G3, G4 etc. */
loops_per_jiffy = *fp / HZ;
else
/* 601, 603, etc. */
loops_per_jiffy = *fp / (2*HZ);
} else
loops_per_jiffy = 50000000 / HZ;
}
/* this area has the CPU identification register
and some registers used by smp boards */
sysctrl_regs = (volatile u32 *) ioremap(0xf8000000, 0x1000);
ohare_init();
/* Lookup PCI hosts */
pmac_find_bridges();
/* Checks "l2cr-value" property in the registry */
if (cpu_has_feature(CPU_FTR_L2CR)) {
struct device_node *np = find_devices("cpus");
if (np == 0)
np = find_type_devices("cpu");
if (np != 0) {
unsigned int *l2cr = (unsigned int *)
get_property(np, "l2cr-value", NULL);
if (l2cr != 0) {
ppc_override_l2cr = 1;
ppc_override_l2cr_value = *l2cr;
_set_L2CR(0);
_set_L2CR(ppc_override_l2cr_value);
}
}
}
if (ppc_override_l2cr)
printk(KERN_INFO "L2CR overriden (0x%x), backside cache is %s\n",
ppc_override_l2cr_value, (ppc_override_l2cr_value & 0x80000000)
? "enabled" : "disabled");
#ifdef CONFIG_KGDB
zs_kgdb_hook(0);
#endif
#ifdef CONFIG_ADB_CUDA
find_via_cuda();
#else
if (find_devices("via-cuda")) {
printk("WARNING ! Your machine is Cuda based but your kernel\n");
printk(" wasn't compiled with CONFIG_ADB_CUDA option !\n");
}
#endif
#ifdef CONFIG_ADB_PMU
find_via_pmu();
#else
if (find_devices("via-pmu")) {
printk("WARNING ! Your machine is PMU based but your kernel\n");
printk(" wasn't compiled with CONFIG_ADB_PMU option !\n");
}
#endif
#ifdef CONFIG_NVRAM
pmac_nvram_init();
#endif
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start)
ROOT_DEV = Root_RAM0;
else
#endif
ROOT_DEV = DEFAULT_ROOT_DEVICE;
#ifdef CONFIG_SMP
/* Check for Core99 */
if (find_devices("uni-n") || find_devices("u3"))
ppc_md.smp_ops = &core99_smp_ops;
else
ppc_md.smp_ops = &psurge_smp_ops;
#endif /* CONFIG_SMP */
pci_create_OF_bus_map();
}
static void __init ohare_init(void)
{
/*
* Turn on the L2 cache.
* We assume that we have a PSX memory controller iff
* we have an ohare I/O controller.
*/
if (find_devices("ohare") != NULL) {
if (((sysctrl_regs[2] >> 24) & 0xf) >= 3) {
if (sysctrl_regs[4] & 0x10)
sysctrl_regs[4] |= 0x04000020;
else
sysctrl_regs[4] |= 0x04000000;
if(has_l2cache)
printk(KERN_INFO "Level 2 cache enabled\n");
}
}
}
extern char *bootpath;
extern char *bootdevice;
void *boot_host;
int boot_target;
int boot_part;
extern dev_t boot_dev;
#ifdef CONFIG_SCSI
void __init
note_scsi_host(struct device_node *node, void *host)
{
int l;
char *p;
l = strlen(node->full_name);
if (bootpath != NULL && bootdevice != NULL
&& strncmp(node->full_name, bootdevice, l) == 0
&& (bootdevice[l] == '/' || bootdevice[l] == 0)) {
boot_host = host;
/*
* There's a bug in OF 1.0.5. (Why am I not surprised.)
* If you pass a path like scsi/sd@1:0 to canon, it returns
* something like /bandit@F2000000/gc@10/53c94@10000/sd@0,0
* That is, the scsi target number doesn't get preserved.
* So we pick the target number out of bootpath and use that.
*/
p = strstr(bootpath, "/sd@");
if (p != NULL) {
p += 4;
boot_target = simple_strtoul(p, NULL, 10);
p = strchr(p, ':');
if (p != NULL)
boot_part = simple_strtoul(p + 1, NULL, 10);
}
}
}
#endif
#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC)
static dev_t __init
find_ide_boot(void)
{
char *p;
int n;
dev_t __init pmac_find_ide_boot(char *bootdevice, int n);
if (bootdevice == NULL)
return 0;
p = strrchr(bootdevice, '/');
if (p == NULL)
return 0;
n = p - bootdevice;
return pmac_find_ide_boot(bootdevice, n);
}
#endif /* CONFIG_BLK_DEV_IDE && CONFIG_BLK_DEV_IDE_PMAC */
static void __init
find_boot_device(void)
{
#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC)
boot_dev = find_ide_boot();
#endif
}
static int initializing = 1;
/* TODO: Merge the suspend-to-ram with the common code !!!
* currently, this is a stub implementation for suspend-to-disk
* only
*/
#ifdef CONFIG_SOFTWARE_SUSPEND
static int pmac_pm_prepare(suspend_state_t state)
{
printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);
return 0;
}
static int pmac_pm_enter(suspend_state_t state)
{
printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);
/* Give up the lazy FPU & vec state so we don't have to back them
 * up from the low-level code
 */
enable_kernel_fp();
#ifdef CONFIG_ALTIVEC
if (cur_cpu_spec[0]->cpu_features & CPU_FTR_ALTIVEC)
enable_kernel_altivec();
#endif /* CONFIG_ALTIVEC */
return 0;
}
static int pmac_pm_finish(suspend_state_t state)
{
printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);
/* Restore userland MMU context */
set_context(current->active_mm->context, current->active_mm->pgd);
return 0;
}
static struct pm_ops pmac_pm_ops = {
.pm_disk_mode = PM_DISK_SHUTDOWN,
.prepare = pmac_pm_prepare,
.enter = pmac_pm_enter,
.finish = pmac_pm_finish,
};
#endif /* CONFIG_SOFTWARE_SUSPEND */
static int pmac_late_init(void)
{
initializing = 0;
#ifdef CONFIG_SOFTWARE_SUSPEND
pm_set_ops(&pmac_pm_ops);
#endif /* CONFIG_SOFTWARE_SUSPEND */
return 0;
}
late_initcall(pmac_late_init);
/* can't be __init - can be called whenever a disk is first accessed */
void
note_bootable_part(dev_t dev, int part, int goodness)
{
static int found_boot = 0;
char *p;
if (!initializing)
return;
if ((goodness <= current_root_goodness) &&
ROOT_DEV != DEFAULT_ROOT_DEVICE)
return;
p = strstr(saved_command_line, "root=");
if (p != NULL && (p == saved_command_line || p[-1] == ' '))
return;
if (!found_boot) {
find_boot_device();
found_boot = 1;
}
if (!boot_dev || dev == boot_dev) {
ROOT_DEV = dev + part;
boot_dev = 0;
current_root_goodness = goodness;
}
}
static void
pmac_restart(char *cmd)
{
#ifdef CONFIG_ADB_CUDA
struct adb_request req;
#endif /* CONFIG_ADB_CUDA */
switch (sys_ctrler) {
#ifdef CONFIG_ADB_CUDA
case SYS_CTRLER_CUDA:
cuda_request(&req, NULL, 2, CUDA_PACKET,
CUDA_RESET_SYSTEM);
for (;;)
cuda_poll();
break;
#endif /* CONFIG_ADB_CUDA */
#ifdef CONFIG_ADB_PMU
case SYS_CTRLER_PMU:
pmu_restart();
break;
#endif /* CONFIG_ADB_PMU */
default: ;
}
}
static void
pmac_power_off(void)
{
#ifdef CONFIG_ADB_CUDA
struct adb_request req;
#endif /* CONFIG_ADB_CUDA */
switch (sys_ctrler) {
#ifdef CONFIG_ADB_CUDA
case SYS_CTRLER_CUDA:
cuda_request(&req, NULL, 2, CUDA_PACKET,
CUDA_POWERDOWN);
for (;;)
cuda_poll();
break;
#endif /* CONFIG_ADB_CUDA */
#ifdef CONFIG_ADB_PMU
case SYS_CTRLER_PMU:
pmu_shutdown();
break;
#endif /* CONFIG_ADB_PMU */
default: ;
}
}
static void
pmac_halt(void)
{
pmac_power_off();
}
void __init
pmac_init(unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7)
{
/* isa_io_base gets set in pmac_find_bridges */
isa_mem_base = PMAC_ISA_MEM_BASE;
pci_dram_offset = PMAC_PCI_DRAM_OFFSET;
ISA_DMA_THRESHOLD = ~0L;
DMA_MODE_READ = 1;
DMA_MODE_WRITE = 2;
ppc_md.setup_arch = pmac_setup_arch;
ppc_md.show_cpuinfo = pmac_show_cpuinfo;
ppc_md.show_percpuinfo = pmac_show_percpuinfo;
ppc_md.irq_canonicalize = NULL;
ppc_md.init_IRQ = pmac_pic_init;
ppc_md.get_irq = pmac_get_irq; /* Changed later on ... */
ppc_md.pcibios_fixup = pmac_pcibios_fixup;
ppc_md.pcibios_enable_device_hook = pmac_pci_enable_device_hook;
ppc_md.pcibios_after_init = pmac_pcibios_after_init;
ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
ppc_md.restart = pmac_restart;
ppc_md.power_off = pmac_power_off;
ppc_md.halt = pmac_halt;
ppc_md.time_init = pmac_time_init;
ppc_md.set_rtc_time = pmac_set_rtc_time;
ppc_md.get_rtc_time = pmac_get_rtc_time;
ppc_md.calibrate_decr = pmac_calibrate_decr;
ppc_md.feature_call = pmac_do_feature_call;
#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
#ifdef CONFIG_BLK_DEV_IDE_PMAC
ppc_ide_md.ide_init_hwif = pmac_ide_init_hwif_ports;
ppc_ide_md.default_io_base = pmac_ide_get_base;
#endif /* CONFIG_BLK_DEV_IDE_PMAC */
#endif /* defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) */
#ifdef CONFIG_BOOTX_TEXT
ppc_md.progress = pmac_progress;
#endif /* CONFIG_BOOTX_TEXT */
if (ppc_md.progress) ppc_md.progress("pmac_init(): exit", 0);
}
#ifdef CONFIG_BOOTX_TEXT
static void __init
pmac_progress(char *s, unsigned short hex)
{
if (boot_text_mapped) {
btext_drawstring(s);
btext_drawchar('\n');
}
}
#endif /* CONFIG_BOOTX_TEXT */
static int __init
pmac_declare_of_platform_devices(void)
{
struct device_node *np;
np = find_devices("uni-n");
if (np) {
for (np = np->child; np != NULL; np = np->sibling)
if (strncmp(np->name, "i2c", 3) == 0) {
of_platform_device_create(np, "uni-n-i2c",
NULL);
break;
}
}
np = find_devices("u3");
if (np) {
for (np = np->child; np != NULL; np = np->sibling)
if (strncmp(np->name, "i2c", 3) == 0) {
of_platform_device_create(np, "u3-i2c",
NULL);
break;
}
}
np = find_devices("valkyrie");
if (np)
of_platform_device_create(np, "valkyrie", NULL);
np = find_devices("platinum");
if (np)
of_platform_device_create(np, "platinum", NULL);
return 0;
}
device_initcall(pmac_declare_of_platform_devices);


@ -0,0 +1,396 @@
/*
* This file contains sleep low-level functions for PowerBook G3.
* Copyright (C) 1999 Benjamin Herrenschmidt (benh@kernel.crashing.org)
* and Paul Mackerras (paulus@samba.org).
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/ppc_asm.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#define MAGIC 0x4c617273 /* 'Lars' */
/*
* Structure for storing CPU registers on the stack.
*/
#define SL_SP 0
#define SL_PC 4
#define SL_MSR 8
#define SL_SDR1 0xc
#define SL_SPRG0 0x10 /* 4 sprg's */
#define SL_DBAT0 0x20
#define SL_IBAT0 0x28
#define SL_DBAT1 0x30
#define SL_IBAT1 0x38
#define SL_DBAT2 0x40
#define SL_IBAT2 0x48
#define SL_DBAT3 0x50
#define SL_IBAT3 0x58
#define SL_TB 0x60
#define SL_R2 0x68
#define SL_CR 0x6c
#define SL_R12 0x70 /* r12 to r31 */
#define SL_SIZE (SL_R12 + 80) /* r12..r31 = 20 registers * 4 bytes */
.section .text
.align 5
#if defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ_PMAC)
/* This gets called by via-pmu.c late during the sleep process.
 * The PMU has already been sent the sleep command and will shut us
 * down soon. We need to save everything that is needed and set up the
 * wakeup vector that will be called by the ROM on wakeup.
 */
_GLOBAL(low_sleep_handler)
#ifndef CONFIG_6xx
blr
#else
mflr r0
stw r0,4(r1)
stwu r1,-SL_SIZE(r1)
mfcr r0
stw r0,SL_CR(r1)
stw r2,SL_R2(r1)
stmw r12,SL_R12(r1)
/* Save MSR & SDR1 */
mfmsr r4
stw r4,SL_MSR(r1)
mfsdr1 r4
stw r4,SL_SDR1(r1)
/* Get a stable timebase and save it */
1: mftbu r4
stw r4,SL_TB(r1)
mftb r5
stw r5,SL_TB+4(r1)
mftbu r3
cmpw r3,r4
bne 1b
/* Save SPRGs */
mfsprg r4,0
stw r4,SL_SPRG0(r1)
mfsprg r4,1
stw r4,SL_SPRG0+4(r1)
mfsprg r4,2
stw r4,SL_SPRG0+8(r1)
mfsprg r4,3
stw r4,SL_SPRG0+12(r1)
/* Save BATs */
mfdbatu r4,0
stw r4,SL_DBAT0(r1)
mfdbatl r4,0
stw r4,SL_DBAT0+4(r1)
mfdbatu r4,1
stw r4,SL_DBAT1(r1)
mfdbatl r4,1
stw r4,SL_DBAT1+4(r1)
mfdbatu r4,2
stw r4,SL_DBAT2(r1)
mfdbatl r4,2
stw r4,SL_DBAT2+4(r1)
mfdbatu r4,3
stw r4,SL_DBAT3(r1)
mfdbatl r4,3
stw r4,SL_DBAT3+4(r1)
mfibatu r4,0
stw r4,SL_IBAT0(r1)
mfibatl r4,0
stw r4,SL_IBAT0+4(r1)
mfibatu r4,1
stw r4,SL_IBAT1(r1)
mfibatl r4,1
stw r4,SL_IBAT1+4(r1)
mfibatu r4,2
stw r4,SL_IBAT2(r1)
mfibatl r4,2
stw r4,SL_IBAT2+4(r1)
mfibatu r4,3
stw r4,SL_IBAT3(r1)
mfibatl r4,3
stw r4,SL_IBAT3+4(r1)
/* Back up various CPU configuration state */
bl __save_cpu_setup
/* The ROM can wake us up via 2 different vectors:
* - On wallstreet & lombard, we must write a magic
* value 'Lars' at address 4 and a pointer to a
* memory location containing the PC to resume from
* at address 0.
* - On Core99, we must store the wakeup vector at
* address 0x80 and optionally its parameters
* at address 0x84. I've had some trouble with those
* parameters however and I no longer use them.
*/
lis r5,grackle_wake_up@ha
addi r5,r5,grackle_wake_up@l
tophys(r5,r5)
stw r5,SL_PC(r1)
lis r4,KERNELBASE@h
tophys(r5,r1)
addi r5,r5,SL_PC
lis r6,MAGIC@ha
addi r6,r6,MAGIC@l
stw r5,0(r4)
stw r6,4(r4)
/* Set up the wakeup words at 0x80-0x84 for Core99 */
lis r3,core99_wake_up@ha
addi r3,r3,core99_wake_up@l
tophys(r3,r3)
stw r3,0x80(r4)
stw r5,0x84(r4)
/* Store a pointer to our backup storage into
* a kernel global
*/
lis r3,sleep_storage@ha
addi r3,r3,sleep_storage@l
stw r5,0(r3)
.globl low_cpu_die
low_cpu_die:
/* Flush & disable all caches */
bl flush_disable_caches
/* Turn off data relocation. */
mfmsr r3 /* get the current MSR */
rlwinm r3,r3,0,28,26 /* Turn off DR bit */
sync
mtmsr r3
isync
BEGIN_FTR_SECTION
/* Flush any pending L2 data prefetches to work around HW bug */
sync
lis r3,0xfff0
lwz r0,0(r3) /* perform cache-inhibited load to ROM */
sync /* (caches are disabled at this point) */
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
/*
* Set the HID0 and MSR for sleep.
*/
mfspr r2,SPRN_HID0
rlwinm r2,r2,0,10,7 /* clear doze, nap */
oris r2,r2,HID0_SLEEP@h
sync
isync
mtspr SPRN_HID0,r2
sync
/* This loop puts us back to sleep in case we have a spurious
* wakeup so that the host bridge properly stays asleep. The
* CPU will be turned off, either after a known time (about 1
* second) on wallstreet & lombard, or as soon as the CPU enters
* SLEEP mode on core99
*/
mfmsr r2
oris r2,r2,MSR_POW@h
1: sync
mtmsr r2
isync
b 1b
/*
* Here is the resume code.
*/
/*
* Core99 machines resume here
* r4 has the physical address of SL_PC(sp) (unused)
*/
_GLOBAL(core99_wake_up)
/* Make sure HID0 no longer contains any sleep bit and that data cache
* is disabled
*/
mfspr r3,SPRN_HID0
rlwinm r3,r3,0,11,7 /* clear SLEEP, NAP, DOZE bits */
rlwinm r3,r3,0,18,15 /* clear DCE, ICE */
mtspr SPRN_HID0,r3
sync
isync
/* sanitize MSR */
mfmsr r3
ori r3,r3,MSR_EE|MSR_IP
xori r3,r3,MSR_EE|MSR_IP
sync
isync
mtmsr r3
sync
isync
/* Recover sleep storage */
lis r3,sleep_storage@ha
addi r3,r3,sleep_storage@l
tophys(r3,r3)
lwz r1,0(r3)
/* Pass thru to older resume code ... */
/*
* Here is the resume code for older machines.
* r1 has the physical address of SL_PC(sp).
*/
grackle_wake_up:
/* Restore the kernel's segment registers before
* we do any r1 memory access as we are not sure they
* are in a sane state above the first 256Mb region
*/
li r0,16 /* load up segment register values */
mtctr r0 /* for context 0 */
lis r3,0x2000 /* Ku = 1, VSID = 0 */
li r4,0
3: mtsrin r3,r4
addi r3,r3,0x111 /* increment VSID */
addis r4,r4,0x1000 /* address of next segment */
bdnz 3b
sync
isync
subi r1,r1,SL_PC
/* Restore various CPU configuration state */
bl __restore_cpu_setup
/* Make sure all FPRs have been initialized */
bl reloc_offset
bl __init_fpu_registers
/* Invalidate & enable L1 cache, we don't care about
* whatever the ROM may have tried to write to memory
*/
bl __inval_enable_L1
/* Restore the BATs, and SDR1. Then we can turn on the MMU. */
lwz r4,SL_SDR1(r1)
mtsdr1 r4
lwz r4,SL_SPRG0(r1)
mtsprg 0,r4
lwz r4,SL_SPRG0+4(r1)
mtsprg 1,r4
lwz r4,SL_SPRG0+8(r1)
mtsprg 2,r4
lwz r4,SL_SPRG0+12(r1)
mtsprg 3,r4
lwz r4,SL_DBAT0(r1)
mtdbatu 0,r4
lwz r4,SL_DBAT0+4(r1)
mtdbatl 0,r4
lwz r4,SL_DBAT1(r1)
mtdbatu 1,r4
lwz r4,SL_DBAT1+4(r1)
mtdbatl 1,r4
lwz r4,SL_DBAT2(r1)
mtdbatu 2,r4
lwz r4,SL_DBAT2+4(r1)
mtdbatl 2,r4
lwz r4,SL_DBAT3(r1)
mtdbatu 3,r4
lwz r4,SL_DBAT3+4(r1)
mtdbatl 3,r4
lwz r4,SL_IBAT0(r1)
mtibatu 0,r4
lwz r4,SL_IBAT0+4(r1)
mtibatl 0,r4
lwz r4,SL_IBAT1(r1)
mtibatu 1,r4
lwz r4,SL_IBAT1+4(r1)
mtibatl 1,r4
lwz r4,SL_IBAT2(r1)
mtibatu 2,r4
lwz r4,SL_IBAT2+4(r1)
mtibatl 2,r4
lwz r4,SL_IBAT3(r1)
mtibatu 3,r4
lwz r4,SL_IBAT3+4(r1)
mtibatl 3,r4
BEGIN_FTR_SECTION
li r4,0
mtspr SPRN_DBAT4U,r4
mtspr SPRN_DBAT4L,r4
mtspr SPRN_DBAT5U,r4
mtspr SPRN_DBAT5L,r4
mtspr SPRN_DBAT6U,r4
mtspr SPRN_DBAT6L,r4
mtspr SPRN_DBAT7U,r4
mtspr SPRN_DBAT7L,r4
mtspr SPRN_IBAT4U,r4
mtspr SPRN_IBAT4L,r4
mtspr SPRN_IBAT5U,r4
mtspr SPRN_IBAT5L,r4
mtspr SPRN_IBAT6U,r4
mtspr SPRN_IBAT6L,r4
mtspr SPRN_IBAT7U,r4
mtspr SPRN_IBAT7L,r4
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
/* Flush all TLBs */
lis r4,0x1000
1: addic. r4,r4,-0x1000
tlbie r4
blt 1b
sync
/* restore the MSR and turn on the MMU */
lwz r3,SL_MSR(r1)
bl turn_on_mmu
/* get back the stack pointer */
tovirt(r1,r1)
/* Restore TB */
li r3,0
mttbl r3
lwz r3,SL_TB(r1)
lwz r4,SL_TB+4(r1)
mttbu r3
mttbl r4
/* Restore the callee-saved registers and return */
lwz r0,SL_CR(r1)
mtcr r0
lwz r2,SL_R2(r1)
lmw r12,SL_R12(r1)
addi r1,r1,SL_SIZE
lwz r0,4(r1)
mtlr r0
blr
turn_on_mmu:
mflr r4
tovirt(r4,r4)
mtsrr0 r4
mtsrr1 r3
sync
isync
rfi
#endif /* CONFIG_6xx */
.section .data
.balign L1_CACHE_LINE_SIZE
sleep_storage:
.long 0
.balign L1_CACHE_LINE_SIZE, 0
#endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ_PMAC) */
.section .text


@ -0,0 +1,716 @@
/*
* SMP support for power macintosh.
*
* We support both the old "powersurge" SMP architecture
* and the current Core99 (G4 PowerMac) machines.
*
* Note that we don't support the very first rev. of
* Apple/DayStar 2 CPUs board, the one with the funky
* watchdog. Hopefully, none of these should be there except
* maybe internally to Apple. I should probably still add some
* code to detect this card though and disable SMP. --BenH.
*
* Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net)
* and Ben Herrenschmidt <benh@kernel.crashing.org>.
*
* Support for DayStar quad CPU cards
* Copyright (C) XLR8, Inc. 1994-2000
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/cpu.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/residual.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/time.h>
#include <asm/open_pic.h>
#include <asm/cacheflush.h>
#include <asm/keylargo.h>
/*
* Powersurge (old powermac SMP) support.
*/
extern void __secondary_start_pmac_0(void);
/* Addresses for powersurge registers */
#define HAMMERHEAD_BASE 0xf8000000
#define HHEAD_CONFIG 0x90
#define HHEAD_SEC_INTR 0xc0
/* register for interrupting the primary processor on the powersurge */
/* N.B. this is actually the ethernet ROM! */
#define PSURGE_PRI_INTR 0xf3019000
/* register for storing the start address for the secondary processor */
/* N.B. this is the PCI config space address register for the 1st bridge */
#define PSURGE_START 0xf2800000
/* Daystar/XLR8 4-CPU card */
#define PSURGE_QUAD_REG_ADDR 0xf8800000
#define PSURGE_QUAD_IRQ_SET 0
#define PSURGE_QUAD_IRQ_CLR 1
#define PSURGE_QUAD_IRQ_PRIMARY 2
#define PSURGE_QUAD_CKSTOP_CTL 3
#define PSURGE_QUAD_PRIMARY_ARB 4
#define PSURGE_QUAD_BOARD_ID 6
#define PSURGE_QUAD_WHICH_CPU 7
#define PSURGE_QUAD_CKSTOP_RDBK 8
#define PSURGE_QUAD_RESET_CTL 11
#define PSURGE_QUAD_OUT(r, v) (out_8(quad_base + ((r) << 4) + 4, (v)))
#define PSURGE_QUAD_IN(r) (in_8(quad_base + ((r) << 4) + 4) & 0x0f)
#define PSURGE_QUAD_BIS(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) | (v)))
#define PSURGE_QUAD_BIC(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v)))
/* virtual addresses for the above */
static volatile u8 __iomem *hhead_base;
static volatile u8 __iomem *quad_base;
static volatile u32 __iomem *psurge_pri_intr;
static volatile u8 __iomem *psurge_sec_intr;
static volatile u32 __iomem *psurge_start;
/* values for psurge_type */
#define PSURGE_NONE -1
#define PSURGE_DUAL 0
#define PSURGE_QUAD_OKEE 1
#define PSURGE_QUAD_COTTON 2
#define PSURGE_QUAD_ICEGRASS 3
/* what sort of powersurge board we have */
static int psurge_type = PSURGE_NONE;
/* L2 and L3 cache settings to pass from CPU0 to CPU1 */
volatile static long int core99_l2_cache;
volatile static long int core99_l3_cache;
/* Timebase freeze GPIO */
static unsigned int core99_tb_gpio;
/* Sync flag for HW tb sync */
static volatile int sec_tb_reset = 0;
static unsigned int pri_tb_hi, pri_tb_lo;
static unsigned int pri_tb_stamp;
static void __devinit core99_init_caches(int cpu)
{
if (!cpu_has_feature(CPU_FTR_L2CR))
return;
if (cpu == 0) {
core99_l2_cache = _get_L2CR();
printk("CPU0: L2CR is %lx\n", core99_l2_cache);
} else {
printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
_set_L2CR(0);
_set_L2CR(core99_l2_cache);
printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
}
if (!cpu_has_feature(CPU_FTR_L3CR))
return;
if (cpu == 0){
core99_l3_cache = _get_L3CR();
printk("CPU0: L3CR is %lx\n", core99_l3_cache);
} else {
printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
_set_L3CR(0);
_set_L3CR(core99_l3_cache);
printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
}
}
/*
* Set and clear IPIs for powersurge.
*/
static inline void psurge_set_ipi(int cpu)
{
if (psurge_type == PSURGE_NONE)
return;
if (cpu == 0)
in_be32(psurge_pri_intr);
else if (psurge_type == PSURGE_DUAL)
out_8(psurge_sec_intr, 0);
else
PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu);
}
static inline void psurge_clr_ipi(int cpu)
{
if (cpu > 0) {
switch(psurge_type) {
case PSURGE_DUAL:
out_8(psurge_sec_intr, ~0);
case PSURGE_NONE:
break;
default:
PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu);
}
}
}
/*
* On powersurge (old SMP powermac architecture) we don't have
* separate IPIs for separate messages like openpic does. Instead
* we have a bitmap for each processor, where a 1 bit means that
* the corresponding message is pending for that processor.
* Ideally each cpu's entry would be in a different cache line.
* -- paulus.
*/
static unsigned long psurge_smp_message[NR_CPUS];
void psurge_smp_message_recv(struct pt_regs *regs)
{
int cpu = smp_processor_id();
int msg;
/* clear interrupt */
psurge_clr_ipi(cpu);
if (num_online_cpus() < 2)
return;
/* make sure there is a message there */
for (msg = 0; msg < 4; msg++)
if (test_and_clear_bit(msg, &psurge_smp_message[cpu]))
smp_message_recv(msg, regs);
}
irqreturn_t psurge_primary_intr(int irq, void *d, struct pt_regs *regs)
{
psurge_smp_message_recv(regs);
return IRQ_HANDLED;
}
static void smp_psurge_message_pass(int target, int msg, unsigned long data,
int wait)
{
int i;
if (num_online_cpus() < 2)
return;
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i))
continue;
if (target == MSG_ALL
|| (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
|| target == i) {
set_bit(msg, &psurge_smp_message[i]);
psurge_set_ipi(i);
}
}
}
/*
* Determine whether a quad card is present. We read the board ID
* register, force the data bus to change to something else, and read
* it again. If it's stable, then the register probably exists (ugh !)
*/
static int __init psurge_quad_probe(void)
{
int type;
unsigned int i;
type = PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID);
if (type < PSURGE_QUAD_OKEE || type > PSURGE_QUAD_ICEGRASS
|| type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
return PSURGE_DUAL;
/* looks OK, try a slightly more rigorous test */
/* bogus is not necessarily cacheline-aligned,
though I don't suppose that really matters. -- paulus */
for (i = 0; i < 100; i++) {
volatile u32 bogus[8];
bogus[(0+i)%8] = 0x00000000;
bogus[(1+i)%8] = 0x55555555;
bogus[(2+i)%8] = 0xFFFFFFFF;
bogus[(3+i)%8] = 0xAAAAAAAA;
bogus[(4+i)%8] = 0x33333333;
bogus[(5+i)%8] = 0xCCCCCCCC;
bogus[(6+i)%8] = 0xCCCCCCCC;
bogus[(7+i)%8] = 0x33333333;
wmb();
asm volatile("dcbf 0,%0" : : "r" (bogus) : "memory");
mb();
if (type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
return PSURGE_DUAL;
}
return type;
}
static void __init psurge_quad_init(void)
{
int procbits;
if (ppc_md.progress) ppc_md.progress("psurge_quad_init", 0x351);
procbits = ~PSURGE_QUAD_IN(PSURGE_QUAD_WHICH_CPU);
if (psurge_type == PSURGE_QUAD_ICEGRASS)
PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
else
PSURGE_QUAD_BIC(PSURGE_QUAD_CKSTOP_CTL, procbits);
mdelay(33);
out_8(psurge_sec_intr, ~0);
PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, procbits);
PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
if (psurge_type != PSURGE_QUAD_ICEGRASS)
PSURGE_QUAD_BIS(PSURGE_QUAD_CKSTOP_CTL, procbits);
PSURGE_QUAD_BIC(PSURGE_QUAD_PRIMARY_ARB, procbits);
mdelay(33);
PSURGE_QUAD_BIC(PSURGE_QUAD_RESET_CTL, procbits);
mdelay(33);
PSURGE_QUAD_BIS(PSURGE_QUAD_PRIMARY_ARB, procbits);
mdelay(33);
}
static int __init smp_psurge_probe(void)
{
int i, ncpus;
/* We don't do SMP on the PPC601 -- paulus */
if (PVR_VER(mfspr(SPRN_PVR)) == 1)
return 1;
/*
* The powersurge cpu board can be used in the generation
* of powermacs that have a socket for an upgradeable cpu card,
* including the 7500, 8500, 9500, 9600.
* The device tree doesn't tell you if you have 2 cpus because
* OF doesn't know anything about the 2nd processor.
* Instead we look for magic bits in magic registers,
* in the hammerhead memory controller in the case of the
* dual-cpu powersurge board. -- paulus.
*/
if (find_devices("hammerhead") == NULL)
return 1;
hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
quad_base = ioremap(PSURGE_QUAD_REG_ADDR, 1024);
psurge_sec_intr = hhead_base + HHEAD_SEC_INTR;
psurge_type = psurge_quad_probe();
if (psurge_type != PSURGE_DUAL) {
psurge_quad_init();
/* All released cards using this HW design have 4 CPUs */
ncpus = 4;
} else {
iounmap(quad_base);
if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
/* not a dual-cpu card */
iounmap(hhead_base);
psurge_type = PSURGE_NONE;
return 1;
}
ncpus = 2;
}
psurge_start = ioremap(PSURGE_START, 4);
psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
/* this is not actually strictly necessary -- paulus. */
for (i = 1; i < ncpus; ++i)
smp_hw_index[i] = i;
if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
return ncpus;
}
static void __init smp_psurge_kick_cpu(int nr)
{
unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
unsigned long a;
/* may need to flush here if secondary bats aren't setup */
for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
asm volatile("dcbf 0,%0" : : "r" (a) : "memory");
asm volatile("sync");
if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);
out_be32(psurge_start, start);
mb();
psurge_set_ipi(nr);
udelay(10);
psurge_clr_ipi(nr);
if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
}
/*
* With the dual-cpu powersurge board, the decrementers and timebases
* of both cpus are frozen after the secondary cpu is started up,
* until we give the secondary cpu another interrupt. This routine
* uses this to get the timebases synchronized.
* -- paulus.
*/
static void __init psurge_dual_sync_tb(int cpu_nr)
{
int t;
set_dec(tb_ticks_per_jiffy);
set_tb(0, 0);
last_jiffy_stamp(cpu_nr) = 0;
if (cpu_nr > 0) {
mb();
sec_tb_reset = 1;
return;
}
/* wait for the secondary to have reset its TB before proceeding */
for (t = 10000000; t > 0 && !sec_tb_reset; --t)
;
/* now interrupt the secondary, starting both TBs */
psurge_set_ipi(1);
smp_tb_synchronized = 1;
}
static struct irqaction psurge_irqaction = {
.handler = psurge_primary_intr,
.flags = SA_INTERRUPT,
.mask = CPU_MASK_NONE,
.name = "primary IPI",
};
static void __init smp_psurge_setup_cpu(int cpu_nr)
{
if (cpu_nr == 0) {
/* If we failed to start the second CPU, we should still
* send it an IPI to start the timebase & DEC or we might
* have them stuck.
*/
if (num_online_cpus() < 2) {
if (psurge_type == PSURGE_DUAL)
psurge_set_ipi(1);
return;
}
/* reset the entry point so if we get another intr we won't
* try to start up again */
out_be32(psurge_start, 0x100);
if (setup_irq(30, &psurge_irqaction))
printk(KERN_ERR "Couldn't get primary IPI interrupt");
}
if (psurge_type == PSURGE_DUAL)
psurge_dual_sync_tb(cpu_nr);
}
void __init smp_psurge_take_timebase(void)
{
/* Dummy implementation */
}
void __init smp_psurge_give_timebase(void)
{
/* Dummy implementation */
}
static int __init smp_core99_probe(void)
{
#ifdef CONFIG_6xx
extern int powersave_nap;
#endif
struct device_node *cpus, *firstcpu;
int i, ncpus = 0, boot_cpu = -1;
u32 *tbprop = NULL;
if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);
cpus = firstcpu = find_type_devices("cpu");
while(cpus != NULL) {
u32 *regprop = (u32 *)get_property(cpus, "reg", NULL);
char *stateprop = (char *)get_property(cpus, "state", NULL);
if (regprop != NULL && stateprop != NULL &&
!strncmp(stateprop, "running", 7))
boot_cpu = *regprop;
++ncpus;
cpus = cpus->next;
}
if (boot_cpu == -1)
printk(KERN_WARNING "Couldn't detect boot CPU !\n");
if (boot_cpu != 0)
printk(KERN_WARNING "Boot CPU is %d, unsupported setup !\n", boot_cpu);
if (machine_is_compatible("MacRISC4")) {
extern struct smp_ops_t core99_smp_ops;
core99_smp_ops.take_timebase = smp_generic_take_timebase;
core99_smp_ops.give_timebase = smp_generic_give_timebase;
} else {
if (firstcpu != NULL)
tbprop = (u32 *)get_property(firstcpu, "timebase-enable", NULL);
if (tbprop)
core99_tb_gpio = *tbprop;
else
core99_tb_gpio = KL_GPIO_TB_ENABLE;
}
if (ncpus > 1) {
mpic_request_ipis();
for (i = 1; i < ncpus; ++i)
smp_hw_index[i] = i;
#ifdef CONFIG_6xx
powersave_nap = 0;
#endif
core99_init_caches(0);
}
return ncpus;
}
static void __devinit smp_core99_kick_cpu(int nr)
{
unsigned long save_vector, new_vector;
unsigned long flags;
volatile unsigned long *vector
= ((volatile unsigned long *)(KERNELBASE+0x100));
if (nr < 0 || nr > 3)
return;
if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);
local_irq_save(flags);
local_irq_disable();
/* Save reset vector */
save_vector = *vector;
/* Setup fake reset vector that does
* b __secondary_start_pmac_0 + nr*8 - KERNELBASE
*/
new_vector = (unsigned long) __secondary_start_pmac_0 + nr * 8;
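/* 0x48000002 is the PowerPC I-form branch opcode with the AA
 * (absolute) bit set, i.e. a 'ba' to the physical entry point */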
*vector = 0x48000002 + new_vector - KERNELBASE;
/* flush data cache and inval instruction cache */
flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
/* Put some life in our friend */
pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
/* FIXME: We wait a bit for the CPU to take the exception, I should
* instead wait for the entry code to set something for me. Well,
* ideally, all that crap will be done in prom.c and the CPU left
* in a RAM-based wait loop like CHRP.
*/
mdelay(1);
/* Restore our exception vector */
*vector = save_vector;
flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
local_irq_restore(flags);
if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
}
static void __devinit smp_core99_setup_cpu(int cpu_nr)
{
/* Setup L2/L3 */
if (cpu_nr != 0)
core99_init_caches(cpu_nr);
/* Setup openpic */
mpic_setup_this_cpu();
if (cpu_nr == 0) {
#ifdef CONFIG_POWER4
extern void g5_phy_disable_cpu1(void);
/* If we didn't start the second CPU, we must take
* it off the bus
*/
if (machine_is_compatible("MacRISC4") &&
num_online_cpus() < 2)
g5_phy_disable_cpu1();
#endif /* CONFIG_POWER4 */
if (ppc_md.progress) ppc_md.progress("core99_setup_cpu 0 done", 0x349);
}
}
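/*
 * The timebase hand-off below uses sec_tb_reset as a tiny state
 * machine: the secondary sets it to 1 to say it is waiting, the
 * primary freezes the timebase, publishes pri_tb_hi/lo and sets it
 * to 2, then the secondary copies the values and clears it back to 0
 * to acknowledge.
 */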
/* not __init, called in sleep/wakeup code */
void smp_core99_take_timebase(void)
{
unsigned long flags;
/* tell the primary we're here */
sec_tb_reset = 1;
mb();
/* wait for the primary to set pri_tb_hi/lo */
while (sec_tb_reset < 2)
mb();
/* set our stuff the same as the primary */
local_irq_save(flags);
set_dec(1);
set_tb(pri_tb_hi, pri_tb_lo);
last_jiffy_stamp(smp_processor_id()) = pri_tb_stamp;
mb();
/* tell the primary we're done */
sec_tb_reset = 0;
mb();
local_irq_restore(flags);
}
/* not __init, called in sleep/wakeup code */
void smp_core99_give_timebase(void)
{
unsigned long flags;
unsigned int t;
/* wait for the secondary to be in take_timebase */
for (t = 100000; t > 0 && !sec_tb_reset; --t)
udelay(10);
if (!sec_tb_reset) {
printk(KERN_WARNING "Timeout waiting sync on second CPU\n");
return;
}
/* freeze the timebase and read it */
/* disable interrupts so the timebase is disabled for the
shortest possible time */
local_irq_save(flags);
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
mb();
pri_tb_hi = get_tbu();
pri_tb_lo = get_tbl();
pri_tb_stamp = last_jiffy_stamp(smp_processor_id());
mb();
/* tell the secondary we're ready */
sec_tb_reset = 2;
mb();
/* wait for the secondary to have taken it */
for (t = 100000; t > 0 && sec_tb_reset; --t)
udelay(10);
if (sec_tb_reset)
printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n");
else
smp_tb_synchronized = 1;
/* Now, restart the timebase by leaving the GPIO to an open collector */
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
local_irq_restore(flags);
}
void smp_core99_message_pass(int target, int msg, unsigned long data, int wait)
{
cpumask_t mask = CPU_MASK_ALL;
/* make sure we're sending something that translates to an IPI */
if (msg > 0x3) {
printk("SMP %d: smp_message_pass: unknown msg %d\n",
smp_processor_id(), msg);
return;
}
switch (target) {
case MSG_ALL:
mpic_send_ipi(msg, mask);
break;
case MSG_ALL_BUT_SELF:
cpu_clear(smp_processor_id(), mask);
mpic_send_ipi(msg, mask);
break;
default:
mpic_send_ipi(msg, cpumask_of_cpu(target));
break;
}
}
/* PowerSurge-style Macs */
struct smp_ops_t psurge_smp_ops = {
.message_pass = smp_psurge_message_pass,
.probe = smp_psurge_probe,
.kick_cpu = smp_psurge_kick_cpu,
.setup_cpu = smp_psurge_setup_cpu,
.give_timebase = smp_psurge_give_timebase,
.take_timebase = smp_psurge_take_timebase,
};
/* Core99 Macs (dual G4s) */
struct smp_ops_t core99_smp_ops = {
.message_pass = smp_core99_message_pass,
.probe = smp_core99_probe,
.kick_cpu = smp_core99_kick_cpu,
.setup_cpu = smp_core99_setup_cpu,
.give_timebase = smp_core99_give_timebase,
.take_timebase = smp_core99_take_timebase,
};
#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
cpu_clear(smp_processor_id(), cpu_online_map);
/* XXX reset cpu affinity here */
openpic_set_priority(0xf);
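/* writing the largest positive value pushes the next decrementer
 * exception as far away as possible while the CPU dies */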
asm volatile("mtdec %0" : : "r" (0x7fffffff));
mb();
udelay(20);
asm volatile("mtdec %0" : : "r" (0x7fffffff));
return 0;
}
extern void low_cpu_die(void) __attribute__((noreturn)); /* in pmac_sleep.S */
static int cpu_dead[NR_CPUS];
void cpu_die(void)
{
local_irq_disable();
cpu_dead[smp_processor_id()] = 1;
mb();
low_cpu_die();
}
void __cpu_die(unsigned int cpu)
{
int timeout;
timeout = 1000;
while (!cpu_dead[cpu]) {
if (--timeout == 0) {
printk("CPU %u refused to die!\n", cpu);
break;
}
msleep(1);
}
cpu_callin_map[cpu] = 0;
cpu_dead[cpu] = 0;
}
#endif


@ -0,0 +1,291 @@
/*
* Support for periodic interrupts (100 per second) and for getting
* the current time from the RTC on Power Macintoshes.
*
* We use the decrementer register for our periodic interrupts.
*
* Paul Mackerras August 1996.
* Copyright (C) 1996 Paul Mackerras.
*/
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/adb.h>
#include <linux/cuda.h>
#include <linux/pmu.h>
#include <linux/hardirq.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/nvram.h>
/* Apparently the RTC stores seconds since 1 Jan 1904 */
#define RTC_OFFSET 2082844800
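/* 1904-1970 is 66 years including 17 leap days: 24107 days * 86400 s/day */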
/*
* Calibrate the decrementer frequency with the VIA timer 1.
*/
#define VIA_TIMER_FREQ_6 4700000 /* timer 1 frequency * 6 */
/* VIA registers */
#define RS 0x200 /* skip between registers */
#define T1CL (4*RS) /* Timer 1 ctr/latch (low 8 bits) */
#define T1CH (5*RS) /* Timer 1 counter (high 8 bits) */
#define T1LL (6*RS) /* Timer 1 latch (low 8 bits) */
#define T1LH (7*RS) /* Timer 1 latch (high 8 bits) */
#define ACR (11*RS) /* Auxiliary control register */
#define IFR (13*RS) /* Interrupt flag register */
/* Bits in ACR */
#define T1MODE 0xc0 /* Timer 1 mode */
#define T1MODE_CONT 0x40 /* continuous interrupts */
/* Bits in IFR and IER */
#define T1_INT 0x40 /* Timer 1 interrupt */
extern struct timezone sys_tz;
long __init
pmac_time_init(void)
{
#ifdef CONFIG_NVRAM
s32 delta = 0;
int dst;
delta = ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x9)) << 16;
delta |= ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xa)) << 8;
delta |= pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xb);
if (delta & 0x00800000UL)
delta |= 0xFF000000UL;
dst = ((pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x8) & 0x80) != 0);
printk("GMT Delta read from XPRAM: %d minutes, DST: %s\n", delta/60,
dst ? "on" : "off");
return delta;
#else
return 0;
#endif
}
unsigned long
pmac_get_rtc_time(void)
{
#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU)
struct adb_request req;
unsigned long now;
#endif
/* Get the time from the RTC */
switch (sys_ctrler) {
#ifdef CONFIG_ADB_CUDA
case SYS_CTRLER_CUDA:
if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
return 0;
while (!req.complete)
cuda_poll();
if (req.reply_len != 7)
printk(KERN_ERR "pmac_get_rtc_time: got %d byte reply\n",
req.reply_len);
now = (req.reply[3] << 24) + (req.reply[4] << 16)
+ (req.reply[5] << 8) + req.reply[6];
return now - RTC_OFFSET;
#endif /* CONFIG_ADB_CUDA */
#ifdef CONFIG_ADB_PMU
case SYS_CTRLER_PMU:
if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
return 0;
while (!req.complete)
pmu_poll();
if (req.reply_len != 4)
printk(KERN_ERR "pmac_get_rtc_time: got %d byte reply\n",
req.reply_len);
now = (req.reply[0] << 24) + (req.reply[1] << 16)
+ (req.reply[2] << 8) + req.reply[3];
return now - RTC_OFFSET;
#endif /* CONFIG_ADB_PMU */
default: ;
}
return 0;
}
int
pmac_set_rtc_time(unsigned long nowtime)
{
#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU)
struct adb_request req;
#endif
nowtime += RTC_OFFSET;
switch (sys_ctrler) {
#ifdef CONFIG_ADB_CUDA
case SYS_CTRLER_CUDA:
if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
nowtime >> 24, nowtime >> 16, nowtime >> 8, nowtime) < 0)
return 0;
while (!req.complete)
cuda_poll();
if ((req.reply_len != 3) && (req.reply_len != 7))
printk(KERN_ERR "pmac_set_rtc_time: got %d byte reply\n",
req.reply_len);
return 1;
#endif /* CONFIG_ADB_CUDA */
#ifdef CONFIG_ADB_PMU
case SYS_CTRLER_PMU:
if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
nowtime >> 24, nowtime >> 16, nowtime >> 8, nowtime) < 0)
return 0;
while (!req.complete)
pmu_poll();
if (req.reply_len != 0)
printk(KERN_ERR "pmac_set_rtc_time: got %d byte reply\n",
req.reply_len);
return 1;
#endif /* CONFIG_ADB_PMU */
default:
return 0;
}
}
/*
* Calibrate the decrementer register using VIA timer 1.
* This is used both on powermacs and CHRP machines.
*/
int __init
via_calibrate_decr(void)
{
struct device_node *vias;
volatile unsigned char __iomem *via;
int count = VIA_TIMER_FREQ_6 / 100;
unsigned int dstart, dend;
vias = find_devices("via-cuda");
if (vias == 0)
vias = find_devices("via-pmu");
if (vias == 0)
vias = find_devices("via");
if (vias == 0 || vias->n_addrs == 0)
return 0;
via = ioremap(vias->addrs[0].address, vias->addrs[0].size);
/* set timer 1 for continuous interrupts */
out_8(&via[ACR], (via[ACR] & ~T1MODE) | T1MODE_CONT);
/* set the counter to a small value */
out_8(&via[T1CH], 2);
/* set the latch to `count' */
out_8(&via[T1LL], count);
out_8(&via[T1LH], count >> 8);
/* wait until it hits 0 */
while ((in_8(&via[IFR]) & T1_INT) == 0)
;
dstart = get_dec();
/* clear the interrupt & wait until it hits 0 again */
in_8(&via[T1CL]);
while ((in_8(&via[IFR]) & T1_INT) == 0)
;
dend = get_dec();
tb_ticks_per_jiffy = (dstart - dend) / (6 * (HZ/100));
tb_to_us = mulhwu_scale_factor(dstart - dend, 60000);
printk(KERN_INFO "via_calibrate_decr: ticks per jiffy = %u (%u ticks)\n",
tb_ticks_per_jiffy, dstart - dend);
iounmap(via);
return 1;
}
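/* Worked numbers (an illustration only, assuming VIA_TIMER_FREQ_6 is six
* times the actual timer 1 input frequency, as its name suggests): the latch
* holds count = VIA_TIMER_FREQ_6/100 ticks, so one full timer period lasts
* 6/100 of a second.  The (dstart - dend) decrementer ticks thus elapse in
* 60 ms, i.e. 6*(HZ/100) jiffies, which is the divisor used above; the same
* 60000 us interval is what mulhwu_scale_factor() turns into tb_to_us.
* A 16.666 MHz decrementer, for instance, would give dstart - dend of about
* 1000000 and, with HZ=100, tb_ticks_per_jiffy of about 166666.
*/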
#ifdef CONFIG_PM
/*
* Reset the time after a sleep.
*/
static int
time_sleep_notify(struct pmu_sleep_notifier *self, int when)
{
static unsigned long time_diff;
unsigned long flags;
unsigned long seq;
switch (when) {
case PBOOK_SLEEP_NOW:
do {
seq = read_seqbegin_irqsave(&xtime_lock, flags);
time_diff = xtime.tv_sec - pmac_get_rtc_time();
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
break;
case PBOOK_WAKE:
write_seqlock_irqsave(&xtime_lock, flags);
xtime.tv_sec = pmac_get_rtc_time() + time_diff;
xtime.tv_nsec = 0;
last_rtc_update = xtime.tv_sec;
write_sequnlock_irqrestore(&xtime_lock, flags);
break;
}
return PBOOK_SLEEP_OK;
}
static struct pmu_sleep_notifier time_sleep_notifier = {
time_sleep_notify, SLEEP_LEVEL_MISC,
};
#endif /* CONFIG_PM */
/*
* Query the OF and get the decr frequency.
* This was taken from the pmac time_init() when merging the prep/pmac
* time functions.
*/
void __init
pmac_calibrate_decr(void)
{
struct device_node *cpu;
unsigned int freq, *fp;
#ifdef CONFIG_PM
pmu_register_sleep_notifier(&time_sleep_notifier);
#endif /* CONFIG_PM */
/* We assume MacRISC2 machines have correct device-tree
* calibration. That's better since the VIA itself seems
* to be slightly off. --BenH
*/
if (!machine_is_compatible("MacRISC2") &&
!machine_is_compatible("MacRISC3") &&
!machine_is_compatible("MacRISC4"))
if (via_calibrate_decr())
return;
/* Special case: QuickSilver G4s seem to have a badly calibrated
* timebase-frequency in OF; the VIA is much better on these. We should
* probably implement calibration based on the KL timer on these
* machines anyway... -BenH
*/
if (machine_is_compatible("PowerMac3,5"))
if (via_calibrate_decr())
return;
/*
* The cpu node should have a timebase-frequency property
* to tell us the rate at which the decrementer counts.
*/
cpu = find_type_devices("cpu");
if (cpu == 0)
panic("can't find cpu node in time_init");
fp = (unsigned int *) get_property(cpu, "timebase-frequency", NULL);
if (fp == 0)
panic("can't get cpu timebase frequency");
freq = *fp;
printk("time_init: decrementer frequency = %u.%.6u MHz\n",
freq/1000000, freq%1000000);
tb_ticks_per_jiffy = freq / HZ;
tb_to_us = mulhwu_scale_factor(freq, 1000000);
}
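/* Example (illustrative numbers only): a device tree reporting a
* timebase-frequency of 33333333 Hz prints as "33.333333 MHz" and, with
* HZ=100, gives tb_ticks_per_jiffy = 333333; mulhwu_scale_factor(freq,
* 1000000) then provides the tick-to-microsecond scale used for tb_to_us.
*/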


@ -0,0 +1,22 @@
config PREP_RESIDUAL
bool "Support for PReP Residual Data"
depends on PPC_PREP
help
Some PReP systems have residual data passed to the kernel by the
firmware. This allows detection of memory size, devices present and
other useful pieces of information. Sometimes this information is
not present or incorrect, in which case it could lead to the machine
behaving incorrectly. If this happens, either disable PREP_RESIDUAL
or pass the 'noresidual' option to the kernel.
If you are running a PReP system, say Y here, otherwise say N.
config PROC_PREPRESIDUAL
bool "Support for reading of PReP Residual Data in /proc"
depends on PREP_RESIDUAL && PROC_FS
help
Enabling this option will create a /proc/residual file which allows
you to get at the residual data on PReP systems. You will need a tool
(lsresidual) to parse it. If you aren't on a PReP system, you don't
want this.


@ -0,0 +1,47 @@
config PPC_SPLPAR
depends on PPC_PSERIES
bool "Support for shared-processor logical partitions"
default n
help
Enabling this option will make the kernel run more efficiently
on logically-partitioned pSeries systems which use shared
processors, that is, which share physical processors between
two or more partitions.
config HMT
bool "Hardware multithreading"
depends on SMP && PPC_PSERIES && BROKEN
help
This option enables hardware multithreading on RS64 cpus.
pSeries systems p620 and p660 have such a cpu type.
config EEH
bool "PCI Extended Error Handling (EEH)" if EMBEDDED
depends on PPC_PSERIES
default y if !EMBEDDED
config PPC_RTAS
bool
depends on PPC_PSERIES || PPC_BPA
default y
config RTAS_PROC
bool "Proc interface to RTAS"
depends on PPC_RTAS
default y
config RTAS_FLASH
tristate "Firmware flash interface"
depends on PPC64 && RTAS_PROC
config SCANLOG
tristate "Scanlog dump interface"
depends on RTAS_PROC && PPC_PSERIES
config LPARCFG
tristate "LPAR Configuration Data"
depends on PPC_PSERIES || PPC_ISERIES
help
Provide system capacity information via human readable
<key word>=<value> pairs through a /proc/ppc64/lparcfg interface.


@ -0,0 +1 @@
obj-$(CONFIG_MPIC) += mpic.o

904
arch/powerpc/sysdev/mpic.c Normal file

@ -0,0 +1,904 @@
/*
* arch/powerpc/sysdev/mpic.c
*
* Driver for interrupt controllers following the OpenPIC standard, the
* common implementation being IBM's MPIC. This driver can also deal
* with various broken implementations of this HW.
*
* Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#undef DEBUG
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <asm/ptrace.h>
#include <asm/signal.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/machdep.h>
#include <asm/mpic.h>
#include <asm/smp.h>
#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif
static struct mpic *mpics;
static struct mpic *mpic_primary;
static DEFINE_SPINLOCK(mpic_lock);
/*
* Register accessor functions
*/
static inline u32 _mpic_read(unsigned int be, volatile u32 __iomem *base,
unsigned int reg)
{
if (be)
return in_be32(base + (reg >> 2));
else
return in_le32(base + (reg >> 2));
}
static inline void _mpic_write(unsigned int be, volatile u32 __iomem *base,
unsigned int reg, u32 value)
{
if (be)
out_be32(base + (reg >> 2), value);
else
out_le32(base + (reg >> 2), value);
}
static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
{
unsigned int be = (mpic->flags & MPIC_BIG_ENDIAN) != 0;
unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10);
if (mpic->flags & MPIC_BROKEN_IPI)
be = !be;
return _mpic_read(be, mpic->gregs, offset);
}
static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value)
{
unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10);
_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->gregs, offset, value);
}
static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
{
unsigned int cpu = 0;
if (mpic->flags & MPIC_PRIMARY)
cpu = hard_smp_processor_id();
return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg);
}
static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value)
{
unsigned int cpu = 0;
if (mpic->flags & MPIC_PRIMARY)
cpu = hard_smp_processor_id();
_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg, value);
}
static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg)
{
unsigned int isu = src_no >> mpic->isu_shift;
unsigned int idx = src_no & mpic->isu_mask;
return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
reg + (idx * MPIC_IRQ_STRIDE));
}
static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
unsigned int reg, u32 value)
{
unsigned int isu = src_no >> mpic->isu_shift;
unsigned int idx = src_no & mpic->isu_mask;
_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
reg + (idx * MPIC_IRQ_STRIDE), value);
}
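/* Illustrative note: a global source number is split into an ISU index and
* an offset within that ISU.  Assuming isu_size = 16 (so isu_shift = 4 and
* isu_mask = 0xf, see mpic_alloc() below), source 21 is reached through
* mpic->isus[1] at index 5, i.e. at byte offset 5 * MPIC_IRQ_STRIDE from
* that ISU's base.
*/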
#define mpic_read(b,r) _mpic_read(mpic->flags & MPIC_BIG_ENDIAN,(b),(r))
#define mpic_write(b,r,v) _mpic_write(mpic->flags & MPIC_BIG_ENDIAN,(b),(r),(v))
#define mpic_ipi_read(i) _mpic_ipi_read(mpic,(i))
#define mpic_ipi_write(i,v) _mpic_ipi_write(mpic,(i),(v))
#define mpic_cpu_read(i) _mpic_cpu_read(mpic,(i))
#define mpic_cpu_write(i,v) _mpic_cpu_write(mpic,(i),(v))
#define mpic_irq_read(s,r) _mpic_irq_read(mpic,(s),(r))
#define mpic_irq_write(s,r,v) _mpic_irq_write(mpic,(s),(r),(v))
/*
* Low level utility functions
*/
/* Check if we have one of those nice broken MPICs with a flipped endian on
* reads from IPI registers
*/
static void __init mpic_test_broken_ipi(struct mpic *mpic)
{
u32 r;
mpic_write(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0, MPIC_VECPRI_MASK);
r = mpic_read(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0);
if (r == le32_to_cpu(MPIC_VECPRI_MASK)) {
printk(KERN_INFO "mpic: Detected reversed IPI registers\n");
mpic->flags |= MPIC_BROKEN_IPI;
}
}
#ifdef CONFIG_MPIC_BROKEN_U3
/* Test if an interrupt is sourced from HyperTransport (used on broken U3s)
* to force the edge setting on the MPIC and do the ack workaround.
*/
static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source_no)
{
if (source_no >= 128 || !mpic->fixups)
return 0;
return mpic->fixups[source_no].base != NULL;
}
static inline void mpic_apic_end_irq(struct mpic *mpic, unsigned int source_no)
{
struct mpic_irq_fixup *fixup = &mpic->fixups[source_no];
u32 tmp;
spin_lock(&mpic->fixup_lock);
writeb(0x11 + 2 * fixup->irq, fixup->base);
tmp = readl(fixup->base + 2);
writel(tmp | 0x80000000ul, fixup->base + 2);
/* config writes shouldn't be posted but let's be safe ... */
(void)readl(fixup->base + 2);
spin_unlock(&mpic->fixup_lock);
}
static void __init mpic_amd8111_read_irq(struct mpic *mpic, u8 __iomem *devbase)
{
int i, irq;
u32 tmp;
printk(KERN_INFO "mpic: - Workarounds on AMD 8111 @ %p\n", devbase);
for (i=0; i < 24; i++) {
writeb(0x10 + 2*i, devbase + 0xf2);
tmp = readl(devbase + 0xf4);
if ((tmp & 0x1) || !(tmp & 0x20))
continue;
irq = (tmp >> 16) & 0xff;
mpic->fixups[irq].irq = i;
mpic->fixups[irq].base = devbase + 0xf2;
}
}
static void __init mpic_amd8131_read_irq(struct mpic *mpic, u8 __iomem *devbase)
{
int i, irq;
u32 tmp;
printk(KERN_INFO "mpic: - Workarounds on AMD 8131 @ %p\n", devbase);
for (i=0; i < 4; i++) {
writeb(0x10 + 2*i, devbase + 0xba);
tmp = readl(devbase + 0xbc);
if ((tmp & 0x1) || !(tmp & 0x20))
continue;
irq = (tmp >> 16) & 0xff;
mpic->fixups[irq].irq = i;
mpic->fixups[irq].base = devbase + 0xba;
}
}
static void __init mpic_scan_ioapics(struct mpic *mpic)
{
unsigned int devfn;
u8 __iomem *cfgspace;
printk(KERN_INFO "mpic: Setting up IO-APICs workarounds for U3\n");
/* Allocate fixups array */
mpic->fixups = alloc_bootmem(128 * sizeof(struct mpic_irq_fixup));
BUG_ON(mpic->fixups == NULL);
memset(mpic->fixups, 0, 128 * sizeof(struct mpic_irq_fixup));
/* Init spinlock */
spin_lock_init(&mpic->fixup_lock);
/* Map u3 config space. We assume all IO-APICs are on the primary bus
* and slot will never be above "0xf" so we only need to map 32k
*/
cfgspace = (unsigned char __iomem *)ioremap(0xf2000000, 0x8000);
BUG_ON(cfgspace == NULL);
/* Now we scan all slots. We do a very quick scan, we read the header type,
* vendor ID and device ID only, that's plenty enough
*/
for (devfn = 0; devfn < PCI_DEVFN(0x10,0); devfn ++) {
u8 __iomem *devbase = cfgspace + (devfn << 8);
u8 hdr_type = readb(devbase + PCI_HEADER_TYPE);
u32 l = readl(devbase + PCI_VENDOR_ID);
u16 vendor_id, device_id;
int multifunc = 0;
DBG("devfn %x, l: %x\n", devfn, l);
/* If no device, skip */
if (l == 0xffffffff || l == 0x00000000 ||
l == 0x0000ffff || l == 0xffff0000)
goto next;
/* Check if it's a multifunction device (only really used
* for function 0 though)
*/
multifunc = !!(hdr_type & 0x80);
vendor_id = l & 0xffff;
device_id = (l >> 16) & 0xffff;
/* If a known device, go to fixup setup code */
if (vendor_id == PCI_VENDOR_ID_AMD && device_id == 0x7460)
mpic_amd8111_read_irq(mpic, devbase);
if (vendor_id == PCI_VENDOR_ID_AMD && device_id == 0x7450)
mpic_amd8131_read_irq(mpic, devbase);
next:
/* next device, if function 0 */
if ((PCI_FUNC(devfn) == 0) && !multifunc)
devfn += 7;
}
}
#endif /* CONFIG_MPIC_BROKEN_U3 */
/* Find an mpic associated with a given linux interrupt */
static struct mpic *mpic_find(unsigned int irq, unsigned int *is_ipi)
{
struct mpic *mpic = mpics;
while(mpic) {
/* search IPIs first since they may override the main interrupts */
if (irq >= mpic->ipi_offset && irq < (mpic->ipi_offset + 4)) {
if (is_ipi)
*is_ipi = 1;
return mpic;
}
if (irq >= mpic->irq_offset &&
irq < (mpic->irq_offset + mpic->irq_count)) {
if (is_ipi)
*is_ipi = 0;
return mpic;
}
mpic = mpic -> next;
}
return NULL;
}
/* Convert a cpu mask from logical to physical cpu numbers. */
static inline u32 mpic_physmask(u32 cpumask)
{
int i;
u32 mask = 0;
for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1)
mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
return mask;
}
#ifdef CONFIG_SMP
/* Get the mpic structure from the IPI number */
static inline struct mpic * mpic_from_ipi(unsigned int ipi)
{
return container_of(irq_desc[ipi].handler, struct mpic, hc_ipi);
}
#endif
/* Get the mpic structure from the irq number */
static inline struct mpic * mpic_from_irq(unsigned int irq)
{
return container_of(irq_desc[irq].handler, struct mpic, hc_irq);
}
/* Send an EOI */
static inline void mpic_eoi(struct mpic *mpic)
{
mpic_cpu_write(MPIC_CPU_EOI, 0);
(void)mpic_cpu_read(MPIC_CPU_WHOAMI);
}
#ifdef CONFIG_SMP
static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
struct mpic *mpic = dev_id;
smp_message_recv(irq - mpic->ipi_offset, regs);
return IRQ_HANDLED;
}
#endif /* CONFIG_SMP */
/*
* Linux descriptor level callbacks
*/
static void mpic_enable_irq(unsigned int irq)
{
unsigned int loops = 100000;
struct mpic *mpic = mpic_from_irq(irq);
unsigned int src = irq - mpic->irq_offset;
DBG("%s: enable_irq: %d (src %d)\n", mpic->name, irq, src);
mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & ~MPIC_VECPRI_MASK);
/* make sure mask gets to controller before we return to user */
do {
if (!loops--) {
printk(KERN_ERR "mpic_enable_irq timeout\n");
break;
}
} while(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK);
}
static void mpic_disable_irq(unsigned int irq)
{
unsigned int loops = 100000;
struct mpic *mpic = mpic_from_irq(irq);
unsigned int src = irq - mpic->irq_offset;
DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src);
mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) | MPIC_VECPRI_MASK);
/* make sure mask gets to controller before we return to user */
do {
if (!loops--) {
printk(KERN_ERR "mpic_enable_irq timeout\n");
break;
}
} while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK));
}
static void mpic_end_irq(unsigned int irq)
{
struct mpic *mpic = mpic_from_irq(irq);
DBG("%s: end_irq: %d\n", mpic->name, irq);
/* We always EOI on end_irq() even for edge interrupts since that
* should only lower the priority, the MPIC should have properly
* latched another edge interrupt coming in anyway
*/
#ifdef CONFIG_MPIC_BROKEN_U3
if (mpic->flags & MPIC_BROKEN_U3) {
unsigned int src = irq - mpic->irq_offset;
if (mpic_is_ht_interrupt(mpic, src))
mpic_apic_end_irq(mpic, src);
}
#endif /* CONFIG_MPIC_BROKEN_U3 */
mpic_eoi(mpic);
}
#ifdef CONFIG_SMP
static void mpic_enable_ipi(unsigned int irq)
{
struct mpic *mpic = mpic_from_ipi(irq);
unsigned int src = irq - mpic->ipi_offset;
DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src);
mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
}
static void mpic_disable_ipi(unsigned int irq)
{
/* NEVER disable an IPI... that's just plain wrong! */
}
static void mpic_end_ipi(unsigned int irq)
{
struct mpic *mpic = mpic_from_ipi(irq);
/*
* IPIs are marked IRQ_PER_CPU. This has the side effect of
* preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
* applying to them. We EOI them late to avoid re-entering.
* We mark IPI's with SA_INTERRUPT as they must run with
* irqs disabled.
*/
mpic_eoi(mpic);
}
#endif /* CONFIG_SMP */
static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
{
struct mpic *mpic = mpic_from_irq(irq);
cpumask_t tmp;
cpus_and(tmp, cpumask, cpu_online_map);
mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_DESTINATION,
mpic_physmask(cpus_addr(tmp)[0]));
}
/*
* Exported functions
*/
struct mpic * __init mpic_alloc(unsigned long phys_addr,
unsigned int flags,
unsigned int isu_size,
unsigned int irq_offset,
unsigned int irq_count,
unsigned int ipi_offset,
unsigned char *senses,
unsigned int senses_count,
const char *name)
{
struct mpic *mpic;
u32 reg;
const char *vers;
int i;
mpic = alloc_bootmem(sizeof(struct mpic));
if (mpic == NULL)
return NULL;
memset(mpic, 0, sizeof(struct mpic));
mpic->name = name;
mpic->hc_irq.typename = name;
mpic->hc_irq.enable = mpic_enable_irq;
mpic->hc_irq.disable = mpic_disable_irq;
mpic->hc_irq.end = mpic_end_irq;
if (flags & MPIC_PRIMARY)
mpic->hc_irq.set_affinity = mpic_set_affinity;
#ifdef CONFIG_SMP
mpic->hc_ipi.typename = name;
mpic->hc_ipi.enable = mpic_enable_ipi;
mpic->hc_ipi.disable = mpic_disable_ipi;
mpic->hc_ipi.end = mpic_end_ipi;
#endif /* CONFIG_SMP */
mpic->flags = flags;
mpic->isu_size = isu_size;
mpic->irq_offset = irq_offset;
mpic->irq_count = irq_count;
mpic->ipi_offset = ipi_offset;
mpic->num_sources = 0; /* so far */
mpic->senses = senses;
mpic->senses_count = senses_count;
/* Map the global registers */
mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000);
mpic->tmregs = mpic->gregs + (MPIC_TIMER_BASE >> 2);
BUG_ON(mpic->gregs == NULL);
/* Reset */
if (flags & MPIC_WANTS_RESET) {
mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
| MPIC_GREG_GCONF_RESET);
while( mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
& MPIC_GREG_GCONF_RESET)
mb();
}
/* Read feature register, calculate num CPUs and, for non-ISU
* MPICs, num sources as well. On ISU MPICs, sources are counted
* as ISUs are added
*/
reg = mpic_read(mpic->gregs, MPIC_GREG_FEATURE_0);
mpic->num_cpus = ((reg & MPIC_GREG_FEATURE_LAST_CPU_MASK)
>> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1;
if (isu_size == 0)
mpic->num_sources = ((reg & MPIC_GREG_FEATURE_LAST_SRC_MASK)
>> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1;
/* Map the per-CPU registers */
for (i = 0; i < mpic->num_cpus; i++) {
mpic->cpuregs[i] = ioremap(phys_addr + MPIC_CPU_BASE +
i * MPIC_CPU_STRIDE, 0x1000);
BUG_ON(mpic->cpuregs[i] == NULL);
}
/* Initialize main ISU if none provided */
if (mpic->isu_size == 0) {
mpic->isu_size = mpic->num_sources;
mpic->isus[0] = ioremap(phys_addr + MPIC_IRQ_BASE,
MPIC_IRQ_STRIDE * mpic->isu_size);
BUG_ON(mpic->isus[0] == NULL);
}
mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
mpic->isu_mask = (1 << mpic->isu_shift) - 1;
/* Display version */
switch (reg & MPIC_GREG_FEATURE_VERSION_MASK) {
case 1:
vers = "1.0";
break;
case 2:
vers = "1.2";
break;
case 3:
vers = "1.3";
break;
default:
vers = "<unknown>";
break;
}
printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %lx, max %d CPUs\n",
name, vers, phys_addr, mpic->num_cpus);
printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", mpic->isu_size,
mpic->isu_shift, mpic->isu_mask);
mpic->next = mpics;
mpics = mpic;
if (flags & MPIC_PRIMARY)
mpic_primary = mpic;
return mpic;
}
void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
unsigned long phys_addr)
{
unsigned int isu_first = isu_num * mpic->isu_size;
BUG_ON(isu_num >= MPIC_MAX_ISU);
mpic->isus[isu_num] = ioremap(phys_addr, MPIC_IRQ_STRIDE * mpic->isu_size);
if ((isu_first + mpic->isu_size) > mpic->num_sources)
mpic->num_sources = isu_first + mpic->isu_size;
}
void __init mpic_setup_cascade(unsigned int irq, mpic_cascade_t handler,
void *data)
{
struct mpic *mpic = mpic_find(irq, NULL);
unsigned long flags;
/* Synchronization here is a bit dodgy, so don't try to replace cascade
* interrupts on the fly too often ... but normally it's set up at boot.
*/
spin_lock_irqsave(&mpic_lock, flags);
if (mpic->cascade)
mpic_disable_irq(mpic->cascade_vec + mpic->irq_offset);
mpic->cascade = NULL;
wmb();
mpic->cascade_vec = irq - mpic->irq_offset;
mpic->cascade_data = data;
wmb();
mpic->cascade = handler;
mpic_enable_irq(irq);
spin_unlock_irqrestore(&mpic_lock, flags);
}
void __init mpic_init(struct mpic *mpic)
{
int i;
BUG_ON(mpic->num_sources == 0);
printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);
/* Set current processor priority to max */
mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);
/* Initialize timers: just disable them all */
for (i = 0; i < 4; i++) {
mpic_write(mpic->tmregs,
i * MPIC_TIMER_STRIDE + MPIC_TIMER_DESTINATION, 0);
mpic_write(mpic->tmregs,
i * MPIC_TIMER_STRIDE + MPIC_TIMER_VECTOR_PRI,
MPIC_VECPRI_MASK |
(MPIC_VEC_TIMER_0 + i));
}
/* Initialize IPIs to our reserved vectors and mark them disabled for now */
mpic_test_broken_ipi(mpic);
for (i = 0; i < 4; i++) {
mpic_ipi_write(i,
MPIC_VECPRI_MASK |
(10 << MPIC_VECPRI_PRIORITY_SHIFT) |
(MPIC_VEC_IPI_0 + i));
#ifdef CONFIG_SMP
if (!(mpic->flags & MPIC_PRIMARY))
continue;
irq_desc[mpic->ipi_offset+i].status |= IRQ_PER_CPU;
irq_desc[mpic->ipi_offset+i].handler = &mpic->hc_ipi;
#endif /* CONFIG_SMP */
}
/* Initialize interrupt sources */
if (mpic->irq_count == 0)
mpic->irq_count = mpic->num_sources;
#ifdef CONFIG_MPIC_BROKEN_U3
/* Do the ioapic fixups on U3 broken mpic */
DBG("MPIC flags: %x\n", mpic->flags);
if ((mpic->flags & MPIC_BROKEN_U3) && (mpic->flags & MPIC_PRIMARY))
mpic_scan_ioapics(mpic);
#endif /* CONFIG_MPIC_BROKEN_U3 */
for (i = 0; i < mpic->num_sources; i++) {
/* start with vector = source number, and masked */
u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT);
int level = 0;
/* if it's an IPI, we skip it */
if ((mpic->irq_offset + i) >= (mpic->ipi_offset + i) &&
(mpic->irq_offset + i) < (mpic->ipi_offset + i + 4))
continue;
/* do senses munging */
if (mpic->senses && i < mpic->senses_count) {
if (mpic->senses[i] & IRQ_SENSE_LEVEL)
vecpri |= MPIC_VECPRI_SENSE_LEVEL;
if (mpic->senses[i] & IRQ_POLARITY_POSITIVE)
vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
} else
vecpri |= MPIC_VECPRI_SENSE_LEVEL;
/* remember if it was a level interrupt */
level = (vecpri & MPIC_VECPRI_SENSE_LEVEL);
/* deal with broken U3 */
if (mpic->flags & MPIC_BROKEN_U3) {
#ifdef CONFIG_MPIC_BROKEN_U3
if (mpic_is_ht_interrupt(mpic, i)) {
vecpri &= ~(MPIC_VECPRI_SENSE_MASK |
MPIC_VECPRI_POLARITY_MASK);
vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
}
#else
printk(KERN_ERR "mpic: BROKEN_U3 set, but CONFIG doesn't match\n");
#endif
}
DBG("setup source %d, vecpri: %08x, level: %d\n", i, vecpri,
(level != 0));
/* init hw */
mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri);
mpic_irq_write(i, MPIC_IRQ_DESTINATION,
1 << hard_smp_processor_id());
/* init linux descriptors */
if (i < mpic->irq_count) {
irq_desc[mpic->irq_offset+i].status = level ? IRQ_LEVEL : 0;
irq_desc[mpic->irq_offset+i].handler = &mpic->hc_irq;
}
}
/* Init spurious vector */
mpic_write(mpic->gregs, MPIC_GREG_SPURIOUS, MPIC_VEC_SPURRIOUS);
/* Disable 8259 passthrough */
mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
| MPIC_GREG_GCONF_8259_PTHROU_DIS);
/* Set current processor priority to 0 */
mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);
}
void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
{
int is_ipi;
struct mpic *mpic = mpic_find(irq, &is_ipi);
unsigned long flags;
u32 reg;
spin_lock_irqsave(&mpic_lock, flags);
if (is_ipi) {
reg = mpic_ipi_read(irq - mpic->ipi_offset) & MPIC_VECPRI_PRIORITY_MASK;
mpic_ipi_write(irq - mpic->ipi_offset,
reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
} else {
reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI)
& MPIC_VECPRI_PRIORITY_MASK;
mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI,
reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
}
spin_unlock_irqrestore(&mpic_lock, flags);
}
unsigned int mpic_irq_get_priority(unsigned int irq)
{
int is_ipi;
struct mpic *mpic = mpic_find(irq, &is_ipi);
unsigned long flags;
u32 reg;
spin_lock_irqsave(&mpic_lock, flags);
if (is_ipi)
reg = mpic_ipi_read(irq - mpic->ipi_offset);
else
reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI);
spin_unlock_irqrestore(&mpic_lock, flags);
return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT;
}
void mpic_setup_this_cpu(void)
{
#ifdef CONFIG_SMP
struct mpic *mpic = mpic_primary;
unsigned long flags;
u32 msk = 1 << hard_smp_processor_id();
unsigned int i;
BUG_ON(mpic == NULL);
DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
spin_lock_irqsave(&mpic_lock, flags);
/* let the mpic know we want intrs. default affinity is 0xffffffff
* until changed via /proc. That's how it's done on x86. If we want
* it differently, then we should make sure we also change the default
* values of irq_affinity in irq.c.
*/
if (distribute_irqs) {
for (i = 0; i < mpic->num_sources ; i++)
mpic_irq_write(i, MPIC_IRQ_DESTINATION,
mpic_irq_read(i, MPIC_IRQ_DESTINATION) | msk);
}
/* Set current processor priority to 0 */
mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);
spin_unlock_irqrestore(&mpic_lock, flags);
#endif /* CONFIG_SMP */
}
int mpic_cpu_get_priority(void)
{
struct mpic *mpic = mpic_primary;
return mpic_cpu_read(MPIC_CPU_CURRENT_TASK_PRI);
}
void mpic_cpu_set_priority(int prio)
{
struct mpic *mpic = mpic_primary;
prio &= MPIC_CPU_TASKPRI_MASK;
mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, prio);
}
/*
* XXX: someone who knows mpic should check this.
* do we need to eoi the ipi including for kexec cpu here (see xics comments)?
* or can we reset the mpic in the new kernel?
*/
void mpic_teardown_this_cpu(int secondary)
{
struct mpic *mpic = mpic_primary;
unsigned long flags;
u32 msk = 1 << hard_smp_processor_id();
unsigned int i;
BUG_ON(mpic == NULL);
DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
spin_lock_irqsave(&mpic_lock, flags);
/* let the mpic know we don't want intrs. */
for (i = 0; i < mpic->num_sources ; i++)
mpic_irq_write(i, MPIC_IRQ_DESTINATION,
mpic_irq_read(i, MPIC_IRQ_DESTINATION) & ~msk);
/* Set current processor priority to max */
mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);
spin_unlock_irqrestore(&mpic_lock, flags);
}
void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask)
{
struct mpic *mpic = mpic_primary;
BUG_ON(mpic == NULL);
DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
mpic_cpu_write(MPIC_CPU_IPI_DISPATCH_0 + ipi_no * 0x10,
mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
}
int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs)
{
u32 irq;
irq = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK;
DBG("%s: get_one_irq(): %d\n", mpic->name, irq);
if (mpic->cascade && irq == mpic->cascade_vec) {
DBG("%s: cascading ...\n", mpic->name);
irq = mpic->cascade(regs, mpic->cascade_data);
mpic_eoi(mpic);
return irq;
}
if (unlikely(irq == MPIC_VEC_SPURRIOUS))
return -1;
if (irq < MPIC_VEC_IPI_0)
return irq + mpic->irq_offset;
DBG("%s: ipi %d !\n", mpic->name, irq - MPIC_VEC_IPI_0);
return irq - MPIC_VEC_IPI_0 + mpic->ipi_offset;
}
int mpic_get_irq(struct pt_regs *regs)
{
struct mpic *mpic = mpic_primary;
BUG_ON(mpic == NULL);
return mpic_get_one_irq(mpic, regs);
}
#ifdef CONFIG_SMP
void mpic_request_ipis(void)
{
struct mpic *mpic = mpic_primary;
BUG_ON(mpic == NULL);
printk("requesting IPIs ... \n");
/* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
request_irq(mpic->ipi_offset+0, mpic_ipi_action, SA_INTERRUPT,
"IPI0 (call function)", mpic);
request_irq(mpic->ipi_offset+1, mpic_ipi_action, SA_INTERRUPT,
"IPI1 (reschedule)", mpic);
request_irq(mpic->ipi_offset+2, mpic_ipi_action, SA_INTERRUPT,
"IPI2 (unused)", mpic);
request_irq(mpic->ipi_offset+3, mpic_ipi_action, SA_INTERRUPT,
"IPI3 (debugger break)", mpic);
printk("IPIs requested... \n");
}
#endif /* CONFIG_SMP */


@ -1,6 +1,7 @@
#
# Makefile for the linux kernel.
#
ifneq ($(CONFIG_PPC_MERGE),y)
extra-$(CONFIG_PPC_STD_MMU) := head.o
extra-$(CONFIG_40x) := head_4xx.o
@ -37,3 +38,23 @@ endif
# These are here while we do the architecture merge
vecemu-y += ../../powerpc/kernel/vecemu.o
else
obj-y := entry.o irq.o idle.o time.o misc.o \
signal.o ptrace.o align.o \
syscalls.o setup.o \
cputable.o perfmon.o
obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
obj-$(CONFIG_POWER4) += cpu_setup_power4.o
obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o
obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-mapping.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_KGDB) += ppc-stub.o
obj-$(CONFIG_SMP) += smp.o smp-tbsync.o
obj-$(CONFIG_TAU) += temp.o
ifndef CONFIG_E200
obj-$(CONFIG_FSL_BOOKE) += perfmon_fsl_booke.o
endif
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
endif


@ -83,6 +83,8 @@ extern void pmac_init(unsigned long r3, unsigned long r4,
unsigned long r5, unsigned long r6, unsigned long r7);
extern void chrp_init(unsigned long r3, unsigned long r4,
unsigned long r5, unsigned long r6, unsigned long r7);
dev_t boot_dev;
#endif /* CONFIG_PPC_MULTIPLATFORM */
#ifdef CONFIG_MAGIC_SYSRQ
@ -405,11 +407,13 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
_machine = _MACH_prep;
}
#ifdef CONFIG_PPC_PREP
/* not much more to do here, if prep */
if (_machine == _MACH_prep) {
prep_init(r3, r4, r5, r6, r7);
return;
}
#endif
/* prom_init has already been called from __start */
if (boot_infos)
@ -480,12 +484,16 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
#endif /* CONFIG_ADB */
switch (_machine) {
#ifdef CONFIG_PPC_PMAC
case _MACH_Pmac:
pmac_init(r3, r4, r5, r6, r7);
break;
#endif
#ifdef CONFIG_PPC_CHRP
case _MACH_chrp:
chrp_init(r3, r4, r5, r6, r7);
break;
#endif
}
}


@ -89,9 +89,6 @@ extern void prep_tiger1_setup_pci(char *irq_edge_mask_lo, char *irq_edge_mask_hi
#define cached_21 (((char *)(ppc_cached_irq_mask))[3])
#define cached_A1 (((char *)(ppc_cached_irq_mask))[2])
/* for the mac fs */
dev_t boot_dev;
#ifdef CONFIG_SOUND_CS4232
long ppc_cs4232_dma, ppc_cs4232_dma2;
#endif


@ -5,6 +5,7 @@
CFLAGS_prom_init.o += -fPIC
CFLAGS_btext.o += -fPIC
ifneq ($(CONFIG_PPC_MERGE),y)
wdt-mpc8xx-$(CONFIG_8xx_WDT) += m8xx_wdt.o
obj-$(CONFIG_PPCBUG_NVRAM) += prep_nvram.o
@ -109,3 +110,16 @@ obj-$(CONFIG_PPC_MPC52xx) += mpc52xx_setup.o mpc52xx_pic.o \
ifeq ($(CONFIG_PPC_MPC52xx),y)
obj-$(CONFIG_PCI) += mpc52xx_pci.o
endif
else
# Stuff still needed by the merged powerpc sources
obj-$(CONFIG_PPCBUG_NVRAM) += prep_nvram.o
obj-$(CONFIG_PPC_OF) += prom_init.o prom.o of_device.o
obj-$(CONFIG_PPC_PMAC) += indirect_pci.o
obj-$(CONFIG_PPC_CHRP) += indirect_pci.o i8259.o
obj-$(CONFIG_PPC_PREP) += indirect_pci.o i8259.o todc_time.o
obj-$(CONFIG_BOOTX_TEXT) += btext.o
obj-$(CONFIG_MPC10X_BRIDGE) += mpc10x_common.o indirect_pci.o ppc_sys.o
endif


@ -405,7 +405,7 @@ static int __init via_pmu_start(void)
bright_req_2.complete = 1;
batt_req.complete = 1;
#ifdef CONFIG_PPC32
#if defined(CONFIG_PPC32) && !defined(CONFIG_PPC_MERGE)
if (pmu_kind == PMU_KEYLARGO_BASED)
openpic_set_irq_priority(vias->intrs[0].line,
OPENPIC_PRIORITY_DEFAULT + 1);


@ -629,12 +629,4 @@ void __init proc_misc_init(void)
if (entry)
entry->proc_fops = &proc_sysrq_trigger_operations;
#endif
#ifdef CONFIG_PPC32
{
extern struct file_operations ppc_htab_operations;
entry = create_proc_entry("ppc_htab", S_IRUGO|S_IWUSR, NULL);
if (entry)
entry->proc_fops = &ppc_htab_operations;
}
#endif
}


@ -0,0 +1,42 @@
#ifndef _POWERPC_KDEBUG_H
#define _POWERPC_KDEBUG_H 1
/* nearly identical to x86_64/i386 code */
#include <linux/notifier.h>
struct pt_regs;
struct die_args {
struct pt_regs *regs;
const char *str;
long err;
int trapnr;
int signr;
};
/*
Note - you should never unregister because that can race with NMIs.
If you really want to do it, first unregister, then synchronize_sched(),
then free.
*/
int register_die_notifier(struct notifier_block *nb);
extern struct notifier_block *powerpc_die_chain;
/* Grossly misnamed. */
enum die_val {
DIE_OOPS = 1,
DIE_IABR_MATCH,
DIE_DABR_MATCH,
DIE_BPT,
DIE_SSTEP,
DIE_PAGE_FAULT,
};
static inline int notify_die(enum die_val val, char *str, struct pt_regs *regs,
			     long err, int trap, int sig)
{
	struct die_args args = { .regs = regs, .str = str, .err = err,
				 .trapnr = trap, .signr = sig };
	return notifier_call_chain(&powerpc_die_chain, val, &args);
}
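/* Usage sketch (hypothetical consumer; the handler and notifier names are
* invented for illustration): a debugger or probes subsystem registers on
* powerpc_die_chain and filters on the die_val it cares about.
*
*	static int example_die_handler(struct notifier_block *nb,
*				       unsigned long val, void *data)
*	{
*		struct die_args *args = data;
*
*		if (val != DIE_BPT || args->regs == NULL)
*			return NOTIFY_DONE;
*		return NOTIFY_STOP;
*	}
*
*	static struct notifier_block example_die_nb = {
*		.notifier_call = example_die_handler,
*	};
*	...
*	register_die_notifier(&example_die_nb);
*/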
#endif


@ -0,0 +1,67 @@
#ifndef _ASM_KPROBES_H
#define _ASM_KPROBES_H
/*
* Kernel Probes (KProbes)
* include/asm-ppc64/kprobes.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2002, 2004
*
* 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
* Probes initial implementation ( includes suggestions from
* Rusty Russell).
* 2004-Nov Modified for PPC64 by Ananth N Mavinakayanahalli
* <ananth@in.ibm.com>
*/
#include <linux/types.h>
#include <linux/ptrace.h>
struct pt_regs;
typedef unsigned int kprobe_opcode_t;
#define BREAKPOINT_INSTRUCTION 0x7fe00008 /* trap */
#define MAX_INSN_SIZE 1
#define IS_TW(instr) (((instr) & 0xfc0007fe) == 0x7c000008)
#define IS_TD(instr) (((instr) & 0xfc0007fe) == 0x7c000088)
#define IS_TDI(instr) (((instr) & 0xfc000000) == 0x08000000)
#define IS_TWI(instr) (((instr) & 0xfc000000) == 0x0c000000)
#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)((func_descr_t *)pentry)
#define is_trap(instr) (IS_TW(instr) || IS_TD(instr) || \
IS_TWI(instr) || IS_TDI(instr))
#define ARCH_SUPPORTS_KRETPROBES
void kretprobe_trampoline(void);
/* Architecture specific copy of original instruction */
struct arch_specific_insn {
/* copy of original instruction */
kprobe_opcode_t *insn;
};
#ifdef CONFIG_KPROBES
extern int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
#else /* !CONFIG_KPROBES */
static inline int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
return 0;
}
#endif
#endif /* _ASM_KPROBES_H */

279
include/asm-powerpc/mpic.h Normal file

@ -0,0 +1,279 @@
#include <linux/irq.h>
/*
* Global registers
*/
#define MPIC_GREG_BASE 0x01000
#define MPIC_GREG_FEATURE_0 0x00000
#define MPIC_GREG_FEATURE_LAST_SRC_MASK 0x07ff0000
#define MPIC_GREG_FEATURE_LAST_SRC_SHIFT 16
#define MPIC_GREG_FEATURE_LAST_CPU_MASK 0x00001f00
#define MPIC_GREG_FEATURE_LAST_CPU_SHIFT 8
#define MPIC_GREG_FEATURE_VERSION_MASK 0xff
#define MPIC_GREG_FEATURE_1 0x00010
#define MPIC_GREG_GLOBAL_CONF_0 0x00020
#define MPIC_GREG_GCONF_RESET 0x80000000
#define MPIC_GREG_GCONF_8259_PTHROU_DIS 0x20000000
#define MPIC_GREG_GCONF_BASE_MASK 0x000fffff
#define MPIC_GREG_GLOBAL_CONF_1 0x00030
#define MPIC_GREG_VENDOR_0 0x00040
#define MPIC_GREG_VENDOR_1 0x00050
#define MPIC_GREG_VENDOR_2 0x00060
#define MPIC_GREG_VENDOR_3 0x00070
#define MPIC_GREG_VENDOR_ID 0x00080
#define MPIC_GREG_VENDOR_ID_STEPPING_MASK 0x00ff0000
#define MPIC_GREG_VENDOR_ID_STEPPING_SHIFT 16
#define MPIC_GREG_VENDOR_ID_DEVICE_ID_MASK 0x0000ff00
#define MPIC_GREG_VENDOR_ID_DEVICE_ID_SHIFT 8
#define MPIC_GREG_VENDOR_ID_VENDOR_ID_MASK 0x000000ff
#define MPIC_GREG_PROCESSOR_INIT 0x00090
#define MPIC_GREG_IPI_VECTOR_PRI_0 0x000a0
#define MPIC_GREG_IPI_VECTOR_PRI_1 0x000b0
#define MPIC_GREG_IPI_VECTOR_PRI_2 0x000c0
#define MPIC_GREG_IPI_VECTOR_PRI_3 0x000d0
#define MPIC_GREG_SPURIOUS 0x000e0
#define MPIC_GREG_TIMER_FREQ 0x000f0
/*
*
* Timer registers
*/
#define MPIC_TIMER_BASE 0x01100
#define MPIC_TIMER_STRIDE 0x40
#define MPIC_TIMER_CURRENT_CNT 0x00000
#define MPIC_TIMER_BASE_CNT 0x00010
#define MPIC_TIMER_VECTOR_PRI 0x00020
#define MPIC_TIMER_DESTINATION 0x00030
/*
* Per-Processor registers
*/
#define MPIC_CPU_THISBASE 0x00000
#define MPIC_CPU_BASE 0x20000
#define MPIC_CPU_STRIDE 0x01000
#define MPIC_CPU_IPI_DISPATCH_0 0x00040
#define MPIC_CPU_IPI_DISPATCH_1 0x00050
#define MPIC_CPU_IPI_DISPATCH_2 0x00060
#define MPIC_CPU_IPI_DISPATCH_3 0x00070
#define MPIC_CPU_CURRENT_TASK_PRI 0x00080
#define MPIC_CPU_TASKPRI_MASK 0x0000000f
#define MPIC_CPU_WHOAMI 0x00090
#define MPIC_CPU_WHOAMI_MASK 0x0000001f
#define MPIC_CPU_INTACK 0x000a0
#define MPIC_CPU_EOI 0x000b0
/*
* Per-source registers
*/
#define MPIC_IRQ_BASE 0x10000
#define MPIC_IRQ_STRIDE 0x00020
#define MPIC_IRQ_VECTOR_PRI 0x00000
#define MPIC_VECPRI_MASK 0x80000000
#define MPIC_VECPRI_ACTIVITY 0x40000000 /* Read Only */
#define MPIC_VECPRI_PRIORITY_MASK 0x000f0000
#define MPIC_VECPRI_PRIORITY_SHIFT 16
#define MPIC_VECPRI_VECTOR_MASK 0x000007ff
#define MPIC_VECPRI_POLARITY_POSITIVE 0x00800000
#define MPIC_VECPRI_POLARITY_NEGATIVE 0x00000000
#define MPIC_VECPRI_POLARITY_MASK 0x00800000
#define MPIC_VECPRI_SENSE_LEVEL 0x00400000
#define MPIC_VECPRI_SENSE_EDGE 0x00000000
#define MPIC_VECPRI_SENSE_MASK 0x00400000
#define MPIC_IRQ_DESTINATION 0x00010
#define MPIC_MAX_IRQ_SOURCES 2048
#define MPIC_MAX_CPUS 32
#define MPIC_MAX_ISU 32
/*
* Special vector numbers (internal use only)
*/
#define MPIC_VEC_SPURRIOUS 255
#define MPIC_VEC_IPI_3 254
#define MPIC_VEC_IPI_2 253
#define MPIC_VEC_IPI_1 252
#define MPIC_VEC_IPI_0 251
/* unused */
#define MPIC_VEC_TIMER_3 250
#define MPIC_VEC_TIMER_2 249
#define MPIC_VEC_TIMER_1 248
#define MPIC_VEC_TIMER_0 247
/* Type definition of the cascade handler */
typedef int (*mpic_cascade_t)(struct pt_regs *regs, void *data);
#ifdef CONFIG_MPIC_BROKEN_U3
/* Fixup table entry */
struct mpic_irq_fixup
{
u8 __iomem *base;
unsigned int irq;
};
#endif /* CONFIG_MPIC_BROKEN_U3 */
/* The instance data of a given MPIC */
struct mpic
{
/* The "linux" controller struct */
hw_irq_controller hc_irq;
#ifdef CONFIG_SMP
hw_irq_controller hc_ipi;
#endif
const char *name;
/* Flags */
unsigned int flags;
/* How many irq sources in a given ISU */
unsigned int isu_size;
unsigned int isu_shift;
unsigned int isu_mask;
/* Offset of irq vector numbers */
unsigned int irq_offset;
unsigned int irq_count;
/* Offset of ipi vector numbers */
unsigned int ipi_offset;
/* Number of sources */
unsigned int num_sources;
/* Number of CPUs */
unsigned int num_cpus;
/* cascade handler */
mpic_cascade_t cascade;
void *cascade_data;
unsigned int cascade_vec;
/* senses array */
unsigned char *senses;
unsigned int senses_count;
#ifdef CONFIG_MPIC_BROKEN_U3
/* The fixup table */
struct mpic_irq_fixup *fixups;
spinlock_t fixup_lock;
#endif
/* The various ioremap'ed bases */
volatile u32 __iomem *gregs;
volatile u32 __iomem *tmregs;
volatile u32 __iomem *cpuregs[MPIC_MAX_CPUS];
volatile u32 __iomem *isus[MPIC_MAX_ISU];
/* link */
struct mpic *next;
};
/* This is the primary controller, only that one has IPIs and
* has affinity control. A non-primary MPIC always uses CPU0
* registers only
*/
#define MPIC_PRIMARY 0x00000001
/* Set this for a big-endian MPIC */
#define MPIC_BIG_ENDIAN 0x00000002
/* Broken U3 MPIC */
#define MPIC_BROKEN_U3 0x00000004
/* Broken IPI registers (autodetected) */
#define MPIC_BROKEN_IPI 0x00000008
/* MPIC wants a reset */
#define MPIC_WANTS_RESET 0x00000010
/* Allocate the controller structure and setup the linux irq descs
* for the range of interrupts passed in. No HW initialization is
* actually performed (see the bring-up sketch after mpic_init() below).
*
* @phys_addr: physical base address of the MPIC
* @flags: flags, see constants above
* @isu_size: number of interrupts in an ISU. Use 0 to use a
* standard ISU-less setup (aka powermac)
* @irq_offset: first irq number to assign to this mpic
* @irq_count: number of irq sources to use with this mpic. Pass 0
* to match the number of sources
* @ipi_offset: first irq number to assign to this mpic IPI sources,
* used only on primary mpic
* @senses: array of sense values
* @senses_num: number of entries in the array
*
* Note about the sense array. If none is passed, all interrupts are
* setup to be level negative unless MPIC_BROKEN_U3 is set in which
* case they are edge positive (and the array is ignored anyway).
* The values in the array start at the first source of the MPIC,
* that is senses[0] corresponds to linux irq "irq_offset".
*/
extern struct mpic *mpic_alloc(unsigned long phys_addr,
unsigned int flags,
unsigned int isu_size,
unsigned int irq_offset,
unsigned int irq_count,
unsigned int ipi_offset,
unsigned char *senses,
unsigned int senses_num,
const char *name);
/* Assign ISUs, to call before mpic_init()
*
* @mpic: controller structure as returned by mpic_alloc()
* @isu_num: ISU number
* @phys_addr: physical address of the ISU
*/
extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
unsigned long phys_addr);
/* Initialize the controller. After this has been called, none of the above
* should be called again for this mpic
*/
extern void mpic_init(struct mpic *mpic);
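/* Typical bring-up sequence (an illustrative sketch only; the physical
* address, ISU layout and vector numbers below are invented for the example,
* not taken from any real board):
*
*	struct mpic *mpic;
*
*	mpic = mpic_alloc(0xf8040000, MPIC_PRIMARY | MPIC_BIG_ENDIAN |
*			  MPIC_WANTS_RESET, 16, 0, 0, 124, NULL, 0,
*			  " EXAMPLE-PIC  ");
*	mpic_assign_isu(mpic, 0, 0xf8040000 + MPIC_IRQ_BASE);
*	mpic_init(mpic);
*
* After mpic_init() the runtime calls declared below (priorities, IPIs,
* mpic_get_irq()) may be used.
*/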
/* Setup a cascade. Currently, only one cascade is supported this
* way, though you can always do a normal request_irq() and add
* other cascades this way. You should call this _after_ having
* added all the ISUs
*
* @irq_no: "linux" irq number of the cascade (that is offset'ed vector)
* @handler: cascade handler function
*/
extern void mpic_setup_cascade(unsigned int irq_no, mpic_cascade_t handler,
void *data);
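/* Example (illustrative sketch; example_cascade_get_irq() is a hypothetical
* helper standing in for whatever fetches the vector from the secondary
* controller, and irq 20 is an arbitrary cascade input):
*
*	static int example_cascade(struct pt_regs *regs, void *data)
*	{
*		return example_cascade_get_irq(regs);
*	}
*	...
*	mpic_setup_cascade(20, example_cascade, NULL);
*/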
/*
* All of the following functions must only be used after the
* ISUs have been assigned and the controller fully initialized
* with mpic_init()
*/
/* Change/Read the priority of an interrupt. Default is 8 for irqs and
* 10 for IPIs. You can call this on both IPIs and IRQ numbers, but for an
* IPI you must pass the offset number (the linux irq number mapped to the IPI)
*/
extern void mpic_irq_set_priority(unsigned int irq, unsigned int pri);
extern unsigned int mpic_irq_get_priority(unsigned int irq);
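/* Example (illustrative only; irq 58 is an arbitrary source in the mpic's
* range): bump a device interrupt above the default priority of 8 with
*
*	mpic_irq_set_priority(58, 12);
*
* after which mpic_irq_get_priority(58) returns 12.
*/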
/* Setup a non-boot CPU */
extern void mpic_setup_this_cpu(void);
/* Clean up for kexec (or cpu offline or ...) */
extern void mpic_teardown_this_cpu(int secondary);
/* Get the current cpu priority for this cpu (0..15) */
extern int mpic_cpu_get_priority(void);
/* Set the current cpu priority for this cpu */
extern void mpic_cpu_set_priority(int prio);
/* Request IPIs on primary mpic */
extern void mpic_request_ipis(void);
/* Send an IPI (non-offset number, 0..3) */
extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask);
/* Fetch interrupt from a given mpic */
extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs);
/* This one gets to the primary mpic */
extern int mpic_get_irq(struct pt_regs *regs);
/* global mpic for pSeries */
extern struct mpic *pSeries_mpic;

446
include/asm-powerpc/reg.h Normal file

@ -0,0 +1,446 @@
/*
* Contains the definition of registers common to all PowerPC variants.
* If a register definition has been changed in a different PowerPC
* variant, we will wrap it in #ifndef XXX ... #endif, and have the
* number used in the Programming Environments Manual For 32-Bit
* Implementations of the PowerPC Architecture (a.k.a. Green Book) here.
*/
#ifdef __KERNEL__
#ifndef __ASM_PPC_REGS_H__
#define __ASM_PPC_REGS_H__
#include <linux/stringify.h>
/* Pickup Book E specific registers. */
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
#include <asm/reg_booke.h>
#endif
/* Machine State Register (MSR) Fields */
#define MSR_SF (1<<63)
#define MSR_ISF (1<<61)
#define MSR_VEC (1<<25) /* Enable AltiVec */
#define MSR_POW (1<<18) /* Enable Power Management */
#define MSR_WE (1<<18) /* Wait State Enable */
#define MSR_TGPR (1<<17) /* TLB Update registers in use */
#define MSR_CE (1<<17) /* Critical Interrupt Enable */
#define MSR_ILE (1<<16) /* Interrupt Little Endian */
#define MSR_EE (1<<15) /* External Interrupt Enable */
#define MSR_PR (1<<14) /* Problem State / Privilege Level */
#define MSR_FP (1<<13) /* Floating Point enable */
#define MSR_ME (1<<12) /* Machine Check Enable */
#define MSR_FE0 (1<<11) /* Floating Exception mode 0 */
#define MSR_SE (1<<10) /* Single Step */
#define MSR_BE (1<<9) /* Branch Trace */
#define MSR_DE (1<<9) /* Debug Exception Enable */
#define MSR_FE1 (1<<8) /* Floating Exception mode 1 */
#define MSR_IP (1<<6) /* Exception prefix 0x000/0xFFF */
#define MSR_IR (1<<5) /* Instruction Relocate */
#define MSR_DR (1<<4) /* Data Relocate */
#define MSR_PE (1<<3) /* Protection Enable */
#define MSR_PX (1<<2) /* Protection Exclusive Mode */
#define MSR_RI (1<<1) /* Recoverable Exception */
#define MSR_LE (1<<0) /* Little Endian */
/* Default MSR for kernel mode. */
#ifdef CONFIG_APUS_FAST_EXCEPT
#define MSR_KERNEL (MSR_ME|MSR_IP|MSR_RI|MSR_IR|MSR_DR)
#endif
#ifndef MSR_KERNEL
#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR)
#endif
#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
/* Floating Point Status and Control Register (FPSCR) Fields */
#define FPSCR_FX 0x80000000 /* FPU exception summary */
#define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */
#define FPSCR_VX 0x20000000 /* Invalid operation summary */
#define FPSCR_OX 0x10000000 /* Overflow exception summary */
#define FPSCR_UX 0x08000000 /* Underflow exception summary */
#define FPSCR_ZX 0x04000000 /* Zero-divide exception summary */
#define FPSCR_XX 0x02000000 /* Inexact exception summary */
#define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */
#define FPSCR_VXISI 0x00800000 /* Invalid op for Inv - Inv */
#define FPSCR_VXIDI 0x00400000 /* Invalid op for Inv / Inv */
#define FPSCR_VXZDZ 0x00200000 /* Invalid op for Zero / Zero */
#define FPSCR_VXIMZ 0x00100000 /* Invalid op for Inv * Zero */
#define FPSCR_VXVC 0x00080000 /* Invalid op for Compare */
#define FPSCR_FR 0x00040000 /* Fraction rounded */
#define FPSCR_FI 0x00020000 /* Fraction inexact */
#define FPSCR_FPRF 0x0001f000 /* FPU Result Flags */
#define FPSCR_FPCC 0x0000f000 /* FPU Condition Codes */
#define FPSCR_VXSOFT 0x00000400 /* Invalid op for software request */
#define FPSCR_VXSQRT 0x00000200 /* Invalid op for square root */
#define FPSCR_VXCVI 0x00000100 /* Invalid op for integer convert */
#define FPSCR_VE 0x00000080 /* Invalid op exception enable */
#define FPSCR_OE 0x00000040 /* IEEE overflow exception enable */
#define FPSCR_UE 0x00000020 /* IEEE underflow exception enable */
#define FPSCR_ZE 0x00000010 /* IEEE zero divide exception enable */
#define FPSCR_XE 0x00000008 /* FP inexact exception enable */
#define FPSCR_NI 0x00000004 /* FPU non IEEE-Mode */
#define FPSCR_RN 0x00000003 /* FPU rounding control */
/* Special Purpose Registers (SPRNs)*/
#define SPRN_CTR 0x009 /* Count Register */
#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
#define DABR_TRANSLATION (1UL << 2)
#define SPRN_DAR 0x013 /* Data Address Register */
#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
#define DSISR_NOHPTE 0x40000000 /* no translation found */
#define DSISR_PROTFAULT 0x08000000 /* protection fault */
#define DSISR_ISSTORE 0x02000000 /* access was a store */
#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
#define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */
#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */
#define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */
#define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */
#define SPRN_DBAT0L 0x219 /* Data BAT 0 Lower Register */
#define SPRN_DBAT0U 0x218 /* Data BAT 0 Upper Register */
#define SPRN_DBAT1L 0x21B /* Data BAT 1 Lower Register */
#define SPRN_DBAT1U 0x21A /* Data BAT 1 Upper Register */
#define SPRN_DBAT2L 0x21D /* Data BAT 2 Lower Register */
#define SPRN_DBAT2U 0x21C /* Data BAT 2 Upper Register */
#define SPRN_DBAT3L 0x21F /* Data BAT 3 Lower Register */
#define SPRN_DBAT3U 0x21E /* Data BAT 3 Upper Register */
#define SPRN_DBAT4L 0x239 /* Data BAT 4 Lower Register */
#define SPRN_DBAT4U 0x238 /* Data BAT 4 Upper Register */
#define SPRN_DBAT5L 0x23B /* Data BAT 5 Lower Register */
#define SPRN_DBAT5U 0x23A /* Data BAT 5 Upper Register */
#define SPRN_DBAT6L 0x23D /* Data BAT 6 Lower Register */
#define SPRN_DBAT6U 0x23C /* Data BAT 6 Upper Register */
#define SPRN_DBAT7L 0x23F /* Data BAT 7 Lower Register */
#define SPRN_DBAT7U 0x23E /* Data BAT 7 Upper Register */
#define SPRN_DEC 0x016 /* Decrement Register */
#define SPRN_DER 0x095 /* Debug Enable Register */
#define DER_RSTE 0x40000000 /* Reset Interrupt */
#define DER_CHSTPE 0x20000000 /* Check Stop */
#define DER_MCIE 0x10000000 /* Machine Check Interrupt */
#define DER_EXTIE 0x02000000 /* External Interrupt */
#define DER_ALIE 0x01000000 /* Alignment Interrupt */
#define DER_PRIE 0x00800000 /* Program Interrupt */
#define DER_FPUVIE 0x00400000 /* FP Unavailable Interrupt */
#define DER_DECIE 0x00200000 /* Decrementer Interrupt */
#define DER_SYSIE 0x00040000 /* System Call Interrupt */
#define DER_TRE 0x00020000 /* Trace Interrupt */
#define DER_SEIE 0x00004000 /* FP SW Emulation Interrupt */
#define DER_ITLBMSE 0x00002000 /* Imp. Spec. Instruction TLB Miss */
#define DER_ITLBERE 0x00001000 /* Imp. Spec. Instruction TLB Error */
#define DER_DTLBMSE 0x00000800 /* Imp. Spec. Data TLB Miss */
#define DER_DTLBERE 0x00000400 /* Imp. Spec. Data TLB Error */
#define DER_LBRKE 0x00000008 /* Load/Store Breakpoint Interrupt */
#define DER_IBRKE 0x00000004 /* Instruction Breakpoint Interrupt */
#define DER_EBRKE 0x00000002 /* External Breakpoint Interrupt */
#define DER_DPIE 0x00000001 /* Dev. Port Nonmaskable Request */
#define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */
#define SPRN_EAR 0x11A /* External Address Register */
#define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */
#define SPRN_HASH2 0x3D3 /* Secondary Hash Address Register */
#define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */
#define HID0_EMCP (1<<31) /* Enable Machine Check pin */
#define HID0_EBA (1<<29) /* Enable Bus Address Parity */
#define HID0_EBD (1<<28) /* Enable Bus Data Parity */
#define HID0_SBCLK (1<<27)
#define HID0_EICE (1<<26)
#define HID0_TBEN (1<<26) /* Timebase enable - 745x */
#define HID0_ECLK (1<<25)
#define HID0_PAR (1<<24)
#define HID0_STEN (1<<24) /* Software table search enable - 745x */
#define HID0_HIGH_BAT (1<<23) /* Enable high BATs - 7455 */
#define HID0_DOZE (1<<23)
#define HID0_NAP (1<<22)
#define HID0_SLEEP (1<<21)
#define HID0_DPM (1<<20)
#define HID0_BHTCLR (1<<18) /* Clear branch history table - 7450 */
#define HID0_XAEN (1<<17) /* Extended addressing enable - 7450 */
#define HID0_NHR (1<<16) /* Not hard reset (software bit-7450)*/
#define HID0_ICE (1<<15) /* Instruction Cache Enable */
#define HID0_DCE (1<<14) /* Data Cache Enable */
#define HID0_ILOCK (1<<13) /* Instruction Cache Lock */
#define HID0_DLOCK (1<<12) /* Data Cache Lock */
#define HID0_ICFI (1<<11) /* Instr. Cache Flash Invalidate */
#define HID0_DCI (1<<10) /* Data Cache Invalidate */
#define HID0_SPD (1<<9) /* Speculative disable */
#define HID0_DAPUEN (1<<8) /* Debug APU enable */
#define HID0_SGE (1<<7) /* Store Gathering Enable */
#define HID0_SIED (1<<7) /* Serial Instr. Execution [Disable] */
#define HID0_DFCA (1<<6) /* Data Cache Flush Assist */
#define HID0_LRSTK (1<<4) /* Link register stack - 745x */
#define HID0_BTIC (1<<5) /* Branch Target Instr Cache Enable */
#define HID0_ABE (1<<3) /* Address Broadcast Enable */
#define HID0_FOLD (1<<3) /* Branch Folding enable - 745x */
#define HID0_BHTE (1<<2) /* Branch History Table Enable */
#define HID0_BTCD (1<<1) /* Branch target cache disable */
#define HID0_NOPDST (1<<1) /* No-op dst, dstt, etc. instr. */
#define HID0_NOPTI (1<<0) /* No-op dcbt and dcbst instr. */
#define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */
#define HID1_EMCP (1<<31) /* 7450 Machine Check Pin Enable */
#define HID1_DFS (1<<22) /* 7447A Dynamic Frequency Scaling */
#define HID1_PC0 (1<<16) /* 7450 PLL_CFG[0] */
#define HID1_PC1 (1<<15) /* 7450 PLL_CFG[1] */
#define HID1_PC2 (1<<14) /* 7450 PLL_CFG[2] */
#define HID1_PC3 (1<<13) /* 7450 PLL_CFG[3] */
#define HID1_SYNCBE (1<<11) /* 7450 ABE for sync, eieio */
#define HID1_ABE (1<<10) /* 7450 Address Broadcast Enable */
#define HID1_PS (1<<16) /* 750FX PLL selection */
#define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */
#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
#define SPRN_HID4 0x3F4 /* 970 HID4 */
#define SPRN_HID5 0x3F6 /* 970 HID5 */
#if !defined(SPRN_IAC1) && !defined(SPRN_IAC2)
#define SPRN_IAC1 0x3F4 /* Instruction Address Compare 1 */
#define SPRN_IAC2 0x3F5 /* Instruction Address Compare 2 */
#endif
#define SPRN_IBAT0L 0x211 /* Instruction BAT 0 Lower Register */
#define SPRN_IBAT0U 0x210 /* Instruction BAT 0 Upper Register */
#define SPRN_IBAT1L 0x213 /* Instruction BAT 1 Lower Register */
#define SPRN_IBAT1U 0x212 /* Instruction BAT 1 Upper Register */
#define SPRN_IBAT2L 0x215 /* Instruction BAT 2 Lower Register */
#define SPRN_IBAT2U 0x214 /* Instruction BAT 2 Upper Register */
#define SPRN_IBAT3L 0x217 /* Instruction BAT 3 Lower Register */
#define SPRN_IBAT3U 0x216 /* Instruction BAT 3 Upper Register */
#define SPRN_IBAT4L 0x231 /* Instruction BAT 4 Lower Register */
#define SPRN_IBAT4U 0x230 /* Instruction BAT 4 Upper Register */
#define SPRN_IBAT5L 0x233 /* Instruction BAT 5 Lower Register */
#define SPRN_IBAT5U 0x232 /* Instruction BAT 5 Upper Register */
#define SPRN_IBAT6L 0x235 /* Instruction BAT 6 Lower Register */
#define SPRN_IBAT6U 0x234 /* Instruction BAT 6 Upper Register */
#define SPRN_IBAT7L 0x237 /* Instruction BAT 7 Lower Register */
#define SPRN_IBAT7U 0x236 /* Instruction BAT 7 Upper Register */
#define SPRN_ICMP 0x3D5 /* Instruction TLB Compare Register */
#define SPRN_ICTC 0x3FB /* Instruction Cache Throttling Control Reg */
#define SPRN_ICTRL 0x3F3 /* 1011 7450 icache and interrupt ctrl */
#define ICTRL_EICE 0x08000000 /* enable icache parity errs */
#define ICTRL_EDC 0x04000000 /* enable dcache parity errs */
#define ICTRL_EICP 0x00000100 /* enable icache par. check */
#define SPRN_IMISS 0x3D4 /* Instruction TLB Miss Register */
#define SPRN_IMMR 0x27E /* Internal Memory Map Register */
#define SPRN_L2CR 0x3F9 /* Level 2 Cache Control Register */
#define SPRN_L2CR2 0x3f8
#define L2CR_L2E 0x80000000 /* L2 enable */
#define L2CR_L2PE 0x40000000 /* L2 parity enable */
#define L2CR_L2SIZ_MASK 0x30000000 /* L2 size mask */
#define L2CR_L2SIZ_256KB 0x10000000 /* L2 size 256KB */
#define L2CR_L2SIZ_512KB 0x20000000 /* L2 size 512KB */
#define L2CR_L2SIZ_1MB 0x30000000 /* L2 size 1MB */
#define L2CR_L2CLK_MASK 0x0e000000 /* L2 clock mask */
#define L2CR_L2CLK_DISABLED 0x00000000 /* L2 clock disabled */
#define L2CR_L2CLK_DIV1 0x02000000 /* L2 clock / 1 */
#define L2CR_L2CLK_DIV1_5 0x04000000 /* L2 clock / 1.5 */
#define L2CR_L2CLK_DIV2 0x08000000 /* L2 clock / 2 */
#define L2CR_L2CLK_DIV2_5 0x0a000000 /* L2 clock / 2.5 */
#define L2CR_L2CLK_DIV3 0x0c000000 /* L2 clock / 3 */
#define L2CR_L2RAM_MASK 0x01800000 /* L2 RAM type mask */
#define L2CR_L2RAM_FLOW 0x00000000 /* L2 RAM flow through */
#define L2CR_L2RAM_PIPE 0x01000000 /* L2 RAM pipelined */
#define L2CR_L2RAM_PIPE_LW 0x01800000 /* L2 RAM pipelined latewr */
#define L2CR_L2DO 0x00400000 /* L2 data only */
#define L2CR_L2I 0x00200000 /* L2 global invalidate */
#define L2CR_L2CTL 0x00100000 /* L2 RAM control */
#define L2CR_L2WT 0x00080000 /* L2 write-through */
#define L2CR_L2TS 0x00040000 /* L2 test support */
#define L2CR_L2OH_MASK 0x00030000 /* L2 output hold mask */
#define L2CR_L2OH_0_5 0x00000000 /* L2 output hold 0.5 ns */
#define L2CR_L2OH_1_0 0x00010000 /* L2 output hold 1.0 ns */
#define L2CR_L2SL 0x00008000 /* L2 DLL slow */
#define L2CR_L2DF 0x00004000 /* L2 differential clock */
#define L2CR_L2BYP 0x00002000 /* L2 DLL bypass */
#define L2CR_L2IP 0x00000001 /* L2 GI in progress */
#define L2CR_L2IO_745x 0x00100000 /* L2 instr. only (745x) */
#define L2CR_L2DO_745x 0x00010000 /* L2 data only (745x) */
#define L2CR_L2REP_745x 0x00001000 /* L2 repl. algorithm (745x) */
#define L2CR_L2HWF_745x 0x00000800 /* L2 hardware flush (745x) */
#define SPRN_L3CR 0x3FA /* Level 3 Cache Control Register */
#define L3CR_L3E 0x80000000 /* L3 enable */
#define L3CR_L3PE 0x40000000 /* L3 data parity enable */
#define L3CR_L3APE 0x20000000 /* L3 addr parity enable */
#define L3CR_L3SIZ 0x10000000 /* L3 size */
#define L3CR_L3CLKEN 0x08000000 /* L3 clock enable */
#define L3CR_L3RES 0x04000000 /* L3 special reserved bit */
#define L3CR_L3CLKDIV 0x03800000 /* L3 clock divisor */
#define L3CR_L3IO 0x00400000 /* L3 instruction only */
#define L3CR_L3SPO 0x00040000 /* L3 sample point override */
#define L3CR_L3CKSP 0x00030000 /* L3 clock sample point */
#define L3CR_L3PSP 0x0000e000 /* L3 P-clock sample point */
#define L3CR_L3REP 0x00001000 /* L3 replacement algorithm */
#define L3CR_L3HWF 0x00000800 /* L3 hardware flush */
#define L3CR_L3I 0x00000400 /* L3 global invalidate */
#define L3CR_L3RT 0x00000300 /* L3 SRAM type */
#define L3CR_L3NIRCA 0x00000080 /* L3 non-integer ratio clock adj. */
#define L3CR_L3DO 0x00000040 /* L3 data only mode */
#define L3CR_PMEN 0x00000004 /* L3 private memory enable */
#define L3CR_PMSIZ 0x00000001 /* L3 private memory size */
#define SPRN_MSSCR0 0x3f6 /* Memory Subsystem Control Register 0 */
#define SPRN_MSSSR0 0x3f7 /* Memory Subsystem Status Register 0 */
#define SPRN_LDSTCR 0x3f8 /* Load/Store control register */
#define SPRN_LDSTDB 0x3f4 /* */
#define SPRN_LR 0x008 /* Link Register */
#define SPRN_MMCR0 0x3B8 /* Monitor Mode Control Register 0 */
#define SPRN_MMCR1 0x3BC /* Monitor Mode Control Register 1 */
#ifndef SPRN_PIR
#define SPRN_PIR 0x3FF /* Processor Identification Register */
#endif
#define SPRN_PMC1 0x3B9 /* Performance Counter Register 1 */
#define SPRN_PMC2 0x3BA /* Performance Counter Register 2 */
#define SPRN_PMC3 0x3BD /* Performance Counter Register 3 */
#define SPRN_PMC4 0x3BE /* Performance Counter Register 4 */
#define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */
#define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */
#define SPRN_PVR 0x11F /* Processor Version Register */
#define SPRN_RPA 0x3D6 /* Required Physical Address Register */
#define SPRN_SDA 0x3BF /* Sampled Data Address Register */
#define SPRN_SDR1 0x019 /* MMU Hash Base Register */
#define SPRN_SIA 0x3BB /* Sampled Instruction Address Register */
#define SPRN_SPRG0 0x110 /* Special Purpose Register General 0 */
#define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */
#define SPRN_SPRG2 0x112 /* Special Purpose Register General 2 */
#define SPRN_SPRG3 0x113 /* Special Purpose Register General 3 */
#define SPRN_SPRG4 0x114 /* Special Purpose Register General 4 */
#define SPRN_SPRG5 0x115 /* Special Purpose Register General 5 */
#define SPRN_SPRG6 0x116 /* Special Purpose Register General 6 */
#define SPRN_SPRG7 0x117 /* Special Purpose Register General 7 */
#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */
#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
#ifndef SPRN_SVR
#define SPRN_SVR 0x11E /* System Version Register */
#endif
#define SPRN_THRM1 0x3FC /* Thermal Management Register 1 */
/* These bits were originally defined with inverted (big-endian) bit numbering, which is confusing */
#define THRM1_TIN (1 << 31)
#define THRM1_TIV (1 << 30)
#define THRM1_THRES(x) (((x) & 0x7f) << 23)
#define THRM3_SITV(x) (((x) & 0x3fff) << 1)
#define THRM1_TID (1<<2)
#define THRM1_TIE (1<<1)
#define THRM1_V (1<<0)
#define SPRN_THRM2 0x3FD /* Thermal Management Register 2 */
#define SPRN_THRM3 0x3FE /* Thermal Management Register 3 */
#define THRM3_E (1<<0)
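/*
 * Illustrative sketch, not part of the original header: one way the THRM
 * bits above could be combined to arm the thermal assist unit.  mtspr() is
 * defined later in this file, the function name and the threshold/interval
 * values are invented for the example.
 */
#if 0	/* example only, not built */
static inline void example_arm_tau(void)
{
	/* set a threshold, pick a direction (TID), enable the interrupt (TIE), mark valid (V) */
	mtspr(SPRN_THRM1, THRM1_THRES(60) | THRM1_TID | THRM1_TIE | THRM1_V);
	/* program a sample interval and enable the comparators */
	mtspr(SPRN_THRM3, THRM3_SITV(0x1fff) | THRM3_E);
}
#endif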
#define SPRN_TLBMISS 0x3D4 /* 980 7450 TLB Miss Register */
#define SPRN_UMMCR0 0x3A8 /* User Monitor Mode Control Register 0 */
#define SPRN_UMMCR1 0x3AC /* User Monitor Mode Control Register 1 */
#define SPRN_UPMC1 0x3A9 /* User Performance Counter Register 1 */
#define SPRN_UPMC2 0x3AA /* User Performance Counter Register 2 */
#define SPRN_UPMC3 0x3AD /* User Performance Counter Register 3 */
#define SPRN_UPMC4 0x3AE /* User Performance Counter Register 4 */
#define SPRN_USIA 0x3AB /* User Sampled Instruction Address Register */
#define SPRN_VRSAVE 0x100 /* Vector Register Save Register */
#define SPRN_XER 0x001 /* Fixed Point Exception Register */
/* Bit definitions for MMCR0 and PMC1 / PMC2. */
#define MMCR0_PMC1_CYCLES (1 << 7)
#define MMCR0_PMC1_ICACHEMISS (5 << 7)
#define MMCR0_PMC1_DTLB (6 << 7)
#define MMCR0_PMC2_DCACHEMISS 0x6
#define MMCR0_PMC2_CYCLES 0x1
#define MMCR0_PMC2_ITLB 0x7
#define MMCR0_PMC2_LOADMISSTIME 0x5
#define MMCR0_PMXE (1 << 26)
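/*
 * Illustrative sketch, not part of the original header: using the MMCR0/PMC
 * encodings above to make PMC1 count cycles and to enable performance
 * monitor exceptions.  mtspr() is defined later in this file; the function
 * name is invented for the example.
 */
#if 0	/* example only, not built */
static inline void example_pmc1_count_cycles(void)
{
	/* select the "cycles" event for PMC1 and allow PM exceptions */
	mtspr(SPRN_MMCR0, MMCR0_PMC1_CYCLES | MMCR0_PMXE);
	/* start the counter from zero */
	mtspr(SPRN_PMC1, 0);
}
#endif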
/* Processor Version Register (PVR) field extraction */
#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revision field */
/*
* IBM has further subdivided the standard PowerPC 16-bit version and
* revision subfields of the PVR for the PowerPC 403s into the following:
*/
#define PVR_FAM(pvr) (((pvr) >> 20) & 0xFFF) /* Family field */
#define PVR_MEM(pvr) (((pvr) >> 16) & 0xF) /* Member field */
#define PVR_CORE(pvr) (((pvr) >> 12) & 0xF) /* Core field */
#define PVR_CFG(pvr) (((pvr) >> 8) & 0xF) /* Configuration field */
#define PVR_MAJ(pvr) (((pvr) >> 4) & 0xF) /* Major revision field */
#define PVR_MIN(pvr) (((pvr) >> 0) & 0xF) /* Minor revision field */
/* Processor Version Numbers */
#define PVR_403GA 0x00200000
#define PVR_403GB 0x00200100
#define PVR_403GC 0x00200200
#define PVR_403GCX 0x00201400
#define PVR_405GP 0x40110000
#define PVR_STB03XXX 0x40310000
#define PVR_NP405H 0x41410000
#define PVR_NP405L 0x41610000
#define PVR_601 0x00010000
#define PVR_602 0x00050000
#define PVR_603 0x00030000
#define PVR_603e 0x00060000
#define PVR_603ev 0x00070000
#define PVR_603r 0x00071000
#define PVR_604 0x00040000
#define PVR_604e 0x00090000
#define PVR_604r 0x000A0000
#define PVR_620 0x00140000
#define PVR_740 0x00080000
#define PVR_750 PVR_740
#define PVR_740P 0x10080000
#define PVR_750P PVR_740P
#define PVR_7400 0x000C0000
#define PVR_7410 0x800C0000
#define PVR_7450 0x80000000
#define PVR_8540 0x80200000
#define PVR_8560 0x80200000
/*
* For the 8xx processors, all of them report the same PVR family for
* the PowerPC core. The various versions of these processors must be
* differentiated by the version number in the Communication Processor
* Module (CPM).
*/
#define PVR_821 0x00500000
#define PVR_823 PVR_821
#define PVR_850 PVR_821
#define PVR_860 PVR_821
#define PVR_8240 0x00810100
#define PVR_8245 0x80811014
#define PVR_8260 PVR_8240
#if 0
/* Segment Registers */
#define SR0 0
#define SR1 1
#define SR2 2
#define SR3 3
#define SR4 4
#define SR5 5
#define SR6 6
#define SR7 7
#define SR8 8
#define SR9 9
#define SR10 10
#define SR11 11
#define SR12 12
#define SR13 13
#define SR14 14
#define SR15 15
#endif
/* Macros for setting and retrieving special purpose registers */
#ifndef __ASSEMBLY__
#define mfmsr() ({unsigned int rval; \
asm volatile("mfmsr %0" : "=r" (rval)); rval;})
#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v))
#define mfspr(rn) ({unsigned int rval; \
asm volatile("mfspr %0," __stringify(rn) \
: "=r" (rval)); rval;})
#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v))
#define mfsrin(v) ({unsigned int rval; \
asm volatile("mfsrin %0,%1" : "=r" (rval) : "r" (v)); \
rval;})
#define proc_trap() asm volatile("trap")
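/*
 * Illustrative sketch, not part of the original header: reading the PVR with
 * mfspr() and decoding it with the PVR_VER()/PVR_REV() extraction macros
 * defined earlier in this file.  The function name is invented for the
 * example.
 */
#if 0	/* example only, not built */
static inline int example_is_750(void)
{
	unsigned int pvr = mfspr(SPRN_PVR);

	/* compare only the 16-bit version field, ignoring the revision */
	return PVR_VER(pvr) == PVR_VER(PVR_750);
}
#endif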
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PPC_REGS_H__ */
#endif /* __KERNEL__ */

@ -0,0 +1,350 @@
/*
* Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
*/
#ifndef __PPC_SYSTEM_H
#define __PPC_SYSTEM_H
#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/hw_irq.h>
#include <asm/ppc_asm.h>
/*
* Memory barrier.
* The sync instruction guarantees that all memory accesses initiated
* by this processor have been performed (with respect to all other
* mechanisms that access memory). The eieio instruction is a barrier
* providing an ordering (separately) for (a) cacheable stores and (b)
* loads and stores to non-cacheable memory (e.g. I/O devices).
*
* mb() prevents loads and stores being reordered across this point.
* rmb() prevents loads being reordered across this point.
* wmb() prevents stores being reordered across this point.
* read_barrier_depends() prevents data-dependent loads being reordered
* across this point (nop on PPC).
*
* We have to use the sync instructions for mb(), since lwsync doesn't
* order loads with respect to previous stores. Lwsync is fine for
* rmb(), though. Note that lwsync is interpreted as sync by
* 32-bit and older 64-bit CPUs.
*
* For wmb(), we use sync since wmb is used in drivers to order
* stores to system memory with respect to writes to the device.
* However, smp_wmb() can be a lighter-weight eieio barrier on
* SMP since it is only used to order updates to system memory.
*/
#define mb() __asm__ __volatile__ ("sync" : : : "memory")
#define rmb() __asm__ __volatile__ ("lwsync" : : : "memory")
#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() __asm__ __volatile__ ("eieio" : : : "memory")
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while(0)
#endif /* CONFIG_SMP */
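/*
 * Illustrative sketch, not part of the original header: the typical use of
 * wmb() described above, ordering a store to system memory before the MMIO
 * write that tells the device to look at it.  The flag, doorbell and
 * function names are invented for the example; out_be32() is the usual ppc
 * MMIO accessor.
 */
#if 0	/* example only, not built */
static inline void example_publish(unsigned int *flag,
				   volatile unsigned int __iomem *doorbell)
{
	*flag = 1;		/* update system memory */
	wmb();			/* order the store before the MMIO write below */
	out_be32(doorbell, 1);	/* kick the device */
}
#endif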
#ifdef __KERNEL__
struct task_struct;
struct pt_regs;
#ifdef CONFIG_DEBUGGER
extern int (*__debugger)(struct pt_regs *regs);
extern int (*__debugger_ipi)(struct pt_regs *regs);
extern int (*__debugger_bpt)(struct pt_regs *regs);
extern int (*__debugger_sstep)(struct pt_regs *regs);
extern int (*__debugger_iabr_match)(struct pt_regs *regs);
extern int (*__debugger_dabr_match)(struct pt_regs *regs);
extern int (*__debugger_fault_handler)(struct pt_regs *regs);
#define DEBUGGER_BOILERPLATE(__NAME) \
static inline int __NAME(struct pt_regs *regs) \
{ \
if (unlikely(__ ## __NAME)) \
return __ ## __NAME(regs); \
return 0; \
}
DEBUGGER_BOILERPLATE(debugger)
DEBUGGER_BOILERPLATE(debugger_ipi)
DEBUGGER_BOILERPLATE(debugger_bpt)
DEBUGGER_BOILERPLATE(debugger_sstep)
DEBUGGER_BOILERPLATE(debugger_iabr_match)
DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)
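/*
 * Each DEBUGGER_BOILERPLATE() line above expands to a small inline wrapper:
 * e.g. debugger(regs) calls __debugger(regs) when a debugger has installed a
 * hook and returns 0 otherwise, so callers never need to NULL-check the
 * function pointers themselves.
 */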
#ifdef CONFIG_XMON
extern void xmon_init(int enable);
#endif
#else
static inline int debugger(struct pt_regs *regs) { return 0; }
static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif
extern int set_dabr(unsigned long dabr);
extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR() 0L
#define _get_L3CR() 0L
#define _set_L2CR(val) do { } while(0)
#define _set_L3CR(val) do { } while(0)
#endif
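/*
 * Illustrative sketch, not part of the original header: platform code would
 * typically use the accessors above to turn the L2 on while preserving the
 * rest of the configuration.  L2CR_L2E comes from the register definitions
 * header; the function name is invented for the example.
 */
#if 0	/* example only, not built */
static inline void example_enable_l2(void)
{
	_set_L2CR(_get_L2CR() | L2CR_L2E);
}
#endif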
extern void via_cuda_init(void);
extern void pmac_nvram_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
extern void cvt_df(double *from, float *to, unsigned long *fpscr);
#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif
#ifdef CONFIG_SPE
extern void flush_spe_to_thread(struct task_struct *);
#else
static inline void flush_spe_to_thread(struct task_struct *t)
{
}
#endif
extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern void cacheable_memzero(void *p, unsigned int nb);
extern void *cacheable_memcpy(void *, const void *, unsigned int);
extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern int die(const char *, struct pt_regs *, long);
extern void _exception(int, struct pt_regs *, int, unsigned long);
#ifdef CONFIG_BOOKE_WDT
extern u32 booke_wdt_enabled;
extern u32 booke_wdt_period;
#endif /* CONFIG_BOOKE_WDT */
/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
extern unsigned char e2a(unsigned char);
struct device_node;
extern void note_scsi_host(struct device_node *, void *);
extern struct task_struct *__switch_to(struct task_struct *,
struct task_struct *);
#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))
struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
struct thread_struct *next);
extern unsigned int rtas_data;
/*
* Atomic exchange
*
* Changes the memory location '*ptr' to be val and returns
* the previous value stored there.
*/
static __inline__ unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
unsigned long prev;
__asm__ __volatile__(
EIEIO_ON_SMP
"1: lwarx %0,0,%2 \n"
PPC405_ERR77(0,%2)
" stwcx. %3,0,%2 \n\
bne- 1b"
ISYNC_ON_SMP
: "=&r" (prev), "=m" (*(volatile unsigned int *)p)
: "r" (p), "r" (val), "m" (*(volatile unsigned int *)p)
: "cc", "memory");
return prev;
}
#ifdef CONFIG_PPC64
static __inline__ unsigned long
__xchg_u64(volatile void *p, unsigned long val)
{
unsigned long prev;
__asm__ __volatile__(
EIEIO_ON_SMP
"1: ldarx %0,0,%2 \n"
PPC405_ERR77(0,%2)
" stdcx. %3,0,%2 \n\
bne- 1b"
ISYNC_ON_SMP
: "=&r" (prev), "=m" (*(volatile unsigned long *)p)
: "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
: "cc", "memory");
return prev;
}
#endif
/*
* This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid xchg().
*/
extern void __xchg_called_with_bad_pointer(void);
static __inline__ unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
switch (size) {
case 4:
return __xchg_u32(ptr, x);
#ifdef CONFIG_PPC64
case 8:
return __xchg_u64(ptr, x);
#endif
}
__xchg_called_with_bad_pointer();
return x;
}
#define xchg(ptr,x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
})
#define tas(ptr) (xchg((ptr),1))
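/*
 * Illustrative sketch, not part of the original header: xchg() atomically
 * swaps a new value into a word and returns the old one, so a trivial
 * test-and-set spin lock can be written as below.  The function name is
 * invented for the example.
 */
#if 0	/* example only, not built */
static inline void example_lock(volatile unsigned int *lock)
{
	while (xchg(lock, 1) != 0)
		;	/* spin until the previous value was 0 (unlocked) */
}
#endif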
/*
* Compare and exchange - if *p == old, set it to new,
* and return the old value of *p.
*/
#define __HAVE_ARCH_CMPXCHG 1
static __inline__ unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
unsigned int prev;
__asm__ __volatile__ (
EIEIO_ON_SMP
"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
cmpw 0,%0,%3\n\
bne- 2f\n"
PPC405_ERR77(0,%2)
" stwcx. %4,0,%2\n\
bne- 1b"
ISYNC_ON_SMP
"\n\
2:"
: "=&r" (prev), "=m" (*p)
: "r" (p), "r" (old), "r" (new), "m" (*p)
: "cc", "memory");
return prev;
}
#ifdef CONFIG_PPC64
static __inline__ unsigned long
__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
{
unsigned long prev;
__asm__ __volatile__ (
EIEIO_ON_SMP
"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
cmpd 0,%0,%3\n\
bne- 2f\n\
stdcx. %4,0,%2\n\
bne- 1b"
ISYNC_ON_SMP
"\n\
2:"
: "=&r" (prev), "=m" (*p)
: "r" (p), "r" (old), "r" (new), "m" (*p)
: "cc", "memory");
return prev;
}
#endif
/* This function doesn't exist, so you'll get a linker error
if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
unsigned int size)
{
switch (size) {
case 4:
return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
case 8:
return __cmpxchg_u64(ptr, old, new);
#endif
}
__cmpxchg_called_with_bad_pointer();
return old;
}
#define cmpxchg(ptr,o,n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
(unsigned long)_n_, sizeof(*(ptr))); \
})
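/*
 * Illustrative sketch, not part of the original header: cmpxchg() stores the
 * new value only if the location still holds the expected old value, which
 * is the usual building block for lock-free updates such as the bounded
 * increment below.  The function name is invented for the example.
 */
#if 0	/* example only, not built */
static inline int example_inc_below(unsigned int *v, unsigned int limit)
{
	unsigned int old, new;

	do {
		old = *v;
		if (old >= limit)
			return 0;	/* would exceed the limit, give up */
		new = old + 1;
	} while (cmpxchg(v, old, new) != old);
	return 1;
}
#endif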
#ifdef CONFIG_PPC64
/*
* We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (the chip does
 * power-of-2 sized writes until it reaches sufficient alignment).
*
* Based on this we disable the IP header alignment in network drivers.
*/
#define NET_IP_ALIGN 0
#endif
#define arch_align_stack(x) (x)
#endif /* __KERNEL__ */
#endif /* __PPC_SYSTEM_H */

@ -45,30 +45,21 @@ extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
extern void cpu_die(void) __attribute__((noreturn));
#define NO_PROC_ID 0xFF /* No processor magic marker */
#define PROC_CHANGE_PENALTY 20
#define raw_smp_processor_id() (current_thread_info()->cpu)
extern int __cpu_up(unsigned int cpu);
extern int smp_hw_index[];
#define hard_smp_processor_id() (smp_hw_index[smp_processor_id()])
struct klock_info_struct {
unsigned long kernel_flag;
unsigned char akp;
};
extern struct klock_info_struct klock_info;
#define KLOCK_HELD 0xffffffff
#define KLOCK_CLEAR 0x0
#define hard_smp_processor_id() (smp_hw_index[smp_processor_id()])
#define get_hard_smp_processor_id(cpu) (smp_hw_index[(cpu)])
#endif /* __ASSEMBLY__ */
#else /* !(CONFIG_SMP) */
static inline void cpu_die(void) { }
#define get_hard_smp_processor_id(cpu) 0
#define hard_smp_processor_id() 0
#endif /* !(CONFIG_SMP) */