Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile: arch/tile: check kmalloc() result arch/tile: catch up on various minor cleanups. arch/tile: avoid erroneous error return for PTRACE_POKEUSR. tile: set ARCH_KMALLOC_MINALIGN tile: remove homegrown L1_CACHE_ALIGN macro arch/tile: Miscellaneous cleanup changes. arch/tile: Split the icache flush code off to a generic <arch> header. arch/tile: Fix bug in support for atomic64_xx() ops. arch/tile: Shrink the tile-opcode files considerably. arch/tile: Add driver to enable access to the user dynamic network. arch/tile: Enable more sophisticated IRQ model for 32-bit chips. Move list types from <linux/list.h> to <linux/types.h>. Add wait4() back to the set of <asm-generic/unistd.h> syscalls. Revert adding some arch-specific signal syscalls to <linux/syscalls.h>. arch/tile: Do not use GFP_KERNEL for dma_alloc_coherent(). Feedback from fujita.tomonori@lab.ntt.co.jp. arch/tile: core support for Tilera 32-bit chips. Fix up the "generic" unistd.h ABI to be more useful.
This commit is contained in:
Коммит
45d7f32c7a
|
@ -5626,6 +5626,12 @@ F: include/linux/tipc*.h
|
|||
F: include/net/tipc/
|
||||
F: net/tipc/
|
||||
|
||||
TILE ARCHITECTURE
|
||||
M: Chris Metcalf <cmetcalf@tilera.com>
|
||||
W: http://www.tilera.com/scm/
|
||||
S: Supported
|
||||
F: arch/tile/
|
||||
|
||||
TLAN NETWORK DRIVER
|
||||
M: Samuel Chessman <chessman@tux.org>
|
||||
L: tlan-devel@lists.sourceforge.net (subscribers-only)
|
||||
|
|
|
@ -0,0 +1,3 @@
|
|||
|
||||
obj-y += kernel/
|
||||
obj-y += mm/
|
|
@ -0,0 +1,356 @@
|
|||
# For a description of the syntax of this configuration file,
|
||||
# see Documentation/kbuild/config-language.txt.
|
||||
|
||||
config MMU
|
||||
def_bool y
|
||||
|
||||
config GENERIC_CSUM
|
||||
def_bool y
|
||||
|
||||
config GENERIC_HARDIRQS
|
||||
def_bool y
|
||||
|
||||
config GENERIC_HARDIRQS_NO__DO_IRQ
|
||||
def_bool y
|
||||
|
||||
config GENERIC_IRQ_PROBE
|
||||
def_bool y
|
||||
|
||||
config GENERIC_PENDING_IRQ
|
||||
def_bool y
|
||||
depends on GENERIC_HARDIRQS && SMP
|
||||
|
||||
config SEMAPHORE_SLEEPERS
|
||||
def_bool y
|
||||
|
||||
config HAVE_ARCH_ALLOC_REMAP
|
||||
def_bool y
|
||||
|
||||
config HAVE_SETUP_PER_CPU_AREA
|
||||
def_bool y
|
||||
|
||||
config NEED_PER_CPU_PAGE_FIRST_CHUNK
|
||||
def_bool y
|
||||
|
||||
config SYS_SUPPORTS_HUGETLBFS
|
||||
def_bool y
|
||||
|
||||
config GENERIC_TIME
|
||||
def_bool y
|
||||
|
||||
config GENERIC_CLOCKEVENTS
|
||||
def_bool y
|
||||
|
||||
# FIXME: tilegx can implement a more efficent rwsem.
|
||||
config RWSEM_GENERIC_SPINLOCK
|
||||
def_bool y
|
||||
|
||||
# We have a very flat architecture from a migration point of view,
|
||||
# so save boot time by presetting this (particularly useful on tile-sim).
|
||||
config DEFAULT_MIGRATION_COST
|
||||
int
|
||||
default "10000000"
|
||||
|
||||
# We only support gcc 4.4 and above, so this should work.
|
||||
config ARCH_SUPPORTS_OPTIMIZED_INLINING
|
||||
def_bool y
|
||||
|
||||
config ARCH_PHYS_ADDR_T_64BIT
|
||||
def_bool y
|
||||
|
||||
config LOCKDEP_SUPPORT
|
||||
def_bool y
|
||||
|
||||
config STACKTRACE_SUPPORT
|
||||
def_bool y
|
||||
select STACKTRACE
|
||||
|
||||
# We use discontigmem for now; at some point we may want to switch
|
||||
# to sparsemem (Tilera bug 7996).
|
||||
config ARCH_DISCONTIGMEM_ENABLE
|
||||
def_bool y
|
||||
|
||||
config ARCH_DISCONTIGMEM_DEFAULT
|
||||
def_bool y
|
||||
|
||||
config TRACE_IRQFLAGS_SUPPORT
|
||||
def_bool y
|
||||
|
||||
config STRICT_DEVMEM
|
||||
def_bool y
|
||||
|
||||
# SMP is required for Tilera Linux.
|
||||
config SMP
|
||||
def_bool y
|
||||
|
||||
# Allow checking for compile-time determined overflow errors in
|
||||
# copy_from_user(). There are still unprovable places in the
|
||||
# generic code as of 2.6.34, so this option is not really compatible
|
||||
# with -Werror, which is more useful in general.
|
||||
config DEBUG_COPY_FROM_USER
|
||||
def_bool n
|
||||
|
||||
config HVC_TILE
|
||||
select HVC_DRIVER
|
||||
def_bool y
|
||||
|
||||
config TILE
|
||||
def_bool y
|
||||
select GENERIC_FIND_FIRST_BIT
|
||||
select GENERIC_FIND_NEXT_BIT
|
||||
select USE_GENERIC_SMP_HELPERS
|
||||
select CC_OPTIMIZE_FOR_SIZE
|
||||
|
||||
# FIXME: investigate whether we need/want these options.
|
||||
# select HAVE_IOREMAP_PROT
|
||||
# select HAVE_OPTPROBES
|
||||
# select HAVE_REGS_AND_STACK_ACCESS_API
|
||||
# select HAVE_HW_BREAKPOINT
|
||||
# select PERF_EVENTS
|
||||
# select HAVE_USER_RETURN_NOTIFIER
|
||||
# config NO_BOOTMEM
|
||||
# config ARCH_SUPPORTS_DEBUG_PAGEALLOC
|
||||
# config HUGETLB_PAGE_SIZE_VARIABLE
|
||||
|
||||
|
||||
mainmenu "Linux/TILE Kernel Configuration"
|
||||
|
||||
# Please note: TILE-Gx support is not yet finalized; this is
|
||||
# the preliminary support. TILE-Gx drivers are only provided
|
||||
# with the alpha or beta test versions for Tilera customers.
|
||||
config TILEGX
|
||||
depends on EXPERIMENTAL
|
||||
bool "Building with TILE-Gx (64-bit) compiler and toolchain"
|
||||
|
||||
config 64BIT
|
||||
depends on TILEGX
|
||||
def_bool y
|
||||
|
||||
config ARCH_DEFCONFIG
|
||||
string
|
||||
default "arch/tile/configs/tile_defconfig" if !TILEGX
|
||||
default "arch/tile/configs/tilegx_defconfig" if TILEGX
|
||||
|
||||
source "init/Kconfig"
|
||||
|
||||
menu "Tilera-specific configuration"
|
||||
|
||||
config NR_CPUS
|
||||
int "Maximum number of tiles (2-255)"
|
||||
range 2 255
|
||||
depends on SMP
|
||||
default "64"
|
||||
---help---
|
||||
Building with 64 is the recommended value, but a slightly
|
||||
smaller kernel memory footprint results from using a smaller
|
||||
value on chips with fewer tiles.
|
||||
|
||||
source "kernel/time/Kconfig"
|
||||
|
||||
source "kernel/Kconfig.hz"
|
||||
|
||||
config KEXEC
|
||||
bool "kexec system call"
|
||||
---help---
|
||||
kexec is a system call that implements the ability to shutdown your
|
||||
current kernel, and to start another kernel. It is like a reboot
|
||||
but it is independent of the system firmware. It is used
|
||||
to implement the "mboot" Tilera booter.
|
||||
|
||||
The name comes from the similarity to the exec system call.
|
||||
|
||||
config COMPAT
|
||||
bool "Support 32-bit TILE-Gx binaries in addition to 64-bit"
|
||||
depends on TILEGX
|
||||
select COMPAT_BINFMT_ELF
|
||||
default y
|
||||
---help---
|
||||
If enabled, the kernel will support running TILE-Gx binaries
|
||||
that were built with the -m32 option.
|
||||
|
||||
config SYSVIPC_COMPAT
|
||||
def_bool y
|
||||
depends on COMPAT && SYSVIPC
|
||||
|
||||
# We do not currently support disabling HIGHMEM on tile64 and tilepro.
|
||||
config HIGHMEM
|
||||
bool # "Support for more than 512 MB of RAM"
|
||||
default !TILEGX
|
||||
---help---
|
||||
Linux can use the full amount of RAM in the system by
|
||||
default. However, the address space of TILE processors is
|
||||
only 4 Gigabytes large. That means that, if you have a large
|
||||
amount of physical memory, not all of it can be "permanently
|
||||
mapped" by the kernel. The physical memory that's not
|
||||
permanently mapped is called "high memory".
|
||||
|
||||
If you are compiling a kernel which will never run on a
|
||||
machine with more than 512 MB total physical RAM, answer
|
||||
"false" here. This will result in the kernel mapping all of
|
||||
physical memory into the top 1 GB of virtual memory space.
|
||||
|
||||
If unsure, say "true".
|
||||
|
||||
# We do not currently support disabling NUMA.
|
||||
config NUMA
|
||||
bool # "NUMA Memory Allocation and Scheduler Support"
|
||||
depends on SMP && DISCONTIGMEM
|
||||
default y
|
||||
---help---
|
||||
NUMA memory allocation is required for TILE processors
|
||||
unless booting with memory striping enabled in the
|
||||
hypervisor, or with only a single memory controller.
|
||||
It is recommended that this option always be enabled.
|
||||
|
||||
config NODES_SHIFT
|
||||
int "Log base 2 of the max number of memory controllers"
|
||||
default 2
|
||||
depends on NEED_MULTIPLE_NODES
|
||||
---help---
|
||||
By default, 2, i.e. 2^2 == 4 DDR2 controllers.
|
||||
In a system with more controllers, this value should be raised.
|
||||
|
||||
# Need 16MB areas to enable hugetlb
|
||||
# See build-time check in arch/tile/mm/init.c.
|
||||
config FORCE_MAX_ZONEORDER
|
||||
int
|
||||
default 9
|
||||
|
||||
choice
|
||||
depends on !TILEGX
|
||||
prompt "Memory split" if EMBEDDED
|
||||
default VMSPLIT_3G
|
||||
---help---
|
||||
Select the desired split between kernel and user memory.
|
||||
|
||||
If the address range available to the kernel is less than the
|
||||
physical memory installed, the remaining memory will be available
|
||||
as "high memory". Accessing high memory is a little more costly
|
||||
than low memory, as it needs to be mapped into the kernel first.
|
||||
Note that increasing the kernel address space limits the range
|
||||
available to user programs, making the address space there
|
||||
tighter. Selecting anything other than the default 3G/1G split
|
||||
will also likely make your kernel incompatible with binary-only
|
||||
kernel modules.
|
||||
|
||||
If you are not absolutely sure what you are doing, leave this
|
||||
option alone!
|
||||
|
||||
config VMSPLIT_375G
|
||||
bool "3.75G/0.25G user/kernel split (no kernel networking)"
|
||||
config VMSPLIT_35G
|
||||
bool "3.5G/0.5G user/kernel split"
|
||||
config VMSPLIT_3G
|
||||
bool "3G/1G user/kernel split"
|
||||
config VMSPLIT_3G_OPT
|
||||
bool "3G/1G user/kernel split (for full 1G low memory)"
|
||||
config VMSPLIT_2G
|
||||
bool "2G/2G user/kernel split"
|
||||
config VMSPLIT_1G
|
||||
bool "1G/3G user/kernel split"
|
||||
endchoice
|
||||
|
||||
config PAGE_OFFSET
|
||||
hex
|
||||
default 0xF0000000 if VMSPLIT_375G
|
||||
default 0xE0000000 if VMSPLIT_35G
|
||||
default 0xB0000000 if VMSPLIT_3G_OPT
|
||||
default 0x80000000 if VMSPLIT_2G
|
||||
default 0x40000000 if VMSPLIT_1G
|
||||
default 0xC0000000
|
||||
|
||||
source "mm/Kconfig"
|
||||
|
||||
config CMDLINE_BOOL
|
||||
bool "Built-in kernel command line"
|
||||
default n
|
||||
---help---
|
||||
Allow for specifying boot arguments to the kernel at
|
||||
build time. On some systems (e.g. embedded ones), it is
|
||||
necessary or convenient to provide some or all of the
|
||||
kernel boot arguments with the kernel itself (that is,
|
||||
to not rely on the boot loader to provide them.)
|
||||
|
||||
To compile command line arguments into the kernel,
|
||||
set this option to 'Y', then fill in the
|
||||
the boot arguments in CONFIG_CMDLINE.
|
||||
|
||||
Systems with fully functional boot loaders (e.g. mboot, or
|
||||
if booting over PCI) should leave this option set to 'N'.
|
||||
|
||||
config CMDLINE
|
||||
string "Built-in kernel command string"
|
||||
depends on CMDLINE_BOOL
|
||||
default ""
|
||||
---help---
|
||||
Enter arguments here that should be compiled into the kernel
|
||||
image and used at boot time. If the boot loader provides a
|
||||
command line at boot time, it is appended to this string to
|
||||
form the full kernel command line, when the system boots.
|
||||
|
||||
However, you can use the CONFIG_CMDLINE_OVERRIDE option to
|
||||
change this behavior.
|
||||
|
||||
In most cases, the command line (whether built-in or provided
|
||||
by the boot loader) should specify the device for the root
|
||||
file system.
|
||||
|
||||
config CMDLINE_OVERRIDE
|
||||
bool "Built-in command line overrides boot loader arguments"
|
||||
default n
|
||||
depends on CMDLINE_BOOL
|
||||
---help---
|
||||
Set this option to 'Y' to have the kernel ignore the boot loader
|
||||
command line, and use ONLY the built-in command line.
|
||||
|
||||
This is used to work around broken boot loaders. This should
|
||||
be set to 'N' under normal conditions.
|
||||
|
||||
config VMALLOC_RESERVE
|
||||
hex
|
||||
default 0x1000000
|
||||
|
||||
config HARDWALL
|
||||
bool "Hardwall support to allow access to user dynamic network"
|
||||
default y
|
||||
|
||||
endmenu # Tilera-specific configuration
|
||||
|
||||
menu "Bus options"
|
||||
|
||||
config NO_IOMEM
|
||||
def_bool !PCI
|
||||
|
||||
config NO_IOPORT
|
||||
def_bool !PCI
|
||||
|
||||
source "drivers/pci/Kconfig"
|
||||
|
||||
source "drivers/pci/hotplug/Kconfig"
|
||||
|
||||
endmenu
|
||||
|
||||
menu "Executable file formats"
|
||||
|
||||
# only elf supported
|
||||
config KCORE_ELF
|
||||
def_bool y
|
||||
depends on PROC_FS
|
||||
|
||||
source "fs/Kconfig.binfmt"
|
||||
|
||||
endmenu
|
||||
|
||||
source "net/Kconfig"
|
||||
|
||||
source "drivers/Kconfig"
|
||||
|
||||
source "fs/Kconfig"
|
||||
|
||||
source "arch/tile/Kconfig.debug"
|
||||
|
||||
source "security/Kconfig"
|
||||
|
||||
source "crypto/Kconfig"
|
||||
|
||||
source "lib/Kconfig"
|
|
@ -0,0 +1,43 @@
|
|||
menu "Kernel hacking"
|
||||
|
||||
source "lib/Kconfig.debug"
|
||||
|
||||
config EARLY_PRINTK
|
||||
bool "Early printk" if EMBEDDED && DEBUG_KERNEL
|
||||
default y
|
||||
help
|
||||
Write kernel log output directly via the hypervisor console.
|
||||
|
||||
This is useful for kernel debugging when your machine crashes very
|
||||
early before the console code is initialized. For normal operation
|
||||
it is not recommended because it looks ugly and doesn't cooperate
|
||||
with klogd/syslogd. You should normally N here,
|
||||
unless you want to debug such a crash.
|
||||
|
||||
config DEBUG_STACKOVERFLOW
|
||||
bool "Check for stack overflows"
|
||||
depends on DEBUG_KERNEL
|
||||
help
|
||||
This option will cause messages to be printed if free stack space
|
||||
drops below a certain limit.
|
||||
|
||||
config DEBUG_STACK_USAGE
|
||||
bool "Stack utilization instrumentation"
|
||||
depends on DEBUG_KERNEL
|
||||
help
|
||||
Enables the display of the minimum amount of free stack which each
|
||||
task has ever had available in the sysrq-T and sysrq-P debug output.
|
||||
|
||||
This option will slow down process creation somewhat.
|
||||
|
||||
config DEBUG_EXTRA_FLAGS
|
||||
string "Additional compiler arguments when building with '-g'"
|
||||
depends on DEBUG_INFO
|
||||
default ""
|
||||
help
|
||||
Debug info can be large, and flags like
|
||||
`-femit-struct-debug-baseonly' can reduce the kernel file
|
||||
size and build time noticeably. Such flags are often
|
||||
helpful if the main use of debug info is line number info.
|
||||
|
||||
endmenu
|
|
@ -0,0 +1,52 @@
|
|||
#
|
||||
# This file is subject to the terms and conditions of the GNU General Public
|
||||
# License. See the file "COPYING" in the main directory of this archive
|
||||
# for more details.
|
||||
#
|
||||
# This file is included by the global makefile so that you can add your own
|
||||
# architecture-specific flags and dependencies. Remember to do have actions
|
||||
# for "archclean" and "archdep" for cleaning up and making dependencies for
|
||||
# this architecture
|
||||
|
||||
ifeq ($(CROSS_COMPILE),)
|
||||
# If building with TILERA_ROOT set (i.e. using the Tilera Multicore
|
||||
# Development Environment) we can set CROSS_COMPILE based on that.
|
||||
ifdef TILERA_ROOT
|
||||
CROSS_COMPILE = $(TILERA_ROOT)/bin/tile-
|
||||
endif
|
||||
endif
|
||||
|
||||
# If we're not cross-compiling, make sure we're on the right architecture.
|
||||
ifeq ($(CROSS_COMPILE),)
|
||||
HOST_ARCH = $(shell uname -m)
|
||||
ifneq ($(HOST_ARCH),$(ARCH))
|
||||
$(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH))
|
||||
endif
|
||||
endif
|
||||
|
||||
|
||||
KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS)
|
||||
|
||||
LIBGCC_PATH := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
|
||||
|
||||
# Provide the path to use for "make defconfig".
|
||||
KBUILD_DEFCONFIG := $(ARCH)_defconfig
|
||||
|
||||
# Used as a file extension when useful, e.g. head_$(BITS).o
|
||||
# Not needed for (e.g.) "$(CC) -m32" since the compiler automatically
|
||||
# uses the right default anyway.
|
||||
export BITS
|
||||
ifeq ($(CONFIG_TILEGX),y)
|
||||
BITS := 64
|
||||
else
|
||||
BITS := 32
|
||||
endif
|
||||
|
||||
head-y := arch/tile/kernel/head_$(BITS).o
|
||||
|
||||
libs-y += arch/tile/lib/
|
||||
libs-y += $(LIBGCC_PATH)
|
||||
|
||||
|
||||
# See arch/tile/Kbuild for content of core part of the kernel
|
||||
core-y += arch/tile/
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -0,0 +1,98 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
*
|
||||
* ABI-related register definitions helpful when writing assembly code.
|
||||
*/
|
||||
|
||||
#ifndef __ARCH_ABI_H__
|
||||
#define __ARCH_ABI_H__
|
||||
|
||||
#include <arch/chip.h>
|
||||
|
||||
/* Registers 0 - 55 are "normal", but some perform special roles. */
|
||||
|
||||
#define TREG_FP 52 /**< Frame pointer. */
|
||||
#define TREG_TP 53 /**< Thread pointer. */
|
||||
#define TREG_SP 54 /**< Stack pointer. */
|
||||
#define TREG_LR 55 /**< Link to calling function PC. */
|
||||
|
||||
/** Index of last normal general-purpose register. */
|
||||
#define TREG_LAST_GPR 55
|
||||
|
||||
/* Registers 56 - 62 are "special" network registers. */
|
||||
|
||||
#define TREG_SN 56 /**< Static network access. */
|
||||
#define TREG_IDN0 57 /**< IDN demux 0 access. */
|
||||
#define TREG_IDN1 58 /**< IDN demux 1 access. */
|
||||
#define TREG_UDN0 59 /**< UDN demux 0 access. */
|
||||
#define TREG_UDN1 60 /**< UDN demux 1 access. */
|
||||
#define TREG_UDN2 61 /**< UDN demux 2 access. */
|
||||
#define TREG_UDN3 62 /**< UDN demux 3 access. */
|
||||
|
||||
/* Register 63 is the "special" zero register. */
|
||||
|
||||
#define TREG_ZERO 63 /**< "Zero" register; always reads as "0". */
|
||||
|
||||
|
||||
/** By convention, this register is used to hold the syscall number. */
|
||||
#define TREG_SYSCALL_NR 10
|
||||
|
||||
/** Name of register that holds the syscall number, for use in assembly. */
|
||||
#define TREG_SYSCALL_NR_NAME r10
|
||||
|
||||
|
||||
/**
|
||||
* The ABI requires callers to allocate a caller state save area of
|
||||
* this many bytes at the bottom of each stack frame.
|
||||
*/
|
||||
#ifdef __tile__
|
||||
#define C_ABI_SAVE_AREA_SIZE (2 * __SIZEOF_POINTER__)
|
||||
#endif
|
||||
|
||||
/**
|
||||
* The operand to an 'info' opcode directing the backtracer to not
|
||||
* try to find the calling frame.
|
||||
*/
|
||||
#define INFO_OP_CANNOT_BACKTRACE 2
|
||||
|
||||
#ifndef __ASSEMBLER__
|
||||
#if CHIP_WORD_SIZE() > 32
|
||||
|
||||
/** Unsigned type that can hold a register. */
|
||||
typedef unsigned long long uint_reg_t;
|
||||
|
||||
/** Signed type that can hold a register. */
|
||||
typedef long long int_reg_t;
|
||||
|
||||
/** String prefix to use for printf(). */
|
||||
#define INT_REG_FMT "ll"
|
||||
|
||||
#elif !defined(__LP64__) /* avoid confusion with LP64 cross-build tools */
|
||||
|
||||
/** Unsigned type that can hold a register. */
|
||||
typedef unsigned long uint_reg_t;
|
||||
|
||||
/** Signed type that can hold a register. */
|
||||
typedef long int_reg_t;
|
||||
|
||||
/** String prefix to use for printf(). */
|
||||
#define INT_REG_FMT "l"
|
||||
|
||||
#endif
|
||||
#endif /* __ASSEMBLER__ */
|
||||
|
||||
#endif /* !__ARCH_ABI_H__ */
|
|
@ -0,0 +1,23 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#if __tile_chip__ == 0
|
||||
#include <arch/chip_tile64.h>
|
||||
#elif __tile_chip__ == 1
|
||||
#include <arch/chip_tilepro.h>
|
||||
#elif defined(__tilegx__)
|
||||
#include <arch/chip_tilegx.h>
|
||||
#else
|
||||
#error Unexpected Tilera chip type
|
||||
#endif
|
|
@ -0,0 +1,255 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
/*
|
||||
* @file
|
||||
* Global header file.
|
||||
* This header file specifies defines for TILE64.
|
||||
*/
|
||||
|
||||
#ifndef __ARCH_CHIP_H__
|
||||
#define __ARCH_CHIP_H__
|
||||
|
||||
/** Specify chip version.
|
||||
* When possible, prefer the CHIP_xxx symbols below for future-proofing.
|
||||
* This is intended for cross-compiling; native compilation should
|
||||
* use the predefined __tile_chip__ symbol.
|
||||
*/
|
||||
#define TILE_CHIP 0
|
||||
|
||||
/** Specify chip revision.
|
||||
* This provides for the case of a respin of a particular chip type;
|
||||
* the normal value for this symbol is "0".
|
||||
* This is intended for cross-compiling; native compilation should
|
||||
* use the predefined __tile_chip_rev__ symbol.
|
||||
*/
|
||||
#define TILE_CHIP_REV 0
|
||||
|
||||
/** The name of this architecture. */
|
||||
#define CHIP_ARCH_NAME "tile64"
|
||||
|
||||
/** The ELF e_machine type for binaries for this chip. */
|
||||
#define CHIP_ELF_TYPE() EM_TILE64
|
||||
|
||||
/** The alternate ELF e_machine type for binaries for this chip. */
|
||||
#define CHIP_COMPAT_ELF_TYPE() 0x2506
|
||||
|
||||
/** What is the native word size of the machine? */
|
||||
#define CHIP_WORD_SIZE() 32
|
||||
|
||||
/** How many bits of a virtual address are used. Extra bits must be
|
||||
* the sign extension of the low bits.
|
||||
*/
|
||||
#define CHIP_VA_WIDTH() 32
|
||||
|
||||
/** How many bits are in a physical address? */
|
||||
#define CHIP_PA_WIDTH() 36
|
||||
|
||||
/** Size of the L2 cache, in bytes. */
|
||||
#define CHIP_L2_CACHE_SIZE() 65536
|
||||
|
||||
/** Log size of an L2 cache line in bytes. */
|
||||
#define CHIP_L2_LOG_LINE_SIZE() 6
|
||||
|
||||
/** Size of an L2 cache line, in bytes. */
|
||||
#define CHIP_L2_LINE_SIZE() (1 << CHIP_L2_LOG_LINE_SIZE())
|
||||
|
||||
/** Associativity of the L2 cache. */
|
||||
#define CHIP_L2_ASSOC() 2
|
||||
|
||||
/** Size of the L1 data cache, in bytes. */
|
||||
#define CHIP_L1D_CACHE_SIZE() 8192
|
||||
|
||||
/** Log size of an L1 data cache line in bytes. */
|
||||
#define CHIP_L1D_LOG_LINE_SIZE() 4
|
||||
|
||||
/** Size of an L1 data cache line, in bytes. */
|
||||
#define CHIP_L1D_LINE_SIZE() (1 << CHIP_L1D_LOG_LINE_SIZE())
|
||||
|
||||
/** Associativity of the L1 data cache. */
|
||||
#define CHIP_L1D_ASSOC() 2
|
||||
|
||||
/** Size of the L1 instruction cache, in bytes. */
|
||||
#define CHIP_L1I_CACHE_SIZE() 8192
|
||||
|
||||
/** Log size of an L1 instruction cache line in bytes. */
|
||||
#define CHIP_L1I_LOG_LINE_SIZE() 6
|
||||
|
||||
/** Size of an L1 instruction cache line, in bytes. */
|
||||
#define CHIP_L1I_LINE_SIZE() (1 << CHIP_L1I_LOG_LINE_SIZE())
|
||||
|
||||
/** Associativity of the L1 instruction cache. */
|
||||
#define CHIP_L1I_ASSOC() 1
|
||||
|
||||
/** Stride with which flush instructions must be issued. */
|
||||
#define CHIP_FLUSH_STRIDE() CHIP_L2_LINE_SIZE()
|
||||
|
||||
/** Stride with which inv instructions must be issued. */
|
||||
#define CHIP_INV_STRIDE() CHIP_L1D_LINE_SIZE()
|
||||
|
||||
/** Stride with which finv instructions must be issued. */
|
||||
#define CHIP_FINV_STRIDE() CHIP_L1D_LINE_SIZE()
|
||||
|
||||
/** Can the local cache coherently cache data that is homed elsewhere? */
|
||||
#define CHIP_HAS_COHERENT_LOCAL_CACHE() 0
|
||||
|
||||
/** How many simultaneous outstanding victims can the L2 cache have? */
|
||||
#define CHIP_MAX_OUTSTANDING_VICTIMS() 2
|
||||
|
||||
/** Does the TLB support the NC and NOALLOC bits? */
|
||||
#define CHIP_HAS_NC_AND_NOALLOC_BITS() 0
|
||||
|
||||
/** Does the chip support hash-for-home caching? */
|
||||
#define CHIP_HAS_CBOX_HOME_MAP() 0
|
||||
|
||||
/** Number of entries in the chip's home map tables. */
|
||||
/* #define CHIP_CBOX_HOME_MAP_SIZE() -- does not apply to chip 0 */
|
||||
|
||||
/** Do uncacheable requests miss in the cache regardless of whether
|
||||
* there is matching data? */
|
||||
#define CHIP_HAS_ENFORCED_UNCACHEABLE_REQUESTS() 0
|
||||
|
||||
/** Does the mf instruction wait for victims? */
|
||||
#define CHIP_HAS_MF_WAITS_FOR_VICTIMS() 1
|
||||
|
||||
/** Does the chip have an "inv" instruction that doesn't also flush? */
|
||||
#define CHIP_HAS_INV() 0
|
||||
|
||||
/** Does the chip have a "wh64" instruction? */
|
||||
#define CHIP_HAS_WH64() 0
|
||||
|
||||
/** Does this chip have a 'dword_align' instruction? */
|
||||
#define CHIP_HAS_DWORD_ALIGN() 0
|
||||
|
||||
/** Number of performance counters. */
|
||||
#define CHIP_PERFORMANCE_COUNTERS() 2
|
||||
|
||||
/** Does this chip have auxiliary performance counters? */
|
||||
#define CHIP_HAS_AUX_PERF_COUNTERS() 0
|
||||
|
||||
/** Is the CBOX_MSR1 SPR supported? */
|
||||
#define CHIP_HAS_CBOX_MSR1() 0
|
||||
|
||||
/** Is the TILE_RTF_HWM SPR supported? */
|
||||
#define CHIP_HAS_TILE_RTF_HWM() 0
|
||||
|
||||
/** Is the TILE_WRITE_PENDING SPR supported? */
|
||||
#define CHIP_HAS_TILE_WRITE_PENDING() 0
|
||||
|
||||
/** Is the PROC_STATUS SPR supported? */
|
||||
#define CHIP_HAS_PROC_STATUS_SPR() 0
|
||||
|
||||
/** Log of the number of mshims we have. */
|
||||
#define CHIP_LOG_NUM_MSHIMS() 2
|
||||
|
||||
/** Are the bases of the interrupt vector areas fixed? */
|
||||
#define CHIP_HAS_FIXED_INTVEC_BASE() 1
|
||||
|
||||
/** Are the interrupt masks split up into 2 SPRs? */
|
||||
#define CHIP_HAS_SPLIT_INTR_MASK() 1
|
||||
|
||||
/** Is the cycle count split up into 2 SPRs? */
|
||||
#define CHIP_HAS_SPLIT_CYCLE() 1
|
||||
|
||||
/** Does the chip have a static network? */
|
||||
#define CHIP_HAS_SN() 1
|
||||
|
||||
/** Does the chip have a static network processor? */
|
||||
#define CHIP_HAS_SN_PROC() 1
|
||||
|
||||
/** Size of the L1 static network processor instruction cache, in bytes. */
|
||||
#define CHIP_L1SNI_CACHE_SIZE() 2048
|
||||
|
||||
/** Does the chip have DMA support in each tile? */
|
||||
#define CHIP_HAS_TILE_DMA() 1
|
||||
|
||||
/** Does the chip have the second revision of the directly accessible
|
||||
* dynamic networks? This encapsulates a number of characteristics,
|
||||
* including the absence of the catch-all, the absence of inline message
|
||||
* tags, the absence of support for network context-switching, and so on.
|
||||
*/
|
||||
#define CHIP_HAS_REV1_XDN() 0
|
||||
|
||||
/** Does the chip have cmpexch and similar (fetchadd, exch, etc.)? */
|
||||
#define CHIP_HAS_CMPEXCH() 0
|
||||
|
||||
/** Does the chip have memory-mapped I/O support? */
|
||||
#define CHIP_HAS_MMIO() 0
|
||||
|
||||
/** Does the chip have post-completion interrupts? */
|
||||
#define CHIP_HAS_POST_COMPLETION_INTERRUPTS() 0
|
||||
|
||||
/** Does the chip have native single step support? */
|
||||
#define CHIP_HAS_SINGLE_STEP() 0
|
||||
|
||||
#ifndef __OPEN_SOURCE__ /* features only relevant to hypervisor-level code */
|
||||
|
||||
/** How many entries are present in the instruction TLB? */
|
||||
#define CHIP_ITLB_ENTRIES() 8
|
||||
|
||||
/** How many entries are present in the data TLB? */
|
||||
#define CHIP_DTLB_ENTRIES() 16
|
||||
|
||||
/** How many MAF entries does the XAUI shim have? */
|
||||
#define CHIP_XAUI_MAF_ENTRIES() 16
|
||||
|
||||
/** Does the memory shim have a source-id table? */
|
||||
#define CHIP_HAS_MSHIM_SRCID_TABLE() 1
|
||||
|
||||
/** Does the L1 instruction cache clear on reset? */
|
||||
#define CHIP_HAS_L1I_CLEAR_ON_RESET() 0
|
||||
|
||||
/** Does the chip come out of reset with valid coordinates on all tiles?
|
||||
* Note that if defined, this also implies that the upper left is 1,1.
|
||||
*/
|
||||
#define CHIP_HAS_VALID_TILE_COORD_RESET() 0
|
||||
|
||||
/** Does the chip have unified packet formats? */
|
||||
#define CHIP_HAS_UNIFIED_PACKET_FORMATS() 0
|
||||
|
||||
/** Does the chip support write reordering? */
|
||||
#define CHIP_HAS_WRITE_REORDERING() 0
|
||||
|
||||
/** Does the chip support Y-X routing as well as X-Y? */
|
||||
#define CHIP_HAS_Y_X_ROUTING() 0
|
||||
|
||||
/** Is INTCTRL_3 managed with the correct MPL? */
|
||||
#define CHIP_HAS_INTCTRL_3_STATUS_FIX() 0
|
||||
|
||||
/** Is it possible to configure the chip to be big-endian? */
|
||||
#define CHIP_HAS_BIG_ENDIAN_CONFIG() 0
|
||||
|
||||
/** Is the CACHE_RED_WAY_OVERRIDDEN SPR supported? */
|
||||
#define CHIP_HAS_CACHE_RED_WAY_OVERRIDDEN() 0
|
||||
|
||||
/** Is the DIAG_TRACE_WAY SPR supported? */
|
||||
#define CHIP_HAS_DIAG_TRACE_WAY() 0
|
||||
|
||||
/** Is the MEM_STRIPE_CONFIG SPR supported? */
|
||||
#define CHIP_HAS_MEM_STRIPE_CONFIG() 0
|
||||
|
||||
/** Are the TLB_PERF SPRs supported? */
|
||||
#define CHIP_HAS_TLB_PERF() 0
|
||||
|
||||
/** Is the VDN_SNOOP_SHIM_CTL SPR supported? */
|
||||
#define CHIP_HAS_VDN_SNOOP_SHIM_CTL() 0
|
||||
|
||||
/** Does the chip support rev1 DMA packets? */
|
||||
#define CHIP_HAS_REV1_DMA_PACKETS() 0
|
||||
|
||||
/** Does the chip have an IPI shim? */
|
||||
#define CHIP_HAS_IPI() 0
|
||||
|
||||
#endif /* !__OPEN_SOURCE__ */
|
||||
#endif /* __ARCH_CHIP_H__ */
|
|
@ -0,0 +1,255 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
/*
|
||||
* @file
|
||||
* Global header file.
|
||||
* This header file specifies defines for TILEPro.
|
||||
*/
|
||||
|
||||
#ifndef __ARCH_CHIP_H__
|
||||
#define __ARCH_CHIP_H__
|
||||
|
||||
/** Specify chip version.
|
||||
* When possible, prefer the CHIP_xxx symbols below for future-proofing.
|
||||
* This is intended for cross-compiling; native compilation should
|
||||
* use the predefined __tile_chip__ symbol.
|
||||
*/
|
||||
#define TILE_CHIP 1
|
||||
|
||||
/** Specify chip revision.
|
||||
* This provides for the case of a respin of a particular chip type;
|
||||
* the normal value for this symbol is "0".
|
||||
* This is intended for cross-compiling; native compilation should
|
||||
* use the predefined __tile_chip_rev__ symbol.
|
||||
*/
|
||||
#define TILE_CHIP_REV 0
|
||||
|
||||
/** The name of this architecture. */
|
||||
#define CHIP_ARCH_NAME "tilepro"
|
||||
|
||||
/** The ELF e_machine type for binaries for this chip. */
|
||||
#define CHIP_ELF_TYPE() EM_TILEPRO
|
||||
|
||||
/** The alternate ELF e_machine type for binaries for this chip. */
|
||||
#define CHIP_COMPAT_ELF_TYPE() 0x2507
|
||||
|
||||
/** What is the native word size of the machine? */
|
||||
#define CHIP_WORD_SIZE() 32
|
||||
|
||||
/** How many bits of a virtual address are used. Extra bits must be
|
||||
* the sign extension of the low bits.
|
||||
*/
|
||||
#define CHIP_VA_WIDTH() 32
|
||||
|
||||
/** How many bits are in a physical address? */
|
||||
#define CHIP_PA_WIDTH() 36
|
||||
|
||||
/** Size of the L2 cache, in bytes. */
|
||||
#define CHIP_L2_CACHE_SIZE() 65536
|
||||
|
||||
/** Log size of an L2 cache line in bytes. */
|
||||
#define CHIP_L2_LOG_LINE_SIZE() 6
|
||||
|
||||
/** Size of an L2 cache line, in bytes. */
|
||||
#define CHIP_L2_LINE_SIZE() (1 << CHIP_L2_LOG_LINE_SIZE())
|
||||
|
||||
/** Associativity of the L2 cache. */
|
||||
#define CHIP_L2_ASSOC() 4
|
||||
|
||||
/** Size of the L1 data cache, in bytes. */
|
||||
#define CHIP_L1D_CACHE_SIZE() 8192
|
||||
|
||||
/** Log size of an L1 data cache line in bytes. */
|
||||
#define CHIP_L1D_LOG_LINE_SIZE() 4
|
||||
|
||||
/** Size of an L1 data cache line, in bytes. */
|
||||
#define CHIP_L1D_LINE_SIZE() (1 << CHIP_L1D_LOG_LINE_SIZE())
|
||||
|
||||
/** Associativity of the L1 data cache. */
|
||||
#define CHIP_L1D_ASSOC() 2
|
||||
|
||||
/** Size of the L1 instruction cache, in bytes. */
|
||||
#define CHIP_L1I_CACHE_SIZE() 16384
|
||||
|
||||
/** Log size of an L1 instruction cache line in bytes. */
|
||||
#define CHIP_L1I_LOG_LINE_SIZE() 6
|
||||
|
||||
/** Size of an L1 instruction cache line, in bytes. */
|
||||
#define CHIP_L1I_LINE_SIZE() (1 << CHIP_L1I_LOG_LINE_SIZE())
|
||||
|
||||
/** Associativity of the L1 instruction cache. */
|
||||
#define CHIP_L1I_ASSOC() 1
|
||||
|
||||
/** Stride with which flush instructions must be issued. */
|
||||
#define CHIP_FLUSH_STRIDE() CHIP_L2_LINE_SIZE()
|
||||
|
||||
/** Stride with which inv instructions must be issued. */
|
||||
#define CHIP_INV_STRIDE() CHIP_L2_LINE_SIZE()
|
||||
|
||||
/** Stride with which finv instructions must be issued. */
|
||||
#define CHIP_FINV_STRIDE() CHIP_L2_LINE_SIZE()
|
||||
|
||||
/** Can the local cache coherently cache data that is homed elsewhere? */
|
||||
#define CHIP_HAS_COHERENT_LOCAL_CACHE() 1
|
||||
|
||||
/** How many simultaneous outstanding victims can the L2 cache have? */
|
||||
#define CHIP_MAX_OUTSTANDING_VICTIMS() 4
|
||||
|
||||
/** Does the TLB support the NC and NOALLOC bits? */
|
||||
#define CHIP_HAS_NC_AND_NOALLOC_BITS() 1
|
||||
|
||||
/** Does the chip support hash-for-home caching? */
|
||||
#define CHIP_HAS_CBOX_HOME_MAP() 1
|
||||
|
||||
/** Number of entries in the chip's home map tables. */
|
||||
#define CHIP_CBOX_HOME_MAP_SIZE() 64
|
||||
|
||||
/** Do uncacheable requests miss in the cache regardless of whether
|
||||
* there is matching data? */
|
||||
#define CHIP_HAS_ENFORCED_UNCACHEABLE_REQUESTS() 1
|
||||
|
||||
/** Does the mf instruction wait for victims? */
|
||||
#define CHIP_HAS_MF_WAITS_FOR_VICTIMS() 0
|
||||
|
||||
/** Does the chip have an "inv" instruction that doesn't also flush? */
|
||||
#define CHIP_HAS_INV() 1
|
||||
|
||||
/** Does the chip have a "wh64" instruction? */
|
||||
#define CHIP_HAS_WH64() 1
|
||||
|
||||
/** Does this chip have a 'dword_align' instruction? */
|
||||
#define CHIP_HAS_DWORD_ALIGN() 1
|
||||
|
||||
/** Number of performance counters. */
|
||||
#define CHIP_PERFORMANCE_COUNTERS() 4
|
||||
|
||||
/** Does this chip have auxiliary performance counters? */
|
||||
#define CHIP_HAS_AUX_PERF_COUNTERS() 1
|
||||
|
||||
/** Is the CBOX_MSR1 SPR supported? */
|
||||
#define CHIP_HAS_CBOX_MSR1() 1
|
||||
|
||||
/** Is the TILE_RTF_HWM SPR supported? */
|
||||
#define CHIP_HAS_TILE_RTF_HWM() 1
|
||||
|
||||
/** Is the TILE_WRITE_PENDING SPR supported? */
|
||||
#define CHIP_HAS_TILE_WRITE_PENDING() 1
|
||||
|
||||
/** Is the PROC_STATUS SPR supported? */
|
||||
#define CHIP_HAS_PROC_STATUS_SPR() 1
|
||||
|
||||
/** Log of the number of mshims we have. */
|
||||
#define CHIP_LOG_NUM_MSHIMS() 2
|
||||
|
||||
/** Are the bases of the interrupt vector areas fixed? */
|
||||
#define CHIP_HAS_FIXED_INTVEC_BASE() 1
|
||||
|
||||
/** Are the interrupt masks split up into 2 SPRs? */
|
||||
#define CHIP_HAS_SPLIT_INTR_MASK() 1
|
||||
|
||||
/** Is the cycle count split up into 2 SPRs? */
|
||||
#define CHIP_HAS_SPLIT_CYCLE() 1
|
||||
|
||||
/** Does the chip have a static network? */
|
||||
#define CHIP_HAS_SN() 1
|
||||
|
||||
/** Does the chip have a static network processor? */
|
||||
#define CHIP_HAS_SN_PROC() 0
|
||||
|
||||
/** Size of the L1 static network processor instruction cache, in bytes. */
|
||||
/* #define CHIP_L1SNI_CACHE_SIZE() -- does not apply to chip 1 */
|
||||
|
||||
/** Does the chip have DMA support in each tile? */
|
||||
#define CHIP_HAS_TILE_DMA() 1
|
||||
|
||||
/** Does the chip have the second revision of the directly accessible
|
||||
* dynamic networks? This encapsulates a number of characteristics,
|
||||
* including the absence of the catch-all, the absence of inline message
|
||||
* tags, the absence of support for network context-switching, and so on.
|
||||
*/
|
||||
#define CHIP_HAS_REV1_XDN() 0
|
||||
|
||||
/** Does the chip have cmpexch and similar (fetchadd, exch, etc.)? */
|
||||
#define CHIP_HAS_CMPEXCH() 0
|
||||
|
||||
/** Does the chip have memory-mapped I/O support? */
|
||||
#define CHIP_HAS_MMIO() 0
|
||||
|
||||
/** Does the chip have post-completion interrupts? */
|
||||
#define CHIP_HAS_POST_COMPLETION_INTERRUPTS() 0
|
||||
|
||||
/** Does the chip have native single step support? */
|
||||
#define CHIP_HAS_SINGLE_STEP() 0
|
||||
|
||||
#ifndef __OPEN_SOURCE__ /* features only relevant to hypervisor-level code */
|
||||
|
||||
/** How many entries are present in the instruction TLB? */
|
||||
#define CHIP_ITLB_ENTRIES() 16
|
||||
|
||||
/** How many entries are present in the data TLB? */
|
||||
#define CHIP_DTLB_ENTRIES() 16
|
||||
|
||||
/** How many MAF entries does the XAUI shim have? */
|
||||
#define CHIP_XAUI_MAF_ENTRIES() 32
|
||||
|
||||
/** Does the memory shim have a source-id table? */
|
||||
#define CHIP_HAS_MSHIM_SRCID_TABLE() 0
|
||||
|
||||
/** Does the L1 instruction cache clear on reset? */
|
||||
#define CHIP_HAS_L1I_CLEAR_ON_RESET() 1
|
||||
|
||||
/** Does the chip come out of reset with valid coordinates on all tiles?
|
||||
* Note that if defined, this also implies that the upper left is 1,1.
|
||||
*/
|
||||
#define CHIP_HAS_VALID_TILE_COORD_RESET() 1
|
||||
|
||||
/** Does the chip have unified packet formats? */
|
||||
#define CHIP_HAS_UNIFIED_PACKET_FORMATS() 1
|
||||
|
||||
/** Does the chip support write reordering? */
|
||||
#define CHIP_HAS_WRITE_REORDERING() 1
|
||||
|
||||
/** Does the chip support Y-X routing as well as X-Y? */
|
||||
#define CHIP_HAS_Y_X_ROUTING() 1
|
||||
|
||||
/** Is INTCTRL_3 managed with the correct MPL? */
|
||||
#define CHIP_HAS_INTCTRL_3_STATUS_FIX() 1
|
||||
|
||||
/** Is it possible to configure the chip to be big-endian? */
|
||||
#define CHIP_HAS_BIG_ENDIAN_CONFIG() 1
|
||||
|
||||
/** Is the CACHE_RED_WAY_OVERRIDDEN SPR supported? */
|
||||
#define CHIP_HAS_CACHE_RED_WAY_OVERRIDDEN() 1
|
||||
|
||||
/** Is the DIAG_TRACE_WAY SPR supported? */
|
||||
#define CHIP_HAS_DIAG_TRACE_WAY() 1
|
||||
|
||||
/** Is the MEM_STRIPE_CONFIG SPR supported? */
|
||||
#define CHIP_HAS_MEM_STRIPE_CONFIG() 1
|
||||
|
||||
/** Are the TLB_PERF SPRs supported? */
|
||||
#define CHIP_HAS_TLB_PERF() 1
|
||||
|
||||
/** Is the VDN_SNOOP_SHIM_CTL SPR supported? */
|
||||
#define CHIP_HAS_VDN_SNOOP_SHIM_CTL() 1
|
||||
|
||||
/** Does the chip support rev1 DMA packets? */
|
||||
#define CHIP_HAS_REV1_DMA_PACKETS() 1
|
||||
|
||||
/** Does the chip have an IPI shim? */
|
||||
#define CHIP_HAS_IPI() 0
|
||||
|
||||
#endif /* !__OPEN_SOURCE__ */
|
||||
#endif /* __ARCH_CHIP_H__ */
|
|
@ -0,0 +1,94 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
*
|
||||
* Support for invalidating bytes in the instruction
|
||||
*/
|
||||
|
||||
#ifndef __ARCH_ICACHE_H__
|
||||
#define __ARCH_ICACHE_H__
|
||||
|
||||
#include <arch/chip.h>
|
||||
|
||||
|
||||
/**
|
||||
* Invalidate the instruction cache for the given range of memory.
|
||||
*
|
||||
* @param addr The start of memory to be invalidated.
|
||||
* @param size The number of bytes to be invalidated.
|
||||
* @param page_size The system's page size, typically the PAGE_SIZE constant
|
||||
* in sys/page.h. This value must be a power of two no larger
|
||||
* than the page containing the code to be invalidated. If the value
|
||||
* is smaller than the actual page size, this function will still
|
||||
* work, but may run slower than necessary.
|
||||
*/
|
||||
static __inline void
|
||||
invalidate_icache(const void* addr, unsigned long size,
|
||||
unsigned long page_size)
|
||||
{
|
||||
const unsigned long cache_way_size =
|
||||
CHIP_L1I_CACHE_SIZE() / CHIP_L1I_ASSOC();
|
||||
unsigned long max_useful_size;
|
||||
const char* start, *end;
|
||||
long num_passes;
|
||||
|
||||
if (__builtin_expect(size == 0, 0))
|
||||
return;
|
||||
|
||||
#ifdef __tilegx__
|
||||
/* Limit the number of bytes visited to avoid redundant iterations. */
|
||||
max_useful_size = (page_size < cache_way_size) ? page_size : cache_way_size;
|
||||
|
||||
/* No PA aliasing is possible, so one pass always suffices. */
|
||||
num_passes = 1;
|
||||
#else
|
||||
/* Limit the number of bytes visited to avoid redundant iterations. */
|
||||
max_useful_size = cache_way_size;
|
||||
|
||||
/*
|
||||
* Compute how many passes we need (we'll treat 0 as if it were 1).
|
||||
* This works because we know the page size is a power of two.
|
||||
*/
|
||||
num_passes = cache_way_size >> __builtin_ctzl(page_size);
|
||||
#endif
|
||||
|
||||
if (__builtin_expect(size > max_useful_size, 0))
|
||||
size = max_useful_size;
|
||||
|
||||
/* Locate the first and last bytes to be invalidated. */
|
||||
start = (const char *)((unsigned long)addr & -CHIP_L1I_LINE_SIZE());
|
||||
end = (const char*)addr + size - 1;
|
||||
|
||||
__insn_mf();
|
||||
|
||||
do
|
||||
{
|
||||
const char* p;
|
||||
|
||||
for (p = start; p <= end; p += CHIP_L1I_LINE_SIZE())
|
||||
__insn_icoh(p);
|
||||
|
||||
start += page_size;
|
||||
end += page_size;
|
||||
}
|
||||
while (--num_passes > 0);
|
||||
|
||||
__insn_drain();
|
||||
}
|
||||
|
||||
|
||||
#endif /* __ARCH_ICACHE_H__ */
|
|
@ -0,0 +1,19 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifdef __tilegx__
|
||||
#include <arch/interrupts_64.h>
|
||||
#else
|
||||
#include <arch/interrupts_32.h>
|
||||
#endif
|
|
@ -0,0 +1,304 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef __ARCH_INTERRUPTS_H__
|
||||
#define __ARCH_INTERRUPTS_H__
|
||||
|
||||
/** Mask for an interrupt. */
|
||||
#ifdef __ASSEMBLER__
|
||||
/* Note: must handle breaking interrupts into high and low words manually. */
|
||||
#define INT_MASK(intno) (1 << (intno))
|
||||
#else
|
||||
#define INT_MASK(intno) (1ULL << (intno))
|
||||
#endif
|
||||
|
||||
|
||||
/** Where a given interrupt executes */
|
||||
#define INTERRUPT_VECTOR(i, pl) (0xFC000000 + ((pl) << 24) + ((i) << 8))
|
||||
|
||||
/** Where to store a vector for a given interrupt. */
|
||||
#define USER_INTERRUPT_VECTOR(i) INTERRUPT_VECTOR(i, 0)
|
||||
|
||||
/** The base address of user-level interrupts. */
|
||||
#define USER_INTERRUPT_VECTOR_BASE INTERRUPT_VECTOR(0, 0)
|
||||
|
||||
|
||||
/** Additional synthetic interrupt. */
|
||||
#define INT_BREAKPOINT (63)
|
||||
|
||||
#define INT_ITLB_MISS 0
|
||||
#define INT_MEM_ERROR 1
|
||||
#define INT_ILL 2
|
||||
#define INT_GPV 3
|
||||
#define INT_SN_ACCESS 4
|
||||
#define INT_IDN_ACCESS 5
|
||||
#define INT_UDN_ACCESS 6
|
||||
#define INT_IDN_REFILL 7
|
||||
#define INT_UDN_REFILL 8
|
||||
#define INT_IDN_COMPLETE 9
|
||||
#define INT_UDN_COMPLETE 10
|
||||
#define INT_SWINT_3 11
|
||||
#define INT_SWINT_2 12
|
||||
#define INT_SWINT_1 13
|
||||
#define INT_SWINT_0 14
|
||||
#define INT_UNALIGN_DATA 15
|
||||
#define INT_DTLB_MISS 16
|
||||
#define INT_DTLB_ACCESS 17
|
||||
#define INT_DMATLB_MISS 18
|
||||
#define INT_DMATLB_ACCESS 19
|
||||
#define INT_SNITLB_MISS 20
|
||||
#define INT_SN_NOTIFY 21
|
||||
#define INT_SN_FIREWALL 22
|
||||
#define INT_IDN_FIREWALL 23
|
||||
#define INT_UDN_FIREWALL 24
|
||||
#define INT_TILE_TIMER 25
|
||||
#define INT_IDN_TIMER 26
|
||||
#define INT_UDN_TIMER 27
|
||||
#define INT_DMA_NOTIFY 28
|
||||
#define INT_IDN_CA 29
|
||||
#define INT_UDN_CA 30
|
||||
#define INT_IDN_AVAIL 31
|
||||
#define INT_UDN_AVAIL 32
|
||||
#define INT_PERF_COUNT 33
|
||||
#define INT_INTCTRL_3 34
|
||||
#define INT_INTCTRL_2 35
|
||||
#define INT_INTCTRL_1 36
|
||||
#define INT_INTCTRL_0 37
|
||||
#define INT_BOOT_ACCESS 38
|
||||
#define INT_WORLD_ACCESS 39
|
||||
#define INT_I_ASID 40
|
||||
#define INT_D_ASID 41
|
||||
#define INT_DMA_ASID 42
|
||||
#define INT_SNI_ASID 43
|
||||
#define INT_DMA_CPL 44
|
||||
#define INT_SN_CPL 45
|
||||
#define INT_DOUBLE_FAULT 46
|
||||
#define INT_SN_STATIC_ACCESS 47
|
||||
#define INT_AUX_PERF_COUNT 48
|
||||
|
||||
#define NUM_INTERRUPTS 49
|
||||
|
||||
#define QUEUED_INTERRUPTS ( \
|
||||
INT_MASK(INT_MEM_ERROR) | \
|
||||
INT_MASK(INT_DMATLB_MISS) | \
|
||||
INT_MASK(INT_DMATLB_ACCESS) | \
|
||||
INT_MASK(INT_SNITLB_MISS) | \
|
||||
INT_MASK(INT_SN_NOTIFY) | \
|
||||
INT_MASK(INT_SN_FIREWALL) | \
|
||||
INT_MASK(INT_IDN_FIREWALL) | \
|
||||
INT_MASK(INT_UDN_FIREWALL) | \
|
||||
INT_MASK(INT_TILE_TIMER) | \
|
||||
INT_MASK(INT_IDN_TIMER) | \
|
||||
INT_MASK(INT_UDN_TIMER) | \
|
||||
INT_MASK(INT_DMA_NOTIFY) | \
|
||||
INT_MASK(INT_IDN_CA) | \
|
||||
INT_MASK(INT_UDN_CA) | \
|
||||
INT_MASK(INT_IDN_AVAIL) | \
|
||||
INT_MASK(INT_UDN_AVAIL) | \
|
||||
INT_MASK(INT_PERF_COUNT) | \
|
||||
INT_MASK(INT_INTCTRL_3) | \
|
||||
INT_MASK(INT_INTCTRL_2) | \
|
||||
INT_MASK(INT_INTCTRL_1) | \
|
||||
INT_MASK(INT_INTCTRL_0) | \
|
||||
INT_MASK(INT_BOOT_ACCESS) | \
|
||||
INT_MASK(INT_WORLD_ACCESS) | \
|
||||
INT_MASK(INT_I_ASID) | \
|
||||
INT_MASK(INT_D_ASID) | \
|
||||
INT_MASK(INT_DMA_ASID) | \
|
||||
INT_MASK(INT_SNI_ASID) | \
|
||||
INT_MASK(INT_DMA_CPL) | \
|
||||
INT_MASK(INT_SN_CPL) | \
|
||||
INT_MASK(INT_DOUBLE_FAULT) | \
|
||||
INT_MASK(INT_AUX_PERF_COUNT) | \
|
||||
0)
|
||||
#define NONQUEUED_INTERRUPTS ( \
|
||||
INT_MASK(INT_ITLB_MISS) | \
|
||||
INT_MASK(INT_ILL) | \
|
||||
INT_MASK(INT_GPV) | \
|
||||
INT_MASK(INT_SN_ACCESS) | \
|
||||
INT_MASK(INT_IDN_ACCESS) | \
|
||||
INT_MASK(INT_UDN_ACCESS) | \
|
||||
INT_MASK(INT_IDN_REFILL) | \
|
||||
INT_MASK(INT_UDN_REFILL) | \
|
||||
INT_MASK(INT_IDN_COMPLETE) | \
|
||||
INT_MASK(INT_UDN_COMPLETE) | \
|
||||
INT_MASK(INT_SWINT_3) | \
|
||||
INT_MASK(INT_SWINT_2) | \
|
||||
INT_MASK(INT_SWINT_1) | \
|
||||
INT_MASK(INT_SWINT_0) | \
|
||||
INT_MASK(INT_UNALIGN_DATA) | \
|
||||
INT_MASK(INT_DTLB_MISS) | \
|
||||
INT_MASK(INT_DTLB_ACCESS) | \
|
||||
INT_MASK(INT_SN_STATIC_ACCESS) | \
|
||||
0)
|
||||
#define CRITICAL_MASKED_INTERRUPTS ( \
|
||||
INT_MASK(INT_MEM_ERROR) | \
|
||||
INT_MASK(INT_DMATLB_MISS) | \
|
||||
INT_MASK(INT_DMATLB_ACCESS) | \
|
||||
INT_MASK(INT_SNITLB_MISS) | \
|
||||
INT_MASK(INT_SN_NOTIFY) | \
|
||||
INT_MASK(INT_SN_FIREWALL) | \
|
||||
INT_MASK(INT_IDN_FIREWALL) | \
|
||||
INT_MASK(INT_UDN_FIREWALL) | \
|
||||
INT_MASK(INT_TILE_TIMER) | \
|
||||
INT_MASK(INT_IDN_TIMER) | \
|
||||
INT_MASK(INT_UDN_TIMER) | \
|
||||
INT_MASK(INT_DMA_NOTIFY) | \
|
||||
INT_MASK(INT_IDN_CA) | \
|
||||
INT_MASK(INT_UDN_CA) | \
|
||||
INT_MASK(INT_IDN_AVAIL) | \
|
||||
INT_MASK(INT_UDN_AVAIL) | \
|
||||
INT_MASK(INT_PERF_COUNT) | \
|
||||
INT_MASK(INT_INTCTRL_3) | \
|
||||
INT_MASK(INT_INTCTRL_2) | \
|
||||
INT_MASK(INT_INTCTRL_1) | \
|
||||
INT_MASK(INT_INTCTRL_0) | \
|
||||
INT_MASK(INT_AUX_PERF_COUNT) | \
|
||||
0)
|
||||
#define CRITICAL_UNMASKED_INTERRUPTS ( \
|
||||
INT_MASK(INT_ITLB_MISS) | \
|
||||
INT_MASK(INT_ILL) | \
|
||||
INT_MASK(INT_GPV) | \
|
||||
INT_MASK(INT_SN_ACCESS) | \
|
||||
INT_MASK(INT_IDN_ACCESS) | \
|
||||
INT_MASK(INT_UDN_ACCESS) | \
|
||||
INT_MASK(INT_IDN_REFILL) | \
|
||||
INT_MASK(INT_UDN_REFILL) | \
|
||||
INT_MASK(INT_IDN_COMPLETE) | \
|
||||
INT_MASK(INT_UDN_COMPLETE) | \
|
||||
INT_MASK(INT_SWINT_3) | \
|
||||
INT_MASK(INT_SWINT_2) | \
|
||||
INT_MASK(INT_SWINT_1) | \
|
||||
INT_MASK(INT_SWINT_0) | \
|
||||
INT_MASK(INT_UNALIGN_DATA) | \
|
||||
INT_MASK(INT_DTLB_MISS) | \
|
||||
INT_MASK(INT_DTLB_ACCESS) | \
|
||||
INT_MASK(INT_BOOT_ACCESS) | \
|
||||
INT_MASK(INT_WORLD_ACCESS) | \
|
||||
INT_MASK(INT_I_ASID) | \
|
||||
INT_MASK(INT_D_ASID) | \
|
||||
INT_MASK(INT_DMA_ASID) | \
|
||||
INT_MASK(INT_SNI_ASID) | \
|
||||
INT_MASK(INT_DMA_CPL) | \
|
||||
INT_MASK(INT_SN_CPL) | \
|
||||
INT_MASK(INT_DOUBLE_FAULT) | \
|
||||
INT_MASK(INT_SN_STATIC_ACCESS) | \
|
||||
0)
|
||||
#define MASKABLE_INTERRUPTS ( \
|
||||
INT_MASK(INT_MEM_ERROR) | \
|
||||
INT_MASK(INT_IDN_REFILL) | \
|
||||
INT_MASK(INT_UDN_REFILL) | \
|
||||
INT_MASK(INT_IDN_COMPLETE) | \
|
||||
INT_MASK(INT_UDN_COMPLETE) | \
|
||||
INT_MASK(INT_DMATLB_MISS) | \
|
||||
INT_MASK(INT_DMATLB_ACCESS) | \
|
||||
INT_MASK(INT_SNITLB_MISS) | \
|
||||
INT_MASK(INT_SN_NOTIFY) | \
|
||||
INT_MASK(INT_SN_FIREWALL) | \
|
||||
INT_MASK(INT_IDN_FIREWALL) | \
|
||||
INT_MASK(INT_UDN_FIREWALL) | \
|
||||
INT_MASK(INT_TILE_TIMER) | \
|
||||
INT_MASK(INT_IDN_TIMER) | \
|
||||
INT_MASK(INT_UDN_TIMER) | \
|
||||
INT_MASK(INT_DMA_NOTIFY) | \
|
||||
INT_MASK(INT_IDN_CA) | \
|
||||
INT_MASK(INT_UDN_CA) | \
|
||||
INT_MASK(INT_IDN_AVAIL) | \
|
||||
INT_MASK(INT_UDN_AVAIL) | \
|
||||
INT_MASK(INT_PERF_COUNT) | \
|
||||
INT_MASK(INT_INTCTRL_3) | \
|
||||
INT_MASK(INT_INTCTRL_2) | \
|
||||
INT_MASK(INT_INTCTRL_1) | \
|
||||
INT_MASK(INT_INTCTRL_0) | \
|
||||
INT_MASK(INT_AUX_PERF_COUNT) | \
|
||||
0)
|
||||
#define UNMASKABLE_INTERRUPTS ( \
|
||||
INT_MASK(INT_ITLB_MISS) | \
|
||||
INT_MASK(INT_ILL) | \
|
||||
INT_MASK(INT_GPV) | \
|
||||
INT_MASK(INT_SN_ACCESS) | \
|
||||
INT_MASK(INT_IDN_ACCESS) | \
|
||||
INT_MASK(INT_UDN_ACCESS) | \
|
||||
INT_MASK(INT_SWINT_3) | \
|
||||
INT_MASK(INT_SWINT_2) | \
|
||||
INT_MASK(INT_SWINT_1) | \
|
||||
INT_MASK(INT_SWINT_0) | \
|
||||
INT_MASK(INT_UNALIGN_DATA) | \
|
||||
INT_MASK(INT_DTLB_MISS) | \
|
||||
INT_MASK(INT_DTLB_ACCESS) | \
|
||||
INT_MASK(INT_BOOT_ACCESS) | \
|
||||
INT_MASK(INT_WORLD_ACCESS) | \
|
||||
INT_MASK(INT_I_ASID) | \
|
||||
INT_MASK(INT_D_ASID) | \
|
||||
INT_MASK(INT_DMA_ASID) | \
|
||||
INT_MASK(INT_SNI_ASID) | \
|
||||
INT_MASK(INT_DMA_CPL) | \
|
||||
INT_MASK(INT_SN_CPL) | \
|
||||
INT_MASK(INT_DOUBLE_FAULT) | \
|
||||
INT_MASK(INT_SN_STATIC_ACCESS) | \
|
||||
0)
|
||||
#define SYNC_INTERRUPTS ( \
|
||||
INT_MASK(INT_ITLB_MISS) | \
|
||||
INT_MASK(INT_ILL) | \
|
||||
INT_MASK(INT_GPV) | \
|
||||
INT_MASK(INT_SN_ACCESS) | \
|
||||
INT_MASK(INT_IDN_ACCESS) | \
|
||||
INT_MASK(INT_UDN_ACCESS) | \
|
||||
INT_MASK(INT_IDN_REFILL) | \
|
||||
INT_MASK(INT_UDN_REFILL) | \
|
||||
INT_MASK(INT_IDN_COMPLETE) | \
|
||||
INT_MASK(INT_UDN_COMPLETE) | \
|
||||
INT_MASK(INT_SWINT_3) | \
|
||||
INT_MASK(INT_SWINT_2) | \
|
||||
INT_MASK(INT_SWINT_1) | \
|
||||
INT_MASK(INT_SWINT_0) | \
|
||||
INT_MASK(INT_UNALIGN_DATA) | \
|
||||
INT_MASK(INT_DTLB_MISS) | \
|
||||
INT_MASK(INT_DTLB_ACCESS) | \
|
||||
INT_MASK(INT_SN_STATIC_ACCESS) | \
|
||||
0)
|
||||
#define NON_SYNC_INTERRUPTS ( \
|
||||
INT_MASK(INT_MEM_ERROR) | \
|
||||
INT_MASK(INT_DMATLB_MISS) | \
|
||||
INT_MASK(INT_DMATLB_ACCESS) | \
|
||||
INT_MASK(INT_SNITLB_MISS) | \
|
||||
INT_MASK(INT_SN_NOTIFY) | \
|
||||
INT_MASK(INT_SN_FIREWALL) | \
|
||||
INT_MASK(INT_IDN_FIREWALL) | \
|
||||
INT_MASK(INT_UDN_FIREWALL) | \
|
||||
INT_MASK(INT_TILE_TIMER) | \
|
||||
INT_MASK(INT_IDN_TIMER) | \
|
||||
INT_MASK(INT_UDN_TIMER) | \
|
||||
INT_MASK(INT_DMA_NOTIFY) | \
|
||||
INT_MASK(INT_IDN_CA) | \
|
||||
INT_MASK(INT_UDN_CA) | \
|
||||
INT_MASK(INT_IDN_AVAIL) | \
|
||||
INT_MASK(INT_UDN_AVAIL) | \
|
||||
INT_MASK(INT_PERF_COUNT) | \
|
||||
INT_MASK(INT_INTCTRL_3) | \
|
||||
INT_MASK(INT_INTCTRL_2) | \
|
||||
INT_MASK(INT_INTCTRL_1) | \
|
||||
INT_MASK(INT_INTCTRL_0) | \
|
||||
INT_MASK(INT_BOOT_ACCESS) | \
|
||||
INT_MASK(INT_WORLD_ACCESS) | \
|
||||
INT_MASK(INT_I_ASID) | \
|
||||
INT_MASK(INT_D_ASID) | \
|
||||
INT_MASK(INT_DMA_ASID) | \
|
||||
INT_MASK(INT_SNI_ASID) | \
|
||||
INT_MASK(INT_DMA_CPL) | \
|
||||
INT_MASK(INT_SN_CPL) | \
|
||||
INT_MASK(INT_DOUBLE_FAULT) | \
|
||||
INT_MASK(INT_AUX_PERF_COUNT) | \
|
||||
0)
|
||||
#endif /* !__ARCH_INTERRUPTS_H__ */
|
|
@ -0,0 +1,512 @@
|
|||
// Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or
|
||||
// modify it under the terms of the GNU General Public License
|
||||
// as published by the Free Software Foundation, version 2.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but
|
||||
// WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
// NON INFRINGEMENT. See the GNU General Public License for
|
||||
// more details.
|
||||
|
||||
//! @file
|
||||
//!
|
||||
//! Some low-level simulator definitions.
|
||||
//!
|
||||
|
||||
#ifndef __ARCH_SIM_DEF_H__
|
||||
#define __ARCH_SIM_DEF_H__
|
||||
|
||||
|
||||
//! Internal: the low bits of the SIM_CONTROL_* SPR values specify
|
||||
//! the operation to perform, and the remaining bits are
|
||||
//! an operation-specific parameter (often unused).
|
||||
//!
|
||||
#define _SIM_CONTROL_OPERATOR_BITS 8
|
||||
|
||||
|
||||
//== Values which can be written to SPR_SIM_CONTROL.
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, stops profiling.
|
||||
//!
|
||||
#define SIM_CONTROL_PROFILER_DISABLE 0
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, starts profiling.
|
||||
//!
|
||||
#define SIM_CONTROL_PROFILER_ENABLE 1
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, clears profiling counters.
|
||||
//!
|
||||
#define SIM_CONTROL_PROFILER_CLEAR 2
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, checkpoints the simulator.
|
||||
//!
|
||||
#define SIM_CONTROL_CHECKPOINT 3
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8),
|
||||
//! sets the tracing mask to the given mask. See "sim_set_tracing()".
|
||||
//!
|
||||
#define SIM_CONTROL_SET_TRACING 4
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8),
|
||||
//! dumps the requested items of machine state to the log.
|
||||
//!
|
||||
#define SIM_CONTROL_DUMP 5
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, clears chip-level profiling counters.
|
||||
//!
|
||||
#define SIM_CONTROL_PROFILER_CHIP_CLEAR 6
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, disables chip-level profiling.
|
||||
//!
|
||||
#define SIM_CONTROL_PROFILER_CHIP_DISABLE 7
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, enables chip-level profiling.
|
||||
//!
|
||||
#define SIM_CONTROL_PROFILER_CHIP_ENABLE 8
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, enables chip-level functional mode.
|
||||
//!
|
||||
#define SIM_CONTROL_ENABLE_FUNCTIONAL 9
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, disables chip-level functional mode.
|
||||
//!
|
||||
#define SIM_CONTROL_DISABLE_FUNCTIONAL 10
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, enables chip-level functional mode.
|
||||
//! All tiles must perform this write for functional mode to be enabled.
|
||||
//! Ignored in naked boot mode unless --functional is specified.
|
||||
//! WARNING: Only the hypervisor startup code should use this!
|
||||
//!
|
||||
#define SIM_CONTROL_ENABLE_FUNCTIONAL_BARRIER 11
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
|
||||
//! writes a string directly to the simulator output. Written to once for
|
||||
//! each character in the string, plus a final NUL. Instead of NUL,
|
||||
//! you can also use "SIM_PUTC_FLUSH_STRING" or "SIM_PUTC_FLUSH_BINARY".
|
||||
//!
|
||||
// ISSUE: Document the meaning of "newline", and the handling of NUL.
|
||||
//
|
||||
#define SIM_CONTROL_PUTC 12
|
||||
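// Illustrative sketch (editor's example, not part of this header; the
// helper name is hypothetical): assuming <arch/spr_def.h> is included for
// SPR_SIM_CONTROL and the tile compiler's __insn_mtspr() intrinsic is
// available, a NUL-terminated string can be streamed to the simulator one
// character at a time, ending with the NUL itself.
static inline void sim_write_string(const char *s)
{
	do {
		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
			     ((unsigned int)*s << _SIM_CONTROL_OPERATOR_BITS));
	} while (*s++ != '\0');
}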
|
||||
//! If written to SPR_SIM_CONTROL, clears the --grind-coherence state for
|
||||
//! this core. This is intended to be used before a loop that will
|
||||
//! invalidate the cache by loading new data and evicting all current data.
|
||||
//! Generally speaking, this API should only be used by system code.
|
||||
//!
|
||||
#define SIM_CONTROL_GRINDER_CLEAR 13
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, shuts down the simulator.
|
||||
//!
|
||||
#define SIM_CONTROL_SHUTDOWN 14
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
|
||||
//! indicates that a fork syscall just created the given process.
|
||||
//!
|
||||
#define SIM_CONTROL_OS_FORK 15
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
|
||||
//! indicates that an exit syscall was just executed by the given process.
|
||||
//!
|
||||
#define SIM_CONTROL_OS_EXIT 16
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
|
||||
//! indicates that the OS just switched to the given process.
|
||||
//!
|
||||
#define SIM_CONTROL_OS_SWITCH 17
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
|
||||
//! indicates that an exec syscall was just executed. Written to once for
|
||||
//! each character in the executable name, plus a final NUL.
|
||||
//!
|
||||
#define SIM_CONTROL_OS_EXEC 18
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
|
||||
//! indicates that an interpreter (PT_INTERP) was loaded. Written to once
|
||||
//! for each character in "ADDR:PATH", plus a final NUL, where "ADDR" is a
|
||||
//! hex load address starting with "0x", and "PATH" is the executable name.
|
||||
//!
|
||||
#define SIM_CONTROL_OS_INTERP 19
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
|
||||
//! indicates that a dll was loaded. Written to once for each character
|
||||
//! in "ADDR:PATH", plus a final NUL, where "ADDR" is a hexadecimal load
|
||||
//! address starting with "0x", and "PATH" is the executable name.
|
||||
//!
|
||||
#define SIM_CONTROL_DLOPEN 20
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
|
||||
//! indicates that a dll was unloaded. Written to once for each character
|
||||
//! in "ADDR", plus a final NUL, where "ADDR" is a hexadecimal load
|
||||
//! address starting with "0x".
|
||||
//!
|
||||
#define SIM_CONTROL_DLCLOSE 21
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, combined with a flag (shifted by 8),
|
||||
//! indicates whether to allow data reads to remotely-cached
|
||||
//! dirty cache lines to be cached locally without grinder warnings or
|
||||
//! assertions (used by Linux kernel fast memcpy).
|
||||
//!
|
||||
#define SIM_CONTROL_ALLOW_MULTIPLE_CACHING 22
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, enables memory tracing.
|
||||
//!
|
||||
#define SIM_CONTROL_ENABLE_MEM_LOGGING 23
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, disables memory tracing.
|
||||
//!
|
||||
#define SIM_CONTROL_DISABLE_MEM_LOGGING 24
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, changes the shaping parameters of one of
|
||||
//! the gbe or xgbe shims. Must specify the shim id, the type, the units, and
|
||||
//! the rate, as defined in SIM_SHAPING_SPR_ARG.
|
||||
//!
|
||||
#define SIM_CONTROL_SHAPING 25
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
|
||||
//! requests that a simulator command be executed. Written to once for each
|
||||
//! character in the command, plus a final NUL.
|
||||
//!
|
||||
#define SIM_CONTROL_COMMAND 26
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, indicates that the simulated system
|
||||
//! is panicking, to allow debugging via --debug-on-panic.
|
||||
//!
|
||||
#define SIM_CONTROL_PANIC 27
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, triggers a simulator syscall.
|
||||
//! See "sim_syscall()" for more info.
|
||||
//!
|
||||
#define SIM_CONTROL_SYSCALL 32
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
|
||||
//! provides the pid that subsequent SIM_CONTROL_OS_FORK writes should
|
||||
//! use as the pid, rather than the default previous SIM_CONTROL_OS_SWITCH.
|
||||
//!
|
||||
#define SIM_CONTROL_OS_FORK_PARENT 33
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
|
||||
//! (shifted by 8), clears the pending magic data section. The cleared
|
||||
//! pending magic data section and any subsequently appended magic bytes
|
||||
//! will only take effect when the classifier blast programmer is run.
|
||||
#define SIM_CONTROL_CLEAR_MPIPE_MAGIC_BYTES 34
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
|
||||
//! (shifted by 8) and a byte of data (shifted by 16), appends that byte
|
||||
//! to the shim's pending magic data section. The pending magic data
|
||||
//! section takes effect when the classifier blast programmer is run.
|
||||
#define SIM_CONTROL_APPEND_MPIPE_MAGIC_BYTE 35
|
||||
|
||||
//! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
|
||||
//! (shifted by 8), an enable=1/disable=0 bit (shifted by 16), and a
|
||||
//! mask of links (shifted by 32), enable or disable the corresponding
|
||||
//! mPIPE links.
|
||||
#define SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE 36
|
||||
|
||||
//== Syscall numbers for use with "sim_syscall()".
|
||||
|
||||
//! Syscall number for sim_add_watchpoint().
|
||||
//!
|
||||
#define SIM_SYSCALL_ADD_WATCHPOINT 2
|
||||
|
||||
//! Syscall number for sim_remove_watchpoint().
|
||||
//!
|
||||
#define SIM_SYSCALL_REMOVE_WATCHPOINT 3
|
||||
|
||||
//! Syscall number for sim_query_watchpoint().
|
||||
//!
|
||||
#define SIM_SYSCALL_QUERY_WATCHPOINT 4
|
||||
|
||||
//! Syscall number that asserts that the cache lines whose 64-bit PA
|
||||
//! is passed as the second argument to sim_syscall(), and over a
|
||||
//! range passed as the third argument, are no longer in cache.
|
||||
//! The simulator raises an error if this is not the case.
|
||||
//!
|
||||
#define SIM_SYSCALL_VALIDATE_LINES_EVICTED 5
|
||||
|
||||
|
||||
//== Bit masks which can be shifted by 8, combined with
|
||||
//== SIM_CONTROL_SET_TRACING, and written to SPR_SIM_CONTROL.
|
||||
|
||||
//! @addtogroup arch_sim
|
||||
//! @{
|
||||
|
||||
//! Enable --trace-cycle when passed to simulator_set_tracing().
|
||||
//!
|
||||
#define SIM_TRACE_CYCLES 0x01
|
||||
|
||||
//! Enable --trace-router when passed to simulator_set_tracing().
|
||||
//!
|
||||
#define SIM_TRACE_ROUTER 0x02
|
||||
|
||||
//! Enable --trace-register-writes when passed to simulator_set_tracing().
|
||||
//!
|
||||
#define SIM_TRACE_REGISTER_WRITES 0x04
|
||||
|
||||
//! Enable --trace-disasm when passed to simulator_set_tracing().
|
||||
//!
|
||||
#define SIM_TRACE_DISASM 0x08
|
||||
|
||||
//! Enable --trace-stall-info when passed to simulator_set_tracing().
|
||||
//!
|
||||
#define SIM_TRACE_STALL_INFO 0x10
|
||||
|
||||
//! Enable --trace-memory-controller when passed to simulator_set_tracing().
|
||||
//!
|
||||
#define SIM_TRACE_MEMORY_CONTROLLER 0x20
|
||||
|
||||
//! Enable --trace-l2 when passed to simulator_set_tracing().
|
||||
//!
|
||||
#define SIM_TRACE_L2_CACHE 0x40
|
||||
|
||||
//! Enable --trace-lines when passed to simulator_set_tracing().
|
||||
//!
|
||||
#define SIM_TRACE_LINES 0x80
|
||||
|
||||
//! Turn off all tracing when passed to simulator_set_tracing().
|
||||
//!
|
||||
#define SIM_TRACE_NONE 0
|
||||
|
||||
//! Turn on all tracing when passed to simulator_set_tracing().
|
||||
//!
|
||||
#define SIM_TRACE_ALL (-1)
|
||||
|
||||
//! @}
|
||||
|
||||
//! Computes the value to write to SPR_SIM_CONTROL to set tracing flags.
|
||||
//!
|
||||
#define SIM_TRACE_SPR_ARG(mask) \
|
||||
(SIM_CONTROL_SET_TRACING | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
|
||||
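// Illustrative sketch (editor's example, not part of this header; the
// helper name is hypothetical): assuming __insn_mtspr() and SPR_SIM_CONTROL
// from <arch/spr_def.h>, cycle plus disassembly tracing could be enabled
// with a single SPR write built from the masks above.
static inline void sim_trace_cycles_and_disasm(void)
{
	__insn_mtspr(SPR_SIM_CONTROL,
		     SIM_TRACE_SPR_ARG(SIM_TRACE_CYCLES | SIM_TRACE_DISASM));
}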
|
||||
|
||||
//== Bit masks which can be shifted by 8, combined with
|
||||
//== SIM_CONTROL_DUMP, and written to SPR_SIM_CONTROL.
|
||||
|
||||
//! @addtogroup arch_sim
|
||||
//! @{
|
||||
|
||||
//! Dump the general-purpose registers.
|
||||
//!
|
||||
#define SIM_DUMP_REGS 0x001
|
||||
|
||||
//! Dump the SPRs.
|
||||
//!
|
||||
#define SIM_DUMP_SPRS 0x002
|
||||
|
||||
//! Dump the ITLB.
|
||||
//!
|
||||
#define SIM_DUMP_ITLB 0x004
|
||||
|
||||
//! Dump the DTLB.
|
||||
//!
|
||||
#define SIM_DUMP_DTLB 0x008
|
||||
|
||||
//! Dump the L1 I-cache.
|
||||
//!
|
||||
#define SIM_DUMP_L1I 0x010
|
||||
|
||||
//! Dump the L1 D-cache.
|
||||
//!
|
||||
#define SIM_DUMP_L1D 0x020
|
||||
|
||||
//! Dump the L2 cache.
|
||||
//!
|
||||
#define SIM_DUMP_L2 0x040
|
||||
|
||||
//! Dump the switch registers.
|
||||
//!
|
||||
#define SIM_DUMP_SNREGS 0x080
|
||||
|
||||
//! Dump the switch ITLB.
|
||||
//!
|
||||
#define SIM_DUMP_SNITLB 0x100
|
||||
|
||||
//! Dump the switch L1 I-cache.
|
||||
//!
|
||||
#define SIM_DUMP_SNL1I 0x200
|
||||
|
||||
//! Dump the current backtrace.
|
||||
//!
|
||||
#define SIM_DUMP_BACKTRACE 0x400
|
||||
|
||||
//! Only dump valid lines in caches.
|
||||
//!
|
||||
#define SIM_DUMP_VALID_LINES 0x800
|
||||
|
||||
//! Dump everything that is dumpable.
|
||||
//!
|
||||
#define SIM_DUMP_ALL (-1 & ~SIM_DUMP_VALID_LINES)
|
||||
|
||||
//! @}
|
||||
|
||||
//! Computes the value to write to SPR_SIM_CONTROL to dump machine state.
|
||||
//!
|
||||
#define SIM_DUMP_SPR_ARG(mask) \
|
||||
(SIM_CONTROL_DUMP | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
|
||||
|
||||
|
||||
//== Bit masks which can be shifted by 8, combined with
|
||||
//== SIM_CONTROL_PROFILER_CHIP_xxx, and written to SPR_SIM_CONTROL.
|
||||
|
||||
//! @addtogroup arch_sim
|
||||
//! @{
|
||||
|
||||
//! Use with SIM_PROFILER_CHIP_xxx to control the memory controllers.
|
||||
//!
|
||||
#define SIM_CHIP_MEMCTL 0x001
|
||||
|
||||
//! Use with SIM_PROFILER_CHIP_xxx to control the XAUI interface.
|
||||
//!
|
||||
#define SIM_CHIP_XAUI 0x002
|
||||
|
||||
//! Use with SIM_PROFILER_CHIP_xxx to control the PCIe interface.
|
||||
//!
|
||||
#define SIM_CHIP_PCIE 0x004
|
||||
|
||||
//! Use with SIM_PROFILER_CHIP_xxx to control the MPIPE interface.
|
||||
//!
|
||||
#define SIM_CHIP_MPIPE 0x008
|
||||
|
||||
//! Reference all chip devices.
|
||||
//!
|
||||
#define SIM_CHIP_ALL (-1)
|
||||
|
||||
//! @}
|
||||
|
||||
//! Computes the value to write to SPR_SIM_CONTROL to clear chip statistics.
|
||||
//!
|
||||
#define SIM_PROFILER_CHIP_CLEAR_SPR_ARG(mask) \
|
||||
(SIM_CONTROL_PROFILER_CHIP_CLEAR | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
|
||||
|
||||
//! Computes the value to write to SPR_SIM_CONTROL to disable chip statistics.
|
||||
//!
|
||||
#define SIM_PROFILER_CHIP_DISABLE_SPR_ARG(mask) \
|
||||
(SIM_CONTROL_PROFILER_CHIP_DISABLE | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
|
||||
|
||||
//! Computes the value to write to SPR_SIM_CONTROL to enable chip statistics.
|
||||
//!
|
||||
#define SIM_PROFILER_CHIP_ENABLE_SPR_ARG(mask) \
|
||||
(SIM_CONTROL_PROFILER_CHIP_ENABLE | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
|
||||
|
||||
|
||||
|
||||
// Shim bitrate controls.
|
||||
|
||||
//! The number of bits used to store the shim id.
|
||||
//!
|
||||
#define SIM_CONTROL_SHAPING_SHIM_ID_BITS 3
|
||||
|
||||
//! @addtogroup arch_sim
|
||||
//! @{
|
||||
|
||||
//! Change the gbe 0 bitrate.
|
||||
//!
|
||||
#define SIM_CONTROL_SHAPING_GBE_0 0x0
|
||||
|
||||
//! Change the gbe 1 bitrate.
|
||||
//!
|
||||
#define SIM_CONTROL_SHAPING_GBE_1 0x1
|
||||
|
||||
//! Change the gbe 2 bitrate.
|
||||
//!
|
||||
#define SIM_CONTROL_SHAPING_GBE_2 0x2
|
||||
|
||||
//! Change the gbe 3 bitrate.
|
||||
//!
|
||||
#define SIM_CONTROL_SHAPING_GBE_3 0x3
|
||||
|
||||
//! Change the xgbe 0 bitrate.
|
||||
//!
|
||||
#define SIM_CONTROL_SHAPING_XGBE_0 0x4
|
||||
|
||||
//! Change the xgbe 1 bitrate.
|
||||
//!
|
||||
#define SIM_CONTROL_SHAPING_XGBE_1 0x5
|
||||
|
||||
//! The type of shaping to do.
|
||||
//!
|
||||
#define SIM_CONTROL_SHAPING_TYPE_BITS 2
|
||||
|
||||
//! Control the multiplier.
|
||||
//!
|
||||
#define SIM_CONTROL_SHAPING_MULTIPLIER 0
|
||||
|
||||
//! Control the PPS.
|
||||
//!
|
||||
#define SIM_CONTROL_SHAPING_PPS 1
|
||||
|
||||
//! Control the BPS.
|
||||
//!
|
||||
#define SIM_CONTROL_SHAPING_BPS 2
|
||||
|
||||
//! The number of bits for the units for the shaping parameter.
|
||||
//!
|
||||
#define SIM_CONTROL_SHAPING_UNITS_BITS 2
|
||||
|
||||
//! Provide a number in single units.
|
||||
//!
|
||||
#define SIM_CONTROL_SHAPING_UNITS_SINGLE 0
|
||||
|
||||
//! Provide a number in kilo units.
|
||||
//!
|
||||
#define SIM_CONTROL_SHAPING_UNITS_KILO 1
|
||||
|
||||
//! Provide a number in mega units.
|
||||
//!
|
||||
#define SIM_CONTROL_SHAPING_UNITS_MEGA 2
|
||||
|
||||
//! Provide a number in giga units.
|
||||
//!
|
||||
#define SIM_CONTROL_SHAPING_UNITS_GIGA 3
|
||||
|
||||
//! @}
|
||||
|
||||
//! How many bits are available for the rate.
|
||||
//!
|
||||
#define SIM_CONTROL_SHAPING_RATE_BITS \
|
||||
(32 - (_SIM_CONTROL_OPERATOR_BITS + \
|
||||
SIM_CONTROL_SHAPING_SHIM_ID_BITS + \
|
||||
SIM_CONTROL_SHAPING_TYPE_BITS + \
|
||||
SIM_CONTROL_SHAPING_UNITS_BITS))
|
||||
|
||||
//! Computes the value to write to SPR_SIM_CONTROL to change a bitrate.
|
||||
//!
|
||||
#define SIM_SHAPING_SPR_ARG(shim, type, units, rate) \
|
||||
(SIM_CONTROL_SHAPING | \
|
||||
((shim) | \
|
||||
((type) << (SIM_CONTROL_SHAPING_SHIM_ID_BITS)) | \
|
||||
((units) << (SIM_CONTROL_SHAPING_SHIM_ID_BITS + \
|
||||
SIM_CONTROL_SHAPING_TYPE_BITS)) | \
|
||||
((rate) << (SIM_CONTROL_SHAPING_SHIM_ID_BITS + \
|
||||
SIM_CONTROL_SHAPING_TYPE_BITS + \
|
||||
SIM_CONTROL_SHAPING_UNITS_BITS))) << _SIM_CONTROL_OPERATOR_BITS)
|
||||
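// Illustrative sketch (editor's example, not part of this header; the
// helper name is hypothetical): limiting the gbe 0 shim to 100 megabits
// per second combines the shim id, shaping type, units and rate fields,
// again assuming __insn_mtspr() and SPR_SIM_CONTROL are available.
static inline void sim_shape_gbe0_100mbps(void)
{
	__insn_mtspr(SPR_SIM_CONTROL,
		     SIM_SHAPING_SPR_ARG(SIM_CONTROL_SHAPING_GBE_0,
					 SIM_CONTROL_SHAPING_BPS,
					 SIM_CONTROL_SHAPING_UNITS_MEGA,
					 100));
}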
|
||||
|
||||
//== Values returned when reading SPR_SIM_CONTROL.
|
||||
// ISSUE: These names should share a longer common prefix.
|
||||
|
||||
//! When reading SPR_SIM_CONTROL, the mask of simulator tracing bits
|
||||
//! (SIM_TRACE_xxx values).
|
||||
//!
|
||||
#define SIM_TRACE_FLAG_MASK 0xFFFF
|
||||
|
||||
//! When reading SPR_SIM_CONTROL, the mask for whether profiling is enabled.
|
||||
//!
|
||||
#define SIM_PROFILER_ENABLED_MASK 0x10000
|
||||
|
||||
|
||||
//== Special arguments for "SIM_CONTROL_PUTC".
|
||||
|
||||
//! Flag value for forcing a PUTC string-flush, including
|
||||
//! coordinate/cycle prefix and newline.
|
||||
//!
|
||||
#define SIM_PUTC_FLUSH_STRING 0x100
|
||||
|
||||
//! Flag value for forcing a PUTC binary-data-flush, which skips the
|
||||
//! prefix and does not append a newline.
|
||||
//!
|
||||
#define SIM_PUTC_FLUSH_BINARY 0x101
|
||||
|
||||
|
||||
#endif //__ARCH_SIM_DEF_H__
|
|
@ -0,0 +1,19 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/

#ifdef __tilegx__
#include <arch/spr_def_64.h>
#else
#include <arch/spr_def_32.h>
#endif
@ -0,0 +1,162 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef __DOXYGEN__
|
||||
|
||||
#ifndef __ARCH_SPR_DEF_H__
|
||||
#define __ARCH_SPR_DEF_H__
|
||||
|
||||
#define SPR_AUX_PERF_COUNT_0 0x6005
|
||||
#define SPR_AUX_PERF_COUNT_1 0x6006
|
||||
#define SPR_AUX_PERF_COUNT_CTL 0x6007
|
||||
#define SPR_AUX_PERF_COUNT_STS 0x6008
|
||||
#define SPR_CYCLE_HIGH 0x4e06
|
||||
#define SPR_CYCLE_LOW 0x4e07
|
||||
#define SPR_DMA_BYTE 0x3900
|
||||
#define SPR_DMA_CHUNK_SIZE 0x3901
|
||||
#define SPR_DMA_CTR 0x3902
|
||||
#define SPR_DMA_CTR__REQUEST_MASK 0x1
|
||||
#define SPR_DMA_CTR__SUSPEND_MASK 0x2
|
||||
#define SPR_DMA_DST_ADDR 0x3903
|
||||
#define SPR_DMA_DST_CHUNK_ADDR 0x3904
|
||||
#define SPR_DMA_SRC_ADDR 0x3905
|
||||
#define SPR_DMA_SRC_CHUNK_ADDR 0x3906
|
||||
#define SPR_DMA_STATUS__DONE_MASK 0x1
|
||||
#define SPR_DMA_STATUS__BUSY_MASK 0x2
|
||||
#define SPR_DMA_STATUS__RUNNING_MASK 0x10
|
||||
#define SPR_DMA_STRIDE 0x3907
|
||||
#define SPR_DMA_USER_STATUS 0x3908
|
||||
#define SPR_DONE 0x4e08
|
||||
#define SPR_EVENT_BEGIN 0x4e0d
|
||||
#define SPR_EVENT_END 0x4e0e
|
||||
#define SPR_EX_CONTEXT_0_0 0x4a05
|
||||
#define SPR_EX_CONTEXT_0_1 0x4a06
|
||||
#define SPR_EX_CONTEXT_0_1__PL_SHIFT 0
|
||||
#define SPR_EX_CONTEXT_0_1__PL_RMASK 0x3
|
||||
#define SPR_EX_CONTEXT_0_1__PL_MASK 0x3
|
||||
#define SPR_EX_CONTEXT_0_1__ICS_SHIFT 2
|
||||
#define SPR_EX_CONTEXT_0_1__ICS_RMASK 0x1
|
||||
#define SPR_EX_CONTEXT_0_1__ICS_MASK 0x4
|
||||
#define SPR_EX_CONTEXT_1_0 0x4805
|
||||
#define SPR_EX_CONTEXT_1_1 0x4806
|
||||
#define SPR_EX_CONTEXT_1_1__PL_SHIFT 0
|
||||
#define SPR_EX_CONTEXT_1_1__PL_RMASK 0x3
|
||||
#define SPR_EX_CONTEXT_1_1__PL_MASK 0x3
|
||||
#define SPR_EX_CONTEXT_1_1__ICS_SHIFT 2
|
||||
#define SPR_EX_CONTEXT_1_1__ICS_RMASK 0x1
|
||||
#define SPR_EX_CONTEXT_1_1__ICS_MASK 0x4
|
||||
#define SPR_FAIL 0x4e09
|
||||
#define SPR_INTCTRL_0_STATUS 0x4a07
|
||||
#define SPR_INTCTRL_1_STATUS 0x4807
|
||||
#define SPR_INTERRUPT_CRITICAL_SECTION 0x4e0a
|
||||
#define SPR_INTERRUPT_MASK_0_0 0x4a08
|
||||
#define SPR_INTERRUPT_MASK_0_1 0x4a09
|
||||
#define SPR_INTERRUPT_MASK_1_0 0x4809
|
||||
#define SPR_INTERRUPT_MASK_1_1 0x480a
|
||||
#define SPR_INTERRUPT_MASK_RESET_0_0 0x4a0a
|
||||
#define SPR_INTERRUPT_MASK_RESET_0_1 0x4a0b
|
||||
#define SPR_INTERRUPT_MASK_RESET_1_0 0x480b
|
||||
#define SPR_INTERRUPT_MASK_RESET_1_1 0x480c
|
||||
#define SPR_INTERRUPT_MASK_SET_0_0 0x4a0c
|
||||
#define SPR_INTERRUPT_MASK_SET_0_1 0x4a0d
|
||||
#define SPR_INTERRUPT_MASK_SET_1_0 0x480d
|
||||
#define SPR_INTERRUPT_MASK_SET_1_1 0x480e
|
||||
#define SPR_MPL_DMA_CPL_SET_0 0x5800
|
||||
#define SPR_MPL_DMA_CPL_SET_1 0x5801
|
||||
#define SPR_MPL_DMA_NOTIFY_SET_0 0x3800
|
||||
#define SPR_MPL_DMA_NOTIFY_SET_1 0x3801
|
||||
#define SPR_MPL_INTCTRL_0_SET_0 0x4a00
|
||||
#define SPR_MPL_INTCTRL_0_SET_1 0x4a01
|
||||
#define SPR_MPL_INTCTRL_1_SET_0 0x4800
|
||||
#define SPR_MPL_INTCTRL_1_SET_1 0x4801
|
||||
#define SPR_MPL_SN_ACCESS_SET_0 0x0800
|
||||
#define SPR_MPL_SN_ACCESS_SET_1 0x0801
|
||||
#define SPR_MPL_SN_CPL_SET_0 0x5a00
|
||||
#define SPR_MPL_SN_CPL_SET_1 0x5a01
|
||||
#define SPR_MPL_SN_FIREWALL_SET_0 0x2c00
|
||||
#define SPR_MPL_SN_FIREWALL_SET_1 0x2c01
|
||||
#define SPR_MPL_SN_NOTIFY_SET_0 0x2a00
|
||||
#define SPR_MPL_SN_NOTIFY_SET_1 0x2a01
|
||||
#define SPR_MPL_UDN_ACCESS_SET_0 0x0c00
|
||||
#define SPR_MPL_UDN_ACCESS_SET_1 0x0c01
|
||||
#define SPR_MPL_UDN_AVAIL_SET_0 0x4000
|
||||
#define SPR_MPL_UDN_AVAIL_SET_1 0x4001
|
||||
#define SPR_MPL_UDN_CA_SET_0 0x3c00
|
||||
#define SPR_MPL_UDN_CA_SET_1 0x3c01
|
||||
#define SPR_MPL_UDN_COMPLETE_SET_0 0x1400
|
||||
#define SPR_MPL_UDN_COMPLETE_SET_1 0x1401
|
||||
#define SPR_MPL_UDN_FIREWALL_SET_0 0x3000
|
||||
#define SPR_MPL_UDN_FIREWALL_SET_1 0x3001
|
||||
#define SPR_MPL_UDN_REFILL_SET_0 0x1000
|
||||
#define SPR_MPL_UDN_REFILL_SET_1 0x1001
|
||||
#define SPR_MPL_UDN_TIMER_SET_0 0x3600
|
||||
#define SPR_MPL_UDN_TIMER_SET_1 0x3601
|
||||
#define SPR_MPL_WORLD_ACCESS_SET_0 0x4e00
|
||||
#define SPR_MPL_WORLD_ACCESS_SET_1 0x4e01
|
||||
#define SPR_PASS 0x4e0b
|
||||
#define SPR_PERF_COUNT_0 0x4205
|
||||
#define SPR_PERF_COUNT_1 0x4206
|
||||
#define SPR_PERF_COUNT_CTL 0x4207
|
||||
#define SPR_PERF_COUNT_STS 0x4208
|
||||
#define SPR_PROC_STATUS 0x4f00
|
||||
#define SPR_SIM_CONTROL 0x4e0c
|
||||
#define SPR_SNCTL 0x0805
|
||||
#define SPR_SNCTL__FRZFABRIC_MASK 0x1
|
||||
#define SPR_SNCTL__FRZPROC_MASK 0x2
|
||||
#define SPR_SNPC 0x080b
|
||||
#define SPR_SNSTATIC 0x080c
|
||||
#define SPR_SYSTEM_SAVE_0_0 0x4b00
|
||||
#define SPR_SYSTEM_SAVE_0_1 0x4b01
|
||||
#define SPR_SYSTEM_SAVE_0_2 0x4b02
|
||||
#define SPR_SYSTEM_SAVE_0_3 0x4b03
|
||||
#define SPR_SYSTEM_SAVE_1_0 0x4900
|
||||
#define SPR_SYSTEM_SAVE_1_1 0x4901
|
||||
#define SPR_SYSTEM_SAVE_1_2 0x4902
|
||||
#define SPR_SYSTEM_SAVE_1_3 0x4903
|
||||
#define SPR_TILE_COORD 0x4c17
|
||||
#define SPR_TILE_RTF_HWM 0x4e10
|
||||
#define SPR_TILE_TIMER_CONTROL 0x3205
|
||||
#define SPR_TILE_WRITE_PENDING 0x4e0f
|
||||
#define SPR_UDN_AVAIL_EN 0x4005
|
||||
#define SPR_UDN_CA_DATA 0x0d00
|
||||
#define SPR_UDN_DATA_AVAIL 0x0d03
|
||||
#define SPR_UDN_DEADLOCK_TIMEOUT 0x3606
|
||||
#define SPR_UDN_DEMUX_CA_COUNT 0x0c05
|
||||
#define SPR_UDN_DEMUX_COUNT_0 0x0c06
|
||||
#define SPR_UDN_DEMUX_COUNT_1 0x0c07
|
||||
#define SPR_UDN_DEMUX_COUNT_2 0x0c08
|
||||
#define SPR_UDN_DEMUX_COUNT_3 0x0c09
|
||||
#define SPR_UDN_DEMUX_CTL 0x0c0a
|
||||
#define SPR_UDN_DEMUX_QUEUE_SEL 0x0c0c
|
||||
#define SPR_UDN_DEMUX_STATUS 0x0c0d
|
||||
#define SPR_UDN_DEMUX_WRITE_FIFO 0x0c0e
|
||||
#define SPR_UDN_DIRECTION_PROTECT 0x3005
|
||||
#define SPR_UDN_REFILL_EN 0x1005
|
||||
#define SPR_UDN_SP_FIFO_DATA 0x0c11
|
||||
#define SPR_UDN_SP_FIFO_SEL 0x0c12
|
||||
#define SPR_UDN_SP_FREEZE 0x0c13
|
||||
#define SPR_UDN_SP_FREEZE__SP_FRZ_MASK 0x1
|
||||
#define SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK 0x2
|
||||
#define SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK 0x4
|
||||
#define SPR_UDN_SP_STATE 0x0c14
|
||||
#define SPR_UDN_TAG_0 0x0c15
|
||||
#define SPR_UDN_TAG_1 0x0c16
|
||||
#define SPR_UDN_TAG_2 0x0c17
|
||||
#define SPR_UDN_TAG_3 0x0c18
|
||||
#define SPR_UDN_TAG_VALID 0x0c19
|
||||
#define SPR_UDN_TILE_COORD 0x0c1a
|
||||
|
||||
#endif /* !defined(__ARCH_SPR_DEF_H__) */
|
||||
|
||||
#endif /* !defined(__DOXYGEN__) */
|
|
@ -0,0 +1,3 @@
include include/asm-generic/Kbuild.asm

header-y += ucontext.h
@ -0,0 +1 @@
#include <generated/asm-offsets.h>
@ -0,0 +1,159 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* Atomic primitives.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_ATOMIC_H
|
||||
#define _ASM_TILE_ATOMIC_H
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <asm/system.h>
|
||||
|
||||
#define ATOMIC_INIT(i) { (i) }
|
||||
|
||||
/**
|
||||
* atomic_read - read atomic variable
|
||||
* @v: pointer of type atomic_t
|
||||
*
|
||||
* Atomically reads the value of @v.
|
||||
*/
|
||||
static inline int atomic_read(const atomic_t *v)
|
||||
{
|
||||
return v->counter;
|
||||
}
|
||||
|
||||
/**
|
||||
* atomic_sub_return - subtract integer and return
|
||||
* @v: pointer of type atomic_t
|
||||
* @i: integer value to subtract
|
||||
*
|
||||
* Atomically subtracts @i from @v and returns @v - @i
|
||||
*/
|
||||
#define atomic_sub_return(i, v) atomic_add_return((int)(-(i)), (v))
|
||||
|
||||
/**
|
||||
* atomic_sub - subtract integer from atomic variable
|
||||
* @i: integer value to subtract
|
||||
* @v: pointer of type atomic_t
|
||||
*
|
||||
* Atomically subtracts @i from @v.
|
||||
*/
|
||||
#define atomic_sub(i, v) atomic_add((int)(-(i)), (v))
|
||||
|
||||
/**
|
||||
* atomic_sub_and_test - subtract value from variable and test result
|
||||
* @i: integer value to subtract
|
||||
* @v: pointer of type atomic_t
|
||||
*
|
||||
* Atomically subtracts @i from @v and returns true if the result is
|
||||
* zero, or false for all other cases.
|
||||
*/
|
||||
#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
|
||||
|
||||
/**
|
||||
* atomic_inc_return - increment memory and return
|
||||
* @v: pointer of type atomic_t
|
||||
*
|
||||
* Atomically increments @v by 1 and returns the new value.
|
||||
*/
|
||||
#define atomic_inc_return(v) atomic_add_return(1, (v))
|
||||
|
||||
/**
|
||||
* atomic_dec_return - decrement memory and return
|
||||
* @v: pointer of type atomic_t
|
||||
*
|
||||
* Atomically decrements @v by 1 and returns the new value.
|
||||
*/
|
||||
#define atomic_dec_return(v) atomic_sub_return(1, (v))
|
||||
|
||||
/**
|
||||
* atomic_inc - increment atomic variable
|
||||
* @v: pointer of type atomic_t
|
||||
*
|
||||
* Atomically increments @v by 1.
|
||||
*/
|
||||
#define atomic_inc(v) atomic_add(1, (v))
|
||||
|
||||
/**
|
||||
* atomic_dec - decrement atomic variable
|
||||
* @v: pointer of type atomic_t
|
||||
*
|
||||
* Atomically decrements @v by 1.
|
||||
*/
|
||||
#define atomic_dec(v) atomic_sub(1, (v))
|
||||
|
||||
/**
|
||||
* atomic_dec_and_test - decrement and test
|
||||
* @v: pointer of type atomic_t
|
||||
*
|
||||
* Atomically decrements @v by 1 and returns true if the result is 0.
|
||||
*/
|
||||
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
|
||||
|
||||
/**
|
||||
* atomic_inc_and_test - increment and test
|
||||
* @v: pointer of type atomic_t
|
||||
*
|
||||
* Atomically increments @v by 1 and returns true if the result is 0.
|
||||
*/
|
||||
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
|
||||
|
||||
/**
|
||||
* atomic_add_negative - add and test if negative
|
||||
* @v: pointer of type atomic_t
|
||||
* @i: integer value to add
|
||||
*
|
||||
* Atomically adds @i to @v and returns true if the result is
|
||||
* negative, or false when result is greater than or equal to zero.
|
||||
*/
|
||||
#define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0)
|
||||
|
||||
/**
|
||||
* atomic_inc_not_zero - increment unless the number is zero
|
||||
* @v: pointer of type atomic_t
|
||||
*
|
||||
* Atomically increments @v by 1, so long as @v is non-zero.
|
||||
* Returns non-zero if @v was non-zero, and zero otherwise.
|
||||
*/
|
||||
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
|
||||
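/*
 * Illustrative sketch (editor's example, not part of this header; the
 * helper name is hypothetical and <asm/atomic.h> is assumed included):
 * atomic_inc_not_zero() is the usual building block for "take a
 * reference only if the object is still live" patterns.
 */
static inline int example_ref_get(atomic_t *refcount)
{
	/* Fails (returns 0) once the last reference has been dropped. */
	return atomic_inc_not_zero(refcount);
}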
|
||||
|
||||
/*
|
||||
* We define xchg() and cmpxchg() in the included headers.
|
||||
* Note that we do not define __HAVE_ARCH_CMPXCHG, since that would imply
|
||||
* that cmpxchg() is an efficient operation, which is not particularly true.
|
||||
*/
|
||||
|
||||
/* Nonexistent functions intended to cause link errors. */
|
||||
extern unsigned long __xchg_called_with_bad_pointer(void);
|
||||
extern unsigned long __cmpxchg_called_with_bad_pointer(void);
|
||||
|
||||
#define tas(ptr) (xchg((ptr), 1))
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#ifndef __tilegx__
|
||||
#include <asm/atomic_32.h>
|
||||
#else
|
||||
#include <asm/atomic_64.h>
|
||||
#endif
|
||||
|
||||
/* Provide the appropriate atomic_long_t definitions. */
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <asm-generic/atomic-long.h>
|
||||
#endif
|
||||
|
||||
#endif /* _ASM_TILE_ATOMIC_H */
|
|
@ -0,0 +1,370 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* Do not include directly; use <asm/atomic.h>.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_ATOMIC_32_H
|
||||
#define _ASM_TILE_ATOMIC_32_H
|
||||
|
||||
#include <arch/chip.h>
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
/* Tile-specific routines to support <asm/atomic.h>. */
|
||||
int _atomic_xchg(atomic_t *v, int n);
|
||||
int _atomic_xchg_add(atomic_t *v, int i);
|
||||
int _atomic_xchg_add_unless(atomic_t *v, int a, int u);
|
||||
int _atomic_cmpxchg(atomic_t *v, int o, int n);
|
||||
|
||||
/**
|
||||
* atomic_xchg - atomically exchange contents of memory with a new value
|
||||
* @v: pointer of type atomic_t
|
||||
* @i: integer value to store in memory
|
||||
*
|
||||
* Atomically sets @v to @i and returns old @v
|
||||
*/
|
||||
static inline int atomic_xchg(atomic_t *v, int n)
|
||||
{
|
||||
smp_mb(); /* barrier for proper semantics */
|
||||
return _atomic_xchg(v, n);
|
||||
}
|
||||
|
||||
/**
|
||||
* atomic_cmpxchg - atomically exchange contents of memory if it matches
|
||||
* @v: pointer of type atomic_t
|
||||
* @o: old value that memory should have
|
||||
* @n: new value to write to memory if it matches
|
||||
*
|
||||
* Atomically checks if @v holds @o and replaces it with @n if so.
|
||||
* Returns the old value at @v.
|
||||
*/
|
||||
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
|
||||
{
|
||||
smp_mb(); /* barrier for proper semantics */
|
||||
return _atomic_cmpxchg(v, o, n);
|
||||
}
|
||||
|
||||
/**
|
||||
* atomic_add - add integer to atomic variable
|
||||
* @i: integer value to add
|
||||
* @v: pointer of type atomic_t
|
||||
*
|
||||
* Atomically adds @i to @v.
|
||||
*/
|
||||
static inline void atomic_add(int i, atomic_t *v)
|
||||
{
|
||||
_atomic_xchg_add(v, i);
|
||||
}
|
||||
|
||||
/**
|
||||
* atomic_add_return - add integer and return
|
||||
* @v: pointer of type atomic_t
|
||||
* @i: integer value to add
|
||||
*
|
||||
* Atomically adds @i to @v and returns @i + @v
|
||||
*/
|
||||
static inline int atomic_add_return(int i, atomic_t *v)
|
||||
{
|
||||
smp_mb(); /* barrier for proper semantics */
|
||||
return _atomic_xchg_add(v, i) + i;
|
||||
}
|
||||
|
||||
/**
|
||||
* atomic_add_unless - add unless the number is already a given value
|
||||
* @v: pointer of type atomic_t
|
||||
* @a: the amount to add to v...
|
||||
* @u: ...unless v is equal to u.
|
||||
*
|
||||
* Atomically adds @a to @v, so long as @v was not already @u.
|
||||
* Returns non-zero if @v was not @u, and zero otherwise.
|
||||
*/
|
||||
static inline int atomic_add_unless(atomic_t *v, int a, int u)
|
||||
{
|
||||
smp_mb(); /* barrier for proper semantics */
|
||||
return _atomic_xchg_add_unless(v, a, u) != u;
|
||||
}
|
||||
|
||||
/**
|
||||
* atomic_set - set atomic variable
|
||||
* @v: pointer of type atomic_t
|
||||
* @i: required value
|
||||
*
|
||||
* Atomically sets the value of @v to @i.
|
||||
*
|
||||
* atomic_set() can't be just a raw store, since it would be lost if it
|
||||
* fell between the load and store of one of the other atomic ops.
|
||||
*/
|
||||
static inline void atomic_set(atomic_t *v, int n)
|
||||
{
|
||||
_atomic_xchg(v, n);
|
||||
}
|
||||
|
||||
#define xchg(ptr, x) ((typeof(*(ptr))) \
|
||||
((sizeof(*(ptr)) == sizeof(atomic_t)) ? \
|
||||
atomic_xchg((atomic_t *)(ptr), (long)(x)) : \
|
||||
__xchg_called_with_bad_pointer()))
|
||||
|
||||
#define cmpxchg(ptr, o, n) ((typeof(*(ptr))) \
|
||||
((sizeof(*(ptr)) == sizeof(atomic_t)) ? \
|
||||
atomic_cmpxchg((atomic_t *)(ptr), (long)(o), (long)(n)) : \
|
||||
__cmpxchg_called_with_bad_pointer()))
|
||||
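/*
 * Illustrative sketch (editor's example, not part of this header; the
 * helper and variable names are hypothetical): these macros only accept
 * objects the size of an atomic_t (4 bytes); anything else resolves to
 * the "bad pointer" stub and fails at link time.
 */
static inline int example_try_lock(int *lock_word)
{
	/* Returns nonzero only if we atomically changed 0 -> 1. */
	return cmpxchg(lock_word, 0, 1) == 0;
}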
|
||||
/* A 64-bit atomic type */
|
||||
|
||||
typedef struct {
|
||||
u64 __aligned(8) counter;
|
||||
} atomic64_t;
|
||||
|
||||
#define ATOMIC64_INIT(val) { (val) }
|
||||
|
||||
u64 _atomic64_xchg(atomic64_t *v, u64 n);
|
||||
u64 _atomic64_xchg_add(atomic64_t *v, u64 i);
|
||||
u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u);
|
||||
u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n);
|
||||
|
||||
/**
|
||||
* atomic64_read - read atomic variable
|
||||
* @v: pointer of type atomic64_t
|
||||
*
|
||||
* Atomically reads the value of @v.
|
||||
*/
|
||||
static inline u64 atomic64_read(const atomic64_t *v)
|
||||
{
|
||||
/*
|
||||
* Requires an atomic op to read both 32-bit parts consistently.
|
||||
* Casting away const is safe since the atomic support routines
|
||||
* do not write to memory if the value has not been modified.
|
||||
*/
|
||||
return _atomic64_xchg_add((atomic64_t *)v, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* atomic64_xchg - atomically exchange contents of memory with a new value
|
||||
* @v: pointer of type atomic64_t
|
||||
* @i: integer value to store in memory
|
||||
*
|
||||
* Atomically sets @v to @i and returns old @v
|
||||
*/
|
||||
static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
|
||||
{
|
||||
smp_mb(); /* barrier for proper semantics */
|
||||
return _atomic64_xchg(v, n);
|
||||
}
|
||||
|
||||
/**
|
||||
* atomic64_cmpxchg - atomically exchange contents of memory if it matches
|
||||
* @v: pointer of type atomic64_t
|
||||
* @o: old value that memory should have
|
||||
* @n: new value to write to memory if it matches
|
||||
*
|
||||
* Atomically checks if @v holds @o and replaces it with @n if so.
|
||||
* Returns the old value at @v.
|
||||
*/
|
||||
static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
|
||||
{
|
||||
smp_mb(); /* barrier for proper semantics */
|
||||
return _atomic64_cmpxchg(v, o, n);
|
||||
}
|
||||
|
||||
/**
|
||||
* atomic64_add - add integer to atomic variable
|
||||
* @i: integer value to add
|
||||
* @v: pointer of type atomic64_t
|
||||
*
|
||||
* Atomically adds @i to @v.
|
||||
*/
|
||||
static inline void atomic64_add(u64 i, atomic64_t *v)
|
||||
{
|
||||
_atomic64_xchg_add(v, i);
|
||||
}
|
||||
|
||||
/**
|
||||
* atomic64_add_return - add integer and return
|
||||
* @v: pointer of type atomic64_t
|
||||
* @i: integer value to add
|
||||
*
|
||||
* Atomically adds @i to @v and returns @i + @v
|
||||
*/
|
||||
static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
|
||||
{
|
||||
smp_mb(); /* barrier for proper semantics */
|
||||
return _atomic64_xchg_add(v, i) + i;
|
||||
}
|
||||
|
||||
/**
|
||||
* atomic64_add_unless - add unless the number is already a given value
|
||||
* @v: pointer of type atomic64_t
|
||||
* @a: the amount to add to v...
|
||||
* @u: ...unless v is equal to u.
|
||||
*
|
||||
* Atomically adds @a to @v, so long as @v was not already @u.
|
||||
* Returns non-zero if @v was not @u, and zero otherwise.
|
||||
*/
|
||||
static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
|
||||
{
|
||||
smp_mb(); /* barrier for proper semantics */
|
||||
return _atomic64_xchg_add_unless(v, a, u) != u;
|
||||
}
|
||||
|
||||
/**
|
||||
* atomic64_set - set atomic variable
|
||||
* @v: pointer of type atomic64_t
|
||||
* @i: required value
|
||||
*
|
||||
* Atomically sets the value of @v to @i.
|
||||
*
|
||||
* atomic64_set() can't be just a raw store, since it would be lost if it
|
||||
* fell between the load and store of one of the other atomic ops.
|
||||
*/
|
||||
static inline void atomic64_set(atomic64_t *v, u64 n)
|
||||
{
|
||||
_atomic64_xchg(v, n);
|
||||
}
|
||||
|
||||
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
|
||||
#define atomic64_inc(v) atomic64_add(1LL, (v))
|
||||
#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
|
||||
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
|
||||
#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
|
||||
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
|
||||
#define atomic64_sub(i, v) atomic64_add(-(i), (v))
|
||||
#define atomic64_dec(v) atomic64_sub(1LL, (v))
|
||||
#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
|
||||
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
|
||||
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
|
||||
|
||||
/*
|
||||
* We need a barrier before modifying the word, since the _atomic_xxx()
* routines just tns the lock and then do a read/modify/write of the word.
|
||||
* But after the word is updated, the routine issues an "mf" before returning,
|
||||
* and since it's a function call, we don't even need a compiler barrier.
|
||||
*/
|
||||
#define smp_mb__before_atomic_dec() smp_mb()
|
||||
#define smp_mb__before_atomic_inc() smp_mb()
|
||||
#define smp_mb__after_atomic_dec() do { } while (0)
|
||||
#define smp_mb__after_atomic_inc() do { } while (0)
|
||||
|
||||
|
||||
/*
|
||||
* Support "tns" atomic integers. These are atomic integers that can
|
||||
* hold any value but "1". They are more efficient than regular atomic
|
||||
* operations because the "lock" (aka acquire) step is a single "tns"
|
||||
* in the uncontended case, and the "unlock" (aka release) step is a
|
||||
* single "store" without an mf. (However, note that on tilepro the
|
||||
* "tns" will evict the local cache line, so it's not all upside.)
|
||||
*
|
||||
* Note that you can ONLY observe the value stored in the pointer
|
||||
* using these operations; a direct read of the value may confusingly
|
||||
* return the special value "1".
|
||||
*/
|
||||
|
||||
int __tns_atomic_acquire(atomic_t *);
|
||||
void __tns_atomic_release(atomic_t *p, int v);
|
||||
|
||||
static inline void tns_atomic_set(atomic_t *v, int i)
|
||||
{
|
||||
__tns_atomic_acquire(v);
|
||||
__tns_atomic_release(v, i);
|
||||
}
|
||||
|
||||
static inline int tns_atomic_cmpxchg(atomic_t *v, int o, int n)
|
||||
{
|
||||
int ret = __tns_atomic_acquire(v);
|
||||
__tns_atomic_release(v, (ret == o) ? n : ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int tns_atomic_xchg(atomic_t *v, int n)
|
||||
{
|
||||
int ret = __tns_atomic_acquire(v);
|
||||
__tns_atomic_release(v, n);
|
||||
return ret;
|
||||
}
|
||||
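/*
 * Illustrative sketch (editor's example, not part of this header; the
 * helper name is hypothetical): a tns-managed value must never
 * legitimately hold 1 and must only be observed through the accessors
 * above, so the caller here must guarantee old + delta is never 1.
 */
static inline int example_tns_add(atomic_t *counter, int delta)
{
	int old = __tns_atomic_acquire(counter);    /* returns value, holds lock */
	__tns_atomic_release(counter, old + delta); /* store releases the lock */
	return old;
}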
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
/*
|
||||
* Internal definitions only beyond this point.
|
||||
*/
|
||||
|
||||
#define ATOMIC_LOCKS_FOUND_VIA_TABLE() \
|
||||
(!CHIP_HAS_CBOX_HOME_MAP() && defined(CONFIG_SMP))
|
||||
|
||||
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
|
||||
|
||||
/* Number of entries in atomic_lock_ptr[]. */
|
||||
#define ATOMIC_HASH_L1_SHIFT 6
|
||||
#define ATOMIC_HASH_L1_SIZE (1 << ATOMIC_HASH_L1_SHIFT)
|
||||
|
||||
/* Number of locks in each struct pointed to by atomic_lock_ptr[]. */
|
||||
#define ATOMIC_HASH_L2_SHIFT (CHIP_L2_LOG_LINE_SIZE() - 2)
|
||||
#define ATOMIC_HASH_L2_SIZE (1 << ATOMIC_HASH_L2_SHIFT)
|
||||
|
||||
#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
|
||||
|
||||
/*
|
||||
* Number of atomic locks in atomic_locks[]. Must be a power of two.
|
||||
* There is no reason for more than PAGE_SIZE / 8 entries, since that
|
||||
* is the maximum number of pointer bits we can use to index this.
|
||||
* And we cannot have more than PAGE_SIZE / 4, since this has to
|
||||
* fit on a single page and each entry takes 4 bytes.
|
||||
*/
|
||||
#define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3)
|
||||
#define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT)
|
||||
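/*
 * Worked example (editor's note, not part of this header): assuming the
 * default 64 KB tile page size, PAGE_SHIFT is 16, so ATOMIC_HASH_SHIFT is
 * 13 and atomic_locks[] holds 8192 int entries -- 32 KB, i.e. half a
 * page, comfortably within the single-page limit described above.
 */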
|
||||
#ifndef __ASSEMBLY__
|
||||
extern int atomic_locks[];
|
||||
#endif
|
||||
|
||||
#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
|
||||
|
||||
/*
|
||||
* All the code that may fault while holding an atomic lock must
|
||||
* place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
|
||||
* can correctly release and reacquire the lock. Note that we
|
||||
* mention the register number in a comment in "lib/atomic_asm.S" to help
|
||||
* assembly coders from using this register by mistake, so if it
|
||||
* is changed here, change that comment as well.
|
||||
*/
|
||||
#define ATOMIC_LOCK_REG 20
|
||||
#define ATOMIC_LOCK_REG_NAME r20
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
/* Called from setup to initialize a hash table to point to per_cpu locks. */
|
||||
void __init_atomic_per_cpu(void);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
/* Support releasing the atomic lock in do_page_fault_ics(). */
|
||||
void __atomic_fault_unlock(int *lock_ptr);
|
||||
#endif
|
||||
|
||||
/* Private helper routines in lib/atomic_asm_32.S */
|
||||
extern struct __get_user __atomic_cmpxchg(volatile int *p,
|
||||
int *lock, int o, int n);
|
||||
extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
|
||||
extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
|
||||
extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
|
||||
int *lock, int o, int n);
|
||||
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
|
||||
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
|
||||
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
|
||||
extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
|
||||
extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
|
||||
extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
|
||||
extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
|
||||
int *lock, u64 o, u64 n);
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#endif /* _ASM_TILE_ATOMIC_32_H */
|
|
@ -0,0 +1,20 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/

#ifndef _ASM_TILE_AUXVEC_H
#define _ASM_TILE_AUXVEC_H

/* No extensions to auxvec */

#endif /* _ASM_TILE_AUXVEC_H */
@ -0,0 +1,193 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _TILE_BACKTRACE_H
|
||||
#define _TILE_BACKTRACE_H
|
||||
|
||||
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
#include <arch/chip.h>
|
||||
|
||||
#if CHIP_VA_WIDTH() > 32
|
||||
typedef unsigned long long VirtualAddress;
|
||||
#else
|
||||
typedef unsigned int VirtualAddress;
|
||||
#endif
|
||||
|
||||
|
||||
/** Reads 'size' bytes from 'address' and writes the data to 'result'.
|
||||
* Returns true if successful, else false (e.g. memory not readable).
|
||||
*/
|
||||
typedef bool (*BacktraceMemoryReader)(void *result,
|
||||
VirtualAddress address,
|
||||
unsigned int size,
|
||||
void *extra);
|
||||
|
||||
typedef struct {
|
||||
/** Current PC. */
|
||||
VirtualAddress pc;
|
||||
|
||||
/** Current stack pointer value. */
|
||||
VirtualAddress sp;
|
||||
|
||||
/** Current frame pointer value (i.e. caller's stack pointer) */
|
||||
VirtualAddress fp;
|
||||
|
||||
/** Internal use only: caller's PC for first frame. */
|
||||
VirtualAddress initial_frame_caller_pc;
|
||||
|
||||
/** Internal use only: callback to read memory. */
|
||||
BacktraceMemoryReader read_memory_func;
|
||||
|
||||
/** Internal use only: arbitrary argument to read_memory_func. */
|
||||
void *read_memory_func_extra;
|
||||
|
||||
} BacktraceIterator;
|
||||
|
||||
|
||||
/** Initializes a backtracer to start from the given location.
|
||||
*
|
||||
* If the frame pointer cannot be determined it is set to -1.
|
||||
*
|
||||
* @param state The state to be filled in.
|
||||
* @param read_memory_func A callback that reads memory. If NULL, a default
|
||||
* value is provided.
|
||||
* @param read_memory_func_extra An arbitrary argument to read_memory_func.
|
||||
* @param pc The current PC.
|
||||
* @param lr The current value of the 'lr' register.
|
||||
* @param sp The current value of the 'sp' register.
|
||||
* @param r52 The current value of the 'r52' register.
|
||||
*/
|
||||
extern void backtrace_init(BacktraceIterator *state,
|
||||
BacktraceMemoryReader read_memory_func,
|
||||
void *read_memory_func_extra,
|
||||
VirtualAddress pc, VirtualAddress lr,
|
||||
VirtualAddress sp, VirtualAddress r52);
|
||||
|
||||
|
||||
/** Advances the backtracing state to the calling frame, returning
|
||||
* true iff successful.
|
||||
*/
|
||||
extern bool backtrace_next(BacktraceIterator *state);
|
||||
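/* Illustrative sketch (editor's example, not part of this header; the
 * print helper and the register-snapshot arguments are hypothetical):
 * a typical client seeds the iterator with the current register state
 * and keeps calling backtrace_next() until it fails.
 */
extern void example_print_frame(VirtualAddress pc, VirtualAddress sp,
				VirtualAddress fp);

static inline void example_dump_stack(BacktraceMemoryReader reader,
				      void *reader_extra,
				      VirtualAddress pc, VirtualAddress lr,
				      VirtualAddress sp, VirtualAddress r52)
{
	BacktraceIterator it;
	backtrace_init(&it, reader, reader_extra, pc, lr, sp, r52);
	do {
		/* Each iteration describes one stack frame. */
		example_print_frame(it.pc, it.sp, it.fp);
	} while (backtrace_next(&it));
}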
|
||||
|
||||
typedef enum {
|
||||
|
||||
/* We have no idea what the caller's pc is. */
|
||||
PC_LOC_UNKNOWN,
|
||||
|
||||
/* The caller's pc is currently in lr. */
|
||||
PC_LOC_IN_LR,
|
||||
|
||||
/* The caller's pc can be found by dereferencing the caller's sp. */
|
||||
PC_LOC_ON_STACK
|
||||
|
||||
} CallerPCLocation;
|
||||
|
||||
|
||||
typedef enum {
|
||||
|
||||
/* We have no idea what the caller's sp is. */
|
||||
SP_LOC_UNKNOWN,
|
||||
|
||||
/* The caller's sp is currently in r52. */
|
||||
SP_LOC_IN_R52,
|
||||
|
||||
/* The caller's sp can be found by adding a certain constant
|
||||
* to the current value of sp.
|
||||
*/
|
||||
SP_LOC_OFFSET
|
||||
|
||||
} CallerSPLocation;
|
||||
|
||||
|
||||
/* Bit values ORed into CALLER_* values for info ops. */
|
||||
enum {
|
||||
/* Setting the low bit on any of these values means the info op
|
||||
* applies only to one bundle ago.
|
||||
*/
|
||||
ONE_BUNDLE_AGO_FLAG = 1,
|
||||
|
||||
/* Setting this bit on a CALLER_SP_* value means the PC is in LR.
|
||||
* If not set, PC is on the stack.
|
||||
*/
|
||||
PC_IN_LR_FLAG = 2,
|
||||
|
||||
/* This many of the low bits of a CALLER_SP_* value are for the
|
||||
* flag bits above.
|
||||
*/
|
||||
NUM_INFO_OP_FLAGS = 2,
|
||||
|
||||
/* We cannot have one in the memory pipe so this is the maximum. */
|
||||
MAX_INFO_OPS_PER_BUNDLE = 2
|
||||
};
|
||||
|
||||
|
||||
/** Internal constants used to define 'info' operands. */
|
||||
enum {
|
||||
/* 0 and 1 are reserved, as are all negative numbers. */
|
||||
|
||||
CALLER_UNKNOWN_BASE = 2,
|
||||
|
||||
CALLER_SP_IN_R52_BASE = 4,
|
||||
|
||||
CALLER_SP_OFFSET_BASE = 8
|
||||
};
|
||||
|
||||
|
||||
/** Current backtracer state describing where it thinks the caller is. */
|
||||
typedef struct {
|
||||
/*
|
||||
* Public fields
|
||||
*/
|
||||
|
||||
/* How do we find the caller's PC? */
|
||||
CallerPCLocation pc_location : 8;
|
||||
|
||||
/* How do we find the caller's SP? */
|
||||
CallerSPLocation sp_location : 8;
|
||||
|
||||
/* If sp_location == SP_LOC_OFFSET, then caller_sp == sp +
|
||||
* loc->sp_offset. Else this field is undefined.
|
||||
*/
|
||||
uint16_t sp_offset;
|
||||
|
||||
/* Is the most recently visited bundle a terminating bundle? */
|
||||
bool at_terminating_bundle;
|
||||
|
||||
/*
|
||||
* Private fields
|
||||
*/
|
||||
|
||||
/* Will the forward scanner see someone clobbering sp
|
||||
* (i.e. changing it with something other than addi sp, sp, N?)
|
||||
*/
|
||||
bool sp_clobber_follows;
|
||||
|
||||
/* Operand to next "visible" info op (no more than one bundle past
|
||||
* the next terminating bundle), or -32768 if none.
|
||||
*/
|
||||
int16_t next_info_operand;
|
||||
|
||||
/* Is the info op in next_info_operand in the very next bundle? */
|
||||
bool is_next_info_operand_adjacent;
|
||||
|
||||
} CallerLocation;
|
||||
|
||||
|
||||
|
||||
|
||||
#endif /* _TILE_BACKTRACE_H */
|
|
@ -0,0 +1,126 @@
|
|||
/*
|
||||
* Copyright 1992, Linus Torvalds.
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_BITOPS_H
|
||||
#define _ASM_TILE_BITOPS_H
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
#ifndef _LINUX_BITOPS_H
|
||||
#error only <linux/bitops.h> can be included directly
|
||||
#endif
|
||||
|
||||
#ifdef __tilegx__
|
||||
#include <asm/bitops_64.h>
|
||||
#else
|
||||
#include <asm/bitops_32.h>
|
||||
#endif
|
||||
|
||||
/**
|
||||
* __ffs - find first set bit in word
|
||||
* @word: The word to search
|
||||
*
|
||||
* Undefined if no set bit exists, so code should check against 0 first.
|
||||
*/
|
||||
static inline unsigned long __ffs(unsigned long word)
|
||||
{
|
||||
return __builtin_ctzl(word);
|
||||
}
|
||||
|
||||
/**
|
||||
* ffz - find first zero bit in word
|
||||
* @word: The word to search
|
||||
*
|
||||
* Undefined if no zero exists, so code should check against ~0UL first.
|
||||
*/
|
||||
static inline unsigned long ffz(unsigned long word)
|
||||
{
|
||||
return __builtin_ctzl(~word);
|
||||
}
|
||||
|
||||
/**
|
||||
* __fls - find last set bit in word
|
||||
* @word: The word to search
|
||||
*
|
||||
* Undefined if no set bit exists, so code should check against 0 first.
|
||||
*/
|
||||
static inline unsigned long __fls(unsigned long word)
|
||||
{
|
||||
return (sizeof(word) * 8) - 1 - __builtin_clzl(word);
|
||||
}
|
||||
|
||||
/**
|
||||
* ffs - find first set bit in word
|
||||
* @x: the word to search
|
||||
*
|
||||
* This is defined the same way as the libc and compiler builtin ffs
|
||||
* routines, therefore differs in spirit from the other bitops.
|
||||
*
|
||||
* ffs(value) returns 0 if value is 0 or the position of the first
|
||||
* set bit if value is nonzero. The first (least significant) bit
|
||||
* is at position 1.
|
||||
*/
|
||||
static inline int ffs(int x)
|
||||
{
|
||||
return __builtin_ffs(x);
|
||||
}
|
||||
|
||||
/**
|
||||
* fls - find last set bit in word
|
||||
* @x: the word to search
|
||||
*
|
||||
* This is defined in a similar way as the libc and compiler builtin
|
||||
* ffs, but returns the position of the most significant set bit.
|
||||
*
|
||||
* fls(value) returns 0 if value is 0 or the position of the last
|
||||
* set bit if value is nonzero. The last (most significant) bit is
|
||||
* at position 32.
|
||||
*/
|
||||
static inline int fls(int x)
|
||||
{
|
||||
return (sizeof(int) * 8) - __builtin_clz(x);
|
||||
}
|
||||
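/*
 * Worked example (editor's note, not part of this header): for the word
 * 0x20 (only bit 5 set), ffs() and fls() both return 6 (1-based), while
 * __ffs() and __fls() both return 5 (0-based bit index).
 */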
|
||||
static inline int fls64(__u64 w)
|
||||
{
|
||||
return (sizeof(__u64) * 8) - __builtin_clzll(w);
|
||||
}
|
||||
|
||||
static inline unsigned int hweight32(unsigned int w)
|
||||
{
|
||||
return __builtin_popcount(w);
|
||||
}
|
||||
|
||||
static inline unsigned int hweight16(unsigned int w)
|
||||
{
|
||||
return __builtin_popcount(w & 0xffff);
|
||||
}
|
||||
|
||||
static inline unsigned int hweight8(unsigned int w)
|
||||
{
|
||||
return __builtin_popcount(w & 0xff);
|
||||
}
|
||||
|
||||
static inline unsigned long hweight64(__u64 w)
|
||||
{
|
||||
return __builtin_popcountll(w);
|
||||
}
|
||||
|
||||
#include <asm-generic/bitops/lock.h>
|
||||
#include <asm-generic/bitops/sched.h>
|
||||
#include <asm-generic/bitops/ext2-non-atomic.h>
|
||||
#include <asm-generic/bitops/minix.h>
|
||||
|
||||
#endif /* _ASM_TILE_BITOPS_H */
|
|
@ -0,0 +1,132 @@
|
|||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_BITOPS_32_H
#define _ASM_TILE_BITOPS_32_H

#include <linux/compiler.h>
#include <asm/atomic.h>
#include <asm/system.h>

/* Tile-specific routines to support <asm/bitops.h>. */
unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);
unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask);
unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask);

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.
 * See __set_bit() if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned nr, volatile unsigned long *addr)
{
	_atomic_or(addr + BIT_WORD(nr), BIT_MASK(nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.
 * See __clear_bit() if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 *
 * clear_bit() may not contain a memory barrier, so if it is used for
 * locking purposes, you should call smp_mb__before_clear_bit() and/or
 * smp_mb__after_clear_bit() to ensure changes are visible on other cpus.
 */
static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
{
	_atomic_andn(addr + BIT_WORD(nr), BIT_MASK(nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * See __change_bit() if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned nr, volatile unsigned long *addr)
{
	_atomic_xor(addr + BIT_WORD(nr), BIT_MASK(nr));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	addr += BIT_WORD(nr);
	smp_mb();  /* barrier for proper semantics */
	return (_atomic_or(addr, mask) & mask) != 0;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	addr += BIT_WORD(nr);
	smp_mb();  /* barrier for proper semantics */
	return (_atomic_andn(addr, mask) & mask) != 0;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned nr,
				      volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	addr += BIT_WORD(nr);
	smp_mb();  /* barrier for proper semantics */
	return (_atomic_xor(addr, mask) & mask) != 0;
}

/* See discussion at smp_mb__before_atomic_dec() in <asm/atomic.h>. */
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() do {} while (0)

#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* _ASM_TILE_BITOPS_32_H */
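For reference, a minimal user-space sketch (not part of the patch) of the BIT_WORD()/BIT_MASK() indexing that set_bit() and friends rely on; the bitmap size and bit index below are made-up example values.

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BIT_WORD(nr)  ((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)  (1UL << ((nr) % BITS_PER_LONG))

int main(void)
{
	unsigned long bitmap[4] = { 0 };   /* room for 4 * BITS_PER_LONG bits */
	unsigned int nr = 70;              /* bit index larger than one word */

	/* Non-atomic model of set_bit(nr, bitmap): pick the word, OR in the mask. */
	bitmap[BIT_WORD(nr)] |= BIT_MASK(nr);

	printf("bit %u lives in word %u, mask 0x%lx\n",
	       nr, (unsigned int)BIT_WORD(nr), BIT_MASK(nr));
	return 0;
}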
@@ -0,0 +1,26 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_BITSPERLONG_H
#define _ASM_TILE_BITSPERLONG_H

#ifdef __LP64__
# define __BITS_PER_LONG 64
#else
# define __BITS_PER_LONG 32
#endif

#include <asm-generic/bitsperlong.h>

#endif /* _ASM_TILE_BITSPERLONG_H */
@@ -0,0 +1 @@
#include <asm-generic/bug.h>
@@ -0,0 +1 @@
#include <asm-generic/bugs.h>
@@ -0,0 +1 @@
#include <linux/byteorder/little_endian.h>
@@ -0,0 +1,52 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_CACHE_H
#define _ASM_TILE_CACHE_H

#include <arch/chip.h>

/* bytes per L1 data cache line */
#define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)

/* bytes per L2 cache line */
#define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
#define L2_CACHE_BYTES (1 << L2_CACHE_SHIFT)
#define L2_CACHE_ALIGN(x) (((x)+(L2_CACHE_BYTES-1)) & -L2_CACHE_BYTES)

/*
 * TILE-Gx is fully coherent, so we don't need to define
 * ARCH_KMALLOC_MINALIGN.
 */
#ifndef __tilegx__
#define ARCH_KMALLOC_MINALIGN L2_CACHE_BYTES
#endif

/* use the cache line size for the L2, which is where it counts */
#define SMP_CACHE_BYTES_SHIFT L2_CACHE_SHIFT
#define SMP_CACHE_BYTES L2_CACHE_BYTES
#define INTERNODE_CACHE_SHIFT L2_CACHE_SHIFT
#define INTERNODE_CACHE_BYTES L2_CACHE_BYTES

/* Group together read-mostly things to avoid cache false sharing */
#define __read_mostly __attribute__((__section__(".data.read_mostly")))

/*
 * Attribute for data that is kept read/write coherent until the end of
 * initialization, then bumped to read/only incoherent for performance.
 */
#define __write_once __attribute__((__section__(".w1data")))

#endif /* _ASM_TILE_CACHE_H */
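A quick user-space check of the round-up arithmetic L2_CACHE_ALIGN() performs; the 64-byte line size is an assumed value for illustration, since the real size comes from CHIP_L2_LOG_LINE_SIZE().

#include <stdio.h>

#define L2_CACHE_BYTES    64   /* assumed line size, for illustration */
#define L2_CACHE_ALIGN(x) (((x) + (L2_CACHE_BYTES - 1)) & -L2_CACHE_BYTES)

int main(void)
{
	/* Sizes are rounded up to the next multiple of the L2 line size. */
	printf("%d %d %d\n",
	       L2_CACHE_ALIGN(1),   /* -> 64  */
	       L2_CACHE_ALIGN(64),  /* -> 64  */
	       L2_CACHE_ALIGN(65)); /* -> 128 */
	return 0;
}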
|
|
@ -0,0 +1,140 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_CACHEFLUSH_H
|
||||
#define _ASM_TILE_CACHEFLUSH_H
|
||||
|
||||
#include <arch/chip.h>
|
||||
|
||||
/* Keep includes the same across arches. */
|
||||
#include <linux/mm.h>
|
||||
#include <linux/cache.h>
|
||||
#include <asm/system.h>
|
||||
#include <arch/icache.h>
|
||||
|
||||
/* Caches are physically-indexed and so don't need special treatment */
|
||||
#define flush_cache_all() do { } while (0)
|
||||
#define flush_cache_mm(mm) do { } while (0)
|
||||
#define flush_cache_dup_mm(mm) do { } while (0)
|
||||
#define flush_cache_range(vma, start, end) do { } while (0)
|
||||
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
|
||||
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
|
||||
#define flush_dcache_page(page) do { } while (0)
|
||||
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
||||
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
||||
#define flush_cache_vmap(start, end) do { } while (0)
|
||||
#define flush_cache_vunmap(start, end) do { } while (0)
|
||||
#define flush_icache_page(vma, pg) do { } while (0)
|
||||
#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
|
||||
|
||||
/* Flush the icache just on this cpu */
|
||||
extern void __flush_icache_range(unsigned long start, unsigned long end);
|
||||
|
||||
/* Flush the entire icache on this cpu. */
|
||||
#define __flush_icache() __flush_icache_range(0, CHIP_L1I_CACHE_SIZE())
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
/*
|
||||
* When the kernel writes to its own text we need to do an SMP
|
||||
* broadcast to make the L1I coherent everywhere. This includes
|
||||
* module load and single step.
|
||||
*/
|
||||
extern void flush_icache_range(unsigned long start, unsigned long end);
|
||||
#else
|
||||
#define flush_icache_range __flush_icache_range
|
||||
#endif
|
||||
|
||||
/*
|
||||
* An update to an executable user page requires icache flushing.
|
||||
* We could carefully update only tiles that are running this process,
|
||||
* and rely on the fact that we flush the icache on every context
|
||||
* switch to avoid doing extra work here. But for now, I'll be
|
||||
* conservative and just do a global icache flush.
|
||||
*/
|
||||
static inline void copy_to_user_page(struct vm_area_struct *vma,
|
||||
struct page *page, unsigned long vaddr,
|
||||
void *dst, void *src, int len)
|
||||
{
|
||||
memcpy(dst, src, len);
|
||||
if (vma->vm_flags & VM_EXEC) {
|
||||
flush_icache_range((unsigned long) dst,
|
||||
(unsigned long) dst + len);
|
||||
}
|
||||
}
|
||||
|
||||
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
|
||||
memcpy((dst), (src), (len))
|
||||
|
||||
/*
|
||||
* Invalidate a VA range; pads to L2 cacheline boundaries.
|
||||
*
|
||||
* Note that on TILE64, __inv_buffer() actually flushes modified
|
||||
* cache lines in addition to invalidating them, i.e., it's the
|
||||
* same as __finv_buffer().
|
||||
*/
|
||||
static inline void __inv_buffer(void *buffer, size_t size)
|
||||
{
|
||||
char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
|
||||
char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
|
||||
while (next < finish) {
|
||||
__insn_inv(next);
|
||||
next += CHIP_INV_STRIDE();
|
||||
}
|
||||
}
|
||||
|
||||
/* Flush a VA range; pads to L2 cacheline boundaries. */
|
||||
static inline void __flush_buffer(void *buffer, size_t size)
|
||||
{
|
||||
char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
|
||||
char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
|
||||
while (next < finish) {
|
||||
__insn_flush(next);
|
||||
next += CHIP_FLUSH_STRIDE();
|
||||
}
|
||||
}
|
||||
|
||||
/* Flush & invalidate a VA range; pads to L2 cacheline boundaries. */
|
||||
static inline void __finv_buffer(void *buffer, size_t size)
|
||||
{
|
||||
char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
|
||||
char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
|
||||
while (next < finish) {
|
||||
__insn_finv(next);
|
||||
next += CHIP_FINV_STRIDE();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Invalidate a VA range, then memory fence. */
|
||||
static inline void inv_buffer(void *buffer, size_t size)
|
||||
{
|
||||
__inv_buffer(buffer, size);
|
||||
mb_incoherent();
|
||||
}
|
||||
|
||||
/* Flush a VA range, then memory fence. */
|
||||
static inline void flush_buffer(void *buffer, size_t size)
|
||||
{
|
||||
__flush_buffer(buffer, size);
|
||||
mb_incoherent();
|
||||
}
|
||||
|
||||
/* Flush & invalidate a VA range, then memory fence. */
|
||||
static inline void finv_buffer(void *buffer, size_t size)
|
||||
{
|
||||
__finv_buffer(buffer, size);
|
||||
mb_incoherent();
|
||||
}
|
||||
|
||||
#endif /* _ASM_TILE_CACHEFLUSH_H */
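The three buffer helpers above share one bounds computation: round the start down to an L2 line boundary and the end up. A small user-space model of that arithmetic, with an assumed 64-byte line size and made-up addresses:

#include <stdio.h>

#define L2_CACHE_BYTES    64   /* assumed line size, for illustration */
#define L2_CACHE_ALIGN(x) (((x) + (L2_CACHE_BYTES - 1)) & -L2_CACHE_BYTES)

int main(void)
{
	long buffer = 0x10030;   /* stand-in for the buffer address */
	long size = 100;

	long next = buffer & -L2_CACHE_BYTES;          /* round start down */
	long finish = L2_CACHE_ALIGN(buffer + size);   /* round end up */

	printf("touch lines 0x%lx .. 0x%lx (%ld lines)\n",
	       next, finish - L2_CACHE_BYTES,
	       (finish - next) / L2_CACHE_BYTES);
	return 0;
}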
|
|
@@ -0,0 +1,24 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_CHECKSUM_H
#define _ASM_TILE_CHECKSUM_H

#include <asm-generic/checksum.h>

/* Allow us to provide a more optimized do_csum(). */
__wsum do_csum(const unsigned char *buff, int len);
#define do_csum do_csum

#endif /* _ASM_TILE_CHECKSUM_H */
|
|
@ -0,0 +1,257 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_COMPAT_H
|
||||
#define _ASM_TILE_COMPAT_H
|
||||
|
||||
/*
|
||||
* Architecture specific compatibility types
|
||||
*/
|
||||
#include <linux/types.h>
|
||||
#include <linux/sched.h>
|
||||
|
||||
#define COMPAT_USER_HZ 100
|
||||
|
||||
/* "long" and pointer-based types are different. */
|
||||
typedef s32 compat_long_t;
|
||||
typedef u32 compat_ulong_t;
|
||||
typedef u32 compat_size_t;
|
||||
typedef s32 compat_ssize_t;
|
||||
typedef s32 compat_off_t;
|
||||
typedef s32 compat_time_t;
|
||||
typedef s32 compat_clock_t;
|
||||
typedef u32 compat_ino_t;
|
||||
typedef u32 compat_caddr_t;
|
||||
typedef u32 compat_uptr_t;
|
||||
|
||||
/* Many types are "int" or otherwise the same. */
|
||||
typedef __kernel_pid_t compat_pid_t;
|
||||
typedef __kernel_uid_t __compat_uid_t;
|
||||
typedef __kernel_gid_t __compat_gid_t;
|
||||
typedef __kernel_uid32_t __compat_uid32_t;
|
||||
typedef __kernel_uid32_t __compat_gid32_t;
|
||||
typedef __kernel_mode_t compat_mode_t;
|
||||
typedef __kernel_dev_t compat_dev_t;
|
||||
typedef __kernel_loff_t compat_loff_t;
|
||||
typedef __kernel_nlink_t compat_nlink_t;
|
||||
typedef __kernel_ipc_pid_t compat_ipc_pid_t;
|
||||
typedef __kernel_daddr_t compat_daddr_t;
|
||||
typedef __kernel_fsid_t compat_fsid_t;
|
||||
typedef __kernel_timer_t compat_timer_t;
|
||||
typedef __kernel_key_t compat_key_t;
|
||||
typedef int compat_int_t;
|
||||
typedef s64 compat_s64;
|
||||
typedef uint compat_uint_t;
|
||||
typedef u64 compat_u64;
|
||||
|
||||
/* We use the same register dump format in 32-bit images. */
|
||||
typedef unsigned long compat_elf_greg_t;
|
||||
#define COMPAT_ELF_NGREG (sizeof(struct pt_regs) / sizeof(compat_elf_greg_t))
|
||||
typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
|
||||
|
||||
struct compat_timespec {
|
||||
compat_time_t tv_sec;
|
||||
s32 tv_nsec;
|
||||
};
|
||||
|
||||
struct compat_timeval {
|
||||
compat_time_t tv_sec;
|
||||
s32 tv_usec;
|
||||
};
|
||||
|
||||
#define compat_stat stat
|
||||
#define compat_statfs statfs
|
||||
|
||||
struct compat_sysctl {
|
||||
unsigned int name;
|
||||
int nlen;
|
||||
unsigned int oldval;
|
||||
unsigned int oldlenp;
|
||||
unsigned int newval;
|
||||
unsigned int newlen;
|
||||
unsigned int __unused[4];
|
||||
};
|
||||
|
||||
|
||||
struct compat_flock {
|
||||
short l_type;
|
||||
short l_whence;
|
||||
compat_off_t l_start;
|
||||
compat_off_t l_len;
|
||||
compat_pid_t l_pid;
|
||||
};
|
||||
|
||||
#define F_GETLK64 12 /* using 'struct flock64' */
|
||||
#define F_SETLK64 13
|
||||
#define F_SETLKW64 14
|
||||
|
||||
struct compat_flock64 {
|
||||
short l_type;
|
||||
short l_whence;
|
||||
compat_loff_t l_start;
|
||||
compat_loff_t l_len;
|
||||
compat_pid_t l_pid;
|
||||
};
|
||||
|
||||
#define COMPAT_RLIM_INFINITY 0xffffffff
|
||||
|
||||
#define _COMPAT_NSIG 64
|
||||
#define _COMPAT_NSIG_BPW 32
|
||||
|
||||
typedef u32 compat_sigset_word;
|
||||
|
||||
#define COMPAT_OFF_T_MAX 0x7fffffff
|
||||
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
|
||||
|
||||
struct compat_ipc64_perm {
|
||||
compat_key_t key;
|
||||
__compat_uid32_t uid;
|
||||
__compat_gid32_t gid;
|
||||
__compat_uid32_t cuid;
|
||||
__compat_gid32_t cgid;
|
||||
unsigned short mode;
|
||||
unsigned short __pad1;
|
||||
unsigned short seq;
|
||||
unsigned short __pad2;
|
||||
compat_ulong_t unused1;
|
||||
compat_ulong_t unused2;
|
||||
};
|
||||
|
||||
struct compat_semid64_ds {
|
||||
struct compat_ipc64_perm sem_perm;
|
||||
compat_time_t sem_otime;
|
||||
compat_ulong_t __unused1;
|
||||
compat_time_t sem_ctime;
|
||||
compat_ulong_t __unused2;
|
||||
compat_ulong_t sem_nsems;
|
||||
compat_ulong_t __unused3;
|
||||
compat_ulong_t __unused4;
|
||||
};
|
||||
|
||||
struct compat_msqid64_ds {
|
||||
struct compat_ipc64_perm msg_perm;
|
||||
compat_time_t msg_stime;
|
||||
compat_ulong_t __unused1;
|
||||
compat_time_t msg_rtime;
|
||||
compat_ulong_t __unused2;
|
||||
compat_time_t msg_ctime;
|
||||
compat_ulong_t __unused3;
|
||||
compat_ulong_t msg_cbytes;
|
||||
compat_ulong_t msg_qnum;
|
||||
compat_ulong_t msg_qbytes;
|
||||
compat_pid_t msg_lspid;
|
||||
compat_pid_t msg_lrpid;
|
||||
compat_ulong_t __unused4;
|
||||
compat_ulong_t __unused5;
|
||||
};
|
||||
|
||||
struct compat_shmid64_ds {
|
||||
struct compat_ipc64_perm shm_perm;
|
||||
compat_size_t shm_segsz;
|
||||
compat_time_t shm_atime;
|
||||
compat_ulong_t __unused1;
|
||||
compat_time_t shm_dtime;
|
||||
compat_ulong_t __unused2;
|
||||
compat_time_t shm_ctime;
|
||||
compat_ulong_t __unused3;
|
||||
compat_pid_t shm_cpid;
|
||||
compat_pid_t shm_lpid;
|
||||
compat_ulong_t shm_nattch;
|
||||
compat_ulong_t __unused4;
|
||||
compat_ulong_t __unused5;
|
||||
};
|
||||
|
||||
/*
|
||||
* A pointer passed in from user mode. This should not
|
||||
* be used for syscall parameters, just declare them
|
||||
* as pointers because the syscall entry code will have
|
||||
* appropriately converted them already.
|
||||
*/
|
||||
|
||||
static inline void __user *compat_ptr(compat_uptr_t uptr)
|
||||
{
|
||||
return (void __user *)(long)(s32)uptr;
|
||||
}
|
||||
|
||||
static inline compat_uptr_t ptr_to_compat(void __user *uptr)
|
||||
{
|
||||
return (u32)(unsigned long)uptr;
|
||||
}
|
||||
|
||||
/* Sign-extend when storing a kernel pointer to a user's ptregs. */
|
||||
static inline unsigned long ptr_to_compat_reg(void __user *uptr)
|
||||
{
|
||||
return (long)(int)(long __force)uptr;
|
||||
}
|
||||
|
||||
static inline void __user *compat_alloc_user_space(long len)
|
||||
{
|
||||
struct pt_regs *regs = task_pt_regs(current);
|
||||
return (void __user *)regs->sp - len;
|
||||
}
|
||||
|
||||
static inline int is_compat_task(void)
|
||||
{
|
||||
return current_thread_info()->status & TS_COMPAT;
|
||||
}
|
||||
|
||||
extern int compat_setup_rt_frame(int sig, struct k_sigaction *ka,
|
||||
siginfo_t *info, sigset_t *set,
|
||||
struct pt_regs *regs);
|
||||
|
||||
/* Compat syscalls. */
|
||||
struct compat_sigaction;
|
||||
struct compat_siginfo;
|
||||
struct compat_sigaltstack;
|
||||
long compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
|
||||
compat_uptr_t __user *envp);
|
||||
long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
|
||||
struct compat_sigaction __user *oact,
|
||||
size_t sigsetsize);
|
||||
long compat_sys_rt_sigqueueinfo(int pid, int sig,
|
||||
struct compat_siginfo __user *uinfo);
|
||||
long compat_sys_rt_sigreturn(void);
|
||||
long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
|
||||
struct compat_sigaltstack __user *uoss_ptr);
|
||||
long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high);
|
||||
long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high);
|
||||
long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
|
||||
u32 dummy, u32 low, u32 high);
|
||||
long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count,
|
||||
u32 dummy, u32 low, u32 high);
|
||||
long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len);
|
||||
long compat_sys_sync_file_range2(int fd, unsigned int flags,
|
||||
u32 offset_lo, u32 offset_hi,
|
||||
u32 nbytes_lo, u32 nbytes_hi);
|
||||
long compat_sys_fallocate(int fd, int mode,
|
||||
u32 offset_lo, u32 offset_hi,
|
||||
u32 len_lo, u32 len_hi);
|
||||
long compat_sys_sched_rr_get_interval(compat_pid_t pid,
|
||||
struct compat_timespec __user *interval);
|
||||
|
||||
/* Versions of compat functions that differ from generic Linux. */
|
||||
struct compat_msgbuf;
|
||||
long tile_compat_sys_msgsnd(int msqid,
|
||||
struct compat_msgbuf __user *msgp,
|
||||
size_t msgsz, int msgflg);
|
||||
long tile_compat_sys_msgrcv(int msqid,
|
||||
struct compat_msgbuf __user *msgp,
|
||||
size_t msgsz, long msgtyp, int msgflg);
|
||||
long tile_compat_sys_ptrace(compat_long_t request, compat_long_t pid,
|
||||
compat_long_t addr, compat_long_t data);
|
||||
|
||||
/* Tilera Linux syscalls that don't have "compat" versions. */
|
||||
#define compat_sys_flush_cache sys_flush_cache
|
||||
|
||||
#endif /* _ASM_TILE_COMPAT_H */
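A user-space model (illustration only, not kernel code) of the compat_ptr()/ptr_to_compat() convention above: a 32-bit compat pointer is sign-extended back to 64 bits, and truncated again on the way out.

#include <stdio.h>
#include <stdint.h>

static uint64_t model_compat_ptr(uint32_t uptr)
{
	/* Same cast chain as compat_ptr(): (long)(s32)uptr, i.e. sign-extend. */
	return (uint64_t)(int64_t)(int32_t)uptr;
}

static uint32_t model_ptr_to_compat(uint64_t uptr)
{
	/* ptr_to_compat() simply truncates to 32 bits. */
	return (uint32_t)uptr;
}

int main(void)
{
	uint32_t p = 0x80000000u;
	printf("0x%llx -> 0x%x\n",
	       (unsigned long long)model_compat_ptr(p),
	       model_ptr_to_compat(model_compat_ptr(p)));
	/* prints 0xffffffff80000000 -> 0x80000000 */
	return 0;
}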
@@ -0,0 +1 @@
#include <asm-generic/cputime.h>
@@ -0,0 +1,31 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_CURRENT_H
#define _ASM_TILE_CURRENT_H

#include <linux/thread_info.h>

struct task_struct;

static inline struct task_struct *get_current(void)
{
	return current_thread_info()->task;
}
#define current get_current()

/* Return a usable "task_struct" pointer even if the real one is corrupt. */
struct task_struct *validate_current(void);

#endif /* _ASM_TILE_CURRENT_H */
@@ -0,0 +1,34 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_DELAY_H
#define _ASM_TILE_DELAY_H

/* Undefined functions to get compile-time errors. */
extern void __bad_udelay(void);
extern void __bad_ndelay(void);

extern void __udelay(unsigned long usecs);
extern void __ndelay(unsigned long nsecs);
extern void __delay(unsigned long loops);

#define udelay(n) (__builtin_constant_p(n) ? \
	((n) > 20000 ? __bad_udelay() : __ndelay((n) * 1000)) : \
	__udelay(n))

#define ndelay(n) (__builtin_constant_p(n) ? \
	((n) > 20000 ? __bad_ndelay() : __ndelay(n)) : \
	__ndelay(n))

#endif /* _ASM_TILE_DELAY_H */
@@ -0,0 +1 @@
#include <asm-generic/device.h>
@@ -0,0 +1 @@
#include <asm-generic/div64.h>
|
|
@ -0,0 +1,102 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_DMA_MAPPING_H
|
||||
#define _ASM_TILE_DMA_MAPPING_H
|
||||
|
||||
#include <linux/mm.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/cache.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
/*
|
||||
* Note that on x86 and powerpc, there is a "struct dma_mapping_ops"
|
||||
* that is used for all the DMA operations. For now, we don't have an
|
||||
* equivalent on tile, because we only have a single way of doing DMA.
|
||||
* (Tilera bug 7994 to use dma_mapping_ops.)
|
||||
*/
|
||||
|
||||
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
|
||||
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
|
||||
|
||||
extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
|
||||
enum dma_data_direction);
|
||||
extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
|
||||
size_t size, enum dma_data_direction);
|
||||
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
|
||||
enum dma_data_direction);
|
||||
extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
|
||||
int nhwentries, enum dma_data_direction);
|
||||
extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t size,
|
||||
enum dma_data_direction);
|
||||
extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
|
||||
size_t size, enum dma_data_direction);
|
||||
extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
|
||||
int nelems, enum dma_data_direction);
|
||||
extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
|
||||
int nelems, enum dma_data_direction);
|
||||
|
||||
|
||||
void *dma_alloc_coherent(struct device *dev, size_t size,
|
||||
dma_addr_t *dma_handle, gfp_t flag);
|
||||
|
||||
void dma_free_coherent(struct device *dev, size_t size,
|
||||
void *vaddr, dma_addr_t dma_handle);
|
||||
|
||||
extern void dma_sync_single_for_cpu(struct device *, dma_addr_t, size_t,
|
||||
enum dma_data_direction);
|
||||
extern void dma_sync_single_for_device(struct device *, dma_addr_t,
|
||||
size_t, enum dma_data_direction);
|
||||
extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t,
|
||||
unsigned long offset, size_t,
|
||||
enum dma_data_direction);
|
||||
extern void dma_sync_single_range_for_device(struct device *, dma_addr_t,
|
||||
unsigned long offset, size_t,
|
||||
enum dma_data_direction);
|
||||
extern void dma_cache_sync(void *vaddr, size_t, enum dma_data_direction);
|
||||
|
||||
static inline int
|
||||
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int
|
||||
dma_supported(struct device *dev, u64 mask)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline int
|
||||
dma_set_mask(struct device *dev, u64 mask)
|
||||
{
|
||||
if (!dev->dma_mask || !dma_supported(dev, mask))
|
||||
return -EIO;
|
||||
|
||||
*dev->dma_mask = mask;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int
|
||||
dma_get_cache_alignment(void)
|
||||
{
|
||||
return L2_CACHE_BYTES;
|
||||
}
|
||||
|
||||
#define dma_is_consistent(d, h) (1)
|
||||
|
||||
|
||||
#endif /* _ASM_TILE_DMA_MAPPING_H */
|
|
@@ -0,0 +1,25 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_DMA_H
#define _ASM_TILE_DMA_H

#include <asm-generic/dma.h>

/* Needed by drivers/pci/quirks.c */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#endif

#endif /* _ASM_TILE_DMA_H */
|
|
@ -0,0 +1,167 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_ELF_H
|
||||
#define _ASM_TILE_ELF_H
|
||||
|
||||
/*
|
||||
* ELF register definitions.
|
||||
*/
|
||||
|
||||
#include <arch/chip.h>
|
||||
|
||||
#include <linux/ptrace.h>
|
||||
#include <asm/byteorder.h>
|
||||
#include <asm/page.h>
|
||||
|
||||
typedef unsigned long elf_greg_t;
|
||||
|
||||
#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
|
||||
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
|
||||
|
||||
#define EM_TILE64 187
|
||||
#define EM_TILEPRO 188
|
||||
#define EM_TILEGX 191
|
||||
|
||||
/* Provide a nominal data structure. */
|
||||
#define ELF_NFPREG 0
|
||||
typedef double elf_fpreg_t;
|
||||
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
|
||||
|
||||
#ifdef __tilegx__
|
||||
#define ELF_CLASS ELFCLASS64
|
||||
#else
|
||||
#define ELF_CLASS ELFCLASS32
|
||||
#endif
|
||||
#define ELF_DATA ELFDATA2LSB
|
||||
|
||||
/*
|
||||
* There seems to be a bug in how compat_binfmt_elf.c works: it
|
||||
* #undefs ELF_ARCH, but it is then used in binfmt_elf.c for fill_note_info().
|
||||
* Hack around this by providing an enum value of ELF_ARCH.
|
||||
*/
|
||||
enum { ELF_ARCH = CHIP_ELF_TYPE() };
|
||||
#define ELF_ARCH ELF_ARCH
|
||||
|
||||
/*
|
||||
* This is used to ensure we don't load something for the wrong architecture.
|
||||
*/
|
||||
#define elf_check_arch(x) \
|
||||
((x)->e_ident[EI_CLASS] == ELF_CLASS && \
|
||||
(x)->e_machine == CHIP_ELF_TYPE())
|
||||
|
||||
/* The module loader only handles a few relocation types. */
|
||||
#ifndef __tilegx__
|
||||
#define R_TILE_32 1
|
||||
#define R_TILE_JOFFLONG_X1 15
|
||||
#define R_TILE_IMM16_X0_LO 25
|
||||
#define R_TILE_IMM16_X1_LO 26
|
||||
#define R_TILE_IMM16_X0_HA 29
|
||||
#define R_TILE_IMM16_X1_HA 30
|
||||
#else
|
||||
#define R_TILEGX_64 1
|
||||
#define R_TILEGX_JUMPOFF_X1 21
|
||||
#define R_TILEGX_IMM16_X0_HW0 36
|
||||
#define R_TILEGX_IMM16_X1_HW0 37
|
||||
#define R_TILEGX_IMM16_X0_HW1 38
|
||||
#define R_TILEGX_IMM16_X1_HW1 39
|
||||
#define R_TILEGX_IMM16_X0_HW2_LAST 48
|
||||
#define R_TILEGX_IMM16_X1_HW2_LAST 49
|
||||
#endif
|
||||
|
||||
/* Use standard page size for core dumps. */
|
||||
#define ELF_EXEC_PAGESIZE PAGE_SIZE
|
||||
|
||||
/*
|
||||
* This is the location that an ET_DYN program is loaded if exec'ed. Typical
|
||||
* use of this is to invoke "./ld.so someprog" to test out a new version of
|
||||
* the loader. We need to make sure that it is out of the way of the program
|
||||
* that it will "exec", and that there is sufficient room for the brk.
|
||||
*/
|
||||
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
|
||||
|
||||
#define ELF_CORE_COPY_REGS(_dest, _regs) \
|
||||
memcpy((char *) &_dest, (char *) _regs, \
|
||||
sizeof(struct pt_regs));
|
||||
|
||||
/* No additional FP registers to copy. */
|
||||
#define ELF_CORE_COPY_FPREGS(t, fpu) 0
|
||||
|
||||
/*
|
||||
* This yields a mask that user programs can use to figure out what
|
||||
* instruction set this CPU supports. This could be done in user space,
|
||||
* but it's not easy, and we've already done it here.
|
||||
*/
|
||||
#define ELF_HWCAP (0)
|
||||
|
||||
/*
|
||||
* This yields a string that ld.so will use to load implementation
|
||||
* specific libraries for optimization. This is more specific in
|
||||
* intent than poking at uname or /proc/cpuinfo.
|
||||
*/
|
||||
#define ELF_PLATFORM (NULL)
|
||||
|
||||
extern void elf_plat_init(struct pt_regs *regs, unsigned long load_addr);
|
||||
|
||||
#define ELF_PLAT_INIT(_r, load_addr) elf_plat_init(_r, load_addr)
|
||||
|
||||
extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
|
||||
#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
|
||||
|
||||
/* Tilera Linux has no personalities currently, so no need to do anything. */
|
||||
#define SET_PERSONALITY(ex) do { } while (0)
|
||||
|
||||
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
|
||||
/* Support auto-mapping of the user interrupt vectors. */
|
||||
struct linux_binprm;
|
||||
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
|
||||
int executable_stack);
|
||||
#ifdef CONFIG_COMPAT
|
||||
|
||||
#define COMPAT_ELF_PLATFORM "tilegx-m32"
|
||||
|
||||
/*
|
||||
* "Compat" binaries have the same machine type, but 32-bit class,
|
||||
* since they're not a separate machine type, but just a 32-bit
|
||||
* variant of the standard 64-bit architecture.
|
||||
*/
|
||||
#define compat_elf_check_arch(x) \
|
||||
((x)->e_ident[EI_CLASS] == ELFCLASS32 && \
|
||||
(x)->e_machine == CHIP_ELF_TYPE())
|
||||
|
||||
#define compat_start_thread(regs, ip, usp) do { \
|
||||
regs->pc = ptr_to_compat_reg((void *)(ip)); \
|
||||
regs->sp = ptr_to_compat_reg((void *)(usp)); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* Use SET_PERSONALITY to indicate compatibility via TS_COMPAT.
|
||||
*/
|
||||
#undef SET_PERSONALITY
|
||||
#define SET_PERSONALITY(ex) \
|
||||
do { \
|
||||
current->personality = PER_LINUX; \
|
||||
current_thread_info()->status &= ~TS_COMPAT; \
|
||||
} while (0)
|
||||
#define COMPAT_SET_PERSONALITY(ex) \
|
||||
do { \
|
||||
current->personality = PER_LINUX_32BIT; \
|
||||
current_thread_info()->status |= TS_COMPAT; \
|
||||
} while (0)
|
||||
|
||||
#define COMPAT_ELF_ET_DYN_BASE (0xffffffff / 3 * 2)
|
||||
|
||||
#endif /* CONFIG_COMPAT */
|
||||
|
||||
#endif /* _ASM_TILE_ELF_H */
|
|
@@ -0,0 +1 @@
#include <asm-generic/emergency-restart.h>
@@ -0,0 +1 @@
#include <asm-generic/errno.h>
@@ -0,0 +1 @@
#include <asm-generic/fcntl.h>
@ -0,0 +1,124 @@
|
|||
/*
|
||||
* Copyright (C) 1998 Ingo Molnar
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_FIXMAP_H
|
||||
#define _ASM_TILE_FIXMAP_H
|
||||
|
||||
#include <asm/page.h>
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <linux/kernel.h>
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
#include <linux/threads.h>
|
||||
#include <asm/kmap_types.h>
|
||||
#endif
|
||||
|
||||
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
|
||||
#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
|
||||
|
||||
/*
|
||||
* Here we define all the compile-time 'special' virtual
|
||||
* addresses. The point is to have a constant address at
|
||||
* compile time, but to set the physical address only
|
||||
* in the boot process. We allocate these special addresses
|
||||
* from the end of supervisor virtual memory backwards.
|
||||
* Also this lets us do fail-safe vmalloc(), we
|
||||
* can guarantee that these special addresses and
|
||||
* vmalloc()-ed addresses never overlap.
|
||||
*
|
||||
* these 'compile-time allocated' memory buffers are
|
||||
* fixed-size 4k pages. (or larger if used with an increment
|
||||
* higher than 1) use fixmap_set(idx,phys) to associate
|
||||
* physical memory with fixmap indices.
|
||||
*
|
||||
* TLB entries of such buffers will not be flushed across
|
||||
* task switches.
|
||||
*
|
||||
* We don't bother with a FIX_HOLE since above the fixmaps
|
||||
* is unmapped memory in any case.
|
||||
*/
|
||||
enum fixed_addresses {
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
|
||||
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
|
||||
#endif
|
||||
__end_of_permanent_fixed_addresses,
|
||||
|
||||
/*
|
||||
* Temporary boot-time mappings, used before ioremap() is functional.
|
||||
* Not currently needed by the Tile architecture.
|
||||
*/
|
||||
#define NR_FIX_BTMAPS 0
|
||||
#if NR_FIX_BTMAPS
|
||||
FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
|
||||
FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
|
||||
__end_of_fixed_addresses
|
||||
#else
|
||||
__end_of_fixed_addresses = __end_of_permanent_fixed_addresses
|
||||
#endif
|
||||
};
|
||||
|
||||
extern void __set_fixmap(enum fixed_addresses idx,
|
||||
unsigned long phys, pgprot_t flags);
|
||||
|
||||
#define set_fixmap(idx, phys) \
|
||||
__set_fixmap(idx, phys, PAGE_KERNEL)
|
||||
/*
|
||||
* Some hardware wants to get fixmapped without caching.
|
||||
*/
|
||||
#define set_fixmap_nocache(idx, phys) \
|
||||
__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
|
||||
|
||||
#define clear_fixmap(idx) \
|
||||
__set_fixmap(idx, 0, __pgprot(0))
|
||||
|
||||
#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
|
||||
#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
|
||||
#define FIXADDR_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_SIZE)
|
||||
#define FIXADDR_BOOT_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_BOOT_SIZE)
|
||||
|
||||
extern void __this_fixmap_does_not_exist(void);
|
||||
|
||||
/*
|
||||
* 'index to address' translation. If anyone tries to use the idx
|
||||
* directly without translation, we catch the bug with a NULL-dereference
|
||||
* kernel oops. Illegal ranges of incoming indices are caught too.
|
||||
*/
|
||||
static __always_inline unsigned long fix_to_virt(const unsigned int idx)
|
||||
{
|
||||
/*
|
||||
* this branch gets completely eliminated after inlining,
|
||||
* except when someone tries to use fixaddr indices in an
|
||||
* illegal way. (such as mixing up address types or using
|
||||
* out-of-range indices).
|
||||
*
|
||||
* If it doesn't get removed, the linker will complain
|
||||
* loudly with a reasonably clear error message..
|
||||
*/
|
||||
if (idx >= __end_of_fixed_addresses)
|
||||
__this_fixmap_does_not_exist();
|
||||
|
||||
return __fix_to_virt(idx);
|
||||
}
|
||||
|
||||
static inline unsigned long virt_to_fix(const unsigned long vaddr)
|
||||
{
|
||||
BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
|
||||
return __virt_to_fix(vaddr);
|
||||
}
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#endif /* _ASM_TILE_FIXMAP_H */
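The fixmap index/address mapping above is plain arithmetic below FIXADDR_TOP; a user-space model with assumed values for FIXADDR_TOP and PAGE_SHIFT:

#include <stdio.h>

#define PAGE_SHIFT  12                   /* assumed 4 KB pages */
#define PAGE_MASK   (~((1UL << PAGE_SHIFT) - 1))
#define FIXADDR_TOP 0xfd000000UL         /* assumed value, for illustration */

#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) ((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)

int main(void)
{
	unsigned long idx = 3;
	unsigned long va = __fix_to_virt(idx);

	/* Each index is one page below FIXADDR_TOP; the mapping round-trips. */
	printf("idx %lu -> va 0x%lx -> idx %lu\n", idx, va, __virt_to_fix(va));
	return 0;
}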
|
|
@@ -0,0 +1,20 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_FTRACE_H
#define _ASM_TILE_FTRACE_H

/* empty */

#endif /* _ASM_TILE_FTRACE_H */
|
|
@ -0,0 +1,141 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* These routines make two important assumptions:
|
||||
*
|
||||
* 1. atomic_t is really an int and can be freely cast back and forth
|
||||
* (validated in __init_atomic_per_cpu).
|
||||
*
|
||||
* 2. userspace uses sys_cmpxchg() for all atomic operations, thus using
|
||||
* the same locking convention that all the kernel atomic routines use.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_FUTEX_H
|
||||
#define _ASM_TILE_FUTEX_H
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <linux/futex.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/errno.h>
|
||||
|
||||
extern struct __get_user futex_set(int __user *v, int i);
|
||||
extern struct __get_user futex_add(int __user *v, int n);
|
||||
extern struct __get_user futex_or(int __user *v, int n);
|
||||
extern struct __get_user futex_andn(int __user *v, int n);
|
||||
extern struct __get_user futex_cmpxchg(int __user *v, int o, int n);
|
||||
|
||||
#ifndef __tilegx__
|
||||
extern struct __get_user futex_xor(int __user *v, int n);
|
||||
#else
|
||||
static inline struct __get_user futex_xor(int __user *uaddr, int n)
|
||||
{
|
||||
struct __get_user asm_ret = __get_user_4(uaddr);
|
||||
if (!asm_ret.err) {
|
||||
int oldval, newval;
|
||||
do {
|
||||
oldval = asm_ret.val;
|
||||
newval = oldval ^ n;
|
||||
asm_ret = futex_cmpxchg(uaddr, oldval, newval);
|
||||
} while (asm_ret.err == 0 && oldval != asm_ret.val);
|
||||
}
|
||||
return asm_ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
|
||||
{
|
||||
int op = (encoded_op >> 28) & 7;
|
||||
int cmp = (encoded_op >> 24) & 15;
|
||||
int oparg = (encoded_op << 8) >> 20;
|
||||
int cmparg = (encoded_op << 20) >> 20;
|
||||
int ret;
|
||||
struct __get_user asm_ret;
|
||||
|
||||
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
|
||||
oparg = 1 << oparg;
|
||||
|
||||
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
|
||||
return -EFAULT;
|
||||
|
||||
pagefault_disable();
|
||||
switch (op) {
|
||||
case FUTEX_OP_SET:
|
||||
asm_ret = futex_set(uaddr, oparg);
|
||||
break;
|
||||
case FUTEX_OP_ADD:
|
||||
asm_ret = futex_add(uaddr, oparg);
|
||||
break;
|
||||
case FUTEX_OP_OR:
|
||||
asm_ret = futex_or(uaddr, oparg);
|
||||
break;
|
||||
case FUTEX_OP_ANDN:
|
||||
asm_ret = futex_andn(uaddr, oparg);
|
||||
break;
|
||||
case FUTEX_OP_XOR:
|
||||
asm_ret = futex_xor(uaddr, oparg);
|
||||
break;
|
||||
default:
|
||||
asm_ret.err = -ENOSYS;
|
||||
}
|
||||
pagefault_enable();
|
||||
|
||||
ret = asm_ret.err;
|
||||
|
||||
if (!ret) {
|
||||
switch (cmp) {
|
||||
case FUTEX_OP_CMP_EQ:
|
||||
ret = (asm_ret.val == cmparg);
|
||||
break;
|
||||
case FUTEX_OP_CMP_NE:
|
||||
ret = (asm_ret.val != cmparg);
|
||||
break;
|
||||
case FUTEX_OP_CMP_LT:
|
||||
ret = (asm_ret.val < cmparg);
|
||||
break;
|
||||
case FUTEX_OP_CMP_GE:
|
||||
ret = (asm_ret.val >= cmparg);
|
||||
break;
|
||||
case FUTEX_OP_CMP_LE:
|
||||
ret = (asm_ret.val <= cmparg);
|
||||
break;
|
||||
case FUTEX_OP_CMP_GT:
|
||||
ret = (asm_ret.val > cmparg);
|
||||
break;
|
||||
default:
|
||||
ret = -ENOSYS;
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
|
||||
int newval)
|
||||
{
|
||||
struct __get_user asm_ret;
|
||||
|
||||
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
|
||||
return -EFAULT;
|
||||
|
||||
asm_ret = futex_cmpxchg(uaddr, oldval, newval);
|
||||
return asm_ret.err ? asm_ret.err : asm_ret.val;
|
||||
}
|
||||
|
||||
#ifndef __tilegx__
|
||||
/* Return failure from the atomic wrappers. */
|
||||
struct __get_user __atomic_bad_address(int __user *addr);
|
||||
#endif
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#endif /* _ASM_TILE_FUTEX_H */
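futex_atomic_op_inuser() unpacks its operation from a single encoded_op word; a stand-alone model of that field extraction, using made-up field values:

#include <stdio.h>

int main(void)
{
	/* Hypothetical encoding: op=1, cmp=2, oparg=16, cmparg=7. */
	int encoded_op = (1 << 28) | (2 << 24) | ((16 & 0xfff) << 12) | (7 & 0xfff);

	/* Same shift/mask expressions as the kernel routine above. */
	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	int oparg  = (encoded_op << 8) >> 20;    /* sign-extending 12-bit field */
	int cmparg = (encoded_op << 20) >> 20;   /* sign-extending 12-bit field */

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}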
|
|
@ -0,0 +1,47 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_HARDIRQ_H
|
||||
#define _ASM_TILE_HARDIRQ_H
|
||||
|
||||
#include <linux/threads.h>
|
||||
#include <linux/cache.h>
|
||||
|
||||
#include <asm/irq.h>
|
||||
|
||||
typedef struct {
|
||||
unsigned int __softirq_pending;
|
||||
long idle_timestamp;
|
||||
|
||||
/* Hard interrupt statistics. */
|
||||
unsigned int irq_timer_count;
|
||||
unsigned int irq_syscall_count;
|
||||
unsigned int irq_resched_count;
|
||||
unsigned int irq_hv_flush_count;
|
||||
unsigned int irq_call_count;
|
||||
unsigned int irq_hv_msg_count;
|
||||
unsigned int irq_dev_intr_count;
|
||||
|
||||
} ____cacheline_aligned irq_cpustat_t;
|
||||
|
||||
DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
|
||||
|
||||
#define __ARCH_IRQ_STAT
|
||||
#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
|
||||
|
||||
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
|
||||
|
||||
#define HARDIRQ_BITS 8
|
||||
|
||||
#endif /* _ASM_TILE_HARDIRQ_H */
|
|
@ -0,0 +1,56 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* Provide methods for the HARDWALL_FILE for accessing the UDN.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_HARDWALL_H
|
||||
#define _ASM_TILE_HARDWALL_H
|
||||
|
||||
#include <linux/ioctl.h>
|
||||
|
||||
#define HARDWALL_IOCTL_BASE 0xa2
|
||||
|
||||
/*
|
||||
* The HARDWALL_CREATE() ioctl is a macro with a "size" argument.
|
||||
* The resulting ioctl value is passed to the kernel in conjunction
|
||||
* with a pointer to a little-endian bitmask of cpus, which must be
|
||||
* physically in a rectangular configuration on the chip.
|
||||
* The "size" is the number of bytes of cpu mask data.
|
||||
*/
|
||||
#define _HARDWALL_CREATE 1
|
||||
#define HARDWALL_CREATE(size) \
|
||||
_IOC(_IOC_READ, HARDWALL_IOCTL_BASE, _HARDWALL_CREATE, (size))
|
||||
|
||||
#define _HARDWALL_ACTIVATE 2
|
||||
#define HARDWALL_ACTIVATE \
|
||||
_IO(HARDWALL_IOCTL_BASE, _HARDWALL_ACTIVATE)
|
||||
|
||||
#define _HARDWALL_DEACTIVATE 3
|
||||
#define HARDWALL_DEACTIVATE \
|
||||
_IO(HARDWALL_IOCTL_BASE, _HARDWALL_DEACTIVATE)
|
||||
|
||||
#ifndef __KERNEL__
|
||||
|
||||
/* This is the canonical name expected by userspace. */
|
||||
#define HARDWALL_FILE "/dev/hardwall"
|
||||
|
||||
#else
|
||||
|
||||
/* Hook for /proc/tile/hardwall. */
|
||||
struct seq_file;
|
||||
int proc_tile_hardwall_show(struct seq_file *sf, void *v);
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* _ASM_TILE_HARDWALL_H */
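A hedged user-space sketch of how a program might drive this interface; only the device name and ioctl numbers come from the header above, while the cpu mask and error handling are simplified placeholders.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

/* Mirrors HARDWALL_IOCTL_BASE, _HARDWALL_CREATE (1), _HARDWALL_ACTIVATE (2). */
#define HARDWALL_IOCTL_BASE   0xa2
#define HARDWALL_CREATE(size) _IOC(_IOC_READ, HARDWALL_IOCTL_BASE, 1, (size))
#define HARDWALL_ACTIVATE     _IO(HARDWALL_IOCTL_BASE, 2)

int main(void)
{
	/* Little-endian bitmask of cpus; a hypothetical rectangle of cpus 0-3. */
	uint32_t cpumask = 0xf;

	int fd = open("/dev/hardwall", O_RDWR);
	if (fd < 0) {
		perror("open /dev/hardwall");
		return 1;
	}
	if (ioctl(fd, HARDWALL_CREATE(sizeof(cpumask)), &cpumask) < 0)
		perror("HARDWALL_CREATE");
	else if (ioctl(fd, HARDWALL_ACTIVATE) < 0)
		perror("HARDWALL_ACTIVATE");

	close(fd);
	return 0;
}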
|
|
@ -0,0 +1,73 @@
|
|||
/*
|
||||
* Copyright (C) 1999 Gerhard Wichert, Siemens AG
|
||||
* Gerhard.Wichert@pdb.siemens.de
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* Used in CONFIG_HIGHMEM systems for memory pages which
|
||||
* are not addressable by direct kernel virtual addresses.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_HIGHMEM_H
|
||||
#define _ASM_TILE_HIGHMEM_H
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/threads.h>
|
||||
#include <asm/kmap_types.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/homecache.h>
|
||||
|
||||
/* declarations for highmem.c */
|
||||
extern unsigned long highstart_pfn, highend_pfn;
|
||||
|
||||
extern pte_t *pkmap_page_table;
|
||||
|
||||
/*
|
||||
* Ordering is:
|
||||
*
|
||||
* FIXADDR_TOP
|
||||
* fixed_addresses
|
||||
* FIXADDR_START
|
||||
* temp fixed addresses
|
||||
* FIXADDR_BOOT_START
|
||||
* Persistent kmap area
|
||||
* PKMAP_BASE
|
||||
* VMALLOC_END
|
||||
* Vmalloc area
|
||||
* VMALLOC_START
|
||||
* high_memory
|
||||
*/
|
||||
#define LAST_PKMAP_MASK (LAST_PKMAP-1)
|
||||
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
|
||||
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
|
||||
|
||||
void *kmap_high(struct page *page);
|
||||
void kunmap_high(struct page *page);
|
||||
void *kmap(struct page *page);
|
||||
void kunmap(struct page *page);
|
||||
void *kmap_fix_kpte(struct page *page, int finished);
|
||||
|
||||
/* This macro is used only in map_new_virtual() to map "page". */
|
||||
#define kmap_prot page_to_kpgprot(page)
|
||||
|
||||
void kunmap_atomic(void *kvaddr, enum km_type type);
|
||||
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
|
||||
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
|
||||
struct page *kmap_atomic_to_page(void *ptr);
|
||||
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
|
||||
void *kmap_atomic(struct page *page, enum km_type type);
|
||||
void kmap_atomic_fix_kpte(struct page *page, int finished);
|
||||
|
||||
#define flush_cache_kmaps() do { } while (0)
|
||||
|
||||
#endif /* _ASM_TILE_HIGHMEM_H */
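PKMAP_NR() and PKMAP_ADDR() above are inverse page-granular offsets from PKMAP_BASE; a user-space model with assumed values for PKMAP_BASE and the page size:

#include <stdio.h>

#define PAGE_SHIFT 12               /* assumed 4 KB pages */
#define PKMAP_BASE 0xfe000000UL     /* assumed base, for illustration */

#define PKMAP_NR(virt)  (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))

int main(void)
{
	unsigned long nr = 5;
	unsigned long va = PKMAP_ADDR(nr);

	/* Slot number and virtual address convert back and forth exactly. */
	printf("slot %lu -> va 0x%lx -> slot %lu\n", nr, va, PKMAP_NR(va));
	return 0;
}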
|
|
@ -0,0 +1,125 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* Handle issues around the Tile "home cache" model of coherence.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_HOMECACHE_H
|
||||
#define _ASM_TILE_HOMECACHE_H
|
||||
|
||||
#include <asm/page.h>
|
||||
#include <linux/cpumask.h>
|
||||
|
||||
struct page;
|
||||
struct task_struct;
|
||||
struct vm_area_struct;
|
||||
struct zone;
|
||||
|
||||
/*
|
||||
* Coherence point for the page is its memory controller.
|
||||
* It is not present in any cache (L1 or L2).
|
||||
*/
|
||||
#define PAGE_HOME_UNCACHED -1
|
||||
|
||||
/*
|
||||
* Is this page immutable (unwritable) and thus able to be cached more
|
||||
* widely than would otherwise be possible? On tile64 this means we
|
||||
* mark the PTE to cache locally; on tilepro it means we have "nc" set.
|
||||
*/
|
||||
#define PAGE_HOME_IMMUTABLE -2
|
||||
|
||||
/*
|
||||
* Each cpu considers its own cache to be the home for the page,
|
||||
* which makes it incoherent.
|
||||
*/
|
||||
#define PAGE_HOME_INCOHERENT -3
|
||||
|
||||
#if CHIP_HAS_CBOX_HOME_MAP()
|
||||
/* Home for the page is distributed via hash-for-home. */
|
||||
#define PAGE_HOME_HASH -4
|
||||
#endif
|
||||
|
||||
/* Homing is unknown or unspecified. Not valid for page_home(). */
|
||||
#define PAGE_HOME_UNKNOWN -5
|
||||
|
||||
/* Home on the current cpu. Not valid for page_home(). */
|
||||
#define PAGE_HOME_HERE -6
|
||||
|
||||
/* Support wrapper to use instead of explicit hv_flush_remote(). */
|
||||
extern void flush_remote(unsigned long cache_pfn, unsigned long cache_length,
|
||||
const struct cpumask *cache_cpumask,
|
||||
HV_VirtAddr tlb_va, unsigned long tlb_length,
|
||||
unsigned long tlb_pgsize,
|
||||
const struct cpumask *tlb_cpumask,
|
||||
HV_Remote_ASID *asids, int asidcount);
|
||||
|
||||
/* Set homing-related bits in a PTE (can also pass a pgprot_t). */
|
||||
extern pte_t pte_set_home(pte_t pte, int home);
|
||||
|
||||
/* Do a cache eviction on the specified cpus. */
|
||||
extern void homecache_evict(const struct cpumask *mask);
|
||||
|
||||
/*
|
||||
* Change a kernel page's homecache. It must not be mapped in user space.
|
||||
* If !CONFIG_HOMECACHE, only usable on LOWMEM, and can only be called when
|
||||
* no other cpu can reference the page, and causes a full-chip cache/TLB flush.
|
||||
*/
|
||||
extern void homecache_change_page_home(struct page *, int order, int home);
|
||||
|
||||
/*
|
||||
* Flush a page out of whatever cache(s) it is in.
|
||||
* This is more than just finv, since it properly handles waiting
|
||||
* for the data to reach memory on tilepro, but it can be quite
|
||||
* heavyweight, particularly on hash-for-home memory.
|
||||
*/
|
||||
extern void homecache_flush_cache(struct page *, int order);
|
||||
|
||||
/*
|
||||
* Allocate a page with the given GFP flags, home, and optionally
|
||||
* node. These routines are actually just wrappers around the normal
|
||||
* alloc_pages() / alloc_pages_node() functions, which set and clear
|
||||
* a per-cpu variable to communicate with homecache_new_kernel_page().
|
||||
* If !CONFIG_HOMECACHE, uses homecache_change_page_home().
|
||||
*/
|
||||
extern struct page *homecache_alloc_pages(gfp_t gfp_mask,
|
||||
unsigned int order, int home);
|
||||
extern struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
|
||||
unsigned int order, int home);
|
||||
#define homecache_alloc_page(gfp_mask, home) \
|
||||
homecache_alloc_pages(gfp_mask, 0, home)
|
||||
|
||||
/*
|
||||
* These routines are just pass-throughs to free_pages() when
|
||||
* we support full homecaching. If !CONFIG_HOMECACHE, then these
|
||||
* routines use homecache_change_page_home() to reset the home
|
||||
* back to the default before returning the page to the allocator.
|
||||
*/
|
||||
void homecache_free_pages(unsigned long addr, unsigned int order);
|
||||
#define homecache_free_page(page) \
|
||||
homecache_free_pages((page), 0)
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* Report the page home for LOWMEM pages by examining their kernel PTE,
|
||||
* or for highmem pages as the default home.
|
||||
*/
|
||||
extern int page_home(struct page *);
|
||||
|
||||
#define homecache_migrate_kthread() do {} while (0)
|
||||
|
||||
#define homecache_kpte_lock() 0
|
||||
#define homecache_kpte_unlock(flags) do {} while (0)
|
||||
|
||||
|
||||
#endif /* _ASM_TILE_HOMECACHE_H */
|
|
@ -0,0 +1,109 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_HUGETLB_H
|
||||
#define _ASM_TILE_HUGETLB_H
|
||||
|
||||
#include <asm/page.h>
|
||||
|
||||
|
||||
static inline int is_hugepage_only_range(struct mm_struct *mm,
|
||||
unsigned long addr,
|
||||
unsigned long len) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the arch doesn't supply something else, assume that hugepage
|
||||
* size aligned regions are ok without further preparation.
|
||||
*/
|
||||
static inline int prepare_hugepage_range(struct file *file,
|
||||
unsigned long addr, unsigned long len)
|
||||
{
|
||||
struct hstate *h = hstate_file(file);
|
||||
if (len & ~huge_page_mask(h))
|
||||
return -EINVAL;
|
||||
if (addr & ~huge_page_mask(h))
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
|
||||
unsigned long addr, unsigned long end,
|
||||
unsigned long floor,
|
||||
unsigned long ceiling)
|
||||
{
|
||||
free_pgd_range(tlb, addr, end, floor, ceiling);
|
||||
}
|
||||
|
||||
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep, pte_t pte)
|
||||
{
|
||||
set_pte_order(ptep, pte, HUGETLB_PAGE_ORDER);
|
||||
}
|
||||
|
||||
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
{
|
||||
return ptep_get_and_clear(mm, addr, ptep);
|
||||
}
|
||||
|
||||
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
{
|
||||
ptep_clear_flush(vma, addr, ptep);
|
||||
}
|
||||
|
||||
static inline int huge_pte_none(pte_t pte)
|
||||
{
|
||||
return pte_none(pte);
|
||||
}
|
||||
|
||||
static inline pte_t huge_pte_wrprotect(pte_t pte)
|
||||
{
|
||||
return pte_wrprotect(pte);
|
||||
}
|
||||
|
||||
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
{
|
||||
ptep_set_wrprotect(mm, addr, ptep);
|
||||
}
|
||||
|
||||
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
|
||||
unsigned long addr, pte_t *ptep,
|
||||
pte_t pte, int dirty)
|
||||
{
|
||||
return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
|
||||
}
|
||||
|
||||
static inline pte_t huge_ptep_get(pte_t *ptep)
|
||||
{
|
||||
return *ptep;
|
||||
}
|
||||
|
||||
static inline int arch_prepare_hugepage(struct page *page)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void arch_release_hugepage(struct page *page)
|
||||
{
|
||||
}
|
||||
|
||||
#endif /* _ASM_TILE_HUGETLB_H */
@@ -0,0 +1,60 @@
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* This header defines a wrapper interface for managing hypervisor
|
||||
* device calls that will result in an interrupt at some later time.
|
||||
* In particular, this provides wrappers for hv_dev_preada() and
|
||||
* hv_dev_pwritea().
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_HV_DRIVER_H
|
||||
#define _ASM_TILE_HV_DRIVER_H
|
||||
|
||||
#include <hv/hypervisor.h>
|
||||
|
||||
struct hv_driver_cb;
|
||||
|
||||
/* A callback to be invoked when an operation completes. */
|
||||
typedef void hv_driver_callback_t(struct hv_driver_cb *cb, __hv32 result);
|
||||
|
||||
/*
|
||||
* A structure to hold information about an outstanding call.
|
||||
* The driver must allocate a separate structure for each call.
|
||||
*/
|
||||
struct hv_driver_cb {
|
||||
hv_driver_callback_t *callback; /* Function to call on interrupt. */
|
||||
void *dev; /* Driver-specific state variable. */
|
||||
};
|
||||
|
||||
/* Wrapper for invoking hv_dev_preada(). */
|
||||
static inline int
|
||||
tile_hv_dev_preada(int devhdl, __hv32 flags, __hv32 sgl_len,
|
||||
HV_SGL sgl[/* sgl_len */], __hv64 offset,
|
||||
struct hv_driver_cb *callback)
|
||||
{
|
||||
return hv_dev_preada(devhdl, flags, sgl_len, sgl,
|
||||
offset, (HV_IntArg)callback);
|
||||
}
|
||||
|
||||
/* Wrapper for invoking hv_dev_pwritea(). */
|
||||
static inline int
|
||||
tile_hv_dev_pwritea(int devhdl, __hv32 flags, __hv32 sgl_len,
|
||||
HV_SGL sgl[/* sgl_len */], __hv64 offset,
|
||||
struct hv_driver_cb *callback)
|
||||
{
|
||||
return hv_dev_pwritea(devhdl, flags, sgl_len, sgl,
|
||||
offset, (HV_IntArg)callback);
|
||||
}
|
||||
|
||||
|
||||
#endif /* _ASM_TILE_HV_DRIVER_H */
@@ -0,0 +1,18 @@
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_HW_IRQ_H
|
||||
#define _ASM_TILE_HW_IRQ_H
|
||||
|
||||
#endif /* _ASM_TILE_HW_IRQ_H */
@@ -0,0 +1,25 @@
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_IDE_H
|
||||
#define _ASM_TILE_IDE_H
|
||||
|
||||
/* For IDE on PCI */
|
||||
#define MAX_HWIFS 10
|
||||
|
||||
#define ide_default_io_ctl(base) (0)
|
||||
|
||||
#include <asm-generic/ide_iops.h>
|
||||
|
||||
#endif /* _ASM_TILE_IDE_H */
@@ -0,0 +1,279 @@
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_IO_H
|
||||
#define _ASM_TILE_IO_H
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/bug.h>
|
||||
#include <asm/page.h>
|
||||
|
||||
#define IO_SPACE_LIMIT 0xfffffffful
|
||||
|
||||
/*
|
||||
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
|
||||
* access.
|
||||
*/
|
||||
#define xlate_dev_mem_ptr(p) __va(p)
|
||||
|
||||
/*
|
||||
* Convert a virtual cached pointer to an uncached pointer.
|
||||
*/
|
||||
#define xlate_dev_kmem_ptr(p) p
|
||||
|
||||
/*
|
||||
* Change "struct page" to physical address.
|
||||
*/
|
||||
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
|
||||
|
||||
/*
|
||||
* Some places try to pass in an loff_t for PHYSADDR (?!), so we cast it to
|
||||
* long before casting it to a pointer to avoid compiler warnings.
|
||||
*/
|
||||
#if CHIP_HAS_MMIO()
|
||||
extern void __iomem *ioremap(resource_size_t offset, unsigned long size);
|
||||
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
|
||||
pgprot_t pgprot);
|
||||
extern void iounmap(volatile void __iomem *addr);
|
||||
#else
|
||||
#define ioremap(physaddr, size) ((void __iomem *)(unsigned long)(physaddr))
|
||||
#define iounmap(addr) ((void)0)
|
||||
#endif
|
||||
|
||||
#define ioremap_nocache(physaddr, size) ioremap(physaddr, size)
|
||||
#define ioremap_writethrough(physaddr, size) ioremap(physaddr, size)
|
||||
#define ioremap_fullcache(physaddr, size) ioremap(physaddr, size)
|
||||
|
||||
void __iomem *ioport_map(unsigned long port, unsigned int len);
|
||||
extern inline void ioport_unmap(void __iomem *addr) {}
|
||||
|
||||
#define mmiowb()
|
||||
|
||||
/* Conversion between virtual and physical mappings. */
|
||||
#define mm_ptov(addr) ((void *)phys_to_virt(addr))
|
||||
#define mm_vtop(addr) ((unsigned long)virt_to_phys(addr))
|
||||
|
||||
#ifdef CONFIG_PCI
|
||||
|
||||
extern u8 _tile_readb(unsigned long addr);
|
||||
extern u16 _tile_readw(unsigned long addr);
|
||||
extern u32 _tile_readl(unsigned long addr);
|
||||
extern u64 _tile_readq(unsigned long addr);
|
||||
extern void _tile_writeb(u8 val, unsigned long addr);
|
||||
extern void _tile_writew(u16 val, unsigned long addr);
|
||||
extern void _tile_writel(u32 val, unsigned long addr);
|
||||
extern void _tile_writeq(u64 val, unsigned long addr);
|
||||
|
||||
#else
|
||||
|
||||
/*
|
||||
* The Tile architecture does not support IOMEM unless PCI is enabled.
|
||||
* Unfortunately we can't yet simply not declare these methods,
|
||||
* since some generic code that compiles into the kernel, but
|
||||
* we never run, uses them unconditionally.
|
||||
*/
|
||||
|
||||
static inline int iomem_panic(void)
|
||||
{
|
||||
panic("readb/writeb and friends do not exist on tile without PCI");
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline u8 _tile_readb(unsigned long addr)
|
||||
{
|
||||
return iomem_panic();
|
||||
}
|
||||
|
||||
static inline u16 _tile_readw(unsigned long addr)
|
||||
{
|
||||
return iomem_panic();
|
||||
}
|
||||
|
||||
static inline u32 _tile_readl(unsigned long addr)
|
||||
{
|
||||
return iomem_panic();
|
||||
}
|
||||
|
||||
static inline u64 _tile_readq(unsigned long addr)
|
||||
{
|
||||
return iomem_panic();
|
||||
}
|
||||
|
||||
static inline void _tile_writeb(u8 val, unsigned long addr)
|
||||
{
|
||||
iomem_panic();
|
||||
}
|
||||
|
||||
static inline void _tile_writew(u16 val, unsigned long addr)
|
||||
{
|
||||
iomem_panic();
|
||||
}
|
||||
|
||||
static inline void _tile_writel(u32 val, unsigned long addr)
|
||||
{
|
||||
iomem_panic();
|
||||
}
|
||||
|
||||
static inline void _tile_writeq(u64 val, unsigned long addr)
|
||||
{
|
||||
iomem_panic();
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#define readb(addr) _tile_readb((unsigned long)addr)
|
||||
#define readw(addr) _tile_readw((unsigned long)addr)
|
||||
#define readl(addr) _tile_readl((unsigned long)addr)
|
||||
#define readq(addr) _tile_readq((unsigned long)addr)
|
||||
#define writeb(val, addr) _tile_writeb(val, (unsigned long)addr)
|
||||
#define writew(val, addr) _tile_writew(val, (unsigned long)addr)
|
||||
#define writel(val, addr) _tile_writel(val, (unsigned long)addr)
|
||||
#define writeq(val, addr) _tile_writeq(val, (unsigned long)addr)
|
||||
|
||||
#define __raw_readb readb
|
||||
#define __raw_readw readw
|
||||
#define __raw_readl readl
|
||||
#define __raw_readq readq
|
||||
#define __raw_writeb writeb
|
||||
#define __raw_writew writew
|
||||
#define __raw_writel writel
|
||||
#define __raw_writeq writeq
|
||||
|
||||
#define readb_relaxed readb
|
||||
#define readw_relaxed readw
|
||||
#define readl_relaxed readl
|
||||
#define readq_relaxed readq
|
||||
|
||||
#define ioread8 readb
|
||||
#define ioread16 readw
|
||||
#define ioread32 readl
|
||||
#define ioread64 readq
|
||||
#define iowrite8 writeb
|
||||
#define iowrite16 writew
|
||||
#define iowrite32 writel
|
||||
#define iowrite64 writeq
|
||||
|
||||
static inline void *memcpy_fromio(void *dst, void *src, int len)
|
||||
{
|
||||
int x;
|
||||
BUG_ON((unsigned long)src & 0x3);
|
||||
for (x = 0; x < len; x += 4)
|
||||
*(u32 *)(dst + x) = readl(src + x);
|
||||
return dst;
|
||||
}
|
||||
|
||||
static inline void *memcpy_toio(void *dst, void *src, int len)
|
||||
{
|
||||
int x;
|
||||
BUG_ON((unsigned long)dst & 0x3);
|
||||
for (x = 0; x < len; x += 4)
|
||||
writel(*(u32 *)(src + x), dst + x);
|
||||
return dst;
|
||||
}
|
||||
|
||||
/*
|
||||
* The Tile architecture does not support IOPORT, even with PCI.
|
||||
* Unfortunately we can't yet simply not declare these methods,
|
||||
* since some generic code that compiles into the kernel, but
|
||||
* we never run, uses them unconditionally.
|
||||
*/
|
||||
|
||||
static inline int ioport_panic(void)
|
||||
{
|
||||
panic("inb/outb and friends do not exist on tile");
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline u8 inb(unsigned long addr)
|
||||
{
|
||||
return ioport_panic();
|
||||
}
|
||||
|
||||
static inline u16 inw(unsigned long addr)
|
||||
{
|
||||
return ioport_panic();
|
||||
}
|
||||
|
||||
static inline u32 inl(unsigned long addr)
|
||||
{
|
||||
return ioport_panic();
|
||||
}
|
||||
|
||||
static inline void outb(u8 b, unsigned long addr)
|
||||
{
|
||||
ioport_panic();
|
||||
}
|
||||
|
||||
static inline void outw(u16 b, unsigned long addr)
|
||||
{
|
||||
ioport_panic();
|
||||
}
|
||||
|
||||
static inline void outl(u32 b, unsigned long addr)
|
||||
{
|
||||
ioport_panic();
|
||||
}
|
||||
|
||||
#define inb_p(addr) inb(addr)
|
||||
#define inw_p(addr) inw(addr)
|
||||
#define inl_p(addr) inl(addr)
|
||||
#define outb_p(x, addr) outb((x), (addr))
|
||||
#define outw_p(x, addr) outw((x), (addr))
|
||||
#define outl_p(x, addr) outl((x), (addr))
|
||||
|
||||
static inline void insb(unsigned long addr, void *buffer, int count)
|
||||
{
|
||||
ioport_panic();
|
||||
}
|
||||
|
||||
static inline void insw(unsigned long addr, void *buffer, int count)
|
||||
{
|
||||
ioport_panic();
|
||||
}
|
||||
|
||||
static inline void insl(unsigned long addr, void *buffer, int count)
|
||||
{
|
||||
ioport_panic();
|
||||
}
|
||||
|
||||
static inline void outsb(unsigned long addr, const void *buffer, int count)
|
||||
{
|
||||
ioport_panic();
|
||||
}
|
||||
|
||||
static inline void outsw(unsigned long addr, const void *buffer, int count)
|
||||
{
|
||||
ioport_panic();
|
||||
}
|
||||
|
||||
static inline void outsl(unsigned long addr, const void *buffer, int count)
|
||||
{
|
||||
ioport_panic();
|
||||
}
|
||||
|
||||
#define ioread8_rep(p, dst, count) \
|
||||
insb((unsigned long) (p), (dst), (count))
|
||||
#define ioread16_rep(p, dst, count) \
|
||||
insw((unsigned long) (p), (dst), (count))
|
||||
#define ioread32_rep(p, dst, count) \
|
||||
insl((unsigned long) (p), (dst), (count))
|
||||
|
||||
#define iowrite8_rep(p, src, count) \
|
||||
outsb((unsigned long) (p), (src), (count))
|
||||
#define iowrite16_rep(p, src, count) \
|
||||
outsw((unsigned long) (p), (src), (count))
|
||||
#define iowrite32_rep(p, src, count) \
|
||||
outsl((unsigned long) (p), (src), (count))
|
||||
|
||||
#endif /* _ASM_TILE_IO_H */
@@ -0,0 +1 @@
#include <asm-generic/ioctl.h>
@@ -0,0 +1 @@
#include <asm-generic/ioctls.h>
@@ -0,0 +1 @@
#include <asm-generic/ipc.h>
@@ -0,0 +1 @@
#include <asm-generic/ipcbuf.h>
@@ -0,0 +1,87 @@
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_IRQ_H
|
||||
#define _ASM_TILE_IRQ_H
|
||||
|
||||
#include <linux/hardirq.h>
|
||||
|
||||
/* The hypervisor interface provides 32 IRQs. */
|
||||
#define NR_IRQS 32
|
||||
|
||||
/* IRQ numbers used for linux IPIs. */
|
||||
#define IRQ_RESCHEDULE 1
|
||||
|
||||
void ack_bad_irq(unsigned int irq);
|
||||
|
||||
/*
|
||||
* Different ways of handling interrupts. Tile interrupts are always
|
||||
* per-cpu; there is no global interrupt controller to implement
|
||||
* enable/disable. Most onboard devices can send their interrupts to
|
||||
* many tiles at the same time, and Tile-specific drivers know how to
|
||||
* deal with this.
|
||||
*
|
||||
* However, generic devices (usually PCIE based, sometimes GPIO)
|
||||
* expect that interrupts will fire on a single core at a time and
|
||||
* that the irq can be enabled or disabled from any core at any time.
|
||||
* We implement this by directing such interrupts to a single core.
|
||||
*
|
||||
* One added wrinkle is that PCI interrupts can be either
|
||||
* hardware-cleared (legacy interrupts) or software cleared (MSI).
|
||||
* Other generic device systems (GPIO) are always software-cleared.
|
||||
*
|
||||
* The enums below are used by drivers for onboard devices, including
|
||||
* the internals of PCI root complex and GPIO. They allow the driver
|
||||
* to tell the generic irq code what kind of interrupt is mapped to a
|
||||
* particular IRQ number.
|
||||
*/
|
||||
enum {
|
||||
/* per-cpu interrupt; use enable/disable_percpu_irq() to mask */
|
||||
TILE_IRQ_PERCPU,
|
||||
/* global interrupt, hardware responsible for clearing. */
|
||||
TILE_IRQ_HW_CLEAR,
|
||||
/* global interrupt, software responsible for clearing. */
|
||||
TILE_IRQ_SW_CLEAR,
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* Paravirtualized drivers should call this when they dynamically
|
||||
* allocate a new IRQ or discover an IRQ that was pre-allocated by the
|
||||
* hypervisor for use with their particular device. This gives the
|
||||
* IRQ subsystem an opportunity to do interrupt-type-specific
|
||||
* initialization.
|
||||
*
|
||||
* ISSUE: We should modify this API so that registering anything
|
||||
* except percpu interrupts also requires providing callback methods
|
||||
* for enabling and disabling the interrupt. This would allow the
|
||||
* generic IRQ code to proxy enable/disable_irq() calls back into the
|
||||
* PCI subsystem, which in turn could enable or disable the interrupt
|
||||
* at the PCI shim.
|
||||
*/
|
||||
void tile_irq_activate(unsigned int irq, int tile_irq_type);
|
||||
|
||||
/*
|
||||
* For onboard, non-PCI (e.g. TILE_IRQ_PERCPU) devices, drivers know
|
||||
* how to use enable/disable_percpu_irq() to manage interrupts on each
|
||||
* core. We can't use the generic enable/disable_irq() because they
|
||||
* use a single reference count per irq, rather than per cpu per irq.
|
||||
*/
|
||||
void enable_percpu_irq(unsigned int irq);
|
||||
void disable_percpu_irq(unsigned int irq);
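/*
 * Illustrative sketch only: how an onboard-device driver might wire up
 * a per-cpu interrupt using the declarations above.  The IRQ number,
 * handler and "example-device" name are hypothetical, and
 * <linux/interrupt.h> is assumed to be included by the caller.
 */
static inline int example_setup_percpu_device_irq(unsigned int irq,
						  irq_handler_t handler)
{
	int rc;

	/* Tell the IRQ subsystem what kind of interrupt this is. */
	tile_irq_activate(irq, TILE_IRQ_PERCPU);

	rc = request_irq(irq, handler, 0, "example-device", NULL);
	if (rc != 0)
		return rc;

	/* Unmask the interrupt on this core only. */
	enable_percpu_irq(irq);
	return 0;
}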
|
||||
|
||||
|
||||
void setup_irq_regs(void);
|
||||
|
||||
#endif /* _ASM_TILE_IRQ_H */
@@ -0,0 +1 @@
#include <asm-generic/irq_regs.h>
@@ -0,0 +1,266 @@
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_IRQFLAGS_H
|
||||
#define _ASM_TILE_IRQFLAGS_H
|
||||
|
||||
#include <arch/interrupts.h>
|
||||
#include <arch/chip.h>
|
||||
|
||||
/*
|
||||
* The set of interrupts we want to allow when interrupts are nominally
|
||||
* disabled. The remainder are effectively "NMI" interrupts from
|
||||
* the point of view of the generic Linux code. Note that synchronous
|
||||
* interrupts (aka "non-queued") are not blocked by the mask in any case.
|
||||
*/
|
||||
#if CHIP_HAS_AUX_PERF_COUNTERS()
|
||||
#define LINUX_MASKABLE_INTERRUPTS \
|
||||
(~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT)))
|
||||
#else
|
||||
#define LINUX_MASKABLE_INTERRUPTS \
|
||||
(~(INT_MASK(INT_PERF_COUNT)))
|
||||
#endif
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
/* NOTE: we can't include <linux/percpu.h> due to #include dependencies. */
|
||||
#include <asm/percpu.h>
|
||||
#include <arch/spr_def.h>
|
||||
|
||||
/* Set and clear kernel interrupt masks. */
|
||||
#if CHIP_HAS_SPLIT_INTR_MASK()
|
||||
#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32
|
||||
# error Fix assumptions about which word various interrupts are in
|
||||
#endif
|
||||
#define interrupt_mask_set(n) do { \
|
||||
int __n = (n); \
|
||||
int __mask = 1 << (__n & 0x1f); \
|
||||
if (__n < 32) \
|
||||
__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, __mask); \
|
||||
else \
|
||||
__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, __mask); \
|
||||
} while (0)
|
||||
#define interrupt_mask_reset(n) do { \
|
||||
int __n = (n); \
|
||||
int __mask = 1 << (__n & 0x1f); \
|
||||
if (__n < 32) \
|
||||
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, __mask); \
|
||||
else \
|
||||
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, __mask); \
|
||||
} while (0)
|
||||
#define interrupt_mask_check(n) ({ \
|
||||
int __n = (n); \
|
||||
(((__n < 32) ? \
|
||||
__insn_mfspr(SPR_INTERRUPT_MASK_1_0) : \
|
||||
__insn_mfspr(SPR_INTERRUPT_MASK_1_1)) \
|
||||
>> (__n & 0x1f)) & 1; \
|
||||
})
|
||||
#define interrupt_mask_set_mask(mask) do { \
|
||||
unsigned long long __m = (mask); \
|
||||
__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, (unsigned long)(__m)); \
|
||||
__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, (unsigned long)(__m>>32)); \
|
||||
} while (0)
|
||||
#define interrupt_mask_reset_mask(mask) do { \
|
||||
unsigned long long __m = (mask); \
|
||||
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, (unsigned long)(__m)); \
|
||||
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, (unsigned long)(__m>>32)); \
|
||||
} while (0)
|
||||
#else
|
||||
#define interrupt_mask_set(n) \
|
||||
__insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (1UL << (n)))
|
||||
#define interrupt_mask_reset(n) \
|
||||
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (1UL << (n)))
|
||||
#define interrupt_mask_check(n) \
|
||||
((__insn_mfspr(SPR_INTERRUPT_MASK_1) >> (n)) & 1)
|
||||
#define interrupt_mask_set_mask(mask) \
|
||||
__insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (mask))
|
||||
#define interrupt_mask_reset_mask(mask) \
|
||||
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (mask))
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The set of interrupts we want active if irqs are enabled.
|
||||
* Note that in particular, the tile timer interrupt comes and goes
|
||||
* from this set, since we have no other way to turn off the timer.
|
||||
* Likewise, INTCTRL_1 is removed and re-added during device
|
||||
* interrupts, as is the hardwall UDN_FIREWALL interrupt.
|
||||
* We use a low bit (MEM_ERROR) as our sentinel value and make sure it
|
||||
* is always claimed as an "active interrupt" so we can query that bit
|
||||
* to know our current state.
|
||||
*/
|
||||
DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
|
||||
#define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR)
|
||||
|
||||
/* Disable interrupts. */
|
||||
#define raw_local_irq_disable() \
|
||||
interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS)
|
||||
|
||||
/* Disable all interrupts, including NMIs. */
|
||||
#define raw_local_irq_disable_all() \
|
||||
interrupt_mask_set_mask(-1UL)
|
||||
|
||||
/* Re-enable all maskable interrupts. */
|
||||
#define raw_local_irq_enable() \
|
||||
interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask))
|
||||
|
||||
/* Disable or enable interrupts based on flag argument. */
|
||||
#define raw_local_irq_restore(disabled) do { \
|
||||
if (disabled) \
|
||||
raw_local_irq_disable(); \
|
||||
else \
|
||||
raw_local_irq_enable(); \
|
||||
} while (0)
|
||||
|
||||
/* Return true if "flags" argument means interrupts are disabled. */
|
||||
#define raw_irqs_disabled_flags(flags) ((flags) != 0)
|
||||
|
||||
/* Return true if interrupts are currently disabled. */
|
||||
#define raw_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR)
|
||||
|
||||
/* Save whether interrupts are currently disabled. */
|
||||
#define raw_local_save_flags(flags) ((flags) = raw_irqs_disabled())
|
||||
|
||||
/* Save whether interrupts are currently disabled, then disable them. */
|
||||
#define raw_local_irq_save(flags) \
|
||||
do { raw_local_save_flags(flags); raw_local_irq_disable(); } while (0)
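/*
 * Illustrative sketch only: the usual save/disable/restore pairing
 * built from the macros above.  The counter being protected is
 * hypothetical; most code uses the generic local_irq_* wrappers that
 * are layered on top of these raw_* macros.
 */
static inline void example_increment_with_irqs_masked(unsigned long *ctr)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* remember state, then mask irqs */
	(*ctr)++;			/* critical section */
	raw_local_irq_restore(flags);	/* re-enable only if previously enabled */
}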
|
||||
|
||||
/* Prevent the given interrupt from being enabled next time we enable irqs. */
|
||||
#define raw_local_irq_mask(interrupt) \
|
||||
(__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt))
|
||||
|
||||
/* Prevent the given interrupt from being enabled immediately. */
|
||||
#define raw_local_irq_mask_now(interrupt) do { \
|
||||
raw_local_irq_mask(interrupt); \
|
||||
interrupt_mask_set(interrupt); \
|
||||
} while (0)
|
||||
|
||||
/* Allow the given interrupt to be enabled next time we enable irqs. */
|
||||
#define raw_local_irq_unmask(interrupt) \
|
||||
(__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt))
|
||||
|
||||
/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
|
||||
#define raw_local_irq_unmask_now(interrupt) do { \
|
||||
raw_local_irq_unmask(interrupt); \
|
||||
if (!irqs_disabled()) \
|
||||
interrupt_mask_reset(interrupt); \
|
||||
} while (0)
|
||||
|
||||
#else /* __ASSEMBLY__ */
|
||||
|
||||
/* We provide a somewhat more restricted set for assembly. */
|
||||
|
||||
#ifdef __tilegx__
|
||||
|
||||
#if INT_MEM_ERROR != 0
|
||||
# error Fix IRQ_DISABLED() macro
|
||||
#endif
|
||||
|
||||
/* Return 0 or 1 to indicate whether interrupts are currently disabled. */
|
||||
#define IRQS_DISABLED(tmp) \
|
||||
mfspr tmp, INTERRUPT_MASK_1; \
|
||||
andi tmp, tmp, 1
|
||||
|
||||
/* Load up a pointer to &interrupts_enabled_mask. */
|
||||
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
|
||||
moveli reg, hw2_last(interrupts_enabled_mask); \
|
||||
shl16insli reg, reg, hw1(interrupts_enabled_mask); \
|
||||
shl16insli reg, reg, hw0(interrupts_enabled_mask); \
|
||||
add reg, reg, tp
|
||||
|
||||
/* Disable interrupts. */
|
||||
#define IRQ_DISABLE(tmp0, tmp1) \
|
||||
moveli tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS); \
|
||||
shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS); \
|
||||
shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS); \
|
||||
mtspr INTERRUPT_MASK_SET_1, tmp0
|
||||
|
||||
/* Disable ALL synchronous interrupts (used by NMI entry). */
|
||||
#define IRQ_DISABLE_ALL(tmp) \
|
||||
movei tmp, -1; \
|
||||
mtspr INTERRUPT_MASK_SET_1, tmp
|
||||
|
||||
/* Enable interrupts. */
|
||||
#define IRQ_ENABLE(tmp0, tmp1) \
|
||||
GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
|
||||
ld tmp0, tmp0; \
|
||||
mtspr INTERRUPT_MASK_RESET_1, tmp0
|
||||
|
||||
#else /* !__tilegx__ */
|
||||
|
||||
/*
|
||||
* Return 0 or 1 to indicate whether interrupts are currently disabled.
|
||||
* Note that it's important that we use a bit from the "low" mask word,
|
||||
* since when we are enabling, that is the word we write first, so if we
|
||||
* are interrupted after only writing half of the mask, the interrupt
|
||||
* handler will correctly observe that we have interrupts enabled, and
|
||||
* will enable interrupts itself on return from the interrupt handler
|
||||
* (making the original code's write of the "high" mask word idempotent).
|
||||
*/
|
||||
#define IRQS_DISABLED(tmp) \
|
||||
mfspr tmp, INTERRUPT_MASK_1_0; \
|
||||
shri tmp, tmp, INT_MEM_ERROR; \
|
||||
andi tmp, tmp, 1
|
||||
|
||||
/* Load up a pointer to &interrupts_enabled_mask. */
|
||||
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
|
||||
moveli reg, lo16(interrupts_enabled_mask); \
|
||||
auli reg, reg, ha16(interrupts_enabled_mask);\
|
||||
add reg, reg, tp
|
||||
|
||||
/* Disable interrupts. */
|
||||
#define IRQ_DISABLE(tmp0, tmp1) \
|
||||
{ \
|
||||
movei tmp0, -1; \
|
||||
moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS) \
|
||||
}; \
|
||||
{ \
|
||||
mtspr INTERRUPT_MASK_SET_1_0, tmp0; \
|
||||
auli tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS) \
|
||||
}; \
|
||||
mtspr INTERRUPT_MASK_SET_1_1, tmp1
|
||||
|
||||
/* Disable ALL synchronous interrupts (used by NMI entry). */
|
||||
#define IRQ_DISABLE_ALL(tmp) \
|
||||
movei tmp, -1; \
|
||||
mtspr INTERRUPT_MASK_SET_1_0, tmp; \
|
||||
mtspr INTERRUPT_MASK_SET_1_1, tmp
|
||||
|
||||
/* Enable interrupts. */
|
||||
#define IRQ_ENABLE(tmp0, tmp1) \
|
||||
GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
|
||||
{ \
|
||||
lw tmp0, tmp0; \
|
||||
addi tmp1, tmp0, 4 \
|
||||
}; \
|
||||
lw tmp1, tmp1; \
|
||||
mtspr INTERRUPT_MASK_RESET_1_0, tmp0; \
|
||||
mtspr INTERRUPT_MASK_RESET_1_1, tmp1
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Do the CPU's IRQ-state tracing from assembly code. We call a
|
||||
* C function, but almost everywhere we do, we don't mind clobbering
|
||||
* all the caller-saved registers.
|
||||
*/
|
||||
#ifdef CONFIG_TRACE_IRQFLAGS
|
||||
# define TRACE_IRQS_ON jal trace_hardirqs_on
|
||||
# define TRACE_IRQS_OFF jal trace_hardirqs_off
|
||||
#else
|
||||
# define TRACE_IRQS_ON
|
||||
# define TRACE_IRQS_OFF
|
||||
#endif
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#endif /* _ASM_TILE_IRQFLAGS_H */
@@ -0,0 +1 @@
#include <asm-generic/kdebug.h>
@@ -0,0 +1,53 @@
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* based on kexec.h from other architectures in linux-2.6.18
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_KEXEC_H
|
||||
#define _ASM_TILE_KEXEC_H
|
||||
|
||||
#include <asm/page.h>
|
||||
|
||||
/* Maximum physical address we can use pages from. */
|
||||
#define KEXEC_SOURCE_MEMORY_LIMIT TASK_SIZE
|
||||
/* Maximum address we can reach in physical address mode. */
|
||||
#define KEXEC_DESTINATION_MEMORY_LIMIT TASK_SIZE
|
||||
/* Maximum address we can use for the control code buffer. */
|
||||
#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
|
||||
|
||||
#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE
|
||||
|
||||
/*
|
||||
* We don't bother to provide a unique identifier, since we can only
|
||||
* reboot with a single type of kernel image anyway.
|
||||
*/
|
||||
#define KEXEC_ARCH KEXEC_ARCH_DEFAULT
|
||||
|
||||
/* Use the tile override for the page allocator. */
|
||||
struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order);
|
||||
#define kimage_alloc_pages_arch kimage_alloc_pages_arch
|
||||
|
||||
#define MAX_NOTE_BYTES 1024
|
||||
|
||||
/* Defined in arch/tile/kernel/relocate_kernel.S */
|
||||
extern const unsigned char relocate_new_kernel[];
|
||||
extern const unsigned long relocate_new_kernel_size;
|
||||
extern void relocate_new_kernel_end(void);
|
||||
|
||||
/* Provide a dummy definition to avoid build failures. */
|
||||
static inline void crash_setup_regs(struct pt_regs *n, struct pt_regs *o)
|
||||
{
|
||||
}
|
||||
|
||||
#endif /* _ASM_TILE_KEXEC_H */
@@ -0,0 +1,43 @@
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_KMAP_TYPES_H
|
||||
#define _ASM_TILE_KMAP_TYPES_H
|
||||
|
||||
/*
|
||||
* In TILE Linux each set of four of these uses another 16MB chunk of
|
||||
* address space, given 64 tiles and 64KB pages, so we only enable
|
||||
* ones that are required by the kernel configuration.
|
||||
*/
|
||||
enum km_type {
|
||||
KM_BOUNCE_READ,
|
||||
KM_SKB_SUNRPC_DATA,
|
||||
KM_SKB_DATA_SOFTIRQ,
|
||||
KM_USER0,
|
||||
KM_USER1,
|
||||
KM_BIO_SRC_IRQ,
|
||||
KM_IRQ0,
|
||||
KM_IRQ1,
|
||||
KM_SOFTIRQ0,
|
||||
KM_SOFTIRQ1,
|
||||
KM_MEMCPY0,
|
||||
KM_MEMCPY1,
|
||||
#if defined(CONFIG_HIGHPTE)
|
||||
KM_PTE0,
|
||||
KM_PTE1,
|
||||
#endif
|
||||
KM_TYPE_NR
|
||||
};
|
||||
|
||||
#endif /* _ASM_TILE_KMAP_TYPES_H */
@@ -0,0 +1,51 @@
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_LINKAGE_H
|
||||
#define _ASM_TILE_LINKAGE_H
|
||||
|
||||
#include <feedback.h>
|
||||
|
||||
#define __ALIGN .align 8
|
||||
|
||||
/*
|
||||
* The STD_ENTRY and STD_ENDPROC macros put the function in a
|
||||
* self-named .text.foo section, and if linker feedback collection
|
||||
* is enabled, add a suitable call to the feedback collection code.
|
||||
* STD_ENTRY_SECTION lets you specify a non-standard section name.
|
||||
*/
|
||||
|
||||
#define STD_ENTRY(name) \
|
||||
.pushsection .text.##name, "ax"; \
|
||||
ENTRY(name); \
|
||||
FEEDBACK_ENTER(name)
|
||||
|
||||
#define STD_ENTRY_SECTION(name, section) \
|
||||
.pushsection section, "ax"; \
|
||||
ENTRY(name); \
|
||||
FEEDBACK_ENTER_EXPLICIT(name, section, .Lend_##name - name)
|
||||
|
||||
#define STD_ENDPROC(name) \
|
||||
ENDPROC(name); \
|
||||
.Lend_##name:; \
|
||||
.popsection
|
||||
|
||||
/* Create a file-static function entry set up for feedback gathering. */
|
||||
#define STD_ENTRY_LOCAL(name) \
|
||||
.pushsection .text.##name, "ax"; \
|
||||
ALIGN; \
|
||||
name:; \
|
||||
FEEDBACK_ENTER(name)
|
||||
|
||||
#endif /* _ASM_TILE_LINKAGE_H */
@@ -0,0 +1 @@
#include <asm-generic/local.h>
@@ -0,0 +1,33 @@
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* The hypervisor's memory controller profiling infrastructure allows
|
||||
* the programmer to find out what fraction of the available memory
|
||||
* bandwidth is being consumed at each memory controller. The
|
||||
* profiler provides start, stop, and clear operations to allow
|
||||
* profiling over a specific time window, as well as an interface for
|
||||
* reading the most recent profile values.
|
||||
*
|
||||
* This header declares IOCTL codes necessary to control memprof.
|
||||
*/
|
||||
#ifndef _ASM_TILE_MEMPROF_H
|
||||
#define _ASM_TILE_MEMPROF_H
|
||||
|
||||
#include <linux/ioctl.h>
|
||||
|
||||
#define MEMPROF_IOCTL_TYPE 0xB4
|
||||
#define MEMPROF_IOCTL_START _IO(MEMPROF_IOCTL_TYPE, 0)
|
||||
#define MEMPROF_IOCTL_STOP _IO(MEMPROF_IOCTL_TYPE, 1)
|
||||
#define MEMPROF_IOCTL_CLEAR _IO(MEMPROF_IOCTL_TYPE, 2)
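/*
 * Illustrative user-space sketch only; the "/dev/memprof" device node
 * name is an assumption and is not defined by this header:
 *
 *	int fd = open("/dev/memprof", O_RDWR);
 *	ioctl(fd, MEMPROF_IOCTL_CLEAR);		// discard stale counts
 *	ioctl(fd, MEMPROF_IOCTL_START);		// begin the profiling window
 *	// ... run the workload of interest ...
 *	ioctl(fd, MEMPROF_IOCTL_STOP);		// freeze the counters
 *	close(fd);
 */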
|
||||
|
||||
#endif /* _ASM_TILE_MEMPROF_H */
@@ -0,0 +1,40 @@
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_MMAN_H
|
||||
#define _ASM_TILE_MMAN_H
|
||||
|
||||
#include <asm-generic/mman-common.h>
|
||||
#include <arch/chip.h>
|
||||
|
||||
/* Standard Linux flags */
|
||||
|
||||
#define MAP_POPULATE 0x0040 /* populate (prefault) pagetables */
|
||||
#define MAP_NONBLOCK 0x0080 /* do not block on IO */
|
||||
#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
|
||||
#define MAP_LOCKED 0x0200 /* pages are locked */
|
||||
#define MAP_NORESERVE 0x0400 /* don't check for reservations */
|
||||
#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
|
||||
#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
|
||||
#define MAP_HUGETLB 0x4000 /* create a huge page mapping */
|
||||
|
||||
|
||||
/*
|
||||
* Flags for mlockall
|
||||
*/
|
||||
#define MCL_CURRENT 1 /* lock all current mappings */
|
||||
#define MCL_FUTURE 2 /* lock all future mappings */
|
||||
|
||||
|
||||
#endif /* _ASM_TILE_MMAN_H */
@@ -0,0 +1,31 @@
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_MMU_H
|
||||
#define _ASM_TILE_MMU_H
|
||||
|
||||
/* Capture any arch- and mm-specific information. */
|
||||
struct mm_context {
|
||||
/*
|
||||
* Written under the mmap_sem semaphore; read without the
|
||||
* semaphore (but atomically); it is set conservatively.
|
||||
*/
|
||||
unsigned int priority_cached;
|
||||
};
|
||||
|
||||
typedef struct mm_context mm_context_t;
|
||||
|
||||
void leave_mm(int cpu);
|
||||
|
||||
#endif /* _ASM_TILE_MMU_H */
@@ -0,0 +1,131 @@
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_MMU_CONTEXT_H
|
||||
#define _ASM_TILE_MMU_CONTEXT_H
|
||||
|
||||
#include <linux/smp.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/homecache.h>
|
||||
#include <asm-generic/mm_hooks.h>
|
||||
|
||||
static inline int
|
||||
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Note that arch/tile/kernel/head.S also calls hv_install_context() */
|
||||
static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
|
||||
{
|
||||
/* FIXME: DIRECTIO should not always be set. FIXME. */
|
||||
int rc = hv_install_context(__pa(pgdir), prot, asid, HV_CTX_DIRECTIO);
|
||||
if (rc < 0)
|
||||
panic("hv_install_context failed: %d", rc);
|
||||
}
|
||||
|
||||
static inline void install_page_table(pgd_t *pgdir, int asid)
|
||||
{
|
||||
pte_t *ptep = virt_to_pte(NULL, (unsigned long)pgdir);
|
||||
__install_page_table(pgdir, asid, *ptep);
|
||||
}
|
||||
|
||||
/*
|
||||
* "Lazy" TLB mode is entered when we are switching to a kernel task,
|
||||
* which borrows the mm of the previous task. The goal of this
|
||||
* optimization is to avoid having to install a new page table. On
|
||||
* early x86 machines (where the concept originated) you couldn't do
|
||||
* anything short of a full page table install for invalidation, so
|
||||
* handling a remote TLB invalidate required doing a page table
|
||||
* re-install. Someone clearly decided that it was silly to keep
|
||||
* doing this while in "lazy" TLB mode, so the optimization involves
|
||||
* installing the swapper page table instead, the first time one
|
||||
* occurs, and clearing the cpu out of cpu_vm_mask, so the cpu running
|
||||
* the kernel task doesn't need to take any more interrupts. At that
|
||||
* point it's then necessary to explicitly reinstall it when context
|
||||
* switching back to the original mm.
|
||||
*
|
||||
* On Tile, we have to do a page-table install whenever DMA is enabled,
|
||||
* so in that case lazy mode doesn't help anyway. And more generally,
|
||||
* we have efficient per-page TLB shootdown, and don't expect to spend
|
||||
* that much time in kernel tasks in general, so just leaving the
|
||||
* kernel task borrowing the old page table, but handling TLB
|
||||
* shootdowns, is a reasonable thing to do. And importantly, this
|
||||
* lets us use the hypervisor's internal APIs for TLB shootdown, which
|
||||
* means we don't have to worry about having TLB shootdowns blocked
|
||||
* when Linux is disabling interrupts; see the page migration code for
|
||||
* an example of where it's important for TLB shootdowns to complete
|
||||
* even when interrupts are disabled at the Linux level.
|
||||
*/
|
||||
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *t)
|
||||
{
|
||||
#if CHIP_HAS_TILE_DMA()
|
||||
/*
|
||||
* We have to do an "identity" page table switch in order to
|
||||
* clear any pending DMA interrupts.
|
||||
*/
|
||||
if (current->thread.tile_dma_state.enabled)
|
||||
install_page_table(mm->pgd, __get_cpu_var(current_asid));
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
|
||||
struct task_struct *tsk)
|
||||
{
|
||||
if (likely(prev != next)) {
|
||||
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
/* Pick new ASID. */
|
||||
int asid = __get_cpu_var(current_asid) + 1;
|
||||
if (asid > max_asid) {
|
||||
asid = min_asid;
|
||||
local_flush_tlb();
|
||||
}
|
||||
__get_cpu_var(current_asid) = asid;
|
||||
|
||||
/* Clear cpu from the old mm, and set it in the new one. */
|
||||
cpumask_clear_cpu(cpu, &prev->cpu_vm_mask);
|
||||
cpumask_set_cpu(cpu, &next->cpu_vm_mask);
|
||||
|
||||
/* Re-load page tables */
|
||||
install_page_table(next->pgd, asid);
|
||||
|
||||
/* See how we should set the red/black cache info */
|
||||
check_mm_caching(prev, next);
|
||||
|
||||
/*
|
||||
* Since we're changing to a new mm, we have to flush
|
||||
* the icache in case some physical page now being mapped
|
||||
* has subsequently been repurposed and has new code.
|
||||
*/
|
||||
__flush_icache();
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
static inline void activate_mm(struct mm_struct *prev_mm,
|
||||
struct mm_struct *next_mm)
|
||||
{
|
||||
switch_mm(prev_mm, next_mm, NULL);
|
||||
}
|
||||
|
||||
#define destroy_context(mm) do { } while (0)
|
||||
#define deactivate_mm(tsk, mm) do { } while (0)
|
||||
|
||||
#endif /* _ASM_TILE_MMU_CONTEXT_H */
@@ -0,0 +1,81 @@
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_MMZONE_H
|
||||
#define _ASM_TILE_MMZONE_H
|
||||
|
||||
extern struct pglist_data node_data[];
|
||||
#define NODE_DATA(nid) (&node_data[nid])
|
||||
|
||||
extern void get_memcfg_numa(void);
|
||||
|
||||
#ifdef CONFIG_DISCONTIGMEM
|
||||
|
||||
#include <asm/page.h>
|
||||
|
||||
/*
|
||||
* Generally, memory ranges are always doled out by the hypervisor in
|
||||
* fixed-size, power-of-two increments. That would make computing the node
|
||||
* very easy. We could just take a couple high bits of the PA, which
|
||||
* denote the memory shim, and we'd be done. However, when we're doing
|
||||
* memory striping, this may not be true; PAs with different high bit
|
||||
* values might be in the same node. Thus, we keep a lookup table to
|
||||
* translate the high bits of the PFN to the node number.
|
||||
*/
|
||||
extern int highbits_to_node[];
|
||||
|
||||
static inline int pfn_to_nid(unsigned long pfn)
|
||||
{
|
||||
return highbits_to_node[__pfn_to_highbits(pfn)];
|
||||
}
|
||||
|
||||
/*
|
||||
* Following are macros that each NUMA implementation must define.
|
||||
*/
|
||||
|
||||
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
|
||||
#define node_end_pfn(nid) \
|
||||
({ \
|
||||
pg_data_t *__pgdat = NODE_DATA(nid); \
|
||||
__pgdat->node_start_pfn + __pgdat->node_spanned_pages; \
|
||||
})
|
||||
|
||||
#define kern_addr_valid(kaddr) virt_addr_valid((void *)kaddr)
|
||||
|
||||
static inline int pfn_valid(int pfn)
|
||||
{
|
||||
int nid = pfn_to_nid(pfn);
|
||||
|
||||
if (nid >= 0)
|
||||
return (pfn < node_end_pfn(nid));
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Information on the NUMA nodes that we compute early */
|
||||
extern unsigned long node_start_pfn[];
|
||||
extern unsigned long node_end_pfn[];
|
||||
extern unsigned long node_memmap_pfn[];
|
||||
extern unsigned long node_percpu_pfn[];
|
||||
extern unsigned long node_free_pfn[];
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
extern unsigned long node_lowmem_end_pfn[];
|
||||
#endif
|
||||
#ifdef CONFIG_PCI
|
||||
extern unsigned long pci_reserve_start_pfn;
|
||||
extern unsigned long pci_reserve_end_pfn;
|
||||
#endif
|
||||
|
||||
#endif /* CONFIG_DISCONTIGMEM */
|
||||
|
||||
#endif /* _ASM_TILE_MMZONE_H */
@@ -0,0 +1 @@
#include <asm-generic/module.h>
@@ -0,0 +1 @@
#include <asm-generic/msgbuf.h>
@@ -0,0 +1 @@
#include <asm-generic/mutex-dec.h>
@@ -0,0 +1,30 @@
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_OPCODE_TILE_H
|
||||
#define _ASM_TILE_OPCODE_TILE_H
|
||||
|
||||
#include <arch/chip.h>
|
||||
|
||||
#if CHIP_WORD_SIZE() == 64
|
||||
#include <asm/opcode-tile_64.h>
|
||||
#else
|
||||
#include <asm/opcode-tile_32.h>
|
||||
#endif
|
||||
|
||||
/* These definitions are not correct for TILE64, so just avoid them. */
|
||||
#undef TILE_ELF_MACHINE_CODE
|
||||
#undef TILE_ELF_NAME
|
||||
|
||||
#endif /* _ASM_TILE_OPCODE_TILE_H */
|
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,26 @@
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_OPCODE_CONSTANTS_H
|
||||
#define _ASM_TILE_OPCODE_CONSTANTS_H
|
||||
|
||||
#include <arch/chip.h>
|
||||
|
||||
#if CHIP_WORD_SIZE() == 64
|
||||
#include <asm/opcode_constants_64.h>
|
||||
#else
|
||||
#include <asm/opcode_constants_32.h>
|
||||
#endif
|
||||
|
||||
#endif /* _ASM_TILE_OPCODE_CONSTANTS_H */
@@ -0,0 +1,480 @@
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
/* This file is machine-generated; DO NOT EDIT! */
|
||||
|
||||
|
||||
#ifndef _TILE_OPCODE_CONSTANTS_H
|
||||
#define _TILE_OPCODE_CONSTANTS_H
|
||||
enum
|
||||
{
|
||||
ADDBS_U_SPECIAL_0_OPCODE_X0 = 98,
|
||||
ADDBS_U_SPECIAL_0_OPCODE_X1 = 68,
|
||||
ADDB_SPECIAL_0_OPCODE_X0 = 1,
|
||||
ADDB_SPECIAL_0_OPCODE_X1 = 1,
|
||||
ADDHS_SPECIAL_0_OPCODE_X0 = 99,
|
||||
ADDHS_SPECIAL_0_OPCODE_X1 = 69,
|
||||
ADDH_SPECIAL_0_OPCODE_X0 = 2,
|
||||
ADDH_SPECIAL_0_OPCODE_X1 = 2,
|
||||
ADDIB_IMM_0_OPCODE_X0 = 1,
|
||||
ADDIB_IMM_0_OPCODE_X1 = 1,
|
||||
ADDIH_IMM_0_OPCODE_X0 = 2,
|
||||
ADDIH_IMM_0_OPCODE_X1 = 2,
|
||||
ADDI_IMM_0_OPCODE_X0 = 3,
|
||||
ADDI_IMM_0_OPCODE_X1 = 3,
|
||||
ADDI_IMM_1_OPCODE_SN = 1,
|
||||
ADDI_OPCODE_Y0 = 9,
|
||||
ADDI_OPCODE_Y1 = 7,
|
||||
ADDLIS_OPCODE_X0 = 1,
|
||||
ADDLIS_OPCODE_X1 = 2,
|
||||
ADDLI_OPCODE_X0 = 2,
|
||||
ADDLI_OPCODE_X1 = 3,
|
||||
ADDS_SPECIAL_0_OPCODE_X0 = 96,
|
||||
ADDS_SPECIAL_0_OPCODE_X1 = 66,
|
||||
ADD_SPECIAL_0_OPCODE_X0 = 3,
|
||||
ADD_SPECIAL_0_OPCODE_X1 = 3,
|
||||
ADD_SPECIAL_0_OPCODE_Y0 = 0,
|
||||
ADD_SPECIAL_0_OPCODE_Y1 = 0,
|
||||
ADIFFB_U_SPECIAL_0_OPCODE_X0 = 4,
|
||||
ADIFFH_SPECIAL_0_OPCODE_X0 = 5,
|
||||
ANDI_IMM_0_OPCODE_X0 = 1,
|
||||
ANDI_IMM_0_OPCODE_X1 = 4,
|
||||
ANDI_OPCODE_Y0 = 10,
|
||||
ANDI_OPCODE_Y1 = 8,
|
||||
AND_SPECIAL_0_OPCODE_X0 = 6,
|
||||
AND_SPECIAL_0_OPCODE_X1 = 4,
|
||||
AND_SPECIAL_2_OPCODE_Y0 = 0,
|
||||
AND_SPECIAL_2_OPCODE_Y1 = 0,
|
||||
AULI_OPCODE_X0 = 3,
|
||||
AULI_OPCODE_X1 = 4,
|
||||
AVGB_U_SPECIAL_0_OPCODE_X0 = 7,
|
||||
AVGH_SPECIAL_0_OPCODE_X0 = 8,
|
||||
BBNST_BRANCH_OPCODE_X1 = 15,
|
||||
BBNS_BRANCH_OPCODE_X1 = 14,
|
||||
BBNS_OPCODE_SN = 63,
|
||||
BBST_BRANCH_OPCODE_X1 = 13,
|
||||
BBS_BRANCH_OPCODE_X1 = 12,
|
||||
BBS_OPCODE_SN = 62,
|
||||
BGEZT_BRANCH_OPCODE_X1 = 7,
|
||||
BGEZ_BRANCH_OPCODE_X1 = 6,
|
||||
BGEZ_OPCODE_SN = 61,
|
||||
BGZT_BRANCH_OPCODE_X1 = 5,
|
||||
BGZ_BRANCH_OPCODE_X1 = 4,
|
||||
BGZ_OPCODE_SN = 58,
|
||||
BITX_UN_0_SHUN_0_OPCODE_X0 = 1,
|
||||
BITX_UN_0_SHUN_0_OPCODE_Y0 = 1,
|
||||
BLEZT_BRANCH_OPCODE_X1 = 11,
|
||||
BLEZ_BRANCH_OPCODE_X1 = 10,
|
||||
BLEZ_OPCODE_SN = 59,
|
||||
BLZT_BRANCH_OPCODE_X1 = 9,
|
||||
BLZ_BRANCH_OPCODE_X1 = 8,
|
||||
BLZ_OPCODE_SN = 60,
|
||||
BNZT_BRANCH_OPCODE_X1 = 3,
|
||||
BNZ_BRANCH_OPCODE_X1 = 2,
|
||||
BNZ_OPCODE_SN = 57,
|
||||
BPT_NOREG_RR_IMM_0_OPCODE_SN = 1,
|
||||
BRANCH_OPCODE_X1 = 5,
|
||||
BYTEX_UN_0_SHUN_0_OPCODE_X0 = 2,
|
||||
BYTEX_UN_0_SHUN_0_OPCODE_Y0 = 2,
|
||||
BZT_BRANCH_OPCODE_X1 = 1,
|
||||
BZ_BRANCH_OPCODE_X1 = 0,
|
||||
BZ_OPCODE_SN = 56,
|
||||
CLZ_UN_0_SHUN_0_OPCODE_X0 = 3,
|
||||
CLZ_UN_0_SHUN_0_OPCODE_Y0 = 3,
|
||||
CRC32_32_SPECIAL_0_OPCODE_X0 = 9,
|
||||
CRC32_8_SPECIAL_0_OPCODE_X0 = 10,
|
||||
CTZ_UN_0_SHUN_0_OPCODE_X0 = 4,
|
||||
CTZ_UN_0_SHUN_0_OPCODE_Y0 = 4,
|
||||
DRAIN_UN_0_SHUN_0_OPCODE_X1 = 1,
|
||||
DTLBPR_UN_0_SHUN_0_OPCODE_X1 = 2,
|
||||
DWORD_ALIGN_SPECIAL_0_OPCODE_X0 = 95,
|
||||
FINV_UN_0_SHUN_0_OPCODE_X1 = 3,
|
||||
FLUSH_UN_0_SHUN_0_OPCODE_X1 = 4,
|
||||
FNOP_NOREG_RR_IMM_0_OPCODE_SN = 3,
|
||||
FNOP_UN_0_SHUN_0_OPCODE_X0 = 5,
|
||||
FNOP_UN_0_SHUN_0_OPCODE_X1 = 5,
|
||||
FNOP_UN_0_SHUN_0_OPCODE_Y0 = 5,
|
||||
FNOP_UN_0_SHUN_0_OPCODE_Y1 = 1,
|
||||
HALT_NOREG_RR_IMM_0_OPCODE_SN = 0,
|
||||
ICOH_UN_0_SHUN_0_OPCODE_X1 = 6,
|
||||
ILL_UN_0_SHUN_0_OPCODE_X1 = 7,
|
||||
ILL_UN_0_SHUN_0_OPCODE_Y1 = 2,
|
||||
IMM_0_OPCODE_SN = 0,
|
||||
IMM_0_OPCODE_X0 = 4,
|
||||
IMM_0_OPCODE_X1 = 6,
|
||||
IMM_1_OPCODE_SN = 1,
|
||||
IMM_OPCODE_0_X0 = 5,
|
||||
INTHB_SPECIAL_0_OPCODE_X0 = 11,
|
||||
INTHB_SPECIAL_0_OPCODE_X1 = 5,
|
||||
INTHH_SPECIAL_0_OPCODE_X0 = 12,
|
||||
INTHH_SPECIAL_0_OPCODE_X1 = 6,
|
||||
INTLB_SPECIAL_0_OPCODE_X0 = 13,
|
||||
INTLB_SPECIAL_0_OPCODE_X1 = 7,
|
||||
INTLH_SPECIAL_0_OPCODE_X0 = 14,
|
||||
INTLH_SPECIAL_0_OPCODE_X1 = 8,
|
||||
INV_UN_0_SHUN_0_OPCODE_X1 = 8,
|
||||
IRET_UN_0_SHUN_0_OPCODE_X1 = 9,
|
||||
JALB_OPCODE_X1 = 13,
|
||||
JALF_OPCODE_X1 = 12,
|
||||
JALRP_SPECIAL_0_OPCODE_X1 = 9,
|
||||
JALRR_IMM_1_OPCODE_SN = 3,
|
||||
JALR_RR_IMM_0_OPCODE_SN = 5,
|
||||
JALR_SPECIAL_0_OPCODE_X1 = 10,
|
||||
JB_OPCODE_X1 = 11,
|
||||
JF_OPCODE_X1 = 10,
|
||||
JRP_SPECIAL_0_OPCODE_X1 = 11,
|
||||
JRR_IMM_1_OPCODE_SN = 2,
|
||||
JR_RR_IMM_0_OPCODE_SN = 4,
|
||||
JR_SPECIAL_0_OPCODE_X1 = 12,
|
||||
LBADD_IMM_0_OPCODE_X1 = 22,
|
||||
LBADD_U_IMM_0_OPCODE_X1 = 23,
|
||||
LB_OPCODE_Y2 = 0,
|
||||
LB_UN_0_SHUN_0_OPCODE_X1 = 10,
|
||||
LB_U_OPCODE_Y2 = 1,
|
||||
LB_U_UN_0_SHUN_0_OPCODE_X1 = 11,
|
||||
LHADD_IMM_0_OPCODE_X1 = 24,
|
||||
LHADD_U_IMM_0_OPCODE_X1 = 25,
|
||||
LH_OPCODE_Y2 = 2,
|
||||
LH_UN_0_SHUN_0_OPCODE_X1 = 12,
|
||||
LH_U_OPCODE_Y2 = 3,
|
||||
LH_U_UN_0_SHUN_0_OPCODE_X1 = 13,
|
||||
LNK_SPECIAL_0_OPCODE_X1 = 13,
|
||||
LWADD_IMM_0_OPCODE_X1 = 26,
|
||||
LWADD_NA_IMM_0_OPCODE_X1 = 27,
|
||||
LW_NA_UN_0_SHUN_0_OPCODE_X1 = 24,
|
||||
LW_OPCODE_Y2 = 4,
|
||||
LW_UN_0_SHUN_0_OPCODE_X1 = 14,
|
||||
MAXB_U_SPECIAL_0_OPCODE_X0 = 15,
|
||||
MAXB_U_SPECIAL_0_OPCODE_X1 = 14,
|
||||
MAXH_SPECIAL_0_OPCODE_X0 = 16,
|
||||
MAXH_SPECIAL_0_OPCODE_X1 = 15,
|
||||
MAXIB_U_IMM_0_OPCODE_X0 = 4,
|
||||
MAXIB_U_IMM_0_OPCODE_X1 = 5,
|
||||
MAXIH_IMM_0_OPCODE_X0 = 5,
|
||||
MAXIH_IMM_0_OPCODE_X1 = 6,
|
||||
MFSPR_IMM_0_OPCODE_X1 = 7,
|
||||
MF_UN_0_SHUN_0_OPCODE_X1 = 15,
|
||||
MINB_U_SPECIAL_0_OPCODE_X0 = 17,
|
||||
MINB_U_SPECIAL_0_OPCODE_X1 = 16,
|
||||
MINH_SPECIAL_0_OPCODE_X0 = 18,
|
||||
MINH_SPECIAL_0_OPCODE_X1 = 17,
|
||||
MINIB_U_IMM_0_OPCODE_X0 = 6,
|
||||
MINIB_U_IMM_0_OPCODE_X1 = 8,
|
||||
MINIH_IMM_0_OPCODE_X0 = 7,
|
||||
MINIH_IMM_0_OPCODE_X1 = 9,
|
||||
MM_OPCODE_X0 = 6,
|
||||
MM_OPCODE_X1 = 7,
|
||||
MNZB_SPECIAL_0_OPCODE_X0 = 19,
|
||||
MNZB_SPECIAL_0_OPCODE_X1 = 18,
|
||||
MNZH_SPECIAL_0_OPCODE_X0 = 20,
|
||||
MNZH_SPECIAL_0_OPCODE_X1 = 19,
|
||||
MNZ_SPECIAL_0_OPCODE_X0 = 21,
|
||||
MNZ_SPECIAL_0_OPCODE_X1 = 20,
|
||||
MNZ_SPECIAL_1_OPCODE_Y0 = 0,
|
||||
MNZ_SPECIAL_1_OPCODE_Y1 = 1,
|
||||
MOVEI_IMM_1_OPCODE_SN = 0,
|
||||
MOVE_RR_IMM_0_OPCODE_SN = 8,
|
||||
MTSPR_IMM_0_OPCODE_X1 = 10,
|
||||
MULHHA_SS_SPECIAL_0_OPCODE_X0 = 22,
|
||||
MULHHA_SS_SPECIAL_7_OPCODE_Y0 = 0,
|
||||
MULHHA_SU_SPECIAL_0_OPCODE_X0 = 23,
|
||||
MULHHA_UU_SPECIAL_0_OPCODE_X0 = 24,
|
||||
MULHHA_UU_SPECIAL_7_OPCODE_Y0 = 1,
|
||||
MULHHSA_UU_SPECIAL_0_OPCODE_X0 = 25,
|
||||
MULHH_SS_SPECIAL_0_OPCODE_X0 = 26,
|
||||
MULHH_SS_SPECIAL_6_OPCODE_Y0 = 0,
|
||||
MULHH_SU_SPECIAL_0_OPCODE_X0 = 27,
|
||||
MULHH_UU_SPECIAL_0_OPCODE_X0 = 28,
|
||||
MULHH_UU_SPECIAL_6_OPCODE_Y0 = 1,
|
||||
MULHLA_SS_SPECIAL_0_OPCODE_X0 = 29,
|
||||
MULHLA_SU_SPECIAL_0_OPCODE_X0 = 30,
|
||||
MULHLA_US_SPECIAL_0_OPCODE_X0 = 31,
|
||||
MULHLA_UU_SPECIAL_0_OPCODE_X0 = 32,
|
||||
MULHLSA_UU_SPECIAL_0_OPCODE_X0 = 33,
|
||||
MULHLSA_UU_SPECIAL_5_OPCODE_Y0 = 0,
|
||||
MULHL_SS_SPECIAL_0_OPCODE_X0 = 34,
|
||||
MULHL_SU_SPECIAL_0_OPCODE_X0 = 35,
|
||||
MULHL_US_SPECIAL_0_OPCODE_X0 = 36,
|
||||
MULHL_UU_SPECIAL_0_OPCODE_X0 = 37,
|
||||
MULLLA_SS_SPECIAL_0_OPCODE_X0 = 38,
|
||||
MULLLA_SS_SPECIAL_7_OPCODE_Y0 = 2,
|
||||
MULLLA_SU_SPECIAL_0_OPCODE_X0 = 39,
|
||||
MULLLA_UU_SPECIAL_0_OPCODE_X0 = 40,
|
||||
MULLLA_UU_SPECIAL_7_OPCODE_Y0 = 3,
|
||||
MULLLSA_UU_SPECIAL_0_OPCODE_X0 = 41,
|
||||
MULLL_SS_SPECIAL_0_OPCODE_X0 = 42,
|
||||
MULLL_SS_SPECIAL_6_OPCODE_Y0 = 2,
|
||||
MULLL_SU_SPECIAL_0_OPCODE_X0 = 43,
|
||||
MULLL_UU_SPECIAL_0_OPCODE_X0 = 44,
|
||||
MULLL_UU_SPECIAL_6_OPCODE_Y0 = 3,
|
||||
MVNZ_SPECIAL_0_OPCODE_X0 = 45,
|
||||
MVNZ_SPECIAL_1_OPCODE_Y0 = 1,
|
||||
MVZ_SPECIAL_0_OPCODE_X0 = 46,
|
||||
MVZ_SPECIAL_1_OPCODE_Y0 = 2,
|
||||
MZB_SPECIAL_0_OPCODE_X0 = 47,
|
||||
MZB_SPECIAL_0_OPCODE_X1 = 21,
|
||||
MZH_SPECIAL_0_OPCODE_X0 = 48,
|
||||
MZH_SPECIAL_0_OPCODE_X1 = 22,
|
||||
MZ_SPECIAL_0_OPCODE_X0 = 49,
|
||||
MZ_SPECIAL_0_OPCODE_X1 = 23,
|
||||
MZ_SPECIAL_1_OPCODE_Y0 = 3,
|
||||
MZ_SPECIAL_1_OPCODE_Y1 = 2,
|
||||
NAP_UN_0_SHUN_0_OPCODE_X1 = 16,
|
||||
NOP_NOREG_RR_IMM_0_OPCODE_SN = 2,
|
||||
NOP_UN_0_SHUN_0_OPCODE_X0 = 6,
|
||||
NOP_UN_0_SHUN_0_OPCODE_X1 = 17,
|
||||
NOP_UN_0_SHUN_0_OPCODE_Y0 = 6,
|
||||
NOP_UN_0_SHUN_0_OPCODE_Y1 = 3,
|
||||
NOREG_RR_IMM_0_OPCODE_SN = 0,
|
||||
NOR_SPECIAL_0_OPCODE_X0 = 50,
|
||||
NOR_SPECIAL_0_OPCODE_X1 = 24,
|
||||
NOR_SPECIAL_2_OPCODE_Y0 = 1,
|
||||
NOR_SPECIAL_2_OPCODE_Y1 = 1,
|
||||
ORI_IMM_0_OPCODE_X0 = 8,
|
||||
ORI_IMM_0_OPCODE_X1 = 11,
|
||||
ORI_OPCODE_Y0 = 11,
|
||||
ORI_OPCODE_Y1 = 9,
|
||||
OR_SPECIAL_0_OPCODE_X0 = 51,
|
||||
OR_SPECIAL_0_OPCODE_X1 = 25,
|
||||
OR_SPECIAL_2_OPCODE_Y0 = 2,
|
||||
OR_SPECIAL_2_OPCODE_Y1 = 2,
|
||||
PACKBS_U_SPECIAL_0_OPCODE_X0 = 103,
|
||||
PACKBS_U_SPECIAL_0_OPCODE_X1 = 73,
|
||||
PACKHB_SPECIAL_0_OPCODE_X0 = 52,
|
||||
PACKHB_SPECIAL_0_OPCODE_X1 = 26,
|
||||
PACKHS_SPECIAL_0_OPCODE_X0 = 102,
|
||||
PACKHS_SPECIAL_0_OPCODE_X1 = 72,
|
||||
PACKLB_SPECIAL_0_OPCODE_X0 = 53,
|
||||
PACKLB_SPECIAL_0_OPCODE_X1 = 27,
|
||||
PCNT_UN_0_SHUN_0_OPCODE_X0 = 7,
|
||||
PCNT_UN_0_SHUN_0_OPCODE_Y0 = 7,
|
||||
RLI_SHUN_0_OPCODE_X0 = 1,
|
||||
RLI_SHUN_0_OPCODE_X1 = 1,
|
||||
RLI_SHUN_0_OPCODE_Y0 = 1,
|
||||
RLI_SHUN_0_OPCODE_Y1 = 1,
|
||||
RL_SPECIAL_0_OPCODE_X0 = 54,
|
||||
RL_SPECIAL_0_OPCODE_X1 = 28,
|
||||
RL_SPECIAL_3_OPCODE_Y0 = 0,
|
||||
RL_SPECIAL_3_OPCODE_Y1 = 0,
|
||||
RR_IMM_0_OPCODE_SN = 0,
|
||||
S1A_SPECIAL_0_OPCODE_X0 = 55,
|
||||
S1A_SPECIAL_0_OPCODE_X1 = 29,
|
||||
S1A_SPECIAL_0_OPCODE_Y0 = 1,
|
||||
S1A_SPECIAL_0_OPCODE_Y1 = 1,
|
||||
S2A_SPECIAL_0_OPCODE_X0 = 56,
|
||||
S2A_SPECIAL_0_OPCODE_X1 = 30,
|
||||
S2A_SPECIAL_0_OPCODE_Y0 = 2,
|
||||
S2A_SPECIAL_0_OPCODE_Y1 = 2,
|
||||
S3A_SPECIAL_0_OPCODE_X0 = 57,
|
||||
S3A_SPECIAL_0_OPCODE_X1 = 31,
|
||||
S3A_SPECIAL_5_OPCODE_Y0 = 1,
|
||||
S3A_SPECIAL_5_OPCODE_Y1 = 1,
|
||||
SADAB_U_SPECIAL_0_OPCODE_X0 = 58,
|
||||
SADAH_SPECIAL_0_OPCODE_X0 = 59,
|
||||
SADAH_U_SPECIAL_0_OPCODE_X0 = 60,
|
||||
SADB_U_SPECIAL_0_OPCODE_X0 = 61,
|
||||
SADH_SPECIAL_0_OPCODE_X0 = 62,
|
||||
SADH_U_SPECIAL_0_OPCODE_X0 = 63,
|
||||
SBADD_IMM_0_OPCODE_X1 = 28,
|
||||
SB_OPCODE_Y2 = 5,
|
||||
SB_SPECIAL_0_OPCODE_X1 = 32,
|
||||
SEQB_SPECIAL_0_OPCODE_X0 = 64,
|
||||
SEQB_SPECIAL_0_OPCODE_X1 = 33,
|
||||
SEQH_SPECIAL_0_OPCODE_X0 = 65,
|
||||
SEQH_SPECIAL_0_OPCODE_X1 = 34,
|
||||
SEQIB_IMM_0_OPCODE_X0 = 9,
|
||||
SEQIB_IMM_0_OPCODE_X1 = 12,
|
||||
SEQIH_IMM_0_OPCODE_X0 = 10,
|
||||
SEQIH_IMM_0_OPCODE_X1 = 13,
|
||||
SEQI_IMM_0_OPCODE_X0 = 11,
|
||||
SEQI_IMM_0_OPCODE_X1 = 14,
|
||||
SEQI_OPCODE_Y0 = 12,
|
||||
SEQI_OPCODE_Y1 = 10,
|
||||
SEQ_SPECIAL_0_OPCODE_X0 = 66,
|
||||
SEQ_SPECIAL_0_OPCODE_X1 = 35,
|
||||
SEQ_SPECIAL_5_OPCODE_Y0 = 2,
|
||||
SEQ_SPECIAL_5_OPCODE_Y1 = 2,
|
||||
SHADD_IMM_0_OPCODE_X1 = 29,
|
||||
SHL8II_IMM_0_OPCODE_SN = 3,
|
||||
SHLB_SPECIAL_0_OPCODE_X0 = 67,
|
||||
SHLB_SPECIAL_0_OPCODE_X1 = 36,
|
||||
SHLH_SPECIAL_0_OPCODE_X0 = 68,
|
||||
SHLH_SPECIAL_0_OPCODE_X1 = 37,
|
||||
SHLIB_SHUN_0_OPCODE_X0 = 2,
|
||||
SHLIB_SHUN_0_OPCODE_X1 = 2,
|
||||
SHLIH_SHUN_0_OPCODE_X0 = 3,
|
||||
SHLIH_SHUN_0_OPCODE_X1 = 3,
|
||||
SHLI_SHUN_0_OPCODE_X0 = 4,
|
||||
SHLI_SHUN_0_OPCODE_X1 = 4,
|
||||
SHLI_SHUN_0_OPCODE_Y0 = 2,
|
||||
SHLI_SHUN_0_OPCODE_Y1 = 2,
|
||||
SHL_SPECIAL_0_OPCODE_X0 = 69,
|
||||
SHL_SPECIAL_0_OPCODE_X1 = 38,
|
||||
SHL_SPECIAL_3_OPCODE_Y0 = 1,
|
||||
SHL_SPECIAL_3_OPCODE_Y1 = 1,
|
||||
SHR1_RR_IMM_0_OPCODE_SN = 9,
|
||||
SHRB_SPECIAL_0_OPCODE_X0 = 70,
|
||||
SHRB_SPECIAL_0_OPCODE_X1 = 39,
|
||||
SHRH_SPECIAL_0_OPCODE_X0 = 71,
|
||||
SHRH_SPECIAL_0_OPCODE_X1 = 40,
|
||||
SHRIB_SHUN_0_OPCODE_X0 = 5,
|
||||
SHRIB_SHUN_0_OPCODE_X1 = 5,
|
||||
SHRIH_SHUN_0_OPCODE_X0 = 6,
|
||||
SHRIH_SHUN_0_OPCODE_X1 = 6,
|
||||
SHRI_SHUN_0_OPCODE_X0 = 7,
|
||||
SHRI_SHUN_0_OPCODE_X1 = 7,
|
||||
SHRI_SHUN_0_OPCODE_Y0 = 3,
|
||||
SHRI_SHUN_0_OPCODE_Y1 = 3,
|
||||
SHR_SPECIAL_0_OPCODE_X0 = 72,
|
||||
SHR_SPECIAL_0_OPCODE_X1 = 41,
|
||||
SHR_SPECIAL_3_OPCODE_Y0 = 2,
|
||||
SHR_SPECIAL_3_OPCODE_Y1 = 2,
|
||||
SHUN_0_OPCODE_X0 = 7,
|
||||
SHUN_0_OPCODE_X1 = 8,
|
||||
SHUN_0_OPCODE_Y0 = 13,
|
||||
SHUN_0_OPCODE_Y1 = 11,
|
||||
SH_OPCODE_Y2 = 6,
|
||||
SH_SPECIAL_0_OPCODE_X1 = 42,
|
||||
SLTB_SPECIAL_0_OPCODE_X0 = 73,
|
||||
SLTB_SPECIAL_0_OPCODE_X1 = 43,
|
||||
SLTB_U_SPECIAL_0_OPCODE_X0 = 74,
|
||||
SLTB_U_SPECIAL_0_OPCODE_X1 = 44,
|
||||
SLTEB_SPECIAL_0_OPCODE_X0 = 75,
|
||||
SLTEB_SPECIAL_0_OPCODE_X1 = 45,
|
||||
SLTEB_U_SPECIAL_0_OPCODE_X0 = 76,
|
||||
SLTEB_U_SPECIAL_0_OPCODE_X1 = 46,
|
||||
SLTEH_SPECIAL_0_OPCODE_X0 = 77,
|
||||
SLTEH_SPECIAL_0_OPCODE_X1 = 47,
|
||||
SLTEH_U_SPECIAL_0_OPCODE_X0 = 78,
|
||||
SLTEH_U_SPECIAL_0_OPCODE_X1 = 48,
|
||||
SLTE_SPECIAL_0_OPCODE_X0 = 79,
|
||||
SLTE_SPECIAL_0_OPCODE_X1 = 49,
|
||||
SLTE_SPECIAL_4_OPCODE_Y0 = 0,
|
||||
SLTE_SPECIAL_4_OPCODE_Y1 = 0,
|
||||
SLTE_U_SPECIAL_0_OPCODE_X0 = 80,
|
||||
SLTE_U_SPECIAL_0_OPCODE_X1 = 50,
|
||||
SLTE_U_SPECIAL_4_OPCODE_Y0 = 1,
|
||||
SLTE_U_SPECIAL_4_OPCODE_Y1 = 1,
|
||||
SLTH_SPECIAL_0_OPCODE_X0 = 81,
|
||||
SLTH_SPECIAL_0_OPCODE_X1 = 51,
|
||||
SLTH_U_SPECIAL_0_OPCODE_X0 = 82,
|
||||
SLTH_U_SPECIAL_0_OPCODE_X1 = 52,
|
||||
SLTIB_IMM_0_OPCODE_X0 = 12,
|
||||
SLTIB_IMM_0_OPCODE_X1 = 15,
|
||||
SLTIB_U_IMM_0_OPCODE_X0 = 13,
|
||||
SLTIB_U_IMM_0_OPCODE_X1 = 16,
|
||||
SLTIH_IMM_0_OPCODE_X0 = 14,
|
||||
SLTIH_IMM_0_OPCODE_X1 = 17,
|
||||
SLTIH_U_IMM_0_OPCODE_X0 = 15,
|
||||
SLTIH_U_IMM_0_OPCODE_X1 = 18,
|
||||
SLTI_IMM_0_OPCODE_X0 = 16,
|
||||
SLTI_IMM_0_OPCODE_X1 = 19,
|
||||
SLTI_OPCODE_Y0 = 14,
|
||||
SLTI_OPCODE_Y1 = 12,
|
||||
SLTI_U_IMM_0_OPCODE_X0 = 17,
|
||||
SLTI_U_IMM_0_OPCODE_X1 = 20,
|
||||
SLTI_U_OPCODE_Y0 = 15,
|
||||
SLTI_U_OPCODE_Y1 = 13,
|
||||
SLT_SPECIAL_0_OPCODE_X0 = 83,
|
||||
SLT_SPECIAL_0_OPCODE_X1 = 53,
|
||||
SLT_SPECIAL_4_OPCODE_Y0 = 2,
|
||||
SLT_SPECIAL_4_OPCODE_Y1 = 2,
|
||||
SLT_U_SPECIAL_0_OPCODE_X0 = 84,
|
||||
SLT_U_SPECIAL_0_OPCODE_X1 = 54,
|
||||
SLT_U_SPECIAL_4_OPCODE_Y0 = 3,
|
||||
SLT_U_SPECIAL_4_OPCODE_Y1 = 3,
|
||||
SNEB_SPECIAL_0_OPCODE_X0 = 85,
|
||||
SNEB_SPECIAL_0_OPCODE_X1 = 55,
|
||||
SNEH_SPECIAL_0_OPCODE_X0 = 86,
|
||||
SNEH_SPECIAL_0_OPCODE_X1 = 56,
|
||||
SNE_SPECIAL_0_OPCODE_X0 = 87,
|
||||
SNE_SPECIAL_0_OPCODE_X1 = 57,
|
||||
SNE_SPECIAL_5_OPCODE_Y0 = 3,
|
||||
SNE_SPECIAL_5_OPCODE_Y1 = 3,
|
||||
SPECIAL_0_OPCODE_X0 = 0,
|
||||
SPECIAL_0_OPCODE_X1 = 1,
|
||||
SPECIAL_0_OPCODE_Y0 = 1,
|
||||
SPECIAL_0_OPCODE_Y1 = 1,
|
||||
SPECIAL_1_OPCODE_Y0 = 2,
|
||||
SPECIAL_1_OPCODE_Y1 = 2,
|
||||
SPECIAL_2_OPCODE_Y0 = 3,
|
||||
SPECIAL_2_OPCODE_Y1 = 3,
|
||||
SPECIAL_3_OPCODE_Y0 = 4,
|
||||
SPECIAL_3_OPCODE_Y1 = 4,
|
||||
SPECIAL_4_OPCODE_Y0 = 5,
|
||||
SPECIAL_4_OPCODE_Y1 = 5,
|
||||
SPECIAL_5_OPCODE_Y0 = 6,
|
||||
SPECIAL_5_OPCODE_Y1 = 6,
|
||||
SPECIAL_6_OPCODE_Y0 = 7,
|
||||
SPECIAL_7_OPCODE_Y0 = 8,
|
||||
SRAB_SPECIAL_0_OPCODE_X0 = 88,
|
||||
SRAB_SPECIAL_0_OPCODE_X1 = 58,
|
||||
SRAH_SPECIAL_0_OPCODE_X0 = 89,
|
||||
SRAH_SPECIAL_0_OPCODE_X1 = 59,
|
||||
SRAIB_SHUN_0_OPCODE_X0 = 8,
|
||||
SRAIB_SHUN_0_OPCODE_X1 = 8,
|
||||
SRAIH_SHUN_0_OPCODE_X0 = 9,
|
||||
SRAIH_SHUN_0_OPCODE_X1 = 9,
|
||||
SRAI_SHUN_0_OPCODE_X0 = 10,
|
||||
SRAI_SHUN_0_OPCODE_X1 = 10,
|
||||
SRAI_SHUN_0_OPCODE_Y0 = 4,
|
||||
SRAI_SHUN_0_OPCODE_Y1 = 4,
|
||||
SRA_SPECIAL_0_OPCODE_X0 = 90,
|
||||
SRA_SPECIAL_0_OPCODE_X1 = 60,
|
||||
SRA_SPECIAL_3_OPCODE_Y0 = 3,
|
||||
SRA_SPECIAL_3_OPCODE_Y1 = 3,
|
||||
SUBBS_U_SPECIAL_0_OPCODE_X0 = 100,
|
||||
SUBBS_U_SPECIAL_0_OPCODE_X1 = 70,
|
||||
SUBB_SPECIAL_0_OPCODE_X0 = 91,
|
||||
SUBB_SPECIAL_0_OPCODE_X1 = 61,
|
||||
SUBHS_SPECIAL_0_OPCODE_X0 = 101,
|
||||
SUBHS_SPECIAL_0_OPCODE_X1 = 71,
|
||||
SUBH_SPECIAL_0_OPCODE_X0 = 92,
|
||||
SUBH_SPECIAL_0_OPCODE_X1 = 62,
|
||||
SUBS_SPECIAL_0_OPCODE_X0 = 97,
|
||||
SUBS_SPECIAL_0_OPCODE_X1 = 67,
|
||||
SUB_SPECIAL_0_OPCODE_X0 = 93,
|
||||
SUB_SPECIAL_0_OPCODE_X1 = 63,
|
||||
SUB_SPECIAL_0_OPCODE_Y0 = 3,
|
||||
SUB_SPECIAL_0_OPCODE_Y1 = 3,
|
||||
SWADD_IMM_0_OPCODE_X1 = 30,
|
||||
SWINT0_UN_0_SHUN_0_OPCODE_X1 = 18,
|
||||
SWINT1_UN_0_SHUN_0_OPCODE_X1 = 19,
|
||||
SWINT2_UN_0_SHUN_0_OPCODE_X1 = 20,
|
||||
SWINT3_UN_0_SHUN_0_OPCODE_X1 = 21,
|
||||
SW_OPCODE_Y2 = 7,
|
||||
SW_SPECIAL_0_OPCODE_X1 = 64,
|
||||
TBLIDXB0_UN_0_SHUN_0_OPCODE_X0 = 8,
|
||||
TBLIDXB0_UN_0_SHUN_0_OPCODE_Y0 = 8,
|
||||
TBLIDXB1_UN_0_SHUN_0_OPCODE_X0 = 9,
|
||||
TBLIDXB1_UN_0_SHUN_0_OPCODE_Y0 = 9,
|
||||
TBLIDXB2_UN_0_SHUN_0_OPCODE_X0 = 10,
|
||||
TBLIDXB2_UN_0_SHUN_0_OPCODE_Y0 = 10,
|
||||
TBLIDXB3_UN_0_SHUN_0_OPCODE_X0 = 11,
|
||||
TBLIDXB3_UN_0_SHUN_0_OPCODE_Y0 = 11,
|
||||
TNS_UN_0_SHUN_0_OPCODE_X1 = 22,
|
||||
UN_0_SHUN_0_OPCODE_X0 = 11,
|
||||
UN_0_SHUN_0_OPCODE_X1 = 11,
|
||||
UN_0_SHUN_0_OPCODE_Y0 = 5,
|
||||
UN_0_SHUN_0_OPCODE_Y1 = 5,
|
||||
WH64_UN_0_SHUN_0_OPCODE_X1 = 23,
|
||||
XORI_IMM_0_OPCODE_X0 = 2,
|
||||
XORI_IMM_0_OPCODE_X1 = 21,
|
||||
XOR_SPECIAL_0_OPCODE_X0 = 94,
|
||||
XOR_SPECIAL_0_OPCODE_X1 = 65,
|
||||
XOR_SPECIAL_2_OPCODE_Y0 = 3,
|
||||
XOR_SPECIAL_2_OPCODE_Y1 = 3
|
||||
};
|
||||
|
||||
#endif /* !_TILE_OPCODE_CONSTANTS_H */
|
|
@ -0,0 +1,480 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
/* This file is machine-generated; DO NOT EDIT! */
|
||||
|
||||
|
||||
#ifndef _TILE_OPCODE_CONSTANTS_H
|
||||
#define _TILE_OPCODE_CONSTANTS_H
|
||||
enum
|
||||
{
|
||||
ADDBS_U_SPECIAL_0_OPCODE_X0 = 98,
|
||||
ADDBS_U_SPECIAL_0_OPCODE_X1 = 68,
|
||||
ADDB_SPECIAL_0_OPCODE_X0 = 1,
|
||||
ADDB_SPECIAL_0_OPCODE_X1 = 1,
|
||||
ADDHS_SPECIAL_0_OPCODE_X0 = 99,
|
||||
ADDHS_SPECIAL_0_OPCODE_X1 = 69,
|
||||
ADDH_SPECIAL_0_OPCODE_X0 = 2,
|
||||
ADDH_SPECIAL_0_OPCODE_X1 = 2,
|
||||
ADDIB_IMM_0_OPCODE_X0 = 1,
|
||||
ADDIB_IMM_0_OPCODE_X1 = 1,
|
||||
ADDIH_IMM_0_OPCODE_X0 = 2,
|
||||
ADDIH_IMM_0_OPCODE_X1 = 2,
|
||||
ADDI_IMM_0_OPCODE_X0 = 3,
|
||||
ADDI_IMM_0_OPCODE_X1 = 3,
|
||||
ADDI_IMM_1_OPCODE_SN = 1,
|
||||
ADDI_OPCODE_Y0 = 9,
|
||||
ADDI_OPCODE_Y1 = 7,
|
||||
ADDLIS_OPCODE_X0 = 1,
|
||||
ADDLIS_OPCODE_X1 = 2,
|
||||
ADDLI_OPCODE_X0 = 2,
|
||||
ADDLI_OPCODE_X1 = 3,
|
||||
ADDS_SPECIAL_0_OPCODE_X0 = 96,
|
||||
ADDS_SPECIAL_0_OPCODE_X1 = 66,
|
||||
ADD_SPECIAL_0_OPCODE_X0 = 3,
|
||||
ADD_SPECIAL_0_OPCODE_X1 = 3,
|
||||
ADD_SPECIAL_0_OPCODE_Y0 = 0,
|
||||
ADD_SPECIAL_0_OPCODE_Y1 = 0,
|
||||
ADIFFB_U_SPECIAL_0_OPCODE_X0 = 4,
|
||||
ADIFFH_SPECIAL_0_OPCODE_X0 = 5,
|
||||
ANDI_IMM_0_OPCODE_X0 = 1,
|
||||
ANDI_IMM_0_OPCODE_X1 = 4,
|
||||
ANDI_OPCODE_Y0 = 10,
|
||||
ANDI_OPCODE_Y1 = 8,
|
||||
AND_SPECIAL_0_OPCODE_X0 = 6,
|
||||
AND_SPECIAL_0_OPCODE_X1 = 4,
|
||||
AND_SPECIAL_2_OPCODE_Y0 = 0,
|
||||
AND_SPECIAL_2_OPCODE_Y1 = 0,
|
||||
AULI_OPCODE_X0 = 3,
|
||||
AULI_OPCODE_X1 = 4,
|
||||
AVGB_U_SPECIAL_0_OPCODE_X0 = 7,
|
||||
AVGH_SPECIAL_0_OPCODE_X0 = 8,
|
||||
BBNST_BRANCH_OPCODE_X1 = 15,
|
||||
BBNS_BRANCH_OPCODE_X1 = 14,
|
||||
BBNS_OPCODE_SN = 63,
|
||||
BBST_BRANCH_OPCODE_X1 = 13,
|
||||
BBS_BRANCH_OPCODE_X1 = 12,
|
||||
BBS_OPCODE_SN = 62,
|
||||
BGEZT_BRANCH_OPCODE_X1 = 7,
|
||||
BGEZ_BRANCH_OPCODE_X1 = 6,
|
||||
BGEZ_OPCODE_SN = 61,
|
||||
BGZT_BRANCH_OPCODE_X1 = 5,
|
||||
BGZ_BRANCH_OPCODE_X1 = 4,
|
||||
BGZ_OPCODE_SN = 58,
|
||||
BITX_UN_0_SHUN_0_OPCODE_X0 = 1,
|
||||
BITX_UN_0_SHUN_0_OPCODE_Y0 = 1,
|
||||
BLEZT_BRANCH_OPCODE_X1 = 11,
|
||||
BLEZ_BRANCH_OPCODE_X1 = 10,
|
||||
BLEZ_OPCODE_SN = 59,
|
||||
BLZT_BRANCH_OPCODE_X1 = 9,
|
||||
BLZ_BRANCH_OPCODE_X1 = 8,
|
||||
BLZ_OPCODE_SN = 60,
|
||||
BNZT_BRANCH_OPCODE_X1 = 3,
|
||||
BNZ_BRANCH_OPCODE_X1 = 2,
|
||||
BNZ_OPCODE_SN = 57,
|
||||
BPT_NOREG_RR_IMM_0_OPCODE_SN = 1,
|
||||
BRANCH_OPCODE_X1 = 5,
|
||||
BYTEX_UN_0_SHUN_0_OPCODE_X0 = 2,
|
||||
BYTEX_UN_0_SHUN_0_OPCODE_Y0 = 2,
|
||||
BZT_BRANCH_OPCODE_X1 = 1,
|
||||
BZ_BRANCH_OPCODE_X1 = 0,
|
||||
BZ_OPCODE_SN = 56,
|
||||
CLZ_UN_0_SHUN_0_OPCODE_X0 = 3,
|
||||
CLZ_UN_0_SHUN_0_OPCODE_Y0 = 3,
|
||||
CRC32_32_SPECIAL_0_OPCODE_X0 = 9,
|
||||
CRC32_8_SPECIAL_0_OPCODE_X0 = 10,
|
||||
CTZ_UN_0_SHUN_0_OPCODE_X0 = 4,
|
||||
CTZ_UN_0_SHUN_0_OPCODE_Y0 = 4,
|
||||
DRAIN_UN_0_SHUN_0_OPCODE_X1 = 1,
|
||||
DTLBPR_UN_0_SHUN_0_OPCODE_X1 = 2,
|
||||
DWORD_ALIGN_SPECIAL_0_OPCODE_X0 = 95,
|
||||
FINV_UN_0_SHUN_0_OPCODE_X1 = 3,
|
||||
FLUSH_UN_0_SHUN_0_OPCODE_X1 = 4,
|
||||
FNOP_NOREG_RR_IMM_0_OPCODE_SN = 3,
|
||||
FNOP_UN_0_SHUN_0_OPCODE_X0 = 5,
|
||||
FNOP_UN_0_SHUN_0_OPCODE_X1 = 5,
|
||||
FNOP_UN_0_SHUN_0_OPCODE_Y0 = 5,
|
||||
FNOP_UN_0_SHUN_0_OPCODE_Y1 = 1,
|
||||
HALT_NOREG_RR_IMM_0_OPCODE_SN = 0,
|
||||
ICOH_UN_0_SHUN_0_OPCODE_X1 = 6,
|
||||
ILL_UN_0_SHUN_0_OPCODE_X1 = 7,
|
||||
ILL_UN_0_SHUN_0_OPCODE_Y1 = 2,
|
||||
IMM_0_OPCODE_SN = 0,
|
||||
IMM_0_OPCODE_X0 = 4,
|
||||
IMM_0_OPCODE_X1 = 6,
|
||||
IMM_1_OPCODE_SN = 1,
|
||||
IMM_OPCODE_0_X0 = 5,
|
||||
INTHB_SPECIAL_0_OPCODE_X0 = 11,
|
||||
INTHB_SPECIAL_0_OPCODE_X1 = 5,
|
||||
INTHH_SPECIAL_0_OPCODE_X0 = 12,
|
||||
INTHH_SPECIAL_0_OPCODE_X1 = 6,
|
||||
INTLB_SPECIAL_0_OPCODE_X0 = 13,
|
||||
INTLB_SPECIAL_0_OPCODE_X1 = 7,
|
||||
INTLH_SPECIAL_0_OPCODE_X0 = 14,
|
||||
INTLH_SPECIAL_0_OPCODE_X1 = 8,
|
||||
INV_UN_0_SHUN_0_OPCODE_X1 = 8,
|
||||
IRET_UN_0_SHUN_0_OPCODE_X1 = 9,
|
||||
JALB_OPCODE_X1 = 13,
|
||||
JALF_OPCODE_X1 = 12,
|
||||
JALRP_SPECIAL_0_OPCODE_X1 = 9,
|
||||
JALRR_IMM_1_OPCODE_SN = 3,
|
||||
JALR_RR_IMM_0_OPCODE_SN = 5,
|
||||
JALR_SPECIAL_0_OPCODE_X1 = 10,
|
||||
JB_OPCODE_X1 = 11,
|
||||
JF_OPCODE_X1 = 10,
|
||||
JRP_SPECIAL_0_OPCODE_X1 = 11,
|
||||
JRR_IMM_1_OPCODE_SN = 2,
|
||||
JR_RR_IMM_0_OPCODE_SN = 4,
|
||||
JR_SPECIAL_0_OPCODE_X1 = 12,
|
||||
LBADD_IMM_0_OPCODE_X1 = 22,
|
||||
LBADD_U_IMM_0_OPCODE_X1 = 23,
|
||||
LB_OPCODE_Y2 = 0,
|
||||
LB_UN_0_SHUN_0_OPCODE_X1 = 10,
|
||||
LB_U_OPCODE_Y2 = 1,
|
||||
LB_U_UN_0_SHUN_0_OPCODE_X1 = 11,
|
||||
LHADD_IMM_0_OPCODE_X1 = 24,
|
||||
LHADD_U_IMM_0_OPCODE_X1 = 25,
|
||||
LH_OPCODE_Y2 = 2,
|
||||
LH_UN_0_SHUN_0_OPCODE_X1 = 12,
|
||||
LH_U_OPCODE_Y2 = 3,
|
||||
LH_U_UN_0_SHUN_0_OPCODE_X1 = 13,
|
||||
LNK_SPECIAL_0_OPCODE_X1 = 13,
|
||||
LWADD_IMM_0_OPCODE_X1 = 26,
|
||||
LWADD_NA_IMM_0_OPCODE_X1 = 27,
|
||||
LW_NA_UN_0_SHUN_0_OPCODE_X1 = 24,
|
||||
LW_OPCODE_Y2 = 4,
|
||||
LW_UN_0_SHUN_0_OPCODE_X1 = 14,
|
||||
MAXB_U_SPECIAL_0_OPCODE_X0 = 15,
|
||||
MAXB_U_SPECIAL_0_OPCODE_X1 = 14,
|
||||
MAXH_SPECIAL_0_OPCODE_X0 = 16,
|
||||
MAXH_SPECIAL_0_OPCODE_X1 = 15,
|
||||
MAXIB_U_IMM_0_OPCODE_X0 = 4,
|
||||
MAXIB_U_IMM_0_OPCODE_X1 = 5,
|
||||
MAXIH_IMM_0_OPCODE_X0 = 5,
|
||||
MAXIH_IMM_0_OPCODE_X1 = 6,
|
||||
MFSPR_IMM_0_OPCODE_X1 = 7,
|
||||
MF_UN_0_SHUN_0_OPCODE_X1 = 15,
|
||||
MINB_U_SPECIAL_0_OPCODE_X0 = 17,
|
||||
MINB_U_SPECIAL_0_OPCODE_X1 = 16,
|
||||
MINH_SPECIAL_0_OPCODE_X0 = 18,
|
||||
MINH_SPECIAL_0_OPCODE_X1 = 17,
|
||||
MINIB_U_IMM_0_OPCODE_X0 = 6,
|
||||
MINIB_U_IMM_0_OPCODE_X1 = 8,
|
||||
MINIH_IMM_0_OPCODE_X0 = 7,
|
||||
MINIH_IMM_0_OPCODE_X1 = 9,
|
||||
MM_OPCODE_X0 = 6,
|
||||
MM_OPCODE_X1 = 7,
|
||||
MNZB_SPECIAL_0_OPCODE_X0 = 19,
|
||||
MNZB_SPECIAL_0_OPCODE_X1 = 18,
|
||||
MNZH_SPECIAL_0_OPCODE_X0 = 20,
|
||||
MNZH_SPECIAL_0_OPCODE_X1 = 19,
|
||||
MNZ_SPECIAL_0_OPCODE_X0 = 21,
|
||||
MNZ_SPECIAL_0_OPCODE_X1 = 20,
|
||||
MNZ_SPECIAL_1_OPCODE_Y0 = 0,
|
||||
MNZ_SPECIAL_1_OPCODE_Y1 = 1,
|
||||
MOVEI_IMM_1_OPCODE_SN = 0,
|
||||
MOVE_RR_IMM_0_OPCODE_SN = 8,
|
||||
MTSPR_IMM_0_OPCODE_X1 = 10,
|
||||
MULHHA_SS_SPECIAL_0_OPCODE_X0 = 22,
|
||||
MULHHA_SS_SPECIAL_7_OPCODE_Y0 = 0,
|
||||
MULHHA_SU_SPECIAL_0_OPCODE_X0 = 23,
|
||||
MULHHA_UU_SPECIAL_0_OPCODE_X0 = 24,
|
||||
MULHHA_UU_SPECIAL_7_OPCODE_Y0 = 1,
|
||||
MULHHSA_UU_SPECIAL_0_OPCODE_X0 = 25,
|
||||
MULHH_SS_SPECIAL_0_OPCODE_X0 = 26,
|
||||
MULHH_SS_SPECIAL_6_OPCODE_Y0 = 0,
|
||||
MULHH_SU_SPECIAL_0_OPCODE_X0 = 27,
|
||||
MULHH_UU_SPECIAL_0_OPCODE_X0 = 28,
|
||||
MULHH_UU_SPECIAL_6_OPCODE_Y0 = 1,
|
||||
MULHLA_SS_SPECIAL_0_OPCODE_X0 = 29,
|
||||
MULHLA_SU_SPECIAL_0_OPCODE_X0 = 30,
|
||||
MULHLA_US_SPECIAL_0_OPCODE_X0 = 31,
|
||||
MULHLA_UU_SPECIAL_0_OPCODE_X0 = 32,
|
||||
MULHLSA_UU_SPECIAL_0_OPCODE_X0 = 33,
|
||||
MULHLSA_UU_SPECIAL_5_OPCODE_Y0 = 0,
|
||||
MULHL_SS_SPECIAL_0_OPCODE_X0 = 34,
|
||||
MULHL_SU_SPECIAL_0_OPCODE_X0 = 35,
|
||||
MULHL_US_SPECIAL_0_OPCODE_X0 = 36,
|
||||
MULHL_UU_SPECIAL_0_OPCODE_X0 = 37,
|
||||
MULLLA_SS_SPECIAL_0_OPCODE_X0 = 38,
|
||||
MULLLA_SS_SPECIAL_7_OPCODE_Y0 = 2,
|
||||
MULLLA_SU_SPECIAL_0_OPCODE_X0 = 39,
|
||||
MULLLA_UU_SPECIAL_0_OPCODE_X0 = 40,
|
||||
MULLLA_UU_SPECIAL_7_OPCODE_Y0 = 3,
|
||||
MULLLSA_UU_SPECIAL_0_OPCODE_X0 = 41,
|
||||
MULLL_SS_SPECIAL_0_OPCODE_X0 = 42,
|
||||
MULLL_SS_SPECIAL_6_OPCODE_Y0 = 2,
|
||||
MULLL_SU_SPECIAL_0_OPCODE_X0 = 43,
|
||||
MULLL_UU_SPECIAL_0_OPCODE_X0 = 44,
|
||||
MULLL_UU_SPECIAL_6_OPCODE_Y0 = 3,
|
||||
MVNZ_SPECIAL_0_OPCODE_X0 = 45,
|
||||
MVNZ_SPECIAL_1_OPCODE_Y0 = 1,
|
||||
MVZ_SPECIAL_0_OPCODE_X0 = 46,
|
||||
MVZ_SPECIAL_1_OPCODE_Y0 = 2,
|
||||
MZB_SPECIAL_0_OPCODE_X0 = 47,
|
||||
MZB_SPECIAL_0_OPCODE_X1 = 21,
|
||||
MZH_SPECIAL_0_OPCODE_X0 = 48,
|
||||
MZH_SPECIAL_0_OPCODE_X1 = 22,
|
||||
MZ_SPECIAL_0_OPCODE_X0 = 49,
|
||||
MZ_SPECIAL_0_OPCODE_X1 = 23,
|
||||
MZ_SPECIAL_1_OPCODE_Y0 = 3,
|
||||
MZ_SPECIAL_1_OPCODE_Y1 = 2,
|
||||
NAP_UN_0_SHUN_0_OPCODE_X1 = 16,
|
||||
NOP_NOREG_RR_IMM_0_OPCODE_SN = 2,
|
||||
NOP_UN_0_SHUN_0_OPCODE_X0 = 6,
|
||||
NOP_UN_0_SHUN_0_OPCODE_X1 = 17,
|
||||
NOP_UN_0_SHUN_0_OPCODE_Y0 = 6,
|
||||
NOP_UN_0_SHUN_0_OPCODE_Y1 = 3,
|
||||
NOREG_RR_IMM_0_OPCODE_SN = 0,
|
||||
NOR_SPECIAL_0_OPCODE_X0 = 50,
|
||||
NOR_SPECIAL_0_OPCODE_X1 = 24,
|
||||
NOR_SPECIAL_2_OPCODE_Y0 = 1,
|
||||
NOR_SPECIAL_2_OPCODE_Y1 = 1,
|
||||
ORI_IMM_0_OPCODE_X0 = 8,
|
||||
ORI_IMM_0_OPCODE_X1 = 11,
|
||||
ORI_OPCODE_Y0 = 11,
|
||||
ORI_OPCODE_Y1 = 9,
|
||||
OR_SPECIAL_0_OPCODE_X0 = 51,
|
||||
OR_SPECIAL_0_OPCODE_X1 = 25,
|
||||
OR_SPECIAL_2_OPCODE_Y0 = 2,
|
||||
OR_SPECIAL_2_OPCODE_Y1 = 2,
|
||||
PACKBS_U_SPECIAL_0_OPCODE_X0 = 103,
|
||||
PACKBS_U_SPECIAL_0_OPCODE_X1 = 73,
|
||||
PACKHB_SPECIAL_0_OPCODE_X0 = 52,
|
||||
PACKHB_SPECIAL_0_OPCODE_X1 = 26,
|
||||
PACKHS_SPECIAL_0_OPCODE_X0 = 102,
|
||||
PACKHS_SPECIAL_0_OPCODE_X1 = 72,
|
||||
PACKLB_SPECIAL_0_OPCODE_X0 = 53,
|
||||
PACKLB_SPECIAL_0_OPCODE_X1 = 27,
|
||||
PCNT_UN_0_SHUN_0_OPCODE_X0 = 7,
|
||||
PCNT_UN_0_SHUN_0_OPCODE_Y0 = 7,
|
||||
RLI_SHUN_0_OPCODE_X0 = 1,
|
||||
RLI_SHUN_0_OPCODE_X1 = 1,
|
||||
RLI_SHUN_0_OPCODE_Y0 = 1,
|
||||
RLI_SHUN_0_OPCODE_Y1 = 1,
|
||||
RL_SPECIAL_0_OPCODE_X0 = 54,
|
||||
RL_SPECIAL_0_OPCODE_X1 = 28,
|
||||
RL_SPECIAL_3_OPCODE_Y0 = 0,
|
||||
RL_SPECIAL_3_OPCODE_Y1 = 0,
|
||||
RR_IMM_0_OPCODE_SN = 0,
|
||||
S1A_SPECIAL_0_OPCODE_X0 = 55,
|
||||
S1A_SPECIAL_0_OPCODE_X1 = 29,
|
||||
S1A_SPECIAL_0_OPCODE_Y0 = 1,
|
||||
S1A_SPECIAL_0_OPCODE_Y1 = 1,
|
||||
S2A_SPECIAL_0_OPCODE_X0 = 56,
|
||||
S2A_SPECIAL_0_OPCODE_X1 = 30,
|
||||
S2A_SPECIAL_0_OPCODE_Y0 = 2,
|
||||
S2A_SPECIAL_0_OPCODE_Y1 = 2,
|
||||
S3A_SPECIAL_0_OPCODE_X0 = 57,
|
||||
S3A_SPECIAL_0_OPCODE_X1 = 31,
|
||||
S3A_SPECIAL_5_OPCODE_Y0 = 1,
|
||||
S3A_SPECIAL_5_OPCODE_Y1 = 1,
|
||||
SADAB_U_SPECIAL_0_OPCODE_X0 = 58,
|
||||
SADAH_SPECIAL_0_OPCODE_X0 = 59,
|
||||
SADAH_U_SPECIAL_0_OPCODE_X0 = 60,
|
||||
SADB_U_SPECIAL_0_OPCODE_X0 = 61,
|
||||
SADH_SPECIAL_0_OPCODE_X0 = 62,
|
||||
SADH_U_SPECIAL_0_OPCODE_X0 = 63,
|
||||
SBADD_IMM_0_OPCODE_X1 = 28,
|
||||
SB_OPCODE_Y2 = 5,
|
||||
SB_SPECIAL_0_OPCODE_X1 = 32,
|
||||
SEQB_SPECIAL_0_OPCODE_X0 = 64,
|
||||
SEQB_SPECIAL_0_OPCODE_X1 = 33,
|
||||
SEQH_SPECIAL_0_OPCODE_X0 = 65,
|
||||
SEQH_SPECIAL_0_OPCODE_X1 = 34,
|
||||
SEQIB_IMM_0_OPCODE_X0 = 9,
|
||||
SEQIB_IMM_0_OPCODE_X1 = 12,
|
||||
SEQIH_IMM_0_OPCODE_X0 = 10,
|
||||
SEQIH_IMM_0_OPCODE_X1 = 13,
|
||||
SEQI_IMM_0_OPCODE_X0 = 11,
|
||||
SEQI_IMM_0_OPCODE_X1 = 14,
|
||||
SEQI_OPCODE_Y0 = 12,
|
||||
SEQI_OPCODE_Y1 = 10,
|
||||
SEQ_SPECIAL_0_OPCODE_X0 = 66,
|
||||
SEQ_SPECIAL_0_OPCODE_X1 = 35,
|
||||
SEQ_SPECIAL_5_OPCODE_Y0 = 2,
|
||||
SEQ_SPECIAL_5_OPCODE_Y1 = 2,
|
||||
SHADD_IMM_0_OPCODE_X1 = 29,
|
||||
SHL8II_IMM_0_OPCODE_SN = 3,
|
||||
SHLB_SPECIAL_0_OPCODE_X0 = 67,
|
||||
SHLB_SPECIAL_0_OPCODE_X1 = 36,
|
||||
SHLH_SPECIAL_0_OPCODE_X0 = 68,
|
||||
SHLH_SPECIAL_0_OPCODE_X1 = 37,
|
||||
SHLIB_SHUN_0_OPCODE_X0 = 2,
|
||||
SHLIB_SHUN_0_OPCODE_X1 = 2,
|
||||
SHLIH_SHUN_0_OPCODE_X0 = 3,
|
||||
SHLIH_SHUN_0_OPCODE_X1 = 3,
|
||||
SHLI_SHUN_0_OPCODE_X0 = 4,
|
||||
SHLI_SHUN_0_OPCODE_X1 = 4,
|
||||
SHLI_SHUN_0_OPCODE_Y0 = 2,
|
||||
SHLI_SHUN_0_OPCODE_Y1 = 2,
|
||||
SHL_SPECIAL_0_OPCODE_X0 = 69,
|
||||
SHL_SPECIAL_0_OPCODE_X1 = 38,
|
||||
SHL_SPECIAL_3_OPCODE_Y0 = 1,
|
||||
SHL_SPECIAL_3_OPCODE_Y1 = 1,
|
||||
SHR1_RR_IMM_0_OPCODE_SN = 9,
|
||||
SHRB_SPECIAL_0_OPCODE_X0 = 70,
|
||||
SHRB_SPECIAL_0_OPCODE_X1 = 39,
|
||||
SHRH_SPECIAL_0_OPCODE_X0 = 71,
|
||||
SHRH_SPECIAL_0_OPCODE_X1 = 40,
|
||||
SHRIB_SHUN_0_OPCODE_X0 = 5,
|
||||
SHRIB_SHUN_0_OPCODE_X1 = 5,
|
||||
SHRIH_SHUN_0_OPCODE_X0 = 6,
|
||||
SHRIH_SHUN_0_OPCODE_X1 = 6,
|
||||
SHRI_SHUN_0_OPCODE_X0 = 7,
|
||||
SHRI_SHUN_0_OPCODE_X1 = 7,
|
||||
SHRI_SHUN_0_OPCODE_Y0 = 3,
|
||||
SHRI_SHUN_0_OPCODE_Y1 = 3,
|
||||
SHR_SPECIAL_0_OPCODE_X0 = 72,
|
||||
SHR_SPECIAL_0_OPCODE_X1 = 41,
|
||||
SHR_SPECIAL_3_OPCODE_Y0 = 2,
|
||||
SHR_SPECIAL_3_OPCODE_Y1 = 2,
|
||||
SHUN_0_OPCODE_X0 = 7,
|
||||
SHUN_0_OPCODE_X1 = 8,
|
||||
SHUN_0_OPCODE_Y0 = 13,
|
||||
SHUN_0_OPCODE_Y1 = 11,
|
||||
SH_OPCODE_Y2 = 6,
|
||||
SH_SPECIAL_0_OPCODE_X1 = 42,
|
||||
SLTB_SPECIAL_0_OPCODE_X0 = 73,
|
||||
SLTB_SPECIAL_0_OPCODE_X1 = 43,
|
||||
SLTB_U_SPECIAL_0_OPCODE_X0 = 74,
|
||||
SLTB_U_SPECIAL_0_OPCODE_X1 = 44,
|
||||
SLTEB_SPECIAL_0_OPCODE_X0 = 75,
|
||||
SLTEB_SPECIAL_0_OPCODE_X1 = 45,
|
||||
SLTEB_U_SPECIAL_0_OPCODE_X0 = 76,
|
||||
SLTEB_U_SPECIAL_0_OPCODE_X1 = 46,
|
||||
SLTEH_SPECIAL_0_OPCODE_X0 = 77,
|
||||
SLTEH_SPECIAL_0_OPCODE_X1 = 47,
|
||||
SLTEH_U_SPECIAL_0_OPCODE_X0 = 78,
|
||||
SLTEH_U_SPECIAL_0_OPCODE_X1 = 48,
|
||||
SLTE_SPECIAL_0_OPCODE_X0 = 79,
|
||||
SLTE_SPECIAL_0_OPCODE_X1 = 49,
|
||||
SLTE_SPECIAL_4_OPCODE_Y0 = 0,
|
||||
SLTE_SPECIAL_4_OPCODE_Y1 = 0,
|
||||
SLTE_U_SPECIAL_0_OPCODE_X0 = 80,
|
||||
SLTE_U_SPECIAL_0_OPCODE_X1 = 50,
|
||||
SLTE_U_SPECIAL_4_OPCODE_Y0 = 1,
|
||||
SLTE_U_SPECIAL_4_OPCODE_Y1 = 1,
|
||||
SLTH_SPECIAL_0_OPCODE_X0 = 81,
|
||||
SLTH_SPECIAL_0_OPCODE_X1 = 51,
|
||||
SLTH_U_SPECIAL_0_OPCODE_X0 = 82,
|
||||
SLTH_U_SPECIAL_0_OPCODE_X1 = 52,
|
||||
SLTIB_IMM_0_OPCODE_X0 = 12,
|
||||
SLTIB_IMM_0_OPCODE_X1 = 15,
|
||||
SLTIB_U_IMM_0_OPCODE_X0 = 13,
|
||||
SLTIB_U_IMM_0_OPCODE_X1 = 16,
|
||||
SLTIH_IMM_0_OPCODE_X0 = 14,
|
||||
SLTIH_IMM_0_OPCODE_X1 = 17,
|
||||
SLTIH_U_IMM_0_OPCODE_X0 = 15,
|
||||
SLTIH_U_IMM_0_OPCODE_X1 = 18,
|
||||
SLTI_IMM_0_OPCODE_X0 = 16,
|
||||
SLTI_IMM_0_OPCODE_X1 = 19,
|
||||
SLTI_OPCODE_Y0 = 14,
|
||||
SLTI_OPCODE_Y1 = 12,
|
||||
SLTI_U_IMM_0_OPCODE_X0 = 17,
|
||||
SLTI_U_IMM_0_OPCODE_X1 = 20,
|
||||
SLTI_U_OPCODE_Y0 = 15,
|
||||
SLTI_U_OPCODE_Y1 = 13,
|
||||
SLT_SPECIAL_0_OPCODE_X0 = 83,
|
||||
SLT_SPECIAL_0_OPCODE_X1 = 53,
|
||||
SLT_SPECIAL_4_OPCODE_Y0 = 2,
|
||||
SLT_SPECIAL_4_OPCODE_Y1 = 2,
|
||||
SLT_U_SPECIAL_0_OPCODE_X0 = 84,
|
||||
SLT_U_SPECIAL_0_OPCODE_X1 = 54,
|
||||
SLT_U_SPECIAL_4_OPCODE_Y0 = 3,
|
||||
SLT_U_SPECIAL_4_OPCODE_Y1 = 3,
|
||||
SNEB_SPECIAL_0_OPCODE_X0 = 85,
|
||||
SNEB_SPECIAL_0_OPCODE_X1 = 55,
|
||||
SNEH_SPECIAL_0_OPCODE_X0 = 86,
|
||||
SNEH_SPECIAL_0_OPCODE_X1 = 56,
|
||||
SNE_SPECIAL_0_OPCODE_X0 = 87,
|
||||
SNE_SPECIAL_0_OPCODE_X1 = 57,
|
||||
SNE_SPECIAL_5_OPCODE_Y0 = 3,
|
||||
SNE_SPECIAL_5_OPCODE_Y1 = 3,
|
||||
SPECIAL_0_OPCODE_X0 = 0,
|
||||
SPECIAL_0_OPCODE_X1 = 1,
|
||||
SPECIAL_0_OPCODE_Y0 = 1,
|
||||
SPECIAL_0_OPCODE_Y1 = 1,
|
||||
SPECIAL_1_OPCODE_Y0 = 2,
|
||||
SPECIAL_1_OPCODE_Y1 = 2,
|
||||
SPECIAL_2_OPCODE_Y0 = 3,
|
||||
SPECIAL_2_OPCODE_Y1 = 3,
|
||||
SPECIAL_3_OPCODE_Y0 = 4,
|
||||
SPECIAL_3_OPCODE_Y1 = 4,
|
||||
SPECIAL_4_OPCODE_Y0 = 5,
|
||||
SPECIAL_4_OPCODE_Y1 = 5,
|
||||
SPECIAL_5_OPCODE_Y0 = 6,
|
||||
SPECIAL_5_OPCODE_Y1 = 6,
|
||||
SPECIAL_6_OPCODE_Y0 = 7,
|
||||
SPECIAL_7_OPCODE_Y0 = 8,
|
||||
SRAB_SPECIAL_0_OPCODE_X0 = 88,
|
||||
SRAB_SPECIAL_0_OPCODE_X1 = 58,
|
||||
SRAH_SPECIAL_0_OPCODE_X0 = 89,
|
||||
SRAH_SPECIAL_0_OPCODE_X1 = 59,
|
||||
SRAIB_SHUN_0_OPCODE_X0 = 8,
|
||||
SRAIB_SHUN_0_OPCODE_X1 = 8,
|
||||
SRAIH_SHUN_0_OPCODE_X0 = 9,
|
||||
SRAIH_SHUN_0_OPCODE_X1 = 9,
|
||||
SRAI_SHUN_0_OPCODE_X0 = 10,
|
||||
SRAI_SHUN_0_OPCODE_X1 = 10,
|
||||
SRAI_SHUN_0_OPCODE_Y0 = 4,
|
||||
SRAI_SHUN_0_OPCODE_Y1 = 4,
|
||||
SRA_SPECIAL_0_OPCODE_X0 = 90,
|
||||
SRA_SPECIAL_0_OPCODE_X1 = 60,
|
||||
SRA_SPECIAL_3_OPCODE_Y0 = 3,
|
||||
SRA_SPECIAL_3_OPCODE_Y1 = 3,
|
||||
SUBBS_U_SPECIAL_0_OPCODE_X0 = 100,
|
||||
SUBBS_U_SPECIAL_0_OPCODE_X1 = 70,
|
||||
SUBB_SPECIAL_0_OPCODE_X0 = 91,
|
||||
SUBB_SPECIAL_0_OPCODE_X1 = 61,
|
||||
SUBHS_SPECIAL_0_OPCODE_X0 = 101,
|
||||
SUBHS_SPECIAL_0_OPCODE_X1 = 71,
|
||||
SUBH_SPECIAL_0_OPCODE_X0 = 92,
|
||||
SUBH_SPECIAL_0_OPCODE_X1 = 62,
|
||||
SUBS_SPECIAL_0_OPCODE_X0 = 97,
|
||||
SUBS_SPECIAL_0_OPCODE_X1 = 67,
|
||||
SUB_SPECIAL_0_OPCODE_X0 = 93,
|
||||
SUB_SPECIAL_0_OPCODE_X1 = 63,
|
||||
SUB_SPECIAL_0_OPCODE_Y0 = 3,
|
||||
SUB_SPECIAL_0_OPCODE_Y1 = 3,
|
||||
SWADD_IMM_0_OPCODE_X1 = 30,
|
||||
SWINT0_UN_0_SHUN_0_OPCODE_X1 = 18,
|
||||
SWINT1_UN_0_SHUN_0_OPCODE_X1 = 19,
|
||||
SWINT2_UN_0_SHUN_0_OPCODE_X1 = 20,
|
||||
SWINT3_UN_0_SHUN_0_OPCODE_X1 = 21,
|
||||
SW_OPCODE_Y2 = 7,
|
||||
SW_SPECIAL_0_OPCODE_X1 = 64,
|
||||
TBLIDXB0_UN_0_SHUN_0_OPCODE_X0 = 8,
|
||||
TBLIDXB0_UN_0_SHUN_0_OPCODE_Y0 = 8,
|
||||
TBLIDXB1_UN_0_SHUN_0_OPCODE_X0 = 9,
|
||||
TBLIDXB1_UN_0_SHUN_0_OPCODE_Y0 = 9,
|
||||
TBLIDXB2_UN_0_SHUN_0_OPCODE_X0 = 10,
|
||||
TBLIDXB2_UN_0_SHUN_0_OPCODE_Y0 = 10,
|
||||
TBLIDXB3_UN_0_SHUN_0_OPCODE_X0 = 11,
|
||||
TBLIDXB3_UN_0_SHUN_0_OPCODE_Y0 = 11,
|
||||
TNS_UN_0_SHUN_0_OPCODE_X1 = 22,
|
||||
UN_0_SHUN_0_OPCODE_X0 = 11,
|
||||
UN_0_SHUN_0_OPCODE_X1 = 11,
|
||||
UN_0_SHUN_0_OPCODE_Y0 = 5,
|
||||
UN_0_SHUN_0_OPCODE_Y1 = 5,
|
||||
WH64_UN_0_SHUN_0_OPCODE_X1 = 23,
|
||||
XORI_IMM_0_OPCODE_X0 = 2,
|
||||
XORI_IMM_0_OPCODE_X1 = 21,
|
||||
XOR_SPECIAL_0_OPCODE_X0 = 94,
|
||||
XOR_SPECIAL_0_OPCODE_X1 = 65,
|
||||
XOR_SPECIAL_2_OPCODE_Y0 = 3,
|
||||
XOR_SPECIAL_2_OPCODE_Y1 = 3
|
||||
};
|
||||
|
||||
#endif /* !_TILE_OPCODE_CONSTANTS_H */
|
|
@@ -0,0 +1,339 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PAGE_H
#define _ASM_TILE_PAGE_H

#include <linux/const.h>

/* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
#define PAGE_SHIFT 16
#define HPAGE_SHIFT 24

#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)

#define PAGE_MASK (~(PAGE_SIZE - 1))
#define HPAGE_MASK (~(HPAGE_SIZE - 1))

#ifdef __KERNEL__

#include <hv/hypervisor.h>
#include <arch/chip.h>

/*
 * The {,H}PAGE_SHIFT values must match the HV_LOG2_PAGE_SIZE_xxx
 * definitions in <hv/hypervisor.h>.  We validate this at build time
 * here, and again at runtime during early boot.  We provide a
 * separate definition since userspace doesn't have <hv/hypervisor.h>.
 *
 * Be careful to distinguish PAGE_SHIFT from HV_PTE_INDEX_PFN, since
 * they are the same on i386 but not TILE.
 */
#if HV_LOG2_PAGE_SIZE_SMALL != PAGE_SHIFT
# error Small page size mismatch in Linux
#endif
#if HV_LOG2_PAGE_SIZE_LARGE != HPAGE_SHIFT
# error Huge page size mismatch in Linux
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/string.h>

struct page;

static inline void clear_page(void *page)
{
        memset(page, 0, PAGE_SIZE);
}

static inline void copy_page(void *to, void *from)
{
        memcpy(to, from, PAGE_SIZE);
}

static inline void clear_user_page(void *page, unsigned long vaddr,
                                   struct page *pg)
{
        clear_page(page);
}

static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
                                  struct page *topage)
{
        copy_page(to, from);
}

/*
 * Hypervisor page tables are made of the same basic structure.
 */

typedef __u64 pteval_t;
typedef __u64 pmdval_t;
typedef __u64 pudval_t;
typedef __u64 pgdval_t;
typedef __u64 pgprotval_t;

typedef HV_PTE pte_t;
typedef HV_PTE pgd_t;
typedef HV_PTE pgprot_t;

/*
 * User L2 page tables are managed as one L2 page table per page,
 * because we use the page allocator for them.  This keeps the allocation
 * simple and makes it potentially useful to implement HIGHPTE at some point.
 * However, it's also inefficient, since L2 page tables are much smaller
 * than pages (currently 2KB vs 64KB).  So we should revisit this.
 */
typedef struct page *pgtable_t;

/* Must be a macro since it is used to create constants. */
#define __pgprot(val) hv_pte(val)

static inline u64 pgprot_val(pgprot_t pgprot)
{
        return hv_pte_val(pgprot);
}

static inline u64 pte_val(pte_t pte)
{
        return hv_pte_val(pte);
}

static inline u64 pgd_val(pgd_t pgd)
{
        return hv_pte_val(pgd);
}

#ifdef __tilegx__

typedef HV_PTE pmd_t;

static inline u64 pmd_val(pmd_t pmd)
{
        return hv_pte_val(pmd);
}

#endif

#endif /* !__ASSEMBLY__ */

#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)

#define HUGE_MAX_HSTATE 2

#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif

/* Each memory controller has PAs distinct in their high bits. */
#define NR_PA_HIGHBIT_SHIFT (CHIP_PA_WIDTH() - CHIP_LOG_NUM_MSHIMS())
#define NR_PA_HIGHBIT_VALUES (1 << CHIP_LOG_NUM_MSHIMS())
#define __pa_to_highbits(pa) ((phys_addr_t)(pa) >> NR_PA_HIGHBIT_SHIFT)
#define __pfn_to_highbits(pfn) ((pfn) >> (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT))

#ifdef __tilegx__

/*
 * We reserve the lower half of memory for user-space programs, and the
 * upper half for system code.  We re-map all of physical memory in the
 * upper half, which takes a quarter of our VA space.  Then we have
 * the vmalloc regions.  The supervisor code lives at 0xfffffff700000000,
 * with the hypervisor above that.
 *
 * Loadable kernel modules are placed immediately after the static
 * supervisor code, with each being allocated a 256MB region of
 * address space, so we don't have to worry about the range of "jal"
 * and other branch instructions.
 *
 * For now we keep life simple and just allocate one pmd (4GB) for vmalloc.
 * Similarly, for now we don't play any struct page mapping games.
 */

#if CHIP_PA_WIDTH() + 2 > CHIP_VA_WIDTH()
# error Too much PA to map with the VA available!
#endif
#define HALF_VA_SPACE (_AC(1, UL) << (CHIP_VA_WIDTH() - 1))

#define MEM_LOW_END (HALF_VA_SPACE - 1)         /* low half */
#define MEM_HIGH_START (-HALF_VA_SPACE)         /* high half */
#define PAGE_OFFSET MEM_HIGH_START
#define _VMALLOC_START _AC(0xfffffff500000000, UL)   /* 4 GB */
#define HUGE_VMAP_BASE _AC(0xfffffff600000000, UL)   /* 4 GB */
#define MEM_SV_START _AC(0xfffffff700000000, UL)     /* 256 MB */
#define MEM_SV_INTRPT MEM_SV_START
#define MEM_MODULE_START _AC(0xfffffff710000000, UL) /* 256 MB */
#define MEM_MODULE_END (MEM_MODULE_START + (256*1024*1024))
#define MEM_HV_START _AC(0xfffffff800000000, UL)     /* 32 GB */

/* Highest DTLB address we will use */
#define KERNEL_HIGH_VADDR MEM_SV_START

/* Since we don't currently provide any fixmaps, we use an impossible VA. */
#define FIXADDR_TOP MEM_HV_START

#else /* !__tilegx__ */

/*
 * A PAGE_OFFSET of 0xC0000000 means that the kernel has
 * a virtual address space of one gigabyte, which limits the
 * amount of physical memory you can use to about 768MB.
 * If you want more physical memory than this then see the CONFIG_HIGHMEM
 * option in the kernel configuration.
 *
 * The top two 16MB chunks in the table below (VIRT and HV) are
 * unavailable to Linux.  Since the kernel interrupt vectors must live
 * at 0xfd000000, we map all of the bottom of RAM at this address with
 * a huge page table entry to minimize its ITLB footprint (as well as
 * at PAGE_OFFSET).  The last architected requirement is that user
 * interrupt vectors live at 0xfc000000, so we make that range of
 * memory available to user processes.  The remaining regions are sized
 * as shown; after the first four addresses, we show "typical" values,
 * since the actual addresses depend on kernel #defines.
 *
 * MEM_VIRT_INTRPT                 0xff000000
 * MEM_HV_INTRPT                   0xfe000000
 * MEM_SV_INTRPT (kernel code)     0xfd000000
 * MEM_USER_INTRPT (user vector)   0xfc000000
 * FIX_KMAP_xxx                    0xf8000000 (via NR_CPUS * KM_TYPE_NR)
 * PKMAP_BASE                      0xf7000000 (via LAST_PKMAP)
 * HUGE_VMAP                       0xf3000000 (via CONFIG_NR_HUGE_VMAPS)
 * VMALLOC_START                   0xf0000000 (via __VMALLOC_RESERVE)
 * mapped LOWMEM                   0xc0000000
 */

#define MEM_USER_INTRPT _AC(0xfc000000, UL)
#define MEM_SV_INTRPT _AC(0xfd000000, UL)
#define MEM_HV_INTRPT _AC(0xfe000000, UL)
#define MEM_VIRT_INTRPT _AC(0xff000000, UL)

#define INTRPT_SIZE 0x4000

/* Tolerate page size larger than the architecture interrupt region size. */
#if PAGE_SIZE > INTRPT_SIZE
#undef INTRPT_SIZE
#define INTRPT_SIZE PAGE_SIZE
#endif

#define KERNEL_HIGH_VADDR MEM_USER_INTRPT
#define FIXADDR_TOP (KERNEL_HIGH_VADDR - PAGE_SIZE)

#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)

/* On 32-bit architectures we mix kernel modules in with other vmaps. */
#define MEM_MODULE_START VMALLOC_START
#define MEM_MODULE_END VMALLOC_END

#endif /* __tilegx__ */

#ifndef __ASSEMBLY__

#ifdef CONFIG_HIGHMEM

/* Map kernel virtual addresses to page frames, in HPAGE_SIZE chunks. */
extern unsigned long pbase_map[];
extern void *vbase_map[];

static inline unsigned long kaddr_to_pfn(const volatile void *_kaddr)
{
        unsigned long kaddr = (unsigned long)_kaddr;
        return pbase_map[kaddr >> HPAGE_SHIFT] +
                ((kaddr & (HPAGE_SIZE - 1)) >> PAGE_SHIFT);
}

static inline void *pfn_to_kaddr(unsigned long pfn)
{
        return vbase_map[__pfn_to_highbits(pfn)] + (pfn << PAGE_SHIFT);
}

static inline phys_addr_t virt_to_phys(const volatile void *kaddr)
{
        unsigned long pfn = kaddr_to_pfn(kaddr);
        return ((phys_addr_t)pfn << PAGE_SHIFT) +
                ((unsigned long)kaddr & (PAGE_SIZE-1));
}

static inline void *phys_to_virt(phys_addr_t paddr)
{
        return pfn_to_kaddr(paddr >> PAGE_SHIFT) + (paddr & (PAGE_SIZE-1));
}

/* With HIGHMEM, we pack PAGE_OFFSET through high_memory with all valid VAs. */
static inline int virt_addr_valid(const volatile void *kaddr)
{
        extern void *high_memory;  /* copied from <linux/mm.h> */
        return ((unsigned long)kaddr >= PAGE_OFFSET && kaddr < high_memory);
}

#else /* !CONFIG_HIGHMEM */

static inline unsigned long kaddr_to_pfn(const volatile void *kaddr)
{
        return ((unsigned long)kaddr - PAGE_OFFSET) >> PAGE_SHIFT;
}

static inline void *pfn_to_kaddr(unsigned long pfn)
{
        return (void *)((pfn << PAGE_SHIFT) + PAGE_OFFSET);
}

static inline phys_addr_t virt_to_phys(const volatile void *kaddr)
{
        return (phys_addr_t)((unsigned long)kaddr - PAGE_OFFSET);
}

static inline void *phys_to_virt(phys_addr_t paddr)
{
        return (void *)((unsigned long)paddr + PAGE_OFFSET);
}

/* Check that the given address is within some mapped range of PAs. */
#define virt_addr_valid(kaddr) pfn_valid(kaddr_to_pfn(kaddr))

#endif /* !CONFIG_HIGHMEM */

/* All callers are not consistent in how they call these functions. */
#define __pa(kaddr) virt_to_phys((void *)(unsigned long)(kaddr))
#define __va(paddr) phys_to_virt((phys_addr_t)(paddr))

extern int devmem_is_allowed(unsigned long pagenr);

#ifdef CONFIG_FLATMEM
static inline int pfn_valid(unsigned long pfn)
{
        return pfn < max_mapnr;
}
#endif

/* Provide as macros since these require some other headers included. */
#define page_to_pa(page) ((phys_addr_t)(page_to_pfn(page)) << PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(kaddr_to_pfn(kaddr))
#define page_to_virt(page) pfn_to_kaddr(page_to_pfn(page))

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);

#endif /* !__ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS \
        (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* __KERNEL__ */

#endif /* _ASM_TILE_PAGE_H */

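The page-size numerology in <asm/page.h> is fixed entirely by PAGE_SHIFT and HPAGE_SHIFT. As a quick illustration only (ordinary user-space C, not part of the kernel tree), the sketch below recomputes the sizes those shifts imply: a 64 KB small page, a 16 MB huge page, and HUGETLB_PAGE_ORDER = 8, i.e. 256 small pages per huge page.

#include <stdio.h>

int main(void)
{
        unsigned long page_size  = 1UL << 16;   /* PAGE_SHIFT  -> 64 KB */
        unsigned long hpage_size = 1UL << 24;   /* HPAGE_SHIFT -> 16 MB */

        /* HUGETLB_PAGE_ORDER = HPAGE_SHIFT - PAGE_SHIFT = 8,
         * so one huge page covers 2^8 = 256 small pages. */
        printf("small=%lu huge=%lu pages-per-huge=%lu\n",
               page_size, hpage_size, hpage_size / page_size);
        return 0;
}
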
@@ -0,0 +1 @@
#include <asm-generic/param.h>

@@ -0,0 +1,117 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PCI_BRIDGE_H
#define _ASM_TILE_PCI_BRIDGE_H

#include <linux/ioport.h>
#include <linux/pci.h>

struct device_node;
struct pci_controller;

/*
 * pci_io_base returns the memory address at which you can access
 * the I/O space for PCI bus number `bus' (or NULL on error).
 */
extern void __iomem *pci_bus_io_base(unsigned int bus);
extern unsigned long pci_bus_io_base_phys(unsigned int bus);
extern unsigned long pci_bus_mem_base_phys(unsigned int bus);

/* Allocate a new PCI host bridge structure */
extern struct pci_controller *pcibios_alloc_controller(void);

/* Helper function for setting up resources */
extern void pci_init_resource(struct resource *res, unsigned long start,
                              unsigned long end, int flags, char *name);

/* Get the PCI host controller for a bus */
extern struct pci_controller *pci_bus_to_hose(int bus);

/*
 * Structure of a PCI controller (host bridge)
 */
struct pci_controller {
        int index;              /* PCI domain number */
        struct pci_bus *root_bus;

        int first_busno;
        int last_busno;

        int hv_cfg_fd[2];       /* config{0,1} fds for this PCIe controller */
        int hv_mem_fd;          /* fd to Hypervisor for MMIO operations */

        struct pci_ops *ops;

        int irq_base;           /* Base IRQ from the Hypervisor */
        int plx_gen1;           /* flag for PLX Gen 1 configuration */

        /* Address ranges that are routed to this controller/bridge. */
        struct resource mem_resources[3];
};

static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus)
{
        return bus->sysdata;
}

extern void setup_indirect_pci_nomap(struct pci_controller *hose,
                                     void __iomem *cfg_addr, void __iomem *cfg_data);
extern void setup_indirect_pci(struct pci_controller *hose,
                               u32 cfg_addr, u32 cfg_data);
extern void setup_grackle(struct pci_controller *hose);

extern unsigned char common_swizzle(struct pci_dev *, unsigned char *);

/*
 * The following code swizzles for exactly one bridge.  The routine
 * common_swizzle below handles multiple bridges.  But there are some
 * boards that don't follow the PCI spec's suggestion, so we
 * break this piece out separately.
 */
static inline unsigned char bridge_swizzle(unsigned char pin,
                                           unsigned char idsel)
{
        return (((pin-1) + idsel) % 4) + 1;
}

/*
 * The following macro is used to look up irqs in a standard table
 * format for those PPC systems that do not already have PCI
 * interrupts properly routed.
 */
/* FIXME - double check this */
#define PCI_IRQ_TABLE_LOOKUP ({ \
        long _ctl_ = -1; \
        if (idsel >= min_idsel && idsel <= max_idsel && pin <= irqs_per_slot) \
                _ctl_ = pci_irq_table[idsel - min_idsel][pin-1]; \
        _ctl_; \
})

/*
 * Scan the buses below a given PCI host bridge and assign suitable
 * resources to all devices found.
 */
extern int pciauto_bus_scan(struct pci_controller *, int);

#ifdef CONFIG_PCI
extern unsigned long pci_address_to_pio(phys_addr_t address);
#else
static inline unsigned long pci_address_to_pio(phys_addr_t address)
{
        return (unsigned long)-1;
}
#endif

#endif /* _ASM_TILE_PCI_BRIDGE_H */

@@ -0,0 +1,128 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PCI_H
#define _ASM_TILE_PCI_H

#include <asm/pci-bridge.h>

/*
 * The hypervisor maps the entirety of CPA-space as bus addresses, so
 * bus addresses are physical addresses.  The networking and block
 * device layers use this boolean for bounce buffer decisions.
 */
#define PCI_DMA_BUS_IS_PHYS 1

struct pci_controller *pci_bus_to_hose(int bus);
unsigned char __init common_swizzle(struct pci_dev *dev, unsigned char *pinp);
int __init tile_pci_init(void);
void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
void __devinit pcibios_fixup_bus(struct pci_bus *bus);

int __devinit _tile_cfg_read(struct pci_controller *hose,
                             int bus,
                             int slot,
                             int function,
                             int offset,
                             int size,
                             u32 *val);
int __devinit _tile_cfg_write(struct pci_controller *hose,
                              int bus,
                              int slot,
                              int function,
                              int offset,
                              int size,
                              u32 val);

/*
 * These are used to do config reads and writes in the early stages of
 * setup before the driver infrastructure has been set up enough to be
 * able to do config reads and writes.
 */
#define early_cfg_read(where, size, value) \
        _tile_cfg_read(controller, \
                       current_bus, \
                       pci_slot, \
                       pci_fn, \
                       where, \
                       size, \
                       value)

#define early_cfg_write(where, size, value) \
        _tile_cfg_write(controller, \
                        current_bus, \
                        pci_slot, \
                        pci_fn, \
                        where, \
                        size, \
                        value)


#define PCICFG_BYTE 1
#define PCICFG_WORD 2
#define PCICFG_DWORD 4

#define TILE_NUM_PCIE 2

#define pci_domain_nr(bus) (((struct pci_controller *)(bus)->sysdata)->index)

/*
 * This decides whether to display the domain number in /proc.
 */
static inline int pci_proc_domain(struct pci_bus *bus)
{
        return 1;
}

/*
 * I/O space is currently not supported.
 */

#define TILE_PCIE_LOWER_IO 0x0
#define TILE_PCIE_UPPER_IO 0x10000
#define TILE_PCIE_PCIE_IO_SIZE 0x0000FFFF

#define _PAGE_NO_CACHE 0
#define _PAGE_GUARDED 0


#define pcibios_assign_all_busses() pci_assign_all_buses
extern int pci_assign_all_buses;

static inline void pcibios_set_master(struct pci_dev *dev)
{
        /* No special bus mastering setup handling */
}

#define PCIBIOS_MIN_MEM 0
#define PCIBIOS_MIN_IO TILE_PCIE_LOWER_IO

/*
 * This flag is set if the platform is TILEmpower, which needs
 * special configuration for the PLX switch chip.
 */
extern int blade_pci;

/* implement the pci_ DMA API in terms of the generic device dma_ one */
#include <asm-generic/pci-dma-compat.h>

/* generic pci stuff */
#include <asm-generic/pci.h>

/* Use any cpu for PCI. */
#define cpumask_of_pcibus(bus) cpu_online_mask

#endif /* _ASM_TILE_PCI_H */

@@ -0,0 +1,24 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PERCPU_H
#define _ASM_TILE_PERCPU_H

register unsigned long __my_cpu_offset __asm__("tp");
#define __my_cpu_offset __my_cpu_offset
#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp))

#include <asm-generic/percpu.h>

#endif /* _ASM_TILE_PERCPU_H */

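The header above keeps the current CPU's per-cpu offset in the dedicated "tp" register and lets <asm-generic/percpu.h> build the usual accessors on top of it. The sketch below is an illustration only, with invented names and a plain array standing in for real per-cpu storage, of the base-plus-offset addressing those generic macros rely on.

#include <stdio.h>

enum { NR_CPUS = 4 };

static long counter[NR_CPUS];                    /* one slot per CPU */

static long *per_cpu_ptr_counter(int cpu)
{
        /* "offset" plays the role of the value kept in the tp register */
        unsigned long offset = cpu * sizeof(long);
        return (long *)((char *)counter + offset);
}

int main(void)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                *per_cpu_ptr_counter(cpu) = cpu * 10;
        printf("cpu 3 -> %ld\n", *per_cpu_ptr_counter(3));
        return 0;
}
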
@@ -0,0 +1,119 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PGALLOC_H
#define _ASM_TILE_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
#include <hv/hypervisor.h>

/* Bits for the size of the second-level page table. */
#define L2_KERNEL_PGTABLE_SHIFT \
        (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL + HV_LOG2_PTE_SIZE)

/* We currently allocate user L2 page tables by page (unlike kernel L2s). */
#if L2_KERNEL_PGTABLE_SHIFT < HV_LOG2_PAGE_SIZE_SMALL
#define L2_USER_PGTABLE_SHIFT HV_LOG2_PAGE_SIZE_SMALL
#else
#define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT
#endif

/* How many pages do we need, as an "order", for a user L2 page table? */
#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - HV_LOG2_PAGE_SIZE_SMALL)

/* How big is a kernel L2 page table? */
#define L2_KERNEL_PGTABLE_SIZE (1 << L2_KERNEL_PGTABLE_SHIFT)

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_64BIT
        set_pte_order(pmdp, pmd, L2_USER_PGTABLE_ORDER);
#else
        set_pte_order(&pmdp->pud.pgd, pmd.pud.pgd, L2_USER_PGTABLE_ORDER);
#endif
}

static inline void pmd_populate_kernel(struct mm_struct *mm,
                                       pmd_t *pmd, pte_t *ptep)
{
        set_pmd(pmd, ptfn_pmd(__pa(ptep) >> HV_LOG2_PAGE_TABLE_ALIGN,
                              __pgprot(_PAGE_PRESENT)));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
                                pgtable_t page)
{
        set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(page_to_pfn(page)),
                              __pgprot(_PAGE_PRESENT)));
}

/*
 * Allocate and free page tables.
 */

extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
extern void pte_free(struct mm_struct *mm, struct page *pte);

#define pmd_pgtable(pmd) pmd_page(pmd)

static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        return pfn_to_kaddr(page_to_pfn(pte_alloc_one(mm, address)));
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
        BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
        pte_free(mm, virt_to_page(pte));
}

extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
                           unsigned long address);

#define check_pgt_cache() do { } while (0)

/*
 * Get the small-page pte_t lowmem entry for a given pfn.
 * This may or may not be in use, depending on whether the initial
 * huge-page entry for the page has already been shattered.
 */
pte_t *get_prealloc_pte(unsigned long pfn);

/* During init, we can shatter kernel huge pages if needed. */
void shatter_pmd(pmd_t *pmd);

#ifdef __tilegx__
/* We share a single page allocator for both L1 and L2 page tables. */
#if HV_L1_SIZE != HV_L2_SIZE
# error Rework assumption that L1 and L2 page tables are same size.
#endif
#define L1_USER_PGTABLE_ORDER L2_USER_PGTABLE_ORDER
#define pud_populate(mm, pud, pmd) \
        pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd))
#define pmd_alloc_one(mm, addr) \
        ((pmd_t *)page_to_virt(pte_alloc_one((mm), (addr))))
#define pmd_free(mm, pmdp) \
        pte_free((mm), virt_to_page(pmdp))
#define __pmd_free_tlb(tlb, pmdp, address) \
        __pte_free_tlb((tlb), virt_to_page(pmdp), (address))
#endif

#endif /* _ASM_TILE_PGALLOC_H */

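As a cross-check of the shift arithmetic above (illustration only; it assumes HV_LOG2_PAGE_SIZE_SMALL is 16, HV_LOG2_PAGE_SIZE_LARGE is 24, and an 8-byte PTE, consistent with the <asm/page.h> earlier in this series), a kernel L2 page table works out to 2 KB, which matches the "2KB vs 64KB" remark in page.h.

#include <stdio.h>

int main(void)
{
        int shift = 24 - 16 + 3;            /* LARGE - SMALL + log2(PTE size) */
        unsigned long l2_kernel_pgtable_size = 1UL << shift;

        /* prints 2048, i.e. a 2 KB kernel L2 page table */
        printf("kernel L2 page table = %lu bytes\n", l2_kernel_pgtable_size);
        return 0;
}
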
@ -0,0 +1,480 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* This file contains the functions and defines necessary to modify and use
|
||||
* the TILE page table tree.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_PGTABLE_H
|
||||
#define _ASM_TILE_PGTABLE_H
|
||||
|
||||
#include <hv/hypervisor.h>
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/threads.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/fixmap.h>
|
||||
#include <asm/system.h>
|
||||
|
||||
struct mm_struct;
|
||||
struct vm_area_struct;
|
||||
|
||||
/*
|
||||
* ZERO_PAGE is a global shared page that is always zero: used
|
||||
* for zero-mapped memory areas etc..
|
||||
*/
|
||||
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
|
||||
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
|
||||
|
||||
extern pgd_t swapper_pg_dir[];
|
||||
extern pgprot_t swapper_pgprot;
|
||||
extern struct kmem_cache *pgd_cache;
|
||||
extern spinlock_t pgd_lock;
|
||||
extern struct list_head pgd_list;
|
||||
|
||||
/*
|
||||
* The very last slots in the pgd_t are for addresses unusable by Linux
|
||||
* (pgd_addr_invalid() returns true). So we use them for the list structure.
|
||||
* The x86 code we are modelled on uses the page->private/index fields
|
||||
* (older 2.6 kernels) or the lru list (newer 2.6 kernels), but since
|
||||
* our pgds are so much smaller than a page, it seems a waste to
|
||||
* spend a whole page on each pgd.
|
||||
*/
|
||||
#define PGD_LIST_OFFSET \
|
||||
((PTRS_PER_PGD * sizeof(pgd_t)) - sizeof(struct list_head))
|
||||
#define pgd_to_list(pgd) \
|
||||
((struct list_head *)((char *)(pgd) + PGD_LIST_OFFSET))
|
||||
#define list_to_pgd(list) \
|
||||
((pgd_t *)((char *)(list) - PGD_LIST_OFFSET))

extern void pgtable_cache_init(void);
extern void paging_init(void);
extern void set_page_homes(void);

#define FIRST_USER_ADDRESS      0

#define _PAGE_PRESENT           HV_PTE_PRESENT
#define _PAGE_HUGE_PAGE         HV_PTE_PAGE
#define _PAGE_READABLE          HV_PTE_READABLE
#define _PAGE_WRITABLE          HV_PTE_WRITABLE
#define _PAGE_EXECUTABLE        HV_PTE_EXECUTABLE
#define _PAGE_ACCESSED          HV_PTE_ACCESSED
#define _PAGE_DIRTY             HV_PTE_DIRTY
#define _PAGE_GLOBAL            HV_PTE_GLOBAL
#define _PAGE_USER              HV_PTE_USER

/*
 * All the "standard" bits.  Cache-control bits are managed elsewhere.
 * This is used to test for valid level-2 page table pointers by checking
 * all the bits, and to mask away the cache control bits for mprotect.
 */
#define _PAGE_ALL (\
        _PAGE_PRESENT | \
        _PAGE_HUGE_PAGE | \
        _PAGE_READABLE | \
        _PAGE_WRITABLE | \
        _PAGE_EXECUTABLE | \
        _PAGE_ACCESSED | \
        _PAGE_DIRTY | \
        _PAGE_GLOBAL | \
        _PAGE_USER \
)

#define PAGE_NONE \
        __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_SHARED \
        __pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \
                 _PAGE_USER | _PAGE_ACCESSED)

#define PAGE_SHARED_EXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \
                 _PAGE_EXECUTABLE | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE)
#define PAGE_COPY_EXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \
                 _PAGE_READABLE | _PAGE_EXECUTABLE)
#define PAGE_COPY \
        PAGE_COPY_NOEXEC
#define PAGE_READONLY \
        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE)
#define PAGE_READONLY_EXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \
                 _PAGE_READABLE | _PAGE_EXECUTABLE)

#define _PAGE_KERNEL_RO \
        (_PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_READABLE | _PAGE_ACCESSED)
#define _PAGE_KERNEL \
        (_PAGE_KERNEL_RO | _PAGE_WRITABLE | _PAGE_DIRTY)
#define _PAGE_KERNEL_EXEC       (_PAGE_KERNEL_RO | _PAGE_EXECUTABLE)

#define PAGE_KERNEL             __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO          __pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC        __pgprot(_PAGE_KERNEL_EXEC)

#define page_to_kpgprot(p) PAGE_KERNEL

/*
 * We could tighten these up, but for now writable or executable
 * implies readable.
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY      /* this is write-only, which we won't support */
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY_EXEC
#define __P101  PAGE_READONLY_EXEC
#define __P110  PAGE_COPY_EXEC
#define __P111  PAGE_COPY_EXEC

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY_EXEC
#define __S101  PAGE_READONLY_EXEC
#define __S110  PAGE_SHARED_EXEC
#define __S111  PAGE_SHARED_EXEC
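/*
 * Editor's sketch, not part of the original patch: the __P/__S tables are
 * indexed by the low read/write/exec permission bits, with the __S table
 * used for shared mappings.  The helper below is illustrative only; the
 * real lookup happens via protection_map[] in the generic mm code.
 */
static inline pgprot_t example_prot(int shared, int read, int write, int exec)
{
        const pgprot_t priv[8] = { __P000, __P001, __P010, __P011,
                                   __P100, __P101, __P110, __P111 };
        const pgprot_t shrd[8] = { __S000, __S001, __S010, __S011,
                                   __S100, __S101, __S110, __S111 };
        int idx = (read ? 1 : 0) | (write ? 2 : 0) | (exec ? 4 : 0);

        return shared ? shrd[idx] : priv[idx];
}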

/*
 * All the normal _PAGE_ALL bits are ignored for PMDs, except PAGE_PRESENT
 * and PAGE_HUGE_PAGE, which must be one and zero, respectively.
 * We set the ignored bits to zero.
 */
#define _PAGE_TABLE     _PAGE_PRESENT

/* Inherit the caching flags from the old protection bits. */
#define pgprot_modify(oldprot, newprot) \
        (pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val }

/* Just setting the PFN to zero suffices. */
#define pte_pgprot(x) hv_pte_set_pfn((x), 0)

/*
 * For PTEs and PDEs, we must clear the Present bit first when
 * clearing a page table entry, so clear the bottom half first and
 * enforce ordering with a barrier.
 */
static inline void __pte_clear(pte_t *ptep)
{
#ifdef __tilegx__
        ptep->val = 0;
#else
        u32 *tmp = (u32 *)ptep;
        tmp[0] = 0;
        barrier();
        tmp[1] = 0;
#endif
}
#define pte_clear(mm, addr, ptep) __pte_clear(ptep)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_present hv_pte_get_present
#define pte_user hv_pte_get_user
#define pte_read hv_pte_get_readable
#define pte_dirty hv_pte_get_dirty
#define pte_young hv_pte_get_accessed
#define pte_write hv_pte_get_writable
#define pte_exec hv_pte_get_executable
#define pte_huge hv_pte_get_page
#define pte_rdprotect hv_pte_clear_readable
#define pte_exprotect hv_pte_clear_executable
#define pte_mkclean hv_pte_clear_dirty
#define pte_mkold hv_pte_clear_accessed
#define pte_wrprotect hv_pte_clear_writable
#define pte_mksmall hv_pte_clear_page
#define pte_mkread hv_pte_set_readable
#define pte_mkexec hv_pte_set_executable
#define pte_mkdirty hv_pte_set_dirty
#define pte_mkyoung hv_pte_set_accessed
#define pte_mkwrite hv_pte_set_writable
#define pte_mkhuge hv_pte_set_page

#define pte_special(pte) 0
#define pte_mkspecial(pte) (pte)
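/*
 * Editor's sketch, not part of the original patch: these wrappers are thin
 * aliases for the hypervisor's hv_pte accessors, and each returns a
 * modified copy of the pte rather than writing anything back, so they
 * compose functionally.
 */
static inline pte_t example_mkwritable(pte_t pte)
{
        pte = pte_mkyoung(pte);         /* set ACCESSED */
        pte = pte_mkdirty(pte);         /* set DIRTY */
        pte = pte_mkwrite(pte);         /* set WRITABLE */
        return pte;                     /* caller still has to install it */
}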

/*
 * Use some spare bits in the PTE for user-caching tags.
 */
#define pte_set_forcecache hv_pte_set_client0
#define pte_get_forcecache hv_pte_get_client0
#define pte_clear_forcecache hv_pte_clear_client0
#define pte_set_anyhome hv_pte_set_client1
#define pte_get_anyhome hv_pte_get_client1
#define pte_clear_anyhome hv_pte_clear_client1

/*
 * A migrating PTE has PAGE_PRESENT clear but all the other bits preserved.
 */
#define pte_migrating hv_pte_get_migrating
#define pte_mkmigrate(x) hv_pte_set_migrating(hv_pte_clear_present(x))
#define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x))

#define pte_ERROR(e) \
        pr_err("%s:%d: bad pte 0x%016llx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * set_pte_order() sets the given PTE and also sanity-checks the
 * requested PTE against the page homecaching.  Unspecified parts
 * of the PTE are filled in when it is written to memory, i.e. all
 * caching attributes if "!forcecache", or the home cpu if "anyhome".
 */
extern void set_pte_order(pte_t *ptep, pte_t pte, int order);

#define set_pte(ptep, pteval) set_pte_order(ptep, pteval, 0)
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval)
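/*
 * Editor's sketch, not part of the original patch: a page that is being
 * migrated is temporarily made non-present while keeping its other
 * attributes, so a fault on it can be recognized as "migrating" rather
 * than "swapped out".  The real migration path also has to flush TLBs
 * and wait for other cpus; this only shows the PTE transitions.
 */
static inline void example_migrate_pte(pte_t *ptep)
{
        pte_t pte = *ptep;

        set_pte(ptep, pte_mkmigrate(pte));      /* faults now see "migrating" */
        /* ... copy the page to its new home here ... */
        set_pte(ptep, pte_donemigrate(pte));    /* present again */
}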

#define pte_page(x)             pfn_to_page(pte_pfn(x))

static inline int pte_none(pte_t pte)
{
        return !pte.val;
}

static inline unsigned long pte_pfn(pte_t pte)
{
        return hv_pte_get_pfn(pte);
}

/* Set or get the remote cache cpu in a pgprot with remote caching. */
extern pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu);
extern int get_remote_cache_cpu(pgprot_t prot);

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
        return hv_pte_set_pfn(prot, pfn);
}

/* Support for priority mappings. */
extern void start_mm_caching(struct mm_struct *mm);
extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next);

/*
 * Support non-linear file mappings (see sys_remap_file_pages).
 * This is defined by CLIENT1 set but CLIENT0 and _PAGE_PRESENT clear, and the
 * file offset in the 32 high bits.
 */
#define _PAGE_FILE        HV_PTE_CLIENT1
#define PTE_FILE_MAX_BITS 32
#define pte_file(pte)     (hv_pte_get_client1(pte) && !hv_pte_get_client0(pte))
#define pte_to_pgoff(pte) ((pte).val >> 32)
#define pgoff_to_pte(off) ((pte_t) { (((long long)(off)) << 32) | _PAGE_FILE })

/*
 * Encode and de-code a swap entry (see <linux/swapops.h>).
 * We put the swap file type+offset in the 32 high bits;
 * I believe we can just leave the low bits clear.
 */
#define __swp_type(swp)         ((swp).val & 0x1f)
#define __swp_offset(swp)       ((swp).val >> 5)
#define __swp_entry(type, off)  ((swp_entry_t) { (type) | ((off) << 5) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).val >> 32 })
#define __swp_entry_to_pte(swp) ((pte_t) { (((long long) ((swp).val)) << 32) })
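/*
 * Editor's sketch, not part of the original patch: a (type, offset) pair
 * survives the round trip through the high 32 bits of the pte.  The type
 * occupies the low 5 bits of the swap value and the offset sits above it;
 * swp_entry_t comes from <linux/swap.h>.
 */
static inline int example_swp_roundtrip(void)
{
        swp_entry_t in = __swp_entry(3, 0x1234);        /* type 3, offset 0x1234 */
        pte_t pte = __swp_entry_to_pte(in);
        swp_entry_t out = __pte_to_swp_entry(pte);

        return __swp_type(out) == 3 && __swp_offset(out) == 0x1234;
}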

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
        memcpy(dst, src, count * sizeof(pgd_t));
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), (pgprot))

/*
 * If we are doing an mprotect(), just accept the new vma->vm_page_prot
 * value and combine it with the PFN from the old PTE to get a new PTE.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return pfn_pte(hv_pte_get_pfn(pte), newprot);
}

/*
 * The pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * This macro returns the index of the entry in the pgd page which would
 * control the given virtual address.
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's.
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/*
 * A shortcut which implies the use of the kernel's pgd, instead
 * of a process's.
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#if defined(CONFIG_HIGHPTE)
extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type);
#define pte_offset_map(dir, address) \
        _pte_offset_map(dir, address, KM_PTE0)
#define pte_offset_map_nested(dir, address) \
        _pte_offset_map(dir, address, KM_PTE1)
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
#else
#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#endif

/* Clear a non-executable kernel PTE and flush it from the TLB. */
#define kpte_clear_flush(ptep, vaddr)           \
do {                                            \
        pte_clear(&init_mm, (vaddr), (ptep));   \
        local_flush_tlb_page(FLUSH_NONEXEC, (vaddr), PAGE_SIZE); \
} while (0)

/*
 * The kernel page tables contain what we need, and we flush when we
 * change specific page table entries.
 */
#define update_mmu_cache(vma, address, pte) do { } while (0)

#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)   (1)
#endif /* CONFIG_FLATMEM */

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)         \
        remap_pfn_range(vma, vaddr, pfn, size, prot)

extern void vmalloc_sync_all(void);

#endif /* !__ASSEMBLY__ */

#ifdef __tilegx__
#include <asm/pgtable_64.h>
#else
#include <asm/pgtable_32.h>
#endif

#ifndef __ASSEMBLY__

static inline int pmd_none(pmd_t pmd)
{
        /*
         * Only check low word on 32-bit platforms, since it might be
         * out of sync with upper half.
         */
        return (unsigned long)pmd_val(pmd) == 0;
}

static inline int pmd_present(pmd_t pmd)
{
        return pmd_val(pmd) & _PAGE_PRESENT;
}

static inline int pmd_bad(pmd_t pmd)
{
        return ((pmd_val(pmd) & _PAGE_ALL) != _PAGE_TABLE);
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
        return npg >> (20 - PAGE_SHIFT);
}

/*
 * The pmd can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * This function returns the index of the entry in the pmd which would
 * control the given virtual address.
 */
static inline unsigned long pmd_index(unsigned long address)
{
        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * A given kernel pmd_t maps to a specific virtual address (either a
 * kernel huge page or a kernel pte_t table).  Since kernel pte_t
 * tables can be aligned at sub-page granularity, this function can
 * return non-page-aligned pointers, despite its name.
 */
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        phys_addr_t pa =
                (phys_addr_t)pmd_ptfn(pmd) << HV_LOG2_PAGE_TABLE_ALIGN;
        return (unsigned long)__va(pa);
}

/*
 * A pmd_t points to the base of a huge page or to a pte_t array.
 * If a pte_t array, since we can have multiple per page, we don't
 * have a one-to-one mapping of pmd_t's to pages.  However, this is
 * OK for pte_lockptr(), since we just end up with potentially one
 * lock being used for several pte_t arrays.
 */
#define pmd_page(pmd) pfn_to_page(HV_PTFN_TO_PFN(pmd_ptfn(pmd)))

/*
 * The pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * This function returns the index of the entry in the pte page which would
 * control the given virtual address.
 */
static inline unsigned long pte_index(unsigned long address)
{
        return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
        return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_huge_page(pmd_t pmd)
{
        return pmd_val(pmd) & _PAGE_HUGE_PAGE;
}
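/*
 * Editor's sketch, not part of the original patch: a minimal software walk
 * of the page table for a kernel address using the helpers above, relying
 * on the pud/pmd folding helpers pulled in from asm-generic.  Huge-page
 * and error handling is deliberately omitted.
 */
static inline pte_t *example_lookup_kernel_pte(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pmd_t *pmd = pmd_offset(pud_offset(pgd, address), address);

        if (pmd_none(*pmd) || pmd_huge_page(*pmd))
                return NULL;
        return pte_offset_kernel(pmd, address);
}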

#include <asm-generic/pgtable.h>

/* Support /proc/NN/pgtable API. */
struct seq_file;
int arch_proc_pgtable_show(struct seq_file *m, struct mm_struct *mm,
                           unsigned long vaddr, pte_t *ptep, void **datap);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_PGTABLE_H */
@ -0,0 +1,129 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 */

#ifndef _ASM_TILE_PGTABLE_32_H
#define _ASM_TILE_PGTABLE_32_H

/*
 * The level-1 index is defined by the huge page size.  A PGD is composed
 * of PTRS_PER_PGD pgd_t's and is the top level of the page table.
 */
#define PGDIR_SHIFT     HV_LOG2_PAGE_SIZE_LARGE
#define PGDIR_SIZE      HV_PAGE_SIZE_LARGE
#define PGDIR_MASK      (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD    (1 << (32 - PGDIR_SHIFT))

/*
 * The level-2 index is defined by the difference between the huge
 * page size and the normal page size.  A PTE is composed of
 * PTRS_PER_PTE pte_t's and is the bottom level of the page table.
 * Note that the hypervisor docs use PTE for what we call pte_t, so
 * this nomenclature is somewhat confusing.
 */
#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
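/*
 * Editor's note, not part of the original patch: illustrative arithmetic
 * only, since the real constants come from <hv/hypervisor.h>.  Assuming
 * HV_LOG2_PAGE_SIZE_SMALL == 16 (64 KB pages) and
 * HV_LOG2_PAGE_SIZE_LARGE == 24 (16 MB huge pages):
 *
 *   PGDIR_SHIFT  = 24               -> each level-1 entry maps 16 MB
 *   PTRS_PER_PGD = 1 << (32 - 24)   =  256 level-1 entries
 *   PTRS_PER_PTE = 1 << (24 - 16)   =  256 entries per level-2 table
 *
 * giving 256 * 256 * 64 KB = 4 GB of addressable space.
 */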

#ifndef __ASSEMBLY__

/*
 * Right now we initialize only a single pte table.  It can be extended
 * easily; subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 *
 * HOWEVER, if we are using an allocation scheme with slop after the
 * end of the page table (e.g. where our L2 page tables are 2KB but
 * our pages are 64KB and we are allocating via the page allocator)
 * we can't extend it easily.
 */
#define LAST_PKMAP PTRS_PER_PTE

#define PKMAP_BASE   ((FIXADDR_BOOT_START - PAGE_SIZE*LAST_PKMAP) & PGDIR_MASK)

#ifdef CONFIG_HIGHMEM
# define __VMAPPING_END (PKMAP_BASE & ~(HPAGE_SIZE-1))
#else
# define __VMAPPING_END (FIXADDR_START & ~(HPAGE_SIZE-1))
#endif

#ifdef CONFIG_HUGEVMAP
#define HUGE_VMAP_END   __VMAPPING_END
#define HUGE_VMAP_BASE  (HUGE_VMAP_END - CONFIG_NR_HUGE_VMAPS * HPAGE_SIZE)
#define _VMALLOC_END    HUGE_VMAP_BASE
#else
#define _VMALLOC_END    __VMAPPING_END
#endif

/*
 * Align the vmalloc area to an L2 page table, and leave a guard page
 * at the beginning and end.  The vmalloc code also puts in an internal
 * guard page between each allocation.
 */
#define VMALLOC_END     (_VMALLOC_END - PAGE_SIZE)
extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */;
#define _VMALLOC_START  (_VMALLOC_END - VMALLOC_RESERVE)
#define VMALLOC_START   (_VMALLOC_START + PAGE_SIZE)

/* This is the maximum possible amount of lowmem. */
#define MAXMEM          (_VMALLOC_START - PAGE_OFFSET)

/* We have no pmd or pud since we are strictly a two-level page table */
#include <asm-generic/pgtable-nopmd.h>

/* We don't define any pgds for these addresses. */
static inline int pgd_addr_invalid(unsigned long addr)
{
        return addr >= MEM_HV_INTRPT;
}

/*
 * Provide versions of these routines that can be used safely when
 * the hypervisor may be asynchronously modifying dirty/accessed bits.
 * ptep_get_and_clear() matches the generic one but we provide it to
 * be parallel with the 64-bit code.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR

extern int ptep_test_and_clear_young(struct vm_area_struct *,
                                     unsigned long addr, pte_t *);
extern void ptep_set_wrprotect(struct mm_struct *,
                               unsigned long addr, pte_t *);

static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                                       unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;
        pte_clear(mm, addr, ptep);
        return pte;
}

/* Create a pmd from a PTFN. */
static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
{
        return (pmd_t){ { hv_pte_set_ptfn(prot, ptfn) } };
}

/* Return the page-table frame number (ptfn) that a pmd_t points at. */
#define pmd_ptfn(pmd) hv_pte_get_ptfn((pmd).pud.pgd)

static inline void pmd_clear(pmd_t *pmdp)
{
        __pte_clear(&pmdp->pud.pgd);
}

#endif /* __ASSEMBLY__ */

#endif /* _ASM_TILE_PGTABLE_32_H */
@ -0,0 +1 @@
#include <asm-generic/poll.h>
@ -0,0 +1 @@
#include <asm-generic/posix_types.h>
@ -0,0 +1,338 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PROCESSOR_H
#define _ASM_TILE_PROCESSOR_H

#ifndef __ASSEMBLY__

/*
 * NOTE: we don't include <linux/ptrace.h> or <linux/percpu.h> as one
 * normally would, due to #include dependencies.
 */
#include <linux/types.h>
#include <asm/ptrace.h>
#include <asm/percpu.h>

#include <arch/chip.h>
#include <arch/spr_def.h>

struct task_struct;
struct thread_struct;

typedef struct {
        unsigned long seg;
} mm_segment_t;

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
void *current_text_addr(void);

#if CHIP_HAS_TILE_DMA()
/* Capture the state of a suspended DMA. */
struct tile_dma_state {
        int enabled;
        unsigned long src;
        unsigned long dest;
        unsigned long strides;
        unsigned long chunk_size;
        unsigned long src_chunk;
        unsigned long dest_chunk;
        unsigned long byte;
        unsigned long status;
};

/*
 * A mask of the DMA status register for selecting only the 'running'
 * and 'done' bits.
 */
#define DMA_STATUS_MASK \
        (SPR_DMA_STATUS__RUNNING_MASK | SPR_DMA_STATUS__DONE_MASK)
#endif

/*
 * Track asynchronous TLB events (faults and access violations)
 * that occur while we are in kernel mode from DMA or the SN processor.
 */
struct async_tlb {
        short fault_num;         /* original fault number; 0 if none */
        char is_fault;           /* was it a fault (vs an access violation) */
        char is_write;           /* for fault: was it caused by a write? */
        unsigned long address;   /* what address faulted? */
};

#ifdef CONFIG_HARDWALL
struct hardwall_info;
#endif

struct thread_struct {
        /* kernel stack pointer */
        unsigned long ksp;
        /* kernel PC */
        unsigned long pc;
        /* starting user stack pointer (for page migration) */
        unsigned long usp0;
        /* pid of process that created this one */
        pid_t creator_pid;
#if CHIP_HAS_TILE_DMA()
        /* DMA info for suspended threads (byte == 0 means no DMA state) */
        struct tile_dma_state tile_dma_state;
#endif
        /* User EX_CONTEXT registers */
        unsigned long ex_context[2];
        /* User SYSTEM_SAVE registers */
        unsigned long system_save[4];
        /* User interrupt mask */
        unsigned long long interrupt_mask;
        /* User interrupt-control 0 state */
        unsigned long intctrl_0;
#if CHIP_HAS_PROC_STATUS_SPR()
        /* Any other miscellaneous processor state bits */
        unsigned long proc_status;
#endif
#ifdef CONFIG_HARDWALL
        /* Is this task tied to an activated hardwall? */
        struct hardwall_info *hardwall;
        /* Chains this task into the list at hardwall->list. */
        struct list_head hardwall_list;
#endif
#if CHIP_HAS_TILE_DMA()
        /* Async DMA TLB fault information */
        struct async_tlb dma_async_tlb;
#endif
#if CHIP_HAS_SN_PROC()
        /* Was the static network processor running when we were switched out? */
        int sn_proc_running;
        /* Async SNI TLB fault information */
        struct async_tlb sn_async_tlb;
#endif
};

#endif /* !__ASSEMBLY__ */

/*
 * Start with "sp" this many bytes below the top of the kernel stack.
 * This preserves the invariant that a called function may write to *sp.
 */
#define STACK_TOP_DELTA 8

/*
 * When entering the kernel via a fault, start with the top of the
 * pt_regs structure this many bytes below the top of the page.
 * This aligns the pt_regs structure optimally for cache-line access.
 */
#ifdef __tilegx__
#define KSTK_PTREGS_GAP  48
#else
#define KSTK_PTREGS_GAP  56
#endif

#ifndef __ASSEMBLY__

#ifdef __tilegx__
#define TASK_SIZE_MAX           (MEM_LOW_END + 1)
#else
#define TASK_SIZE_MAX           PAGE_OFFSET
#endif

/* TASK_SIZE and related variables are always checked in "current" context. */
#ifdef CONFIG_COMPAT
#define COMPAT_TASK_SIZE        (1UL << 31)
#define TASK_SIZE               ((current_thread_info()->status & TS_COMPAT) ?\
                                 COMPAT_TASK_SIZE : TASK_SIZE_MAX)
#else
#define TASK_SIZE               TASK_SIZE_MAX
#endif

/* We provide a minimal "vdso" a la x86; just the sigreturn code for now. */
#define VDSO_BASE       (TASK_SIZE - PAGE_SIZE)

#define STACK_TOP       VDSO_BASE

/* STACK_TOP_MAX is used temporarily in execve and should not check COMPAT. */
#define STACK_TOP_MAX   TASK_SIZE_MAX

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's, if it is using bottom-up mapping.
 */
#define TASK_UNMAPPED_BASE      (PAGE_ALIGN(TASK_SIZE / 3))

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#define INIT_THREAD {                                                   \
        .ksp = (unsigned long)init_stack + THREAD_SIZE - STACK_TOP_DELTA, \
        .interrupt_mask = -1ULL                                         \
}

/* Kernel stack top for the task that first boots on this cpu. */
DECLARE_PER_CPU(unsigned long, boot_sp);

/* PC to boot from on this cpu. */
DECLARE_PER_CPU(unsigned long, boot_pc);

/* Do necessary setup to start up a newly executed thread. */
static inline void start_thread(struct pt_regs *regs,
                                unsigned long pc, unsigned long usp)
{
        regs->pc = pc;
        regs->sp = usp;
}

/* Free all resources held by a thread. */
static inline void release_thread(struct task_struct *dead_task)
{
        /* Nothing for now */
}

/* Prepare to copy thread state - unlazy all lazy status. */
#define prepare_to_copy(tsk)    do { } while (0)

extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);


/*
 * Return saved (kernel) PC of a blocked thread.
 * Only used in a printk() in kernel/sched.c, so don't work too hard.
 */
#define thread_saved_pc(t)   ((t)->thread.pc)

unsigned long get_wchan(struct task_struct *p);

/* Return initial ksp value for given task. */
#define task_ksp0(task) ((unsigned long)(task)->stack + THREAD_SIZE)

/* Return some info about the user process TASK. */
#define KSTK_TOP(task)  (task_ksp0(task) - STACK_TOP_DELTA)
#define task_pt_regs(task) \
        ((struct pt_regs *)(task_ksp0(task) - KSTK_PTREGS_GAP) - 1)
#define task_sp(task)   (task_pt_regs(task)->sp)
#define task_pc(task)   (task_pt_regs(task)->pc)
/* Aliases for pc and sp (used in fs/proc/array.c) */
#define KSTK_EIP(task)  task_pc(task)
#define KSTK_ESP(task)  task_sp(task)
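/*
 * Editor's sketch, not part of the original patch: how the macros above
 * carve up the top of a task's kernel stack:
 *
 *   task_ksp0(task)                     top of the kernel stack
 *   task_ksp0(task) - STACK_TOP_DELTA   KSTK_TOP(): initial "sp"
 *   task_ksp0(task) - KSTK_PTREGS_GAP   end of the saved pt_regs frame
 *   task_pt_regs(task)                  start of the saved pt_regs frame
 *
 * In a C file that includes <linux/sched.h>, reading the saved user stack
 * pointer of a stopped task is then simply:
 */
static inline unsigned long example_user_sp(struct task_struct *task)
{
        return task_sp(task);           /* == task_pt_regs(task)->sp */
}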

/* Standard format for printing registers and other word-size data. */
#ifdef __tilegx__
# define REGFMT "0x%016lx"
#else
# define REGFMT "0x%08lx"
#endif

/*
 * Do some slow action (e.g. read a slow SPR).
 * Note that this must also have compiler-barrier semantics since
 * it may be used in a busy loop reading memory.
 */
static inline void cpu_relax(void)
{
        __insn_mfspr(SPR_PASS);
        barrier();
}
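/*
 * Editor's sketch, not part of the original patch: the barrier() inside
 * cpu_relax() is what forces a polling loop like this to re-read the flag
 * from memory on every iteration instead of hoisting the load.
 */
static inline void example_spin_until_set(int *flag)
{
        while (!*flag)
                cpu_relax();
}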

struct siginfo;
extern void arch_coredump_signal(struct siginfo *, struct pt_regs *);
#define arch_coredump_signal arch_coredump_signal

/* Info on this processor (see fs/proc/cpuinfo.c) */
struct seq_operations;
extern const struct seq_operations cpuinfo_op;

/* Provide information about the chip model. */
extern char chip_model[64];

/* Data on which physical memory controller corresponds to which NUMA node. */
extern int node_controller[];


/* Do we dump information to the console when a user application crashes? */
extern int show_crashinfo;

#if CHIP_HAS_CBOX_HOME_MAP()
/* Does the heap allocator return hash-for-home pages by default? */
extern int hash_default;

/* Should kernel stack pages be hash-for-home? */
extern int kstack_hash;

/* Does MAP_ANONYMOUS return hash-for-home pages by default? */
#define uheap_hash hash_default

#else
#define hash_default 0
#define kstack_hash 0
#define uheap_hash 0
#endif

/* Are we using huge pages in the TLB for kernel data? */
extern int kdata_huge;

#define PREFETCH_STRIDE CHIP_L2_LINE_SIZE()

#else /* __ASSEMBLY__ */

/* Do some slow action (e.g. read a slow SPR). */
#define CPU_RELAX       mfspr zero, SPR_PASS

#endif /* !__ASSEMBLY__ */

/* Assembly code assumes that the PL is in the low bits. */
#if SPR_EX_CONTEXT_1_1__PL_SHIFT != 0
# error Fix assembly assumptions about PL
#endif

/* We sometimes use these macros for EX_CONTEXT_0_1 as well. */
#if SPR_EX_CONTEXT_1_1__PL_SHIFT != SPR_EX_CONTEXT_0_1__PL_SHIFT || \
    SPR_EX_CONTEXT_1_1__PL_RMASK != SPR_EX_CONTEXT_0_1__PL_RMASK || \
    SPR_EX_CONTEXT_1_1__ICS_SHIFT != SPR_EX_CONTEXT_0_1__ICS_SHIFT || \
    SPR_EX_CONTEXT_1_1__ICS_RMASK != SPR_EX_CONTEXT_0_1__ICS_RMASK
# error Fix assumptions that EX1 macros work for both PL0 and PL1
#endif

/* Allow pulling apart and recombining the PL and ICS bits in EX_CONTEXT. */
#define EX1_PL(ex1) \
        (((ex1) >> SPR_EX_CONTEXT_1_1__PL_SHIFT) & SPR_EX_CONTEXT_1_1__PL_RMASK)
#define EX1_ICS(ex1) \
        (((ex1) >> SPR_EX_CONTEXT_1_1__ICS_SHIFT) & SPR_EX_CONTEXT_1_1__ICS_RMASK)
#define PL_ICS_EX1(pl, ics) \
        (((pl) << SPR_EX_CONTEXT_1_1__PL_SHIFT) | \
         ((ics) << SPR_EX_CONTEXT_1_1__ICS_SHIFT))

/*
 * Provide symbolic constants for PLs.
 * Note that assembly code assumes that USER_PL is zero.
 */
#define USER_PL 0
#define KERNEL_PL 1
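/*
 * Editor's sketch, not part of the original patch: composing an
 * EX_CONTEXT_x_1 value for a return to user mode with the interrupt
 * critical section off, and pulling it apart again.
 */
static inline int example_ex1_roundtrip(void)
{
        unsigned long ex1 = PL_ICS_EX1(USER_PL, 0);

        return EX1_PL(ex1) == USER_PL && EX1_ICS(ex1) == 0;
}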

/* SYSTEM_SAVE_1_0 holds the current cpu number ORed with ksp0. */
#define CPU_LOG_MASK_VALUE 12
#define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1)
#if CONFIG_NR_CPUS > CPU_MASK_VALUE
# error Too many cpus!
#endif
#define raw_smp_processor_id() \
        ((int)__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & CPU_MASK_VALUE)
#define get_current_ksp0() \
        (__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & ~CPU_MASK_VALUE)
#define next_current_ksp0(task) ({ \
        unsigned long __ksp0 = task_ksp0(task); \
        int __cpu = raw_smp_processor_id(); \
        BUG_ON(__ksp0 & CPU_MASK_VALUE); \
        __ksp0 | __cpu; \
})
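/*
 * Editor's note, not part of the original patch: with CPU_LOG_MASK_VALUE
 * of 12, the low 12 bits of SPR_SYSTEM_SAVE_1_0 hold the cpu number and
 * the rest is ksp0; this works because ksp0 is THREAD_SIZE-aligned and so
 * has those bits clear (the BUG_ON above checks exactly that).  As an
 * illustrative example, cpu 5 with ksp0 == 0xfd008000 stores 0xfd008005,
 * and the two macros recover 5 and 0xfd008000 respectively.
 */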

#endif /* _ASM_TILE_PROCESSOR_H */
@ -0,0 +1,166 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PTRACE_H
#define _ASM_TILE_PTRACE_H

#include <arch/chip.h>
#include <arch/abi.h>

/* These must match struct pt_regs, below. */
#if CHIP_WORD_SIZE() == 32
#define PTREGS_OFFSET_REG(n)    ((n)*4)
#else
#define PTREGS_OFFSET_REG(n)    ((n)*8)
#endif
#define PTREGS_OFFSET_BASE      0
#define PTREGS_OFFSET_TP        PTREGS_OFFSET_REG(53)
#define PTREGS_OFFSET_SP        PTREGS_OFFSET_REG(54)
#define PTREGS_OFFSET_LR        PTREGS_OFFSET_REG(55)
#define PTREGS_NR_GPRS          56
#define PTREGS_OFFSET_PC        PTREGS_OFFSET_REG(56)
#define PTREGS_OFFSET_EX1       PTREGS_OFFSET_REG(57)
#define PTREGS_OFFSET_FAULTNUM  PTREGS_OFFSET_REG(58)
#define PTREGS_OFFSET_ORIG_R0   PTREGS_OFFSET_REG(59)
#define PTREGS_OFFSET_FLAGS     PTREGS_OFFSET_REG(60)
#if CHIP_HAS_CMPEXCH()
#define PTREGS_OFFSET_CMPEXCH   PTREGS_OFFSET_REG(61)
#endif
#define PTREGS_SIZE             PTREGS_OFFSET_REG(64)

#ifndef __ASSEMBLY__

#ifdef __KERNEL__
/* Benefit from consistent use of "long" on all chips. */
typedef unsigned long pt_reg_t;
#else
/* Provide appropriate length type to userspace regardless of -m32/-m64. */
typedef uint_reg_t pt_reg_t;
#endif

/*
 * This struct defines the way the registers are stored on the stack during a
 * system call/exception. It should be a multiple of 8 bytes to preserve
 * normal stack alignment rules.
 *
 * Must track <sys/ucontext.h> and <sys/procfs.h>
 */
struct pt_regs {
        /* Saved main processor registers; 56..63 are special. */
        /* tp, sp, and lr must immediately follow regs[] for aliasing. */
        pt_reg_t regs[53];
        pt_reg_t tp;            /* aliases regs[TREG_TP] */
        pt_reg_t sp;            /* aliases regs[TREG_SP] */
        pt_reg_t lr;            /* aliases regs[TREG_LR] */

        /* Saved special registers. */
        pt_reg_t pc;            /* stored in EX_CONTEXT_1_0 */
        pt_reg_t ex1;           /* stored in EX_CONTEXT_1_1 (PL and ICS bit) */
        pt_reg_t faultnum;      /* fault number (INT_SWINT_1 for syscall) */
        pt_reg_t orig_r0;       /* r0 at syscall entry, else zero */
        pt_reg_t flags;         /* flags (see below) */
#if !CHIP_HAS_CMPEXCH()
        pt_reg_t pad[3];
#else
        pt_reg_t cmpexch;       /* value of CMPEXCH_VALUE SPR at interrupt */
        pt_reg_t pad[2];
#endif
};
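/*
 * Editor's sketch, not part of the original patch: the PTREGS_OFFSET_*
 * constants are meant to mirror this layout so assembly can index the
 * saved frame.  A build-time cross-check along these lines could live in
 * a C file that includes <linux/kernel.h>:
 */
static inline void example_check_ptregs_layout(void)
{
        BUILD_BUG_ON(offsetof(struct pt_regs, sp) != PTREGS_OFFSET_SP);
        BUILD_BUG_ON(offsetof(struct pt_regs, pc) != PTREGS_OFFSET_PC);
        BUILD_BUG_ON(sizeof(struct pt_regs) != PTREGS_SIZE);
}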

#endif /* __ASSEMBLY__ */

/* Flag bits in pt_regs.flags */
#define PT_FLAGS_DISABLE_IRQ    1  /* on return to kernel, disable irqs */
#define PT_FLAGS_CALLER_SAVES   2  /* caller-save registers are valid */
#define PT_FLAGS_RESTORE_REGS   4  /* restore callee-save regs on return */

#define PTRACE_GETREGS          12
#define PTRACE_SETREGS          13
#define PTRACE_GETFPREGS        14
#define PTRACE_SETFPREGS        15

/* Support TILE-specific ptrace options, with events starting at 16. */
#define PTRACE_O_TRACEMIGRATE   0x00010000
#define PTRACE_EVENT_MIGRATE    16
#ifdef __KERNEL__
#define PTRACE_O_MASK_TILE      (PTRACE_O_TRACEMIGRATE)
#define PT_TRACE_MIGRATE        0x00080000
#define PT_TRACE_MASK_TILE      (PT_TRACE_MIGRATE)
#endif

#ifdef __KERNEL__

#ifndef __ASSEMBLY__

#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)

/* Does the process account for user or for system time? */
#define user_mode(regs) (EX1_PL((regs)->ex1) == USER_PL)

/* Fill in a struct pt_regs with the current kernel registers. */
struct pt_regs *get_pt_regs(struct pt_regs *);

/* Trace the current syscall. */
extern void do_syscall_trace(void);

extern void show_regs(struct pt_regs *);

#define arch_has_single_step()  (1)

/*
 * A structure for all single-stepper state.
 *
 * Also update defines in assembler section if it changes
 */
struct single_step_state {
        /* the page to which we will write hacked-up bundles */
        void __user *buffer;

        union {
                int flags;
                struct {
                        unsigned long is_enabled:1, update:1, update_reg:6;
                };
        };

        unsigned long orig_pc;          /* the original PC */
        unsigned long next_pc;          /* return PC if no branch (PC + 1) */
        unsigned long branch_next_pc;   /* return PC if we did branch/jump */
        unsigned long update_value;     /* value to restore to update_target */
};

/* Single-step the instruction at regs->pc */
extern void single_step_once(struct pt_regs *regs);

struct task_struct;

extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
                         int error_code);

#ifdef __tilegx__
/* We need this since sigval_t has a user pointer in it, for GETSIGINFO etc. */
#define __ARCH_WANT_COMPAT_SYS_PTRACE
#endif

#endif /* !__ASSEMBLY__ */

#define SINGLESTEP_STATE_MASK_IS_ENABLED      0x1
#define SINGLESTEP_STATE_MASK_UPDATE          0x2
#define SINGLESTEP_STATE_TARGET_LB              2
#define SINGLESTEP_STATE_TARGET_UB              7
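/*
 * Editor's note, not part of the original patch: these values mirror the
 * anonymous bitfield in struct single_step_state above, so C and assembly
 * agree on the layout:
 *
 *   bit 0      is_enabled  (SINGLESTEP_STATE_MASK_IS_ENABLED == 0x1)
 *   bit 1      update      (SINGLESTEP_STATE_MASK_UPDATE == 0x2)
 *   bits 2..7  update_reg  (TARGET_LB == 2 through TARGET_UB == 7)
 */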

#endif /* __KERNEL__ */

#endif /* _ASM_TILE_PTRACE_H */
@ -0,0 +1 @@
#include <asm-generic/resource.h>
@ -0,0 +1,22 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SCATTERLIST_H
#define _ASM_TILE_SCATTERLIST_H

#define ISA_DMA_THRESHOLD       (~0UL)

#include <asm-generic/scatterlist.h>

#endif /* _ASM_TILE_SCATTERLIST_H */
@ -0,0 +1,44 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SECTIONS_H
#define _ASM_TILE_SECTIONS_H

#define arch_is_kernel_data arch_is_kernel_data

#include <asm-generic/sections.h>

/* Text and data are at different areas in the kernel VA space. */
extern char _sinitdata[], _einitdata[];

/* Write-once data is writable only till the end of initialization. */
extern char __w1data_begin[], __w1data_end[];


/* Not exactly sections, but PC comparison points in the code. */
extern char __rt_sigreturn[], __rt_sigreturn_end[];
#ifndef __tilegx__
extern char sys_cmpxchg[], __sys_cmpxchg_end[];
extern char __sys_cmpxchg_grab_lock[];
extern char __start_atomic_asm_code[], __end_atomic_asm_code[];
#endif

/* Handle the discontiguity between _sdata and _stext. */
static inline int arch_is_kernel_data(unsigned long addr)
{
        return addr >= (unsigned long)_sdata &&
               addr < (unsigned long)_end;
}

#endif /* _ASM_TILE_SECTIONS_H */
@ -0,0 +1 @@
#include <asm-generic/sembuf.h>
@ -0,0 +1,32 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SETUP_H
#define _ASM_TILE_SETUP_H

#include <linux/pfn.h>
#include <linux/init.h>

/*
 * Reserved space for vmalloc and iomap - defined in asm/page.h
 */
#define MAXMEM_PFN      PFN_DOWN(MAXMEM)

#define COMMAND_LINE_SIZE       2048

void early_panic(const char *fmt, ...);
void warn_early_printk(void);
void __init disable_early_printk(void);

#endif /* _ASM_TILE_SETUP_H */
@ -0,0 +1 @@
#include <asm-generic/shmbuf.h>
@ -0,0 +1 @@
#include <asm-generic/shmparam.h>