Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile

Pull Tile arch updates from Chris Metcalf:
 "These changes bring in a bunch of new functionality that has been
  maintained internally at Tilera over the last year, plus other stray
  bits of work that I've taken into the tile tree from other folks.

  The changes include some PCI root complex work, interrupt-driven
  console support, support for performing fast-path unaligned data
  fixups by kernel-based JIT code generation, CONFIG_PREEMPT support,
  vDSO support for gettimeofday(), a serial driver for the tilegx
  on-chip UART, KGDB support, more optimized string routines, support
  for ftrace and kprobes, improved ASLR, and many bug fixes.

  We also remove support for the old TILE64 chip, which is no longer
  buildable"

* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile: (85 commits)
  tile: refresh tile defconfig files
  tile: rework <asm/cmpxchg.h>
  tile PCI RC: make default consistent DMA mask 32-bit
  tile: add null check for kzalloc in tile/kernel/setup.c
  tile: make __write_once a synonym for __read_mostly
  tile: remove support for TILE64
  tile: use asm-generic/bitops/builtin-*.h
  tile: eliminate no-op "noatomichash" boot argument
  tile: use standard tile_bundle_bits type in traps.c
  tile: simplify code referencing hypervisor API addresses
  tile: change <asm/system.h> to <asm/switch_to.h> in comments
  tile: mark pcibios_init() as __init
  tile: check for correct compiler earlier in asm-offsets.c
  tile: use standard 'generic-y' model for <asm/hw_irq.h>
  tile: use asm-generic version of <asm/local64.h>
  tile PCI RC: add comment about "PCI hole" problem
  tile: remove DEBUG_EXTRA_FLAGS kernel config option
  tile: add virt_to_kpte() API and clean up and document behavior
  tile: support FRAME_POINTER
  tile: support reporting Tilera hypervisor statistics
  ...
commit 4de9ad9bc0
@@ -8373,9 +8373,14 @@ M:	Chris Metcalf <cmetcalf@tilera.com>
W:	http://www.tilera.com/scm/
S:	Supported
F:	arch/tile/
F:	drivers/tty/hvc/hvc_tile.c
F:	drivers/net/ethernet/tile/
F:	drivers/char/tile-srom.c
F:	drivers/edac/tile_edac.c
F:	drivers/net/ethernet/tile/
F:	drivers/rtc/rtc-tile.c
F:	drivers/tty/hvc/hvc_tile.c
F:	drivers/tty/serial/tilegx.c
F:	drivers/usb/host/*-tilegx.c
F:	include/linux/usb/tilegx.h

TLAN NETWORK DRIVER
M:	Samuel Chessman <chessman@tux.org>
@@ -26,6 +26,7 @@ config TILE
	select HAVE_SYSCALL_TRACEPOINTS
	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
	select HAVE_DEBUG_STACKOVERFLOW
	select ARCH_WANT_FRAME_POINTERS

# FIXME: investigate whether we need/want these options.
# select HAVE_IOREMAP_PROT
@@ -64,6 +65,9 @@ config HUGETLB_SUPER_PAGES
	depends on HUGETLB_PAGE && TILEGX
	def_bool y

config GENERIC_TIME_VSYSCALL
	def_bool y

# FIXME: tilegx can implement a more efficient rwsem.
config RWSEM_GENERIC_SPINLOCK
	def_bool y
@@ -112,10 +116,19 @@ config SMP
config HVC_TILE
	depends on TTY
	select HVC_DRIVER
	select HVC_IRQ if TILEGX
	def_bool y

config TILEGX
	bool "Building with TILE-Gx (64-bit) compiler and toolchain"
	bool "Building for TILE-Gx (64-bit) processor"
	select HAVE_FUNCTION_TRACER
	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
	select HAVE_FUNCTION_GRAPH_TRACER
	select HAVE_DYNAMIC_FTRACE
	select HAVE_FTRACE_MCOUNT_RECORD
	select HAVE_KPROBES
	select HAVE_KRETPROBES
	select HAVE_ARCH_KGDB

config TILEPRO
	def_bool !TILEGX
@@ -194,7 +207,7 @@ config SYSVIPC_COMPAT
	def_bool y
	depends on COMPAT && SYSVIPC

# We do not currently support disabling HIGHMEM on tile64 and tilepro.
# We do not currently support disabling HIGHMEM on tilepro.
config HIGHMEM
	bool # "Support for more than 512 MB of RAM"
	default !TILEGX
@@ -300,6 +313,8 @@ config PAGE_OFFSET

source "mm/Kconfig"

source "kernel/Kconfig.preempt"

config CMDLINE_BOOL
	bool "Built-in kernel command line"
	default n
@@ -396,8 +411,20 @@ config NO_IOMEM
config NO_IOPORT
	def_bool !PCI

config TILE_PCI_IO
	bool "PCI I/O space support"
	default n
	depends on PCI
	depends on TILEGX
	---help---
	  Enable PCI I/O space support on TILEGx. Since the PCI I/O space
	  is used by few modern PCIe endpoint devices, its support is disabled
	  by default to save the TRIO PIO Region resource for other purposes.

source "drivers/pci/Kconfig"

source "drivers/pci/pcie/Kconfig"

config TILE_USB
	tristate "Tilera USB host adapter support"
	default y
@@ -14,14 +14,12 @@ config EARLY_PRINTK
	  with klogd/syslogd. You should normally say N here,
	  unless you want to debug such a crash.

config DEBUG_EXTRA_FLAGS
	string "Additional compiler arguments when building with '-g'"
	depends on DEBUG_INFO
	default ""
config TILE_HVGLUE_TRACE
	bool "Provide wrapper functions for hypervisor ABI calls"
	default n
	help
	  Debug info can be large, and flags like
	  `-femit-struct-debug-baseonly' can reduce the kernel file
	  size and build time noticeably. Such flags are often
	  helpful if the main use of debug info is line number info.
	  Provide wrapper functions for the hypervisor ABI calls
	  defined in arch/tile/kernel/hvglue.S. This allows tracing
	  mechanisms, etc., to have visibility into those calls.

endmenu
@@ -30,10 +30,6 @@ endif
# In kernel modules, this causes load failures due to unsupported relocations.
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables

ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"")
KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS)
endif

LIBGCC_PATH     := \
  $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
@@ -1,16 +1,15 @@
CONFIG_TILEGX=y
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_FHANDLE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_FHANDLE=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_AUDIT=y
CONFIG_LOG_BUF_SHIFT=19
CONFIG_CGROUPS=y
CONFIG_CGROUP_DEBUG=y
@@ -18,18 +17,18 @@ CONFIG_CGROUP_DEVICE=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
CONFIG_CGROUP_MEMCG=y
CONFIG_CGROUP_MEMCG_SWAP=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_BLK_CGROUP=y
CONFIG_NAMESPACES=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_RD_XZ=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
@@ -45,12 +44,12 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_SGI_PARTITION=y
CONFIG_SUN_PARTITION=y
CONFIG_KARMA_PARTITION=y
CONFIG_EFI_PARTITION=y
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_NR_CPUS=100
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_100=y
# CONFIG_COMPACTION is not set
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_TILE_PCI_IO=y
CONFIG_PCI_DEBUG=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=y
@@ -108,150 +107,9 @@ CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_MROUTE=y
CONFIG_IPV6_PIMSM_V2=y
CONFIG_NETLABEL=y
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_ZONES=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CT_PROTO_DCCP=m
CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
CONFIG_NF_CONNTRACK_IRC=m
CONFIG_NF_CONNTRACK_NETBIOS_NS=m
CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NETFILTER_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
CONFIG_NETFILTER_XT_TARGET_CT=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_SECMARK=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_IPVS=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
CONFIG_IP_VS=m
CONFIG_IP_VS_IPV6=y
CONFIG_IP_VS_PROTO_TCP=y
CONFIG_IP_VS_PROTO_UDP=y
CONFIG_IP_VS_PROTO_ESP=y
CONFIG_IP_VS_PROTO_AH=y
CONFIG_IP_VS_PROTO_SCTP=y
CONFIG_IP_VS_RR=m
CONFIG_IP_VS_WRR=m
CONFIG_IP_VS_LC=m
CONFIG_IP_VS_WLC=m
CONFIG_IP_VS_LBLC=m
CONFIG_IP_VS_LBLCR=m
CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
CONFIG_NF_CONNTRACK_IPV4=m
# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_SECURITY=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_FRAG=m
CONFIG_IP6_NF_MATCH_OPTS=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_IP6_NF_SECURITY=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
CONFIG_BRIDGE_EBT_T_FILTER=m
CONFIG_BRIDGE_EBT_T_NAT=m
CONFIG_BRIDGE_EBT_802_3=m
CONFIG_BRIDGE_EBT_AMONG=m
CONFIG_BRIDGE_EBT_ARP=m
CONFIG_BRIDGE_EBT_IP=m
CONFIG_BRIDGE_EBT_IP6=m
CONFIG_BRIDGE_EBT_LIMIT=m
CONFIG_BRIDGE_EBT_MARK=m
CONFIG_BRIDGE_EBT_PKTTYPE=m
CONFIG_BRIDGE_EBT_STP=m
CONFIG_BRIDGE_EBT_VLAN=m
CONFIG_BRIDGE_EBT_ARPREPLY=m
CONFIG_BRIDGE_EBT_DNAT=m
CONFIG_BRIDGE_EBT_MARK_T=m
CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_ULOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_BRIDGE=m
CONFIG_NET_DSA=y
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_PHONET=m
@@ -292,13 +150,13 @@ CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m
CONFIG_GACT_PROB=y
CONFIG_NET_ACT_MIRRED=m
CONFIG_NET_ACT_IPT=m
CONFIG_NET_ACT_NAT=m
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_CLS_IND=y
CONFIG_DCB=y
CONFIG_DNS_RESOLVER=y
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
@@ -317,10 +175,12 @@ CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SAS_ATA=y
CONFIG_ISCSI_TCP=m
CONFIG_SCSI_MVSAS=y
# CONFIG_SCSI_MVSAS_DEBUG is not set
CONFIG_SCSI_MVSAS_TASKLET=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_SIL24=y
# CONFIG_ATA_SFF is not set
CONFIG_MD=y
@@ -343,6 +203,12 @@ CONFIG_DM_MULTIPATH_QL=m
CONFIG_DM_MULTIPATH_ST=m
CONFIG_DM_DELAY=m
CONFIG_DM_UEVENT=y
CONFIG_TARGET_CORE=m
CONFIG_TCM_IBLOCK=m
CONFIG_TCM_FILEIO=m
CONFIG_TCM_PSCSI=m
CONFIG_LOOPBACK_TARGET=m
CONFIG_ISCSI_TARGET=m
CONFIG_FUSION=y
CONFIG_FUSION_SAS=y
CONFIG_NETDEVICES=y
@@ -359,42 +225,8 @@ CONFIG_VETH=m
CONFIG_NET_DSA_MV88E6060=y
CONFIG_NET_DSA_MV88E6131=y
CONFIG_NET_DSA_MV88E6123_61_65=y
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_NET_VENDOR_ADAPTEC is not set
# CONFIG_NET_VENDOR_ALTEON is not set
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_CISCO is not set
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
# CONFIG_NET_VENDOR_EXAR is not set
# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MELLANOX is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MYRI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
# CONFIG_NET_PACKET_ENGINE is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_RDC is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_SUN is not set
# CONFIG_NET_VENDOR_TEHUTI is not set
# CONFIG_NET_VENDOR_TI is not set
# CONFIG_TILE_NET is not set
# CONFIG_NET_VENDOR_VIA is not set
CONFIG_SKY2=y
CONFIG_PTP_1588_CLOCK_TILEGX=y
# CONFIG_WLAN is not set
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -402,6 +234,7 @@ CONFIG_NET_DSA_MV88E6123_61_65=y
# CONFIG_SERIO is not set
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_TILEGX=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_TIMERIOMEM=m
CONFIG_I2C=y
@@ -410,13 +243,16 @@ CONFIG_I2C_CHARDEV=y
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
# CONFIG_VGA_ARB is not set
# CONFIG_HID_SUPPORT is not set
CONFIG_DRM=m
CONFIG_DRM_TDFX=m
CONFIG_DRM_R128=m
CONFIG_DRM_MGA=m
CONFIG_DRM_VIA=m
CONFIG_DRM_SAVAGE=m
CONFIG_USB=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_STORAGE=y
CONFIG_USB_LIBUSUAL=y
CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
CONFIG_RTC_CLASS=y
@@ -464,9 +300,8 @@ CONFIG_ECRYPT_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
CONFIG_NFS_FS=m
CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_NFS_V4=m
CONFIG_NFS_V4_1=y
CONFIG_NFS_FSCACHE=y
CONFIG_NFSD=m
@@ -519,25 +354,28 @@ CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
CONFIG_DLM=m
CONFIG_DLM_DEBUG=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_REDUCED=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
CONFIG_HEADERS_CHECK=y
# CONFIG_FRAME_POINTER is not set
CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
CONFIG_DEBUG_VM=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_LOCKUP_DETECTOR=y
CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_REDUCED=y
CONFIG_DEBUG_VM=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_CREDENTIALS=y
CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_KGDB=y
CONFIG_KEYS_DEBUG_PROC_KEYS=y
CONFIG_SECURITY=y
CONFIG_SECURITYFS=y
@@ -546,7 +384,6 @@ CONFIG_SECURITY_NETWORK_XFRM=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
@@ -559,14 +396,12 @@ CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32C=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
@@ -1,15 +1,14 @@
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_FHANDLE=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_AUDIT=y
CONFIG_LOG_BUF_SHIFT=19
CONFIG_CGROUPS=y
CONFIG_CGROUP_DEBUG=y
@@ -17,14 +16,13 @@ CONFIG_CGROUP_DEVICE=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
CONFIG_CGROUP_MEMCG=y
CONFIG_CGROUP_MEMCG_SWAP=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_BLK_CGROUP=y
CONFIG_NAMESPACES=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_RD_XZ=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
@@ -44,11 +42,10 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_SGI_PARTITION=y
CONFIG_SUN_PARTITION=y
CONFIG_KARMA_PARTITION=y
CONFIG_EFI_PARTITION=y
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_100=y
# CONFIG_COMPACTION is not set
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_PCI_DEBUG=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=y
@@ -122,16 +119,15 @@ CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NETFILTER_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
CONFIG_NETFILTER_XT_TARGET_CT=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
@@ -189,14 +185,12 @@ CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
CONFIG_NF_CONNTRACK_IPV4=m
# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
@@ -207,8 +201,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_FRAG=m
@@ -218,7 +210,6 @@ CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
@@ -249,7 +240,6 @@ CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_RDS=m
CONFIG_RDS_TCP=m
CONFIG_BRIDGE=m
CONFIG_NET_DSA=y
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_PHONET=m
@@ -297,6 +287,7 @@ CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_CLS_IND=y
CONFIG_DCB=y
CONFIG_DNS_RESOLVER=y
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
@@ -354,40 +345,7 @@ CONFIG_NET_DSA_MV88E6060=y
CONFIG_NET_DSA_MV88E6131=y
CONFIG_NET_DSA_MV88E6123_61_65=y
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_NET_VENDOR_ADAPTEC is not set
# CONFIG_NET_VENDOR_ALTEON is not set
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_CISCO is not set
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
# CONFIG_NET_VENDOR_EXAR is not set
# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MELLANOX is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MYRI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
# CONFIG_NET_PACKET_ENGINE is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_RDC is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_SUN is not set
# CONFIG_NET_VENDOR_TEHUTI is not set
# CONFIG_NET_VENDOR_TI is not set
# CONFIG_NET_VENDOR_VIA is not set
CONFIG_E1000E=y
# CONFIG_WLAN is not set
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -403,7 +361,6 @@ CONFIG_I2C_CHARDEV=y
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
# CONFIG_VGA_ARB is not set
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
@@ -448,13 +405,13 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
CONFIG_NFS_FS=m
CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_NFS_V4=m
CONFIG_NFS_V4_1=y
CONFIG_NFS_FSCACHE=y
CONFIG_NFSD=m
@@ -508,26 +465,29 @@ CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
CONFIG_DLM=m
CONFIG_DLM_DEBUG=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_REDUCED=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
CONFIG_FRAME_WARN=2048
CONFIG_MAGIC_SYSRQ=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
CONFIG_HEADERS_CHECK=y
# CONFIG_FRAME_POINTER is not set
CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_VM=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_LOCKUP_DETECTOR=y
CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_REDUCED=y
CONFIG_DEBUG_VM=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_CREDENTIALS=y
CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_KEYS_DEBUG_PROC_KEYS=y
CONFIG_SECURITY=y
CONFIG_SECURITYFS=y
@@ -536,7 +496,6 @@ CONFIG_SECURITY_NETWORK_XFRM=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
@@ -549,14 +508,12 @@ CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32C=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
@@ -26,3 +26,8 @@ config TILE_GXIO_TRIO
config TILE_GXIO_USB_HOST
	bool
	select TILE_GXIO

# Support direct access to the TILE-Gx UART hardware from kernel space.
config TILE_GXIO_UART
	bool
	select TILE_GXIO
@@ -6,4 +6,5 @@ obj-$(CONFIG_TILE_GXIO) += iorpc_globals.o kiorpc.o
obj-$(CONFIG_TILE_GXIO_DMA) += dma_queue.o
obj-$(CONFIG_TILE_GXIO_MPIPE) += mpipe.o iorpc_mpipe.o iorpc_mpipe_info.o
obj-$(CONFIG_TILE_GXIO_TRIO) += trio.o iorpc_trio.o
obj-$(CONFIG_TILE_GXIO_UART) += uart.o iorpc_uart.o
obj-$(CONFIG_TILE_GXIO_USB_HOST) += usb_host.o iorpc_usb_host.o
@@ -61,6 +61,29 @@ int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context,

EXPORT_SYMBOL(gxio_trio_alloc_memory_maps);

struct alloc_scatter_queues_param {
	unsigned int count;
	unsigned int first;
	unsigned int flags;
};

int gxio_trio_alloc_scatter_queues(gxio_trio_context_t * context,
				   unsigned int count, unsigned int first,
				   unsigned int flags)
{
	struct alloc_scatter_queues_param temp;
	struct alloc_scatter_queues_param *params = &temp;

	params->count = count;
	params->first = first;
	params->flags = flags;

	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
			     sizeof(*params),
			     GXIO_TRIO_OP_ALLOC_SCATTER_QUEUES);
}

EXPORT_SYMBOL(gxio_trio_alloc_scatter_queues);
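All of these machine-generated iorpc stubs share one shape: pack the arguments into an on-stack parameter struct, then hand it to the hypervisor with hv_dev_pwrite() plus an opcode. As a hedged illustration only (not part of this diff), and assuming the convention shared by the other gxio_trio_alloc_*() calls that a successful call returns the index of the first allocated resource, a caller might use the new scatter-queue allocator like this:

/* Illustrative sketch, not from this commit.  Assumes "trio" was set
 * up with gxio_trio_init() and that the return value is the index of
 * the first allocated scatter queue, or a negative GXIO_ERR_* code.
 */
static int example_alloc_scatter_queue(gxio_trio_context_t *trio,
					unsigned int *index_out)
{
	int first = gxio_trio_alloc_scatter_queues(trio, 1 /* count */,
						   0 /* first: any */,
						   0 /* flags */);
	if (first < 0)
		return first;	/* hypervisor refused the allocation */

	*index_out = first;	/* scatter-queue number to program */
	return 0;
}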

struct alloc_pio_regions_param {
	unsigned int count;
@@ -0,0 +1,77 @@
/*
 * Copyright 2013 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

/* This file is machine-generated; DO NOT EDIT! */
#include "gxio/iorpc_uart.h"

struct cfg_interrupt_param {
	union iorpc_interrupt interrupt;
};

int gxio_uart_cfg_interrupt(gxio_uart_context_t *context, int inter_x,
			    int inter_y, int inter_ipi, int inter_event)
{
	struct cfg_interrupt_param temp;
	struct cfg_interrupt_param *params = &temp;

	params->interrupt.kernel.x = inter_x;
	params->interrupt.kernel.y = inter_y;
	params->interrupt.kernel.ipi = inter_ipi;
	params->interrupt.kernel.event = inter_event;

	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
			     sizeof(*params), GXIO_UART_OP_CFG_INTERRUPT);
}

EXPORT_SYMBOL(gxio_uart_cfg_interrupt);

struct get_mmio_base_param {
	HV_PTE base;
};

int gxio_uart_get_mmio_base(gxio_uart_context_t *context, HV_PTE *base)
{
	int __result;
	struct get_mmio_base_param temp;
	struct get_mmio_base_param *params = &temp;

	__result =
	    hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
			 GXIO_UART_OP_GET_MMIO_BASE);
	*base = params->base;

	return __result;
}

EXPORT_SYMBOL(gxio_uart_get_mmio_base);

struct check_mmio_offset_param {
	unsigned long offset;
	unsigned long size;
};

int gxio_uart_check_mmio_offset(gxio_uart_context_t *context,
				unsigned long offset, unsigned long size)
{
	struct check_mmio_offset_param temp;
	struct check_mmio_offset_param *params = &temp;

	params->offset = offset;
	params->size = size;

	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
			     sizeof(*params), GXIO_UART_OP_CHECK_MMIO_OFFSET);
}

EXPORT_SYMBOL(gxio_uart_check_mmio_offset);
@@ -0,0 +1,87 @@
/*
 * Copyright 2013 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

/*
 * Implementation of UART gxio calls.
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/module.h>

#include <gxio/uart.h>
#include <gxio/iorpc_globals.h>
#include <gxio/iorpc_uart.h>
#include <gxio/kiorpc.h>

int gxio_uart_init(gxio_uart_context_t *context, int uart_index)
{
	char file[32];
	int fd;

	snprintf(file, sizeof(file), "uart/%d/iorpc", uart_index);
	fd = hv_dev_open((HV_VirtAddr) file, 0);
	if (fd < 0) {
		if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
			return fd;
		else
			return -ENODEV;
	}

	context->fd = fd;

	/* Map in the MMIO space. */
	context->mmio_base = (void __force *)
		iorpc_ioremap(fd, HV_UART_MMIO_OFFSET, HV_UART_MMIO_SIZE);

	if (context->mmio_base == NULL) {
		hv_dev_close(context->fd);
		context->fd = -1;
		return -ENODEV;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(gxio_uart_init);

int gxio_uart_destroy(gxio_uart_context_t *context)
{
	iounmap((void __force __iomem *)(context->mmio_base));
	hv_dev_close(context->fd);

	context->mmio_base = NULL;
	context->fd = -1;

	return 0;
}

EXPORT_SYMBOL_GPL(gxio_uart_destroy);

/* UART register write wrapper. */
void gxio_uart_write(gxio_uart_context_t *context, uint64_t offset,
		     uint64_t word)
{
	__gxio_mmio_write(context->mmio_base + offset, word);
}

EXPORT_SYMBOL_GPL(gxio_uart_write);

/* UART register read wrapper. */
uint64_t gxio_uart_read(gxio_uart_context_t *context, uint64_t offset)
{
	return __gxio_mmio_read(context->mmio_base + offset);
}

EXPORT_SYMBOL_GPL(gxio_uart_read);
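These four functions are the whole kernel-side surface of the UART gxio API. A hedged sketch (not part of this diff, loosely modeled on how the tilegx serial driver added elsewhere in this series drives the hardware) of transmitting one byte; the polling loop is illustrative, while the register offsets and the UART_FLAG_t layout come from the new <arch/uart.h> and <arch/uart_def.h> headers below:

/* Hedged sketch: poll the FLAG register until the transmit FIFO has
 * room, then push one byte through the TRANSMIT_DATA register.
 */
#include <arch/uart.h>
#include <arch/uart_def.h>
#include <gxio/uart.h>

static void example_uart_putc(gxio_uart_context_t *ctx, unsigned char c)
{
	UART_FLAG_t flag;

	do {
		flag.word = gxio_uart_read(ctx, UART_FLAG);
	} while (flag.tfifo_full);		/* spin while FIFO is full */

	gxio_uart_write(ctx, UART_TRANSMIT_DATA, (uint64_t)c);
}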
@@ -22,6 +22,45 @@

#ifndef __ASSEMBLER__

/*
 * Map SQ Doorbell Format.
 * This describes the format of the write-only doorbell register that exists
 * in the last 8-bytes of the MAP_SQ_BASE/LIM range.  This register is only
 * writable from PCIe space.  Writes to this register will not be written to
 * Tile memory space and thus no IO VA translation is required if the last
 * page of the BASE/LIM range is not otherwise written.
 */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /*
     * When written with a 1, the associated MAP_SQ region's doorbell
     * interrupt will be triggered once all previous writes are visible to
     * Tile software.
     */
    uint_reg_t doorbell   : 1;
    /*
     * When written with a 1, the descriptor at the head of the associated
     * MAP_SQ's FIFO will be dequeued.
     */
    uint_reg_t pop        : 1;
    /* Reserved. */
    uint_reg_t __reserved : 62;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t __reserved : 62;
    uint_reg_t pop        : 1;
    uint_reg_t doorbell   : 1;
#endif
  };

  uint_reg_t word;
} TRIO_MAP_SQ_DOORBELL_FMT_t;
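Since the register is only writable from PCIe space, the natural consumer is software on the host side of the link. A hedged sketch of composing the doorbell word; only the bitfield layout above comes from the commit, and the mapped-window pointer is hypothetical:

/* Hedged sketch, not from this diff: ring the doorbell and pop the
 * head descriptor in one 8-byte write.  "sq_window_end" is a
 * hypothetical __iomem pointer to the end of the MAP_SQ BASE/LIM
 * window; per the comment above, the register occupies its last
 * 8 bytes.  writeq() is from <linux/io.h>.
 */
static void example_ring_map_sq_doorbell(void __iomem *sq_window_end)
{
	TRIO_MAP_SQ_DOORBELL_FMT_t db;

	db.word = 0;
	db.doorbell = 1;	/* trigger the region's doorbell interrupt */
	db.pop = 1;		/* dequeue the descriptor at the FIFO head */
	writeq(db.word, sq_window_end - 8);
}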

/*
 * Tile PIO Region Configuration - CFG Address Format.
 * This register describes the address format for PIO accesses when the
@@ -0,0 +1,300 @@
/*
 * Copyright 2013 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

/* Machine-generated file; do not edit. */

#ifndef __ARCH_UART_H__
#define __ARCH_UART_H__

#include <arch/abi.h>
#include <arch/uart_def.h>

#ifndef __ASSEMBLER__

/* Divisor. */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /*
     * Baud Rate Divisor.  Desired_baud_rate = REF_CLK frequency / (baud *
     * 16).
     * Note: REF_CLK is always 125 MHz, the default
     * divisor = 68, baud rate = 125M/(68*16) = 115200 baud.
     */
    uint_reg_t divisor    : 12;
    /* Reserved. */
    uint_reg_t __reserved : 52;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t __reserved : 52;
    uint_reg_t divisor    : 12;
#endif
  };

  uint_reg_t word;
} UART_DIVISOR_t;
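A worked example makes the divisor arithmetic in the comment concrete. This helper is a sketch, not part of the diff, and the round-to-nearest policy is an assumption:

/* Hedged sketch: compute the DIVISOR field for a requested baud rate
 * from the fixed 125 MHz REF_CLK described above.  For 115200 baud:
 * 125000000 / (115200 * 16) ~= 67.8, so divisor 68, which gives an
 * actual rate of 125000000 / (68 * 16) ~= 114890 baud (the comment's
 * "115200" is approximate).
 */
static unsigned int example_uart_divisor(unsigned int baud)
{
	unsigned int ref_clk = 125000000;	/* REF_CLK in Hz */

	return (ref_clk + 8 * baud) / (16 * baud);	/* round to nearest */
}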

/* FIFO Count. */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /*
     * n: n active entries in the receive FIFO (max is 2**8). Each entry has
     * 8 bits.
     * 0: no active entry in the receive FIFO (that is empty).
     */
    uint_reg_t rfifo_count  : 9;
    /* Reserved. */
    uint_reg_t __reserved_0 : 7;
    /*
     * n: n active entries in the transmit FIFO (max is 2**8). Each entry has
     * 8 bits.
     * 0: no active entry in the transmit FIFO (that is empty).
     */
    uint_reg_t tfifo_count  : 9;
    /* Reserved. */
    uint_reg_t __reserved_1 : 7;
    /*
     * n: n active entries in the write FIFO (max is 2**2). Each entry has 8
     * bits.
     * 0: no active entry in the write FIFO (that is empty).
     */
    uint_reg_t wfifo_count  : 3;
    /* Reserved. */
    uint_reg_t __reserved_2 : 29;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t __reserved_2 : 29;
    uint_reg_t wfifo_count  : 3;
    uint_reg_t __reserved_1 : 7;
    uint_reg_t tfifo_count  : 9;
    uint_reg_t __reserved_0 : 7;
    uint_reg_t rfifo_count  : 9;
#endif
  };

  uint_reg_t word;
} UART_FIFO_COUNT_t;

/* FLAG. */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /* Reserved. */
    uint_reg_t __reserved_0 : 1;
    /* 1: receive FIFO is empty */
    uint_reg_t rfifo_empty  : 1;
    /* 1: write FIFO is empty. */
    uint_reg_t wfifo_empty  : 1;
    /* 1: transmit FIFO is empty. */
    uint_reg_t tfifo_empty  : 1;
    /* 1: receive FIFO is full. */
    uint_reg_t rfifo_full   : 1;
    /* 1: write FIFO is full. */
    uint_reg_t wfifo_full   : 1;
    /* 1: transmit FIFO is full. */
    uint_reg_t tfifo_full   : 1;
    /* Reserved. */
    uint_reg_t __reserved_1 : 57;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t __reserved_1 : 57;
    uint_reg_t tfifo_full   : 1;
    uint_reg_t wfifo_full   : 1;
    uint_reg_t rfifo_full   : 1;
    uint_reg_t tfifo_empty  : 1;
    uint_reg_t wfifo_empty  : 1;
    uint_reg_t rfifo_empty  : 1;
    uint_reg_t __reserved_0 : 1;
#endif
  };

  uint_reg_t word;
} UART_FLAG_t;

/*
 * Interrupt Vector Mask.
 * Each bit in this register corresponds to a specific interrupt. When set,
 * the associated interrupt will not be dispatched.
 */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /* Read data FIFO read and no data available */
    uint_reg_t rdat_err       : 1;
    /* Write FIFO was written but it was full */
    uint_reg_t wdat_err       : 1;
    /* Stop bit not found when current data was received */
    uint_reg_t frame_err      : 1;
    /* Parity error was detected when current data was received */
    uint_reg_t parity_err     : 1;
    /* Data was received but the receive FIFO was full */
    uint_reg_t rfifo_overflow : 1;
    /*
     * An almost full event is reached when data is to be written to the
     * receive FIFO, and the receive FIFO has more than or equal to
     * BUFFER_THRESHOLD.RFIFO_AFULL bytes.
     */
    uint_reg_t rfifo_afull    : 1;
    /* Reserved. */
    uint_reg_t __reserved_0   : 1;
    /* An entry in the transmit FIFO was popped */
    uint_reg_t tfifo_re       : 1;
    /* An entry has been pushed into the receive FIFO */
    uint_reg_t rfifo_we       : 1;
    /* An entry of the write FIFO has been popped */
    uint_reg_t wfifo_re       : 1;
    /* Rshim read receive FIFO in protocol mode */
    uint_reg_t rfifo_err      : 1;
    /*
     * An almost empty event is reached when data is to be read from the
     * transmit FIFO, and the transmit FIFO has less than or equal to
     * BUFFER_THRESHOLD.TFIFO_AEMPTY bytes.
     */
    uint_reg_t tfifo_aempty   : 1;
    /* Reserved. */
    uint_reg_t __reserved_1   : 52;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t __reserved_1   : 52;
    uint_reg_t tfifo_aempty   : 1;
    uint_reg_t rfifo_err      : 1;
    uint_reg_t wfifo_re       : 1;
    uint_reg_t rfifo_we       : 1;
    uint_reg_t tfifo_re       : 1;
    uint_reg_t __reserved_0   : 1;
    uint_reg_t rfifo_afull    : 1;
    uint_reg_t rfifo_overflow : 1;
    uint_reg_t parity_err     : 1;
    uint_reg_t frame_err      : 1;
    uint_reg_t wdat_err       : 1;
    uint_reg_t rdat_err       : 1;
#endif
  };

  uint_reg_t word;
} UART_INTERRUPT_MASK_t;

/*
 * Interrupt vector, write-one-to-clear.
 * Each bit in this register corresponds to a specific interrupt. Hardware
 * sets the bit when the associated condition has occurred. Writing a 1
 * clears the status bit.
 */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /* Read data FIFO read and no data available */
    uint_reg_t rdat_err       : 1;
    /* Write FIFO was written but it was full */
    uint_reg_t wdat_err       : 1;
    /* Stop bit not found when current data was received */
    uint_reg_t frame_err      : 1;
    /* Parity error was detected when current data was received */
    uint_reg_t parity_err     : 1;
    /* Data was received but the receive FIFO was full */
    uint_reg_t rfifo_overflow : 1;
    /*
     * Data was received and the receive FIFO is now almost full (more than
     * BUFFER_THRESHOLD.RFIFO_AFULL bytes in it)
     */
    uint_reg_t rfifo_afull    : 1;
    /* Reserved. */
    uint_reg_t __reserved_0   : 1;
    /* An entry in the transmit FIFO was popped */
    uint_reg_t tfifo_re       : 1;
    /* An entry has been pushed into the receive FIFO */
    uint_reg_t rfifo_we       : 1;
    /* An entry of the write FIFO has been popped */
    uint_reg_t wfifo_re       : 1;
    /* Rshim read receive FIFO in protocol mode */
    uint_reg_t rfifo_err      : 1;
    /*
     * Data was read from the transmit FIFO and now it is almost empty (less
     * than or equal to BUFFER_THRESHOLD.TFIFO_AEMPTY bytes in it).
     */
    uint_reg_t tfifo_aempty   : 1;
    /* Reserved. */
    uint_reg_t __reserved_1   : 52;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t __reserved_1   : 52;
    uint_reg_t tfifo_aempty   : 1;
    uint_reg_t rfifo_err      : 1;
    uint_reg_t wfifo_re       : 1;
    uint_reg_t rfifo_we       : 1;
    uint_reg_t tfifo_re       : 1;
    uint_reg_t __reserved_0   : 1;
    uint_reg_t rfifo_afull    : 1;
    uint_reg_t rfifo_overflow : 1;
    uint_reg_t parity_err     : 1;
    uint_reg_t frame_err      : 1;
    uint_reg_t wdat_err       : 1;
    uint_reg_t rdat_err       : 1;
#endif
  };

  uint_reg_t word;
} UART_INTERRUPT_STATUS_t;
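Because the status register is write-one-to-clear, acknowledging an interrupt means writing back exactly the bit you want to drop. A hedged sketch (not from this diff) using the gxio wrappers added earlier in the series and the register offsets from <arch/uart_def.h> below:

/* Hedged sketch: acknowledge a receive-FIFO push interrupt by
 * writing its status bit back.  Other pending bits stay set because
 * we write zeros to them, which a W1C register ignores.
 */
static void example_ack_rfifo_we(gxio_uart_context_t *ctx)
{
	UART_INTERRUPT_STATUS_t st;

	st.word = gxio_uart_read(ctx, UART_INTERRUPT_STATUS);
	if (st.rfifo_we) {
		st.word = 0;
		st.rfifo_we = 1;
		gxio_uart_write(ctx, UART_INTERRUPT_STATUS, st.word);
	}
}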

/* Type. */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /* Number of stop bits, rx and tx */
    uint_reg_t sbits        : 1;
    /* Reserved. */
    uint_reg_t __reserved_0 : 1;
    /* Data word size, rx and tx */
    uint_reg_t dbits        : 1;
    /* Reserved. */
    uint_reg_t __reserved_1 : 1;
    /* Parity selection, rx and tx */
    uint_reg_t ptype        : 3;
    /* Reserved. */
    uint_reg_t __reserved_2 : 57;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t __reserved_2 : 57;
    uint_reg_t ptype        : 3;
    uint_reg_t __reserved_1 : 1;
    uint_reg_t dbits        : 1;
    uint_reg_t __reserved_0 : 1;
    uint_reg_t sbits        : 1;
#endif
  };

  uint_reg_t word;
} UART_TYPE_t;
#endif /* !defined(__ASSEMBLER__) */

#endif /* !defined(__ARCH_UART_H__) */
@@ -0,0 +1,120 @@
/*
 * Copyright 2013 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

/* Machine-generated file; do not edit. */

#ifndef __ARCH_UART_DEF_H__
#define __ARCH_UART_DEF_H__
#define UART_DIVISOR 0x0158
#define UART_FIFO_COUNT 0x0110
#define UART_FLAG 0x0108
#define UART_INTERRUPT_MASK 0x0208
#define UART_INTERRUPT_MASK__RDAT_ERR_SHIFT 0
#define UART_INTERRUPT_MASK__RDAT_ERR_WIDTH 1
#define UART_INTERRUPT_MASK__RDAT_ERR_RESET_VAL 1
#define UART_INTERRUPT_MASK__RDAT_ERR_RMASK 0x1
#define UART_INTERRUPT_MASK__RDAT_ERR_MASK  0x1
#define UART_INTERRUPT_MASK__RDAT_ERR_FIELD 0,0
#define UART_INTERRUPT_MASK__WDAT_ERR_SHIFT 1
#define UART_INTERRUPT_MASK__WDAT_ERR_WIDTH 1
#define UART_INTERRUPT_MASK__WDAT_ERR_RESET_VAL 1
#define UART_INTERRUPT_MASK__WDAT_ERR_RMASK 0x1
#define UART_INTERRUPT_MASK__WDAT_ERR_MASK  0x2
#define UART_INTERRUPT_MASK__WDAT_ERR_FIELD 1,1
#define UART_INTERRUPT_MASK__FRAME_ERR_SHIFT 2
#define UART_INTERRUPT_MASK__FRAME_ERR_WIDTH 1
#define UART_INTERRUPT_MASK__FRAME_ERR_RESET_VAL 1
#define UART_INTERRUPT_MASK__FRAME_ERR_RMASK 0x1
#define UART_INTERRUPT_MASK__FRAME_ERR_MASK  0x4
#define UART_INTERRUPT_MASK__FRAME_ERR_FIELD 2,2
#define UART_INTERRUPT_MASK__PARITY_ERR_SHIFT 3
#define UART_INTERRUPT_MASK__PARITY_ERR_WIDTH 1
#define UART_INTERRUPT_MASK__PARITY_ERR_RESET_VAL 1
#define UART_INTERRUPT_MASK__PARITY_ERR_RMASK 0x1
#define UART_INTERRUPT_MASK__PARITY_ERR_MASK  0x8
#define UART_INTERRUPT_MASK__PARITY_ERR_FIELD 3,3
#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_SHIFT 4
#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_WIDTH 1
#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_RESET_VAL 1
#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_RMASK 0x1
#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_MASK  0x10
#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_FIELD 4,4
#define UART_INTERRUPT_MASK__RFIFO_AFULL_SHIFT 5
#define UART_INTERRUPT_MASK__RFIFO_AFULL_WIDTH 1
#define UART_INTERRUPT_MASK__RFIFO_AFULL_RESET_VAL 1
#define UART_INTERRUPT_MASK__RFIFO_AFULL_RMASK 0x1
#define UART_INTERRUPT_MASK__RFIFO_AFULL_MASK  0x20
#define UART_INTERRUPT_MASK__RFIFO_AFULL_FIELD 5,5
#define UART_INTERRUPT_MASK__TFIFO_RE_SHIFT 7
#define UART_INTERRUPT_MASK__TFIFO_RE_WIDTH 1
#define UART_INTERRUPT_MASK__TFIFO_RE_RESET_VAL 1
#define UART_INTERRUPT_MASK__TFIFO_RE_RMASK 0x1
#define UART_INTERRUPT_MASK__TFIFO_RE_MASK  0x80
#define UART_INTERRUPT_MASK__TFIFO_RE_FIELD 7,7
#define UART_INTERRUPT_MASK__RFIFO_WE_SHIFT 8
#define UART_INTERRUPT_MASK__RFIFO_WE_WIDTH 1
#define UART_INTERRUPT_MASK__RFIFO_WE_RESET_VAL 1
#define UART_INTERRUPT_MASK__RFIFO_WE_RMASK 0x1
#define UART_INTERRUPT_MASK__RFIFO_WE_MASK  0x100
#define UART_INTERRUPT_MASK__RFIFO_WE_FIELD 8,8
#define UART_INTERRUPT_MASK__WFIFO_RE_SHIFT 9
#define UART_INTERRUPT_MASK__WFIFO_RE_WIDTH 1
#define UART_INTERRUPT_MASK__WFIFO_RE_RESET_VAL 1
#define UART_INTERRUPT_MASK__WFIFO_RE_RMASK 0x1
#define UART_INTERRUPT_MASK__WFIFO_RE_MASK  0x200
#define UART_INTERRUPT_MASK__WFIFO_RE_FIELD 9,9
#define UART_INTERRUPT_MASK__RFIFO_ERR_SHIFT 10
#define UART_INTERRUPT_MASK__RFIFO_ERR_WIDTH 1
#define UART_INTERRUPT_MASK__RFIFO_ERR_RESET_VAL 1
#define UART_INTERRUPT_MASK__RFIFO_ERR_RMASK 0x1
#define UART_INTERRUPT_MASK__RFIFO_ERR_MASK  0x400
#define UART_INTERRUPT_MASK__RFIFO_ERR_FIELD 10,10
#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_SHIFT 11
#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_WIDTH 1
#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_RESET_VAL 1
#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_RMASK 0x1
#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_MASK  0x800
#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_FIELD 11,11
#define UART_INTERRUPT_STATUS 0x0200
#define UART_RECEIVE_DATA 0x0148
#define UART_TRANSMIT_DATA 0x0140
#define UART_TYPE 0x0160
#define UART_TYPE__SBITS_SHIFT 0
#define UART_TYPE__SBITS_WIDTH 1
#define UART_TYPE__SBITS_RESET_VAL 1
#define UART_TYPE__SBITS_RMASK 0x1
#define UART_TYPE__SBITS_MASK  0x1
#define UART_TYPE__SBITS_FIELD 0,0
#define UART_TYPE__SBITS_VAL_ONE_SBITS 0x0
#define UART_TYPE__SBITS_VAL_TWO_SBITS 0x1
#define UART_TYPE__DBITS_SHIFT 2
#define UART_TYPE__DBITS_WIDTH 1
#define UART_TYPE__DBITS_RESET_VAL 0
#define UART_TYPE__DBITS_RMASK 0x1
#define UART_TYPE__DBITS_MASK  0x4
#define UART_TYPE__DBITS_FIELD 2,2
#define UART_TYPE__DBITS_VAL_EIGHT_DBITS 0x0
#define UART_TYPE__DBITS_VAL_SEVEN_DBITS 0x1
#define UART_TYPE__PTYPE_SHIFT 4
#define UART_TYPE__PTYPE_WIDTH 3
#define UART_TYPE__PTYPE_RESET_VAL 3
#define UART_TYPE__PTYPE_RMASK 0x7
#define UART_TYPE__PTYPE_MASK  0x70
#define UART_TYPE__PTYPE_FIELD 4,6
#define UART_TYPE__PTYPE_VAL_NONE 0x0
#define UART_TYPE__PTYPE_VAL_MARK 0x1
#define UART_TYPE__PTYPE_VAL_SPACE 0x2
#define UART_TYPE__PTYPE_VAL_EVEN 0x3
#define UART_TYPE__PTYPE_VAL_ODD 0x4
#endif /* !defined(__ARCH_UART_DEF_H__) */
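Each register field gets the same macro family: _SHIFT/_WIDTH for position, _RMASK for the right-justified mask, _MASK for the in-place mask, plus reset values and named encodings. A hedged one-liner (not from the diff) showing the usual extraction idiom:

/* Hedged sketch: pull the parity-type field out of a raw UART_TYPE
 * register value using the SHIFT/RMASK pair defined above.
 */
static unsigned int example_uart_ptype(uint64_t type_word)
{
	return (type_word >> UART_TYPE__PTYPE_SHIFT) & UART_TYPE__PTYPE_RMASK;
}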
@@ -11,12 +11,13 @@ generic-y += errno.h
generic-y += exec.h
generic-y += fb.h
generic-y += fcntl.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += local.h
generic-y += local64.h
generic-y += msgbuf.h
generic-y += mutex.h
generic-y += param.h
@@ -113,6 +113,32 @@ static inline int atomic_read(const atomic_t *v)
 */
#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)

/**
 * atomic_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic_t
 * @i: integer value to store in memory
 *
 * Atomically sets @v to @i and returns old @v
 */
static inline int atomic_xchg(atomic_t *v, int n)
{
	return xchg(&v->counter, n);
}

/**
 * atomic_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
	return cmpxchg(&v->counter, o, n);
}
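Having real xchg()/cmpxchg() underneath means the standard compare-and-swap retry idiom now compiles to fast inline code on tilegx. A hedged illustration (this helper is not part of the patch; it only exercises the primitive defined above):

/* Hedged sketch: the classic cmpxchg retry loop, here bounding an
 * atomic counter at a caller-supplied limit.
 */
static inline int example_atomic_inc_below(atomic_t *v, int limit)
{
	int old = atomic_read(v);

	while (old < limit) {
		int seen = atomic_cmpxchg(v, old, old + 1);

		if (seen == old)
			return 1;	/* we installed old + 1 */
		old = seen;		/* lost a race; retry with new value */
	}
	return 0;			/* already at or above the limit */
}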

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
@@ -133,6 +159,32 @@ static inline int atomic_read(const atomic_t *v)

#ifndef __ASSEMBLY__

/**
 * atomic64_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic64_t
 * @i: integer value to store in memory
 *
 * Atomically sets @v to @i and returns old @v
 */
static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
{
	return xchg64(&v->counter, n);
}

/**
 * atomic64_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic64_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
{
	return cmpxchg64(&v->counter, o, n);
}

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long c, old, dec;
@@ -22,40 +22,6 @@

#ifndef __ASSEMBLY__

/* Tile-specific routines to support <linux/atomic.h>. */
int _atomic_xchg(atomic_t *v, int n);
int _atomic_xchg_add(atomic_t *v, int i);
int _atomic_xchg_add_unless(atomic_t *v, int a, int u);
int _atomic_cmpxchg(atomic_t *v, int o, int n);

/**
 * atomic_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic_t
 * @i: integer value to store in memory
 *
 * Atomically sets @v to @i and returns old @v
 */
static inline int atomic_xchg(atomic_t *v, int n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg(v, n);
}

/**
 * atomic_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_cmpxchg(v, o, n);
}

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
@@ -65,7 +31,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
 */
static inline void atomic_add(int i, atomic_t *v)
{
	_atomic_xchg_add(v, i);
	_atomic_xchg_add(&v->counter, i);
}

/**
@@ -78,7 +44,7 @@ static inline void atomic_add(int i, atomic_t *v)
static inline int atomic_add_return(int i, atomic_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add(v, i) + i;
	return _atomic_xchg_add(&v->counter, i) + i;
}

/**
@@ -93,7 +59,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add_unless(v, a, u);
	return _atomic_xchg_add_unless(&v->counter, a, u);
}

/**
@@ -108,7 +74,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 */
static inline void atomic_set(atomic_t *v, int n)
{
	_atomic_xchg(v, n);
	_atomic_xchg(&v->counter, n);
}

/* A 64bit atomic type */
@@ -119,11 +85,6 @@ typedef struct {

#define ATOMIC64_INIT(val) { (val) }

u64 _atomic64_xchg(atomic64_t *v, u64 n);
u64 _atomic64_xchg_add(atomic64_t *v, u64 i);
u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u);
u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n);

/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
@ -137,35 +98,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
|
|||
* Casting away const is safe since the atomic support routines
|
||||
* do not write to memory if the value has not been modified.
|
||||
*/
|
||||
return _atomic64_xchg_add((atomic64_t *)v, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* atomic64_xchg - atomically exchange contents of memory with a new value
|
||||
* @v: pointer of type atomic64_t
|
||||
* @i: integer value to store in memory
|
||||
*
|
||||
* Atomically sets @v to @i and returns old @v
|
||||
*/
|
||||
static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
|
||||
{
|
||||
smp_mb(); /* barrier for proper semantics */
|
||||
return _atomic64_xchg(v, n);
|
||||
}
|
||||
|
||||
/**
|
||||
* atomic64_cmpxchg - atomically exchange contents of memory if it matches
|
||||
* @v: pointer of type atomic64_t
|
||||
* @o: old value that memory should have
|
||||
* @n: new value to write to memory if it matches
|
||||
*
|
||||
* Atomically checks if @v holds @o and replaces it with @n if so.
|
||||
* Returns the old value at @v.
|
||||
*/
|
||||
static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
|
||||
{
|
||||
smp_mb(); /* barrier for proper semantics */
|
||||
return _atomic64_cmpxchg(v, o, n);
|
||||
return _atomic64_xchg_add((u64 *)&v->counter, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -177,7 +110,7 @@ static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
|
|||
*/
|
||||
static inline void atomic64_add(u64 i, atomic64_t *v)
|
||||
{
|
||||
_atomic64_xchg_add(v, i);
|
||||
_atomic64_xchg_add(&v->counter, i);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -190,7 +123,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
|
|||
static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
|
||||
{
|
||||
smp_mb(); /* barrier for proper semantics */
|
||||
return _atomic64_xchg_add(v, i) + i;
|
||||
return _atomic64_xchg_add(&v->counter, i) + i;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -205,7 +138,7 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
|
|||
static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
|
||||
{
|
||||
smp_mb(); /* barrier for proper semantics */
|
||||
return _atomic64_xchg_add_unless(v, a, u) != u;
|
||||
return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -220,7 +153,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
|
|||
*/
|
||||
static inline void atomic64_set(atomic64_t *v, u64 n)
|
||||
{
|
||||
_atomic64_xchg(v, n);
|
||||
_atomic64_xchg(&v->counter, n);
|
||||
}
|
||||
|
||||
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
|
||||
|
@ -252,21 +185,6 @@ static inline void atomic64_set(atomic64_t *v, u64 n)
|
|||
* Internal definitions only beyond this point.
|
||||
*/
|
||||
|
||||
#define ATOMIC_LOCKS_FOUND_VIA_TABLE() \
|
||||
(!CHIP_HAS_CBOX_HOME_MAP() && defined(CONFIG_SMP))
|
||||
|
||||
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
|
||||
|
||||
/* Number of entries in atomic_lock_ptr[]. */
|
||||
#define ATOMIC_HASH_L1_SHIFT 6
|
||||
#define ATOMIC_HASH_L1_SIZE (1 << ATOMIC_HASH_L1_SHIFT)
|
||||
|
||||
/* Number of locks in each struct pointed to by atomic_lock_ptr[]. */
|
||||
#define ATOMIC_HASH_L2_SHIFT (CHIP_L2_LOG_LINE_SIZE() - 2)
|
||||
#define ATOMIC_HASH_L2_SIZE (1 << ATOMIC_HASH_L2_SHIFT)
|
||||
|
||||
#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
|
||||
|
||||
/*
|
||||
* Number of atomic locks in atomic_locks[]. Must be a power of two.
|
||||
* There is no reason for more than PAGE_SIZE / 8 entries, since that
|
||||
|
@ -281,8 +199,6 @@ static inline void atomic64_set(atomic64_t *v, u64 n)
|
|||
extern int atomic_locks[];
|
||||
#endif
|
||||
|
||||
#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
|
||||
|
||||
/*
|
||||
* All the code that may fault while holding an atomic lock must
|
||||
* place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
|
||||
|
|
|
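
The recurring pattern in this series is the guess/oldval compare-exchange retry
loop. As a minimal userspace sketch (not the kernel code; it uses the GCC/Clang
__atomic builtins as stand-ins for the tile helpers), the "add unless" operation
looks like this: re-read the current value, try to publish the update, and retry
if another thread won the race.

        #include <stdint.h>

        /* Returns the value observed before the add (or u if excluded). */
        static int atomic_add_unless_sketch(int *v, int a, int u)
        {
                int oldval = __atomic_load_n(v, __ATOMIC_RELAXED);
                int guess = oldval;

                do {
                        if (oldval == u)
                                break;          /* hit the excluded value */
                        guess = oldval;
                        /* On failure, oldval is refreshed with current *v. */
                        __atomic_compare_exchange_n(v, &oldval, guess + a,
                                                    0, __ATOMIC_SEQ_CST,
                                                    __ATOMIC_SEQ_CST);
                } while (guess != oldval);
                return oldval;
        }

The loop terminates exactly when the compare-exchange succeeds (oldval still
equals guess afterward), mirroring the __atomic_add_unless() bodies above.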
@@ -32,25 +32,6 @@
 * on any routine which updates memory and returns a value.
 */

static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
        int val;
        __insn_mtspr(SPR_CMPEXCH_VALUE, o);
        smp_mb(); /* barrier for proper semantics */
        val = __insn_cmpexch4((void *)&v->counter, n);
        smp_mb(); /* barrier for proper semantics */
        return val;
}

static inline int atomic_xchg(atomic_t *v, int n)
{
        int val;
        smp_mb(); /* barrier for proper semantics */
        val = __insn_exch4((void *)&v->counter, n);
        smp_mb(); /* barrier for proper semantics */
        return val;
}

static inline void atomic_add(int i, atomic_t *v)
{
        __insn_fetchadd4((void *)&v->counter, i);

@@ -72,7 +53,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
                if (oldval == u)
                        break;
                guess = oldval;
                oldval = atomic_cmpxchg(v, guess, guess + a);
                oldval = cmpxchg(&v->counter, guess, guess + a);
        } while (guess != oldval);
        return oldval;
}

@@ -84,25 +65,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#define atomic64_read(v)        ((v)->counter)
#define atomic64_set(v, i)      ((v)->counter = (i))

static inline long atomic64_cmpxchg(atomic64_t *v, long o, long n)
{
        long val;
        smp_mb(); /* barrier for proper semantics */
        __insn_mtspr(SPR_CMPEXCH_VALUE, o);
        val = __insn_cmpexch((void *)&v->counter, n);
        smp_mb(); /* barrier for proper semantics */
        return val;
}

static inline long atomic64_xchg(atomic64_t *v, long n)
{
        long val;
        smp_mb(); /* barrier for proper semantics */
        val = __insn_exch((void *)&v->counter, n);
        smp_mb(); /* barrier for proper semantics */
        return val;
}

static inline void atomic64_add(long i, atomic64_t *v)
{
        __insn_fetchadd((void *)&v->counter, i);

@@ -124,7 +86,7 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
                if (oldval == u)
                        break;
                guess = oldval;
                oldval = atomic64_cmpxchg(v, guess, guess + a);
                oldval = cmpxchg(&v->counter, guess, guess + a);
        } while (guess != oldval);
        return oldval != u;
}
@@ -77,7 +77,6 @@

#define __sync()        __insn_mf()

#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
#include <hv/syscall_public.h>
/*
 * Issue an uncacheable load to each memory controller, then

@@ -96,7 +95,6 @@ static inline void __mb_incoherent(void)
                      "r20", "r21", "r22", "r23", "r24",
                      "r25", "r26", "r27", "r28", "r29");
}
#endif

/* Fence to guarantee visibility of stores to incoherent memory. */
static inline void

@@ -104,7 +102,6 @@ mb_incoherent(void)
{
        __insn_mf();

#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
        {
#if CHIP_HAS_TILE_WRITE_PENDING()
                const unsigned long WRITE_TIMEOUT_CYCLES = 400;

@@ -116,7 +113,6 @@ mb_incoherent(void)
#endif /* CHIP_HAS_TILE_WRITE_PENDING() */
                (void) __mb_incoherent();
        }
#endif /* CHIP_HAS_MF_WAITS_FOR_VICTIMS() */
}

#define fast_wmb()      __sync()
@@ -28,17 +28,6 @@
#include <asm/bitops_32.h>
#endif

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
        return __builtin_ctzl(word);
}

/**
 * ffz - find first zero bit in word
 * @word: The word to search

@@ -50,33 +39,6 @@ static inline unsigned long ffz(unsigned long word)
        return __builtin_ctzl(~word);
}

/**
 * __fls - find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
        return (sizeof(word) * 8) - 1 - __builtin_clzl(word);
}

/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static inline int ffs(int x)
{
        return __builtin_ffs(x);
}

static inline int fls64(__u64 w)
{
        return (sizeof(__u64) * 8) - __builtin_clzll(w);

@@ -118,6 +80,9 @@ static inline unsigned long __arch_hweight64(__u64 w)
        return __builtin_popcountll(w);
}

#include <asm-generic/bitops/builtin-__ffs.h>
#include <asm-generic/bitops/builtin-__fls.h>
#include <asm-generic/bitops/builtin-ffs.h>
#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/find.h>

@@ -16,7 +16,7 @@
#define _ASM_TILE_BITOPS_32_H

#include <linux/compiler.h>
#include <linux/atomic.h>
#include <asm/barrier.h>

/* Tile-specific routines to support <asm/bitops.h>. */
unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);

@@ -16,7 +16,7 @@
#define _ASM_TILE_BITOPS_64_H

#include <linux/compiler.h>
#include <linux/atomic.h>
#include <asm/cmpxchg.h>

/* See <asm/bitops.h> for API comments. */

@@ -44,8 +44,7 @@ static inline void change_bit(unsigned nr, volatile unsigned long *addr)
        oldval = *addr;
        do {
                guess = oldval;
                oldval = atomic64_cmpxchg((atomic64_t *)addr,
                                          guess, guess ^ mask);
                oldval = cmpxchg(addr, guess, guess ^ mask);
        } while (guess != oldval);
}

@@ -90,8 +89,7 @@ static inline int test_and_change_bit(unsigned nr,
        oldval = *addr;
        do {
                guess = oldval;
                oldval = atomic64_cmpxchg((atomic64_t *)addr,
                                          guess, guess ^ mask);
                oldval = cmpxchg(addr, guess, guess ^ mask);
        } while (guess != oldval);
        return (oldval & mask) != 0;
}
@@ -49,9 +49,16 @@
#define __read_mostly __attribute__((__section__(".data..read_mostly")))

/*
 * Attribute for data that is kept read/write coherent until the end of
 * initialization, then bumped to read/only incoherent for performance.
 * Originally we used small TLB pages for kernel data and grouped some
 * things together as "write once", enforcing the property at the end
 * of initialization by making those pages read-only and non-coherent.
 * This allowed better cache utilization since cache inclusion did not
 * need to be maintained. However, to do this requires an extra TLB
 * entry, which on balance is more of a performance hit than the
 * non-coherence is a performance gain, so we now just make "read
 * mostly" and "write once" be synonyms. We keep the attribute
 * separate in case we change our minds at a future date.
 */
#define __write_once __attribute__((__section__(".w1data")))
#define __write_once __read_mostly

#endif /* _ASM_TILE_CACHE_H */
@@ -75,23 +75,6 @@ static inline void copy_to_user_page(struct vm_area_struct *vma,
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
        memcpy((dst), (src), (len))

/*
 * Invalidate a VA range; pads to L2 cacheline boundaries.
 *
 * Note that on TILE64, __inv_buffer() actually flushes modified
 * cache lines in addition to invalidating them, i.e., it's the
 * same as __finv_buffer().
 */
static inline void __inv_buffer(void *buffer, size_t size)
{
        char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
        char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
        while (next < finish) {
                __insn_inv(next);
                next += CHIP_INV_STRIDE();
        }
}

/* Flush a VA range; pads to L2 cacheline boundaries. */
static inline void __flush_buffer(void *buffer, size_t size)
{

@@ -115,13 +98,6 @@ static inline void __finv_buffer(void *buffer, size_t size)
}


/* Invalidate a VA range and wait for it to be complete. */
static inline void inv_buffer(void *buffer, size_t size)
{
        __inv_buffer(buffer, size);
        mb();
}

/*
 * Flush a locally-homecached VA range and wait for the evicted
 * cachelines to hit memory.

@@ -142,6 +118,26 @@ static inline void finv_buffer_local(void *buffer, size_t size)
        mb_incoherent();
}

#ifdef __tilepro__
/* Invalidate a VA range; pads to L2 cacheline boundaries. */
static inline void __inv_buffer(void *buffer, size_t size)
{
        char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
        char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
        while (next < finish) {
                __insn_inv(next);
                next += CHIP_INV_STRIDE();
        }
}

/* Invalidate a VA range and wait for it to be complete. */
static inline void inv_buffer(void *buffer, size_t size)
{
        __inv_buffer(buffer, size);
        mb();
}
#endif

/*
 * Flush and invalidate a VA range that is homed remotely, waiting
 * until the memory controller holds the flushed values. If "hfh" is
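
The cache-line padding these helpers rely on is worth spelling out. As a
self-contained sketch (LINE = 64 is an illustrative stand-in for
L2_CACHE_BYTES, and the real loop issues __insn_inv() where the comment sits):

        #include <stddef.h>

        #define LINE 64UL

        static void walk_lines(void *buffer, size_t size)
        {
                /* "& -LINE" rounds down: two's-complement negation of a
                 * power of two is a mask of all ones above the line bits. */
                char *next = (char *)((unsigned long)buffer & -LINE);
                /* Round the end up so partially covered lines are hit too. */
                char *finish = (char *)(((unsigned long)buffer + size
                                         + LINE - 1) & -LINE);

                while (next < finish) {
                        /* the real code issues __insn_inv(next) here */
                        next += LINE;
                }
        }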
@@ -20,53 +20,108 @@

#ifndef __ASSEMBLY__

/* Nonexistent functions intended to cause link errors. */
extern unsigned long __xchg_called_with_bad_pointer(void);
extern unsigned long __cmpxchg_called_with_bad_pointer(void);
#include <asm/barrier.h>

#define xchg(ptr, x) \
/* Nonexistent functions intended to cause compile errors. */
extern void __xchg_called_with_bad_pointer(void)
        __compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_called_with_bad_pointer(void)
        __compiletime_error("Bad argument size for cmpxchg");

#ifndef __tilegx__

/* Note the _atomic_xxx() routines include a final mb(). */
int _atomic_xchg(int *ptr, int n);
int _atomic_xchg_add(int *v, int i);
int _atomic_xchg_add_unless(int *v, int a, int u);
int _atomic_cmpxchg(int *ptr, int o, int n);
u64 _atomic64_xchg(u64 *v, u64 n);
u64 _atomic64_xchg_add(u64 *v, u64 i);
u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u);
u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);

#define xchg(ptr, n) \
        ({ \
                if (sizeof(*(ptr)) != 4) \
                        __xchg_called_with_bad_pointer(); \
                smp_mb(); \
                (typeof(*(ptr)))_atomic_xchg((int *)(ptr), (int)(n)); \
        })

#define cmpxchg(ptr, o, n) \
        ({ \
                if (sizeof(*(ptr)) != 4) \
                        __cmpxchg_called_with_bad_pointer(); \
                smp_mb(); \
                (typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \
        })

#define xchg64(ptr, n) \
        ({ \
                if (sizeof(*(ptr)) != 8) \
                        __xchg_called_with_bad_pointer(); \
                smp_mb(); \
                (typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n)); \
        })

#define cmpxchg64(ptr, o, n) \
        ({ \
                if (sizeof(*(ptr)) != 8) \
                        __cmpxchg_called_with_bad_pointer(); \
                smp_mb(); \
                (typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \
        })

#else

#define xchg(ptr, n) \
        ({ \
                typeof(*(ptr)) __x; \
                smp_mb(); \
                switch (sizeof(*(ptr))) { \
                case 4: \
                        __x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \
                                (atomic_t *)(ptr), \
                                (u32)(typeof((x)-(x)))(x)); \
                        __x = (typeof(__x))(unsigned long) \
                                __insn_exch4((ptr), (u32)(unsigned long)(n)); \
                        break; \
                case 8: \
                        __x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \
                                (atomic64_t *)(ptr), \
                                (u64)(typeof((x)-(x)))(x)); \
                        __x = (typeof(__x)) \
                                __insn_exch((ptr), (unsigned long)(n)); \
                        break; \
                default: \
                        __xchg_called_with_bad_pointer(); \
                        break; \
                } \
                smp_mb(); \
                __x; \
        })

#define cmpxchg(ptr, o, n) \
        ({ \
                typeof(*(ptr)) __x; \
                __insn_mtspr(SPR_CMPEXCH_VALUE, (unsigned long)(o)); \
                smp_mb(); \
                switch (sizeof(*(ptr))) { \
                case 4: \
                        __x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \
                                (atomic_t *)(ptr), \
                                (u32)(typeof((o)-(o)))(o), \
                                (u32)(typeof((n)-(n)))(n)); \
                        __x = (typeof(__x))(unsigned long) \
                                __insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \
                        break; \
                case 8: \
                        __x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \
                                (atomic64_t *)(ptr), \
                                (u64)(typeof((o)-(o)))(o), \
                                (u64)(typeof((n)-(n)))(n)); \
                        __x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \
                        break; \
                default: \
                        __cmpxchg_called_with_bad_pointer(); \
                        break; \
                } \
                smp_mb(); \
                __x; \
        })

#define tas(ptr) (xchg((ptr), 1))
#define xchg64 xchg
#define cmpxchg64 cmpxchg

#endif

#define tas(ptr) xchg((ptr), 1)

#endif /* __ASSEMBLY__ */

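
The switch from link errors to __compiletime_error() deserves a note. A minimal
userspace sketch of the same technique (bad_xchg_size is a hypothetical name,
and the GCC "error" attribute plays the role of __compiletime_error): declare a
function the compiler refuses to emit a call to, then invoke it from a branch
that is dead whenever sizeof(*(ptr)) is supported. With optimization enabled
the dead branch is deleted and the build succeeds; a bad pointer size leaves
the call live and fails with a descriptive message rather than a cryptic link
failure.

        extern void bad_xchg_size(void)
                __attribute__((error("Bad argument size for xchg")));

        #define xchg32_sketch(ptr, n) \
                ({ \
                        if (sizeof(*(ptr)) != 4) \
                                bad_xchg_size(); /* compile-time trap */ \
                        __atomic_exchange_n((ptr), (n), __ATOMIC_SEQ_CST); \
                })

Note the trick depends on dead-code elimination, so it only diagnoses at -O1
and above; the kernel always builds with optimization, so that is safe there.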
@@ -23,7 +23,10 @@ struct dev_archdata {
        /* Offset of the DMA address from the PA. */
        dma_addr_t              dma_offset;

        /* Highest DMA address that can be generated by this device. */
        /*
         * Highest DMA address that can be generated by devices that
         * have limited DMA capability, i.e. non 64-bit capable.
         */
        dma_addr_t              max_direct_dma_addr;
};

@@ -20,9 +20,14 @@
#include <linux/cache.h>
#include <linux/io.h>

#ifdef __tilegx__
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
#endif

extern struct dma_map_ops *tile_dma_map_ops;
extern struct dma_map_ops *gx_pci_dma_map_ops;
extern struct dma_map_ops *gx_legacy_pci_dma_map_ops;
extern struct dma_map_ops *gx_hybrid_pci_dma_map_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{

@@ -44,12 +49,12 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
        return paddr + get_dma_offset(dev);
        return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
        return daddr - get_dma_offset(dev);
        return daddr;
}

static inline void dma_mark_clean(void *addr, size_t size) {}

@@ -87,11 +92,19 @@ dma_set_mask(struct device *dev, u64 mask)
{
        struct dma_map_ops *dma_ops = get_dma_ops(dev);

        /* Handle legacy PCI devices with limited memory addressability. */
        if ((dma_ops == gx_pci_dma_map_ops) && (mask <= DMA_BIT_MASK(32))) {
                set_dma_ops(dev, gx_legacy_pci_dma_map_ops);
                set_dma_offset(dev, 0);
                if (mask > dev->archdata.max_direct_dma_addr)
        /*
         * For PCI devices with 64-bit DMA addressing capability, promote
         * the dma_ops to hybrid, with the consistent memory DMA space limited
         * to 32-bit. For 32-bit capable devices, limit the streaming DMA
         * address range to max_direct_dma_addr.
         */
        if (dma_ops == gx_pci_dma_map_ops ||
            dma_ops == gx_hybrid_pci_dma_map_ops ||
            dma_ops == gx_legacy_pci_dma_map_ops) {
                if (mask == DMA_BIT_MASK(64) &&
                    dma_ops == gx_legacy_pci_dma_map_ops)
                        set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
                else if (mask > dev->archdata.max_direct_dma_addr)
                        mask = dev->archdata.max_direct_dma_addr;
        }
@@ -30,7 +30,6 @@ typedef unsigned long elf_greg_t;
#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];

#define EM_TILE64  187
#define EM_TILEPRO 188
#define EM_TILEGX  191

@@ -132,6 +131,15 @@ extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
                                       int executable_stack);
#define ARCH_DLINFO \
do { \
        NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
} while (0)

struct mm_struct;
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk

#ifdef CONFIG_COMPAT

#define COMPAT_ELF_PLATFORM "tilegx-m32"

@@ -78,14 +78,6 @@ enum fixed_addresses {
#endif
};

extern void __set_fixmap(enum fixed_addresses idx,
                         unsigned long phys, pgprot_t flags);

#define set_fixmap(idx, phys) \
                __set_fixmap(idx, phys, PAGE_KERNEL)
#define clear_fixmap(idx) \
                __set_fixmap(idx, 0, __pgprot(0))

#define __FIXADDR_SIZE  (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
#define __FIXADDR_BOOT_SIZE     (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START           (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_SIZE)

@@ -15,6 +15,26 @@
#ifndef _ASM_TILE_FTRACE_H
#define _ASM_TILE_FTRACE_H

/* empty */
#ifdef CONFIG_FUNCTION_TRACER

#define MCOUNT_ADDR ((unsigned long)(__mcount))
#define MCOUNT_INSN_SIZE 8 /* sizeof mcount call */

#ifndef __ASSEMBLY__
extern void __mcount(void);

#ifdef CONFIG_DYNAMIC_FTRACE
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
        return addr;
}

struct dyn_arch_ftrace {
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#endif /* __ASSEMBLY__ */

#endif /* CONFIG_FUNCTION_TRACER */

#endif /* _ASM_TILE_FTRACE_H */

@@ -43,6 +43,7 @@
        ".pushsection .fixup,\"ax\"\n" \
        "0: { movei %0, %5; j 9f }\n" \
        ".section __ex_table,\"a\"\n" \
        ".align 8\n" \
        ".quad 1b, 0b\n" \
        ".popsection\n" \
        "9:" \

@@ -33,8 +33,7 @@ struct zone;

/*
 * Is this page immutable (unwritable) and thus able to be cached more
 * widely than would otherwise be possible? On tile64 this means we
 * mark the PTE to cache locally; on tilepro it means we have "nc" set.
 * widely than would otherwise be possible? This means we have "nc" set.
 */
#define PAGE_HOME_IMMUTABLE -2

@@ -44,16 +43,8 @@ struct zone;
 */
#define PAGE_HOME_INCOHERENT -3

#if CHIP_HAS_CBOX_HOME_MAP()
/* Home for the page is distributed via hash-for-home. */
#define PAGE_HOME_HASH -4
#endif

/* Homing is unknown or unspecified. Not valid for page_home(). */
#define PAGE_HOME_UNKNOWN -5

/* Home on the current cpu. Not valid for page_home(). */
#define PAGE_HOME_HERE -6

/* Support wrapper to use instead of explicit hv_flush_remote(). */
extern void flush_remote(unsigned long cache_pfn, unsigned long cache_length,
@@ -19,7 +19,8 @@
#include <linux/bug.h>
#include <asm/page.h>

#define IO_SPACE_LIMIT 0xfffffffful
/* Maximum PCI I/O space address supported. */
#define IO_SPACE_LIMIT 0xffffffff

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem

@@ -254,7 +255,7 @@ static inline void writeq(u64 val, unsigned long addr)

static inline void memset_io(volatile void *dst, int val, size_t len)
{
        int x;
        size_t x;
        BUG_ON((unsigned long)dst & 0x3);
        val = (val & 0xff) * 0x01010101;
        for (x = 0; x < len; x += 4)

@@ -264,7 +265,7 @@ static inline void memset_io(volatile void *dst, int val, size_t len)
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
                                 size_t len)
{
        int x;
        size_t x;
        BUG_ON((unsigned long)src & 0x3);
        for (x = 0; x < len; x += 4)
                *(u32 *)(dst + x) = readl(src + x);

@@ -273,7 +274,7 @@ static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
                               size_t len)
{
        int x;
        size_t x;
        BUG_ON((unsigned long)dst & 0x3);
        for (x = 0; x < len; x += 4)
                writel(*(u32 *)(src + x), dst + x);

@@ -281,8 +282,108 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,

#endif

#if CHIP_HAS_MMIO() && defined(CONFIG_TILE_PCI_IO)

static inline u8 inb(unsigned long addr)
{
        return readb((volatile void __iomem *) addr);
}

static inline u16 inw(unsigned long addr)
{
        return readw((volatile void __iomem *) addr);
}

static inline u32 inl(unsigned long addr)
{
        return readl((volatile void __iomem *) addr);
}

static inline void outb(u8 b, unsigned long addr)
{
        writeb(b, (volatile void __iomem *) addr);
}

static inline void outw(u16 b, unsigned long addr)
{
        writew(b, (volatile void __iomem *) addr);
}

static inline void outl(u32 b, unsigned long addr)
{
        writel(b, (volatile void __iomem *) addr);
}

static inline void insb(unsigned long addr, void *buffer, int count)
{
        if (count) {
                u8 *buf = buffer;
                do {
                        u8 x = inb(addr);
                        *buf++ = x;
                } while (--count);
        }
}

static inline void insw(unsigned long addr, void *buffer, int count)
{
        if (count) {
                u16 *buf = buffer;
                do {
                        u16 x = inw(addr);
                        *buf++ = x;
                } while (--count);
        }
}

static inline void insl(unsigned long addr, void *buffer, int count)
{
        if (count) {
                u32 *buf = buffer;
                do {
                        u32 x = inl(addr);
                        *buf++ = x;
                } while (--count);
        }
}

static inline void outsb(unsigned long addr, const void *buffer, int count)
{
        if (count) {
                const u8 *buf = buffer;
                do {
                        outb(*buf++, addr);
                } while (--count);
        }
}

static inline void outsw(unsigned long addr, const void *buffer, int count)
{
        if (count) {
                const u16 *buf = buffer;
                do {
                        outw(*buf++, addr);
                } while (--count);
        }
}

static inline void outsl(unsigned long addr, const void *buffer, int count)
{
        if (count) {
                const u32 *buf = buffer;
                do {
                        outl(*buf++, addr);
                } while (--count);
        }
}

extern void __iomem *ioport_map(unsigned long port, unsigned int len);
extern void ioport_unmap(void __iomem *addr);

#else

/*
 * The Tile architecture does not support IOPORT, even with PCI.
 * The TilePro architecture does not support IOPORT, even with PCI.
 * Unfortunately we can't yet simply not declare these methods,
 * since some generic code that compiles into the kernel, but
 * we never run, uses them unconditionally.

@@ -290,7 +391,12 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,

static inline long ioport_panic(void)
{
#ifdef __tilegx__
        panic("PCI IO space support is disabled. Configure the kernel with"
              " CONFIG_TILE_PCI_IO to enable it");
#else
        panic("inb/outb and friends do not exist on tile");
#endif
        return 0;
}

@@ -335,13 +441,6 @@ static inline void outl(u32 b, unsigned long addr)
        ioport_panic();
}

#define inb_p(addr)     inb(addr)
#define inw_p(addr)     inw(addr)
#define inl_p(addr)     inl(addr)
#define outb_p(x, addr) outb((x), (addr))
#define outw_p(x, addr) outw((x), (addr))
#define outl_p(x, addr) outl((x), (addr))

static inline void insb(unsigned long addr, void *buffer, int count)
{
        ioport_panic();

@@ -372,6 +471,15 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
        ioport_panic();
}

#endif /* CHIP_HAS_MMIO() && defined(CONFIG_TILE_PCI_IO) */

#define inb_p(addr)     inb(addr)
#define inw_p(addr)     inw(addr)
#define inl_p(addr)     inl(addr)
#define outb_p(x, addr) outb((x), (addr))
#define outw_p(x, addr) outw((x), (addr))
#define outl_p(x, addr) outl((x), (addr))

#define ioread16be(addr)        be16_to_cpu(ioread16(addr))
#define ioread32be(addr)        be32_to_cpu(ioread32(addr))
#define iowrite16be(v, addr)    iowrite16(be16_to_cpu(v), (addr))
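
One line in memset_io() is easy to read past: val = (val & 0xff) * 0x01010101
replicates the fill byte into every byte lane of a 32-bit word, which is what
lets the loop store one word per iteration instead of one byte. A trivial
standalone sketch of the arithmetic:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t val = 0xab;
                /* each 0x01 lane in the multiplier receives one copy */
                uint32_t word = (val & 0xff) * 0x01010101u;

                printf("0x%08x\n", word);       /* prints 0xabababab */
                return 0;
        }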
@@ -124,6 +124,12 @@
DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)

#ifdef CONFIG_DEBUG_PREEMPT
/* Due to inclusion issues, we can't rely on <linux/smp.h> here. */
extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#endif

/* Disable interrupts. */
#define arch_local_irq_disable() \
        interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS)

@@ -132,9 +138,18 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
#define arch_local_irq_disable_all() \
        interrupt_mask_set_mask(-1ULL)

/*
 * Read the set of maskable interrupts.
 * We avoid the preemption warning here via __this_cpu_ptr since even
 * if irqs are already enabled, it's harmless to read the wrong cpu's
 * enabled mask.
 */
#define arch_local_irqs_enabled() \
        (*__this_cpu_ptr(&interrupts_enabled_mask))

/* Re-enable all maskable interrupts. */
#define arch_local_irq_enable() \
        interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask))
        interrupt_mask_reset_mask(arch_local_irqs_enabled())

/* Disable or enable interrupts based on flag argument. */
#define arch_local_irq_restore(disabled) do { \

@@ -161,7 +176,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);

/* Prevent the given interrupt from being enabled next time we enable irqs. */
#define arch_local_irq_mask(interrupt) \
        (__get_cpu_var(interrupts_enabled_mask) &= ~(1ULL << (interrupt)))
        this_cpu_and(interrupts_enabled_mask, ~(1ULL << (interrupt)))

/* Prevent the given interrupt from being enabled immediately. */
#define arch_local_irq_mask_now(interrupt) do { \

@@ -171,7 +186,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);

/* Allow the given interrupt to be enabled next time we enable irqs. */
#define arch_local_irq_unmask(interrupt) \
        (__get_cpu_var(interrupts_enabled_mask) |= (1ULL << (interrupt)))
        this_cpu_or(interrupts_enabled_mask, (1ULL << (interrupt)))

/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
#define arch_local_irq_unmask_now(interrupt) do { \
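
The mask/unmask rework above is just single-bit AND/OR on the per-cpu enabled
mask, routed through this_cpu_and()/this_cpu_or() so the read-modify-write hits
the owning cpu's copy without tripping the preemption checks. A userspace
sketch of the underlying bit arithmetic (interrupts_enabled_mask here is an
ordinary global standing in for the per-cpu variable):

        #include <stdint.h>

        static uint64_t interrupts_enabled_mask;

        static void irq_mask_sketch(int interrupt)
        {
                /* defer this interrupt past the next irq enable */
                interrupts_enabled_mask &= ~(1ULL << interrupt);
        }

        static void irq_unmask_sketch(int interrupt)
        {
                /* allow it again at the next irq enable */
                interrupts_enabled_mask |= 1ULL << interrupt;
        }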
@@ -1,5 +1,5 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License

@@ -12,7 +12,17 @@
 *   more details.
 */

#ifndef _ASM_TILE_HW_IRQ_H
#define _ASM_TILE_HW_IRQ_H
#ifndef _ASM_TILE_KDEBUG_H
#define _ASM_TILE_KDEBUG_H

#endif /* _ASM_TILE_HW_IRQ_H */
#include <linux/notifier.h>

enum die_val {
        DIE_OOPS = 1,
        DIE_BREAK,
        DIE_SSTEPBP,
        DIE_PAGE_FAULT,
        DIE_COMPILED_BPT
};

#endif /* _ASM_TILE_KDEBUG_H */
@@ -0,0 +1,71 @@
/*
 * Copyright 2013 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT. See the GNU General Public License for
 *   more details.
 *
 * TILE-Gx KGDB support.
 */

#ifndef __TILE_KGDB_H__
#define __TILE_KGDB_H__

#include <linux/kdebug.h>
#include <arch/opcode.h>

#define GDB_SIZEOF_REG          sizeof(unsigned long)

/*
 * TILE-Gx gdb is expecting the following register layout:
 * 56 GPRs (R0 - R52, TP, SP, LR), 8 special GPRs (networks and ZERO),
 * plus the PC and the faultnum.
 *
 * Even though the kernel does not use the 8 special GPRs, they need to
 * be present in the registers sent for correct processing in the
 * host-side gdb.
 */
#define DBG_MAX_REG_NUM         (56+8+2)
#define NUMREGBYTES             (DBG_MAX_REG_NUM * GDB_SIZEOF_REG)

/*
 * BUFMAX defines the maximum number of characters in the inbound/outbound
 * buffers; at least NUMREGBYTES*2 are needed for register packets, and a
 * longer buffer is needed to list all threads.
 */
#define BUFMAX                  2048

#define BREAK_INSTR_SIZE        TILEGX_BUNDLE_SIZE_IN_BYTES

/*
 * Require a cache flush to set/clear a software breakpoint or write memory.
 */
#define CACHE_FLUSH_IS_SAFE     1

/*
 * The compiled-in breakpoint instruction can be used to "break" into
 * the debugger via the magic system request key (sysrq-G).
 */
static tile_bundle_bits compiled_bpt = TILEGX_BPT_BUNDLE | DIE_COMPILED_BPT;

enum tilegx_regnum {
        TILEGX_PC_REGNUM = TREG_LAST_GPR + 9,
        TILEGX_FAULTNUM_REGNUM,
};

/*
 * Generate a breakpoint exception to "break" into the debugger.
 */
static inline void arch_kgdb_breakpoint(void)
{
        asm volatile (".quad %0\n\t"
                      ::""(compiled_bpt));
}

#endif /* __TILE_KGDB_H__ */
@@ -0,0 +1,79 @@
/*
 * arch/tile/include/asm/kprobes.h
 *
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT. See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_KPROBES_H
#define _ASM_TILE_KPROBES_H

#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>

#include <arch/opcode.h>

#define __ARCH_WANT_KPROBES_INSN_SLOT
#define MAX_INSN_SIZE                   2

#define kretprobe_blacklist_size 0

typedef tile_bundle_bits kprobe_opcode_t;

#define flush_insn_slot(p) \
        flush_icache_range((unsigned long)p->addr, \
                           (unsigned long)p->addr + \
                           (MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe;

/* Architecture specific copy of original instruction. */
struct arch_specific_insn {
        kprobe_opcode_t *insn;
};

struct prev_kprobe {
        struct kprobe *kp;
        unsigned long status;
        unsigned long saved_pc;
};

#define MAX_JPROBES_STACK_SIZE 128
#define MAX_JPROBES_STACK_ADDR \
        (((unsigned long)current_thread_info()) + THREAD_SIZE - 32 \
         - sizeof(struct pt_regs))

#define MIN_JPROBES_STACK_SIZE(ADDR) \
        ((((ADDR) + MAX_JPROBES_STACK_SIZE) > MAX_JPROBES_STACK_ADDR) \
         ? MAX_JPROBES_STACK_ADDR - (ADDR) \
         : MAX_JPROBES_STACK_SIZE)

/* per-cpu kprobe control block. */
struct kprobe_ctlblk {
        unsigned long kprobe_status;
        unsigned long kprobe_saved_pc;
        unsigned long jprobe_saved_sp;
        struct prev_kprobe prev_kprobe;
        struct pt_regs jprobe_saved_regs;
        char jprobes_stack[MAX_JPROBES_STACK_SIZE];
};

extern tile_bundle_bits breakpoint2_insn;
extern tile_bundle_bits breakpoint_insn;

void arch_remove_kprobe(struct kprobe *);

extern int kprobe_exceptions_notify(struct notifier_block *self,
                                    unsigned long val, void *data);

#endif /* _ASM_TILE_KPROBES_H */
@@ -22,6 +22,7 @@ struct mm_context {
         * semaphore but atomically, but it is conservatively set.
         */
        unsigned long priority_cached;
        unsigned long vdso_base;
};

typedef struct mm_context mm_context_t;

@@ -45,7 +45,7 @@ static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)

static inline void install_page_table(pgd_t *pgdir, int asid)
{
        pte_t *ptep = virt_to_pte(NULL, (unsigned long)pgdir);
        pte_t *ptep = virt_to_kpte((unsigned long)pgdir);
        __install_page_table(pgdir, asid, *ptep);
}

@@ -42,7 +42,7 @@ static inline int pfn_to_nid(unsigned long pfn)

#define kern_addr_valid(kaddr)  virt_addr_valid((void *)kaddr)

static inline int pfn_valid(int pfn)
static inline int pfn_valid(unsigned long pfn)
{
        int nid = pfn_to_nid(pfn);
@@ -38,6 +38,12 @@
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define HPAGE_MASK      (~(HPAGE_SIZE - 1))

/*
 * We do define AT_SYSINFO_EHDR to support vDSO,
 * but don't use the gate mechanism.
 */
#define __HAVE_ARCH_GATE_AREA           1

/*
 * If the Kconfig doesn't specify, set a maximum zone order that
 * is enough so that we can create huge pages from small pages given

@@ -142,8 +148,12 @@ static inline __attribute_const__ int get_order(unsigned long size)
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif

/* Allow overriding how much VA or PA the kernel will use. */
#define MAX_PA_WIDTH CHIP_PA_WIDTH()
#define MAX_VA_WIDTH CHIP_VA_WIDTH()

/* Each memory controller has PAs distinct in their high bits. */
#define NR_PA_HIGHBIT_SHIFT (CHIP_PA_WIDTH() - CHIP_LOG_NUM_MSHIMS())
#define NR_PA_HIGHBIT_SHIFT (MAX_PA_WIDTH - CHIP_LOG_NUM_MSHIMS())
#define NR_PA_HIGHBIT_VALUES (1 << CHIP_LOG_NUM_MSHIMS())
#define __pa_to_highbits(pa) ((phys_addr_t)(pa) >> NR_PA_HIGHBIT_SHIFT)
#define __pfn_to_highbits(pfn) ((pfn) >> (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT))

@@ -154,7 +164,7 @@ static inline __attribute_const__ int get_order(unsigned long size)
 * We reserve the lower half of memory for user-space programs, and the
 * upper half for system code. We re-map all of physical memory in the
 * upper half, which takes a quarter of our VA space. Then we have
 * the vmalloc regions. The supervisor code lives at 0xfffffff700000000,
 * the vmalloc regions. The supervisor code lives at the highest address,
 * with the hypervisor above that.
 *
 * Loadable kernel modules are placed immediately after the static

@@ -166,26 +176,19 @@ static inline __attribute_const__ int get_order(unsigned long size)
 * Similarly, for now we don't play any struct page mapping games.
 */

#if CHIP_PA_WIDTH() + 2 > CHIP_VA_WIDTH()
#if MAX_PA_WIDTH + 2 > MAX_VA_WIDTH
# error Too much PA to map with the VA available!
#endif
#define HALF_VA_SPACE           (_AC(1, UL) << (CHIP_VA_WIDTH() - 1))

#define MEM_LOW_END             (HALF_VA_SPACE - 1)         /* low half */
#define MEM_HIGH_START          (-HALF_VA_SPACE)            /* high half */
#define PAGE_OFFSET             MEM_HIGH_START
#define FIXADDR_BASE            _AC(0xfffffff400000000, UL) /* 4 GB */
#define FIXADDR_TOP             _AC(0xfffffff500000000, UL) /* 4 GB */
#define PAGE_OFFSET             (-(_AC(1, UL) << (MAX_VA_WIDTH - 1)))
#define KERNEL_HIGH_VADDR       _AC(0xfffffff800000000, UL)  /* high 32GB */
#define FIXADDR_BASE            (KERNEL_HIGH_VADDR - 0x400000000) /* 4 GB */
#define FIXADDR_TOP             (KERNEL_HIGH_VADDR - 0x300000000) /* 4 GB */
#define _VMALLOC_START          FIXADDR_TOP
#define HUGE_VMAP_BASE          _AC(0xfffffff600000000, UL) /* 4 GB */
#define MEM_SV_START            _AC(0xfffffff700000000, UL) /* 256 MB */
#define MEM_SV_INTRPT           MEM_SV_START
#define MEM_MODULE_START        _AC(0xfffffff710000000, UL) /* 256 MB */
#define HUGE_VMAP_BASE          (KERNEL_HIGH_VADDR - 0x200000000) /* 4 GB */
#define MEM_SV_START            (KERNEL_HIGH_VADDR - 0x100000000) /* 256 MB */
#define MEM_MODULE_START        (MEM_SV_START + (256*1024*1024)) /* 256 MB */
#define MEM_MODULE_END          (MEM_MODULE_START + (256*1024*1024))
#define MEM_HV_START            _AC(0xfffffff800000000, UL) /* 32 GB */

/* Highest DTLB address we will use */
#define KERNEL_HIGH_VADDR       MEM_SV_START

#else /* !__tilegx__ */

@@ -207,25 +210,18 @@ static inline __attribute_const__ int get_order(unsigned long size)
 * values, and after that, we show "typical" values, since the actual
 * addresses depend on kernel #defines.
 *
 * MEM_HV_INTRPT                   0xfe000000
 * MEM_SV_INTRPT (kernel code)     0xfd000000
 * MEM_HV_START                    0xfe000000
 * MEM_SV_START  (kernel code)     0xfd000000
 * MEM_USER_INTRPT (user vector)   0xfc000000
 * FIX_KMAP_xxx                    0xf8000000 (via NR_CPUS * KM_TYPE_NR)
 * PKMAP_BASE                      0xf7000000 (via LAST_PKMAP)
 * HUGE_VMAP                       0xf3000000 (via CONFIG_NR_HUGE_VMAPS)
 * VMALLOC_START                   0xf0000000 (via __VMALLOC_RESERVE)
 * FIX_KMAP_xxx                    0xfa000000 (via NR_CPUS * KM_TYPE_NR)
 * PKMAP_BASE                      0xf9000000 (via LAST_PKMAP)
 * VMALLOC_START                   0xf7000000 (via VMALLOC_RESERVE)
 * mapped LOWMEM                   0xc0000000
 */

#define MEM_USER_INTRPT         _AC(0xfc000000, UL)
#if CONFIG_KERNEL_PL == 1
#define MEM_SV_INTRPT           _AC(0xfd000000, UL)
#define MEM_HV_INTRPT           _AC(0xfe000000, UL)
#else
#define MEM_GUEST_INTRPT        _AC(0xfd000000, UL)
#define MEM_SV_INTRPT           _AC(0xfe000000, UL)
#define MEM_HV_INTRPT           _AC(0xff000000, UL)
#endif
#define MEM_SV_START            _AC(0xfd000000, UL)
#define MEM_HV_START            _AC(0xfe000000, UL)

#define INTRPT_SIZE             0x4000

@@ -246,7 +242,7 @@ static inline __attribute_const__ int get_order(unsigned long size)

#endif /* __tilegx__ */

#ifndef __ASSEMBLY__
#if !defined(__ASSEMBLY__) && !defined(VDSO_BUILD)

#ifdef CONFIG_HIGHMEM

@@ -332,6 +328,7 @@ static inline int pfn_valid(unsigned long pfn)

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
extern pte_t *virt_to_kpte(unsigned long kaddr);

#endif /* !__ASSEMBLY__ */
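
The page.h rework replaces hard-coded 0xfffffffXXXXXXXXX constants with values
derived from the VA width and from offsets below KERNEL_HIGH_VADDR. A
standalone sketch of the arithmetic (taking MAX_VA_WIDTH = 42 purely for
illustration; the real value comes from CHIP_VA_WIDTH()):

        #include <stdio.h>

        int main(void)
        {
                const int max_va_width = 42;
                /* negating a power of two gives the top half of the VA space */
                unsigned long page_offset = -(1UL << (max_va_width - 1));
                unsigned long high = 0xfffffff800000000UL; /* KERNEL_HIGH_VADDR */

                printf("PAGE_OFFSET    = %#lx\n", page_offset);
                printf("FIXADDR_BASE   = %#lx\n", high - 0x400000000);
                printf("FIXADDR_TOP    = %#lx\n", high - 0x300000000);
                printf("HUGE_VMAP_BASE = %#lx\n", high - 0x200000000);
                printf("MEM_SV_START   = %#lx\n", high - 0x100000000);
                return 0;
        }

Each region is one 4 GB step below the next, so reshuffling the layout now
means editing a single anchor constant rather than five literals.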
@@ -17,7 +17,6 @@

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/numa.h>
#include <asm-generic/pci_iomap.h>

#ifndef __tilegx__

@@ -29,7 +28,6 @@ struct pci_controller {
        int index;              /* PCI domain number */
        struct pci_bus *root_bus;

        int first_busno;
        int last_busno;

        int hv_cfg_fd[2];       /* config{0,1} fds for this PCIe controller */

@@ -124,6 +122,11 @@ static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
 * the CPA plus TILE_PCI_MEM_MAP_BASE_OFFSET. To support 32-bit
 * devices, we create a separate map region that handles the low
 * 4GB.
 *
 * This design lets us avoid the "PCI hole" problem where the host bridge
 * won't pass DMA traffic with target addresses that happen to fall within the
 * BAR space. This enables us to use all the physical memory for DMA, instead
 * of wasting the same amount of physical memory as the BAR window size.
 */
#define TILE_PCI_MEM_MAP_BASE_OFFSET    (1ULL << CHIP_PA_WIDTH())

@@ -145,6 +148,10 @@ struct pci_controller {

        int pio_mem_index;      /* PIO region index for memory access */

#ifdef CONFIG_TILE_PCI_IO
        int pio_io_index;       /* PIO region index for I/O space access */
#endif

        /*
         * Mem-Map regions for all the memory controllers so that Linux can
         * map all of its physical memory space to the PCI bus.

@@ -154,6 +161,10 @@ struct pci_controller {
        int index;              /* PCI domain number */
        struct pci_bus *root_bus;

        /* PCI I/O space resource for this controller. */
        struct resource io_space;
        char io_space_name[32];

        /* PCI memory space resource for this controller. */
        struct resource mem_space;
        char mem_space_name[32];

@@ -166,13 +177,11 @@ struct pci_controller {

        /* Table that maps the INTx numbers to Linux irq numbers. */
        int irq_intx_table[4];

        /* Address ranges that are routed to this controller/bridge. */
        struct resource mem_resources[3];
};

extern struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES];
extern gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];
extern int num_trio_shims;

extern void pci_iounmap(struct pci_dev *dev, void __iomem *);

@@ -211,7 +220,8 @@ static inline int pcibios_assign_all_busses(void)
}

#define PCIBIOS_MIN_MEM         0
#define PCIBIOS_MIN_IO          0
/* Minimum PCI I/O address, starting at the page boundary. */
#define PCIBIOS_MIN_IO          PAGE_SIZE

/* Use any cpu for PCI. */
#define cpumask_of_pcibus(bus) cpu_online_mask
|
|
@ -84,10 +84,12 @@ extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */;
|
|||
/* We have no pmd or pud since we are strictly a two-level page table */
|
||||
#include <asm-generic/pgtable-nopmd.h>
|
||||
|
||||
static inline int pud_huge_page(pud_t pud) { return 0; }
|
||||
|
||||
/* We don't define any pgds for these addresses. */
|
||||
static inline int pgd_addr_invalid(unsigned long addr)
|
||||
{
|
||||
return addr >= MEM_HV_INTRPT;
|
||||
return addr >= MEM_HV_START;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@@ -63,6 +63,15 @@
/* We have no pud since we are a three-level page table. */
#include <asm-generic/pgtable-nopud.h>

/*
 * pmds are the same as pgds and ptes, so converting is a no-op.
 */
#define pmd_pte(pmd) (pmd)
#define pmdp_ptep(pmdp) (pmdp)
#define pte_pmd(pte) (pte)

#define pud_pte(pud) ((pud).pgd)

static inline int pud_none(pud_t pud)
{
        return pud_val(pud) == 0;

@@ -73,6 +82,11 @@ static inline int pud_present(pud_t pud)
        return pud_val(pud) & _PAGE_PRESENT;
}

static inline int pud_huge_page(pud_t pud)
{
        return pud_val(pud) & _PAGE_HUGE_PAGE;
}

#define pmd_ERROR(e) \
        pr_err("%s:%d: bad pmd 0x%016llx.\n", __FILE__, __LINE__, pmd_val(e))

@@ -89,6 +103,9 @@ static inline int pud_bad(pud_t pud)
/* Return the page-table frame number (ptfn) that a pud_t points at. */
#define pud_ptfn(pud) hv_pte_get_ptfn((pud).pgd)

/* Return the page frame number (pfn) that a pud_t points at. */
#define pud_pfn(pud) pte_pfn(pud_pte(pud))

/*
 * A given kernel pud_t maps to a kernel pmd_t table at a specific
 * virtual address. Since kernel pmd_t tables can be aligned at

@@ -123,8 +140,7 @@ static inline unsigned long pgd_addr_normalize(unsigned long addr)
/* We don't define any pgds for these addresses. */
static inline int pgd_addr_invalid(unsigned long addr)
{
        return addr >= MEM_HV_START ||
                (addr > MEM_LOW_END && addr < MEM_HIGH_START);
        return addr >= KERNEL_HIGH_VADDR || addr != pgd_addr_normalize(addr);
}

/*

@@ -152,13 +168,6 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
        return hv_pte(__insn_exch(&ptep->val, 0UL));
}

/*
 * pmds are the same as pgds and ptes, so converting is a no-op.
 */
#define pmd_pte(pmd) (pmd)
#define pmdp_ptep(pmdp) (pmdp)
#define pte_pmd(pte) (pte)

#endif /* __ASSEMBLY__ */

#endif /* _ASM_TILE_PGTABLE_64_H */
@ -15,6 +15,8 @@
|
|||
#ifndef _ASM_TILE_PROCESSOR_H
|
||||
#define _ASM_TILE_PROCESSOR_H
|
||||
|
||||
#include <arch/chip.h>
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
/*
|
||||
|
@ -25,7 +27,6 @@
|
|||
#include <asm/ptrace.h>
|
||||
#include <asm/percpu.h>
|
||||
|
||||
#include <arch/chip.h>
|
||||
#include <arch/spr_def.h>
|
||||
|
||||
struct task_struct;
|
||||
|
@ -110,18 +111,16 @@ struct thread_struct {
|
|||
unsigned long long interrupt_mask;
|
||||
/* User interrupt-control 0 state */
|
||||
unsigned long intctrl_0;
|
||||
#if CHIP_HAS_PROC_STATUS_SPR()
|
||||
/* Is this task currently doing a backtrace? */
|
||||
bool in_backtrace;
|
||||
/* Any other miscellaneous processor state bits */
|
||||
unsigned long proc_status;
|
||||
#endif
|
||||
#if !CHIP_HAS_FIXED_INTVEC_BASE()
|
||||
/* Interrupt base for PL0 interrupts */
|
||||
unsigned long interrupt_vector_base;
|
||||
#endif
|
||||
#if CHIP_HAS_TILE_RTF_HWM()
|
||||
/* Tile cache retry fifo high-water mark */
|
||||
unsigned long tile_rtf_hwm;
|
||||
#endif
|
||||
#if CHIP_HAS_DSTREAM_PF()
|
||||
/* Data stream prefetch control */
|
||||
unsigned long dstream_pf;
|
||||
|
@ -134,21 +133,16 @@ struct thread_struct {
|
|||
/* Async DMA TLB fault information */
|
||||
struct async_tlb dma_async_tlb;
|
||||
#endif
|
||||
#if CHIP_HAS_SN_PROC()
|
||||
/* Was static network processor when we were switched out? */
|
||||
int sn_proc_running;
|
||||
/* Async SNI TLB fault information */
|
||||
struct async_tlb sn_async_tlb;
|
||||
#endif
|
||||
};
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
/*
|
||||
* Start with "sp" this many bytes below the top of the kernel stack.
|
||||
* This preserves the invariant that a called function may write to *sp.
|
||||
* This allows us to be cache-aware when handling the initial save
|
||||
* of the pt_regs value to the stack.
|
||||
*/
|
||||
#define STACK_TOP_DELTA 8
|
||||
#define STACK_TOP_DELTA 64
|
||||
|
||||
/*
|
||||
* When entering the kernel via a fault, start with the top of the
|
||||
|
@ -164,7 +158,7 @@ struct thread_struct {
|
|||
#ifndef __ASSEMBLY__
|
||||
|
||||
#ifdef __tilegx__
|
||||
#define TASK_SIZE_MAX (MEM_LOW_END + 1)
|
||||
#define TASK_SIZE_MAX (_AC(1, UL) << (MAX_VA_WIDTH - 1))
|
||||
#else
|
||||
#define TASK_SIZE_MAX PAGE_OFFSET
|
||||
#endif
|
||||
|
@ -178,10 +172,10 @@ struct thread_struct {
|
|||
#define TASK_SIZE TASK_SIZE_MAX
|
||||
#endif
|
||||
|
||||
/* We provide a minimal "vdso" a la x86; just the sigreturn code for now. */
|
||||
#define VDSO_BASE (TASK_SIZE - PAGE_SIZE)
|
||||
#define VDSO_BASE ((unsigned long)current->active_mm->context.vdso_base)
|
||||
#define VDSO_SYM(x) (VDSO_BASE + (unsigned long)(x))

#define STACK_TOP VDSO_BASE
#define STACK_TOP TASK_SIZE

/* STACK_TOP_MAX is used temporarily in execve and should not check COMPAT. */
#define STACK_TOP_MAX TASK_SIZE_MAX

@@ -232,21 +226,28 @@ extern int do_work_pending(struct pt_regs *regs, u32 flags);
unsigned long get_wchan(struct task_struct *p);

/* Return initial ksp value for given task. */
#define task_ksp0(task) ((unsigned long)(task)->stack + THREAD_SIZE)
#define task_ksp0(task) \
        ((unsigned long)(task)->stack + THREAD_SIZE - STACK_TOP_DELTA)

/* Return some info about the user process TASK. */
#define KSTK_TOP(task) (task_ksp0(task) - STACK_TOP_DELTA)
#define task_pt_regs(task) \
        ((struct pt_regs *)(task_ksp0(task) - KSTK_PTREGS_GAP) - 1)
        ((struct pt_regs *)(task_ksp0(task) - KSTK_PTREGS_GAP) - 1)
#define current_pt_regs() \
        ((struct pt_regs *)((stack_pointer | (THREAD_SIZE - 1)) - \
         (KSTK_PTREGS_GAP - 1)) - 1)
        ((struct pt_regs *)((stack_pointer | (THREAD_SIZE - 1)) - \
         STACK_TOP_DELTA - (KSTK_PTREGS_GAP - 1)) - 1)
#define task_sp(task) (task_pt_regs(task)->sp)
#define task_pc(task) (task_pt_regs(task)->pc)
/* Aliases for pc and sp (used in fs/proc/array.c) */
#define KSTK_EIP(task) task_pc(task)
#define KSTK_ESP(task) task_sp(task)

/* Fine-grained unaligned JIT support */
#define GET_UNALIGN_CTL(tsk, adr) get_unalign_ctl((tsk), (adr))
#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))

extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);

/* Standard format for printing registers and other word-size data. */
#ifdef __tilegx__
# define REGFMT "0x%016lx"

@@ -275,7 +276,6 @@ extern char chip_model[64];
/* Data on which physical memory controller corresponds to which NUMA node. */
extern int node_controller[];

#if CHIP_HAS_CBOX_HOME_MAP()
/* Does the heap allocator return hash-for-home pages by default? */
extern int hash_default;

@@ -285,11 +285,6 @@ extern int kstack_hash;
/* Does MAP_ANONYMOUS return hash-for-home pages by default? */
#define uheap_hash hash_default

#else
#define hash_default 0
#define kstack_hash 0
#define uheap_hash 0
#endif

/* Are we using huge pages in the TLB for kernel data? */
extern int kdata_huge;

@@ -337,7 +332,6 @@ extern int kdata_huge;

/*
 * Provide symbolic constants for PLs.
 * Note that assembly code assumes that USER_PL is zero.
 */
#define USER_PL 0
#if CONFIG_KERNEL_PL == 2

@@ -346,20 +340,38 @@ extern int kdata_huge;
#define KERNEL_PL CONFIG_KERNEL_PL

/* SYSTEM_SAVE_K_0 holds the current cpu number ORed with ksp0. */
#define CPU_LOG_MASK_VALUE 12
#define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1)
#if CONFIG_NR_CPUS > CPU_MASK_VALUE
# error Too many cpus!
#ifdef __tilegx__
#define CPU_SHIFT 48
#if CHIP_VA_WIDTH() > CPU_SHIFT
# error Too many VA bits!
#endif
#define MAX_CPU_ID ((1 << (64 - CPU_SHIFT)) - 1)
#define raw_smp_processor_id() \
        ((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & CPU_MASK_VALUE)
        ((int)(__insn_mfspr(SPR_SYSTEM_SAVE_K_0) >> CPU_SHIFT))
#define get_current_ksp0() \
        (__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~CPU_MASK_VALUE)
        ((unsigned long)(((long)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) << \
         (64 - CPU_SHIFT)) >> (64 - CPU_SHIFT)))
#define next_current_ksp0(task) ({ \
        unsigned long __ksp0 = task_ksp0(task) & ((1UL << CPU_SHIFT) - 1); \
        unsigned long __cpu = (long)raw_smp_processor_id() << CPU_SHIFT; \
        __ksp0 | __cpu; \
})
#else
#define LOG2_NR_CPU_IDS 6
#define MAX_CPU_ID ((1 << LOG2_NR_CPU_IDS) - 1)
#define raw_smp_processor_id() \
        ((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & MAX_CPU_ID)
#define get_current_ksp0() \
        (__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~MAX_CPU_ID)
#define next_current_ksp0(task) ({ \
        unsigned long __ksp0 = task_ksp0(task); \
        int __cpu = raw_smp_processor_id(); \
        BUG_ON(__ksp0 & CPU_MASK_VALUE); \
        BUG_ON(__ksp0 & MAX_CPU_ID); \
        __ksp0 | __cpu; \
})
#endif
#if CONFIG_NR_CPUS > (MAX_CPU_ID + 1)
# error Too many cpus!
#endif

#endif /* _ASM_TILE_PROCESSOR_H */
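
For illustration, the tilegx packing that raw_smp_processor_id(), get_current_ksp0() and next_current_ksp0() rely on can be sketched in plain C (a minimal sketch; the cpu number and stack address below are made up, and 16 is simply 64 - CPU_SHIFT):

#include <stdio.h>

int main(void)
{
        unsigned long ksp0 = 0x0000f80012345000UL;  /* low 48 bits of a stack top */
        unsigned long spr  = (5UL << 48) | ksp0;    /* next_current_ksp0() for cpu 5 */

        int cpu   = (int)(spr >> 48);               /* raw_smp_processor_id() */
        long back = ((long)spr << 16) >> 16;        /* get_current_ksp0(): sign-extend 48 bits */

        printf("cpu=%d ksp0=%#lx\n", cpu, (unsigned long)back);
        return 0;
}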

@@ -33,12 +33,13 @@ typedef unsigned long pt_reg_t;

#ifndef __ASSEMBLY__

#define regs_return_value(regs) ((regs)->regs[0])
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
#define user_stack_pointer(regs) ((regs)->sp)

/* Does the process account for user or for system time? */
#define user_mode(regs) (EX1_PL((regs)->ex1) == USER_PL)
#define user_mode(regs) (EX1_PL((regs)->ex1) < KERNEL_PL)

/* Fill in a struct pt_regs with the current kernel registers. */
struct pt_regs *get_pt_regs(struct pt_regs *);

@@ -79,8 +80,7 @@ extern void single_step_execve(void);

struct task_struct;

extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
                         int error_code);
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs);

#ifdef __tilegx__
/* We need this since sigval_t has a user pointer in it, for GETSIGINFO etc. */

@@ -25,10 +25,16 @@ extern char _sinitdata[], _einitdata[];
/* Write-once data is writable only till the end of initialization. */
extern char __w1data_begin[], __w1data_end[];

extern char vdso_start[], vdso_end[];
#ifdef CONFIG_COMPAT
extern char vdso32_start[], vdso32_end[];
#endif

/* Not exactly sections, but PC comparison points in the code. */
extern char __rt_sigreturn[], __rt_sigreturn_end[];
#ifndef __tilegx__
#ifdef __tilegx__
extern char __start_unalign_asm_code[], __end_unalign_asm_code[];
#else
extern char sys_cmpxchg[], __sys_cmpxchg_end[];
extern char __sys_cmpxchg_grab_lock[];
extern char __start_atomic_asm_code[], __end_atomic_asm_code[];

@@ -24,9 +24,8 @@
 */
#define MAXMEM_PFN PFN_DOWN(MAXMEM)

int tile_console_write(const char *buf, int count);
void early_panic(const char *fmt, ...);
void warn_early_printk(void);
void __init disable_early_printk(void);

/* Init-time routine to do tile-specific per-cpu setup. */
void setup_cpu(int boot);

@@ -101,10 +101,8 @@ void print_disabled_cpus(void);
extern struct cpumask cpu_lotar_map;
#define cpu_is_valid_lotar(cpu) cpumask_test_cpu((cpu), &cpu_lotar_map)

#if CHIP_HAS_CBOX_HOME_MAP()
/* Which processors are used for hash-for-home mapping */
extern struct cpumask hash_for_home_map;
#endif

/* Which cpus can have their cache flushed by hv_flush_remote(). */
extern struct cpumask cpu_cacheable_map;

@@ -27,7 +27,7 @@
 * Return the "current" portion of a ticket lock value,
 * i.e. the number that currently owns the lock.
 */
static inline int arch_spin_current(u32 val)
static inline u32 arch_spin_current(u32 val)
{
        return val >> __ARCH_SPIN_CURRENT_SHIFT;
}

@@ -36,7 +36,7 @@ static inline int arch_spin_current(u32 val)
 * Return the "next" portion of a ticket lock value,
 * i.e. the number that the next task to try to acquire the lock will get.
 */
static inline int arch_spin_next(u32 val)
static inline u32 arch_spin_next(u32 val)
{
        return val & __ARCH_SPIN_NEXT_MASK;
}
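
The ticket discipline behind these helpers can be sketched generically (an illustration only, not kernel code; the shift and mask constants are hypothetical stand-ins for __ARCH_SPIN_CURRENT_SHIFT and __ARCH_SPIN_NEXT_MASK):

#include <stdint.h>

#define CURRENT_SHIFT 17                        /* hypothetical field layout */
#define NEXT_MASK ((1u << CURRENT_SHIFT) - 1)

static inline uint32_t spin_current(uint32_t v) { return v >> CURRENT_SHIFT; }
static inline uint32_t spin_next(uint32_t v)    { return v & NEXT_MASK; }

/* The lock is free exactly when the owner's ticket has caught up with
 * the next ticket to be handed out. */
static inline int spin_is_unlocked(uint32_t v)
{
        return spin_current(v) == spin_next(v);
}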

@@ -21,8 +21,10 @@
#define __HAVE_ARCH_MEMMOVE
#define __HAVE_ARCH_STRCHR
#define __HAVE_ARCH_STRLEN
#define __HAVE_ARCH_STRNLEN

extern __kernel_size_t strlen(const char *);
extern __kernel_size_t strnlen(const char *, __kernel_size_t);
extern char *strchr(const char *s, int c);
extern void *memchr(const void *s, int c, size_t n);
extern void *memset(void *, int, __kernel_size_t);

@@ -39,6 +39,11 @@ struct thread_info {
        struct restart_block restart_block;
        struct single_step_state *step_state;   /* single step state
                                                   (if non-zero) */
        int align_ctl;                          /* controls unaligned access */
#ifdef __tilegx__
        unsigned long unalign_jit_tmp[4];       /* temp r0..r3 storage */
        void __user *unalign_jit_base;          /* unalign fixup JIT base */
#endif
};

/*

@@ -56,6 +61,7 @@ struct thread_info {
                .fn = do_no_restart_syscall, \
        }, \
        .step_state = NULL, \
        .align_ctl = 0, \
}

#define init_thread_info (init_thread_union.thread_info)

@@ -15,12 +15,13 @@
#ifndef _ASM_TILE_TRAPS_H
#define _ASM_TILE_TRAPS_H

#ifndef __ASSEMBLY__
#include <arch/chip.h>

/* mm/fault.c */
void do_page_fault(struct pt_regs *, int fault_num,
                   unsigned long address, unsigned long write);
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
#if CHIP_HAS_TILE_DMA()
void do_async_page_fault(struct pt_regs *);
#endif

@@ -69,6 +70,16 @@ void gx_singlestep_handle(struct pt_regs *, int fault_num);

/* kernel/intvec_64.S */
void fill_ra_stack(void);

/* Handle unalign data fixup. */
extern void do_unaligned(struct pt_regs *regs, int vecnum);
#endif

#endif /* __ASSEMBLY__ */

#ifdef __tilegx__
/* 128 byte JIT per unalign fixup. */
#define UNALIGN_JIT_SHIFT 7
#endif

#endif /* _ASM_TILE_TRAPS_H */
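
A fixed power-of-two slot size means a fixed-up instruction's JIT code can be located with a shift rather than a table lookup. A rough sketch of the idea (the indexing scheme below is hypothetical, not taken from the kernel's unaligned-access code):

#include <stdint.h>

#define UNALIGN_JIT_SHIFT 7     /* 128-byte JIT slot per fixed-up instruction */

/* Hypothetical: address of the n-th JIT slot above some per-thread base. */
static inline void *unalign_jit_slot(void *jit_base, unsigned long n)
{
        return (char *)jit_base + (n << UNALIGN_JIT_SHIFT);
}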

@@ -127,8 +127,10 @@ extern int fixup_exception(struct pt_regs *regs);

#ifdef __LP64__
#define _ASM_PTR ".quad"
#define _ASM_ALIGN ".align 8"
#else
#define _ASM_PTR ".long"
#define _ASM_ALIGN ".align 4"
#endif

#define __get_user_asm(OP, x, ptr, ret) \

@@ -137,6 +139,7 @@ extern int fixup_exception(struct pt_regs *regs);
        "0: { movei %1, 0; movei %0, %3 }\n" \
        "j 9f\n" \
        ".section __ex_table,\"a\"\n" \
        _ASM_ALIGN "\n" \
        _ASM_PTR " 1b, 0b\n" \
        ".popsection\n" \
        "9:" \

@@ -168,6 +171,7 @@ extern int fixup_exception(struct pt_regs *regs);
        "0: { movei %1, 0; movei %2, 0 }\n" \
        "{ movei %0, %4; j 9f }\n" \
        ".section __ex_table,\"a\"\n" \
        ".align 4\n" \
        ".word 1b, 0b\n" \
        ".word 2b, 0b\n" \
        ".popsection\n" \

@@ -224,6 +228,7 @@ extern int __get_user_bad(void)
        ".pushsection .fixup,\"ax\"\n" \
        "0: { movei %0, %3; j 9f }\n" \
        ".section __ex_table,\"a\"\n" \
        _ASM_ALIGN "\n" \
        _ASM_PTR " 1b, 0b\n" \
        ".popsection\n" \
        "9:" \

@@ -248,6 +253,7 @@ extern int __get_user_bad(void)
        ".pushsection .fixup,\"ax\"\n" \
        "0: { movei %0, %4; j 9f }\n" \
        ".section __ex_table,\"a\"\n" \
        ".align 4\n" \
        ".word 1b, 0b\n" \
        ".word 2b, 0b\n" \
        ".popsection\n" \

@@ -566,37 +572,6 @@ static inline unsigned long __must_check flush_user(
        return len;
}

/**
 * inv_user: - Invalidate a block of memory in user space from cache.
 * @mem: Destination address, in user space.
 * @len: Number of bytes to invalidate.
 *
 * Returns number of bytes that could not be invalidated.
 * On success, this will be zero.
 *
 * Note that on Tile64, the "inv" operation is in fact a
 * "flush and invalidate", so cache write-backs will occur prior
 * to the cache being marked invalid.
 */
extern unsigned long inv_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __inv_user(
        void __user *mem, unsigned long len)
{
        int retval;

        might_fault();
        retval = inv_user_asm(mem, len);
        mb_incoherent();
        return retval;
}
static inline unsigned long __must_check inv_user(
        void __user *mem, unsigned long len)
{
        if (access_ok(VERIFY_WRITE, mem, len))
                return __inv_user(mem, len);
        return len;
}

/**
 * finv_user: - Flush-inval a block of memory in user space from cache.
 * @mem: Destination address, in user space.

@@ -15,11 +15,15 @@
#ifndef _ASM_TILE_UNALIGNED_H
#define _ASM_TILE_UNALIGNED_H

#include <linux/unaligned/le_struct.h>
#include <linux/unaligned/be_byteshift.h>
#include <linux/unaligned/generic.h>
#define get_unaligned __get_unaligned_le
#define put_unaligned __put_unaligned_le
/*
 * We could implement faster get_unaligned_[be/le]64 using the ldna
 * instruction on tilegx; however, we need to either copy all of the
 * other generic functions to here (which is pretty ugly) or else
 * modify both the generic code and other arch code to allow arch
 * specific unaligned data access functions.  Given these functions
 * are not often called, we'll stick with the generic version.
 */
#include <asm-generic/unaligned.h>

/*
 * Is the kernel doing fixups of unaligned accesses?  If <0, no kernel

@@ -0,0 +1,49 @@
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef __TILE_VDSO_H__
#define __TILE_VDSO_H__

#include <linux/types.h>

/*
 * Note about the vdso_data structure:
 *
 * NEVER USE THEM IN USERSPACE CODE DIRECTLY. The layout of the
 * structure is supposed to be known only to the function in the vdso
 * itself and may change without notice.
 */

struct vdso_data {
        __u64 tz_update_count;  /* Timezone atomicity ctr */
        __u64 tb_update_count;  /* Timebase atomicity ctr */
        __u64 xtime_tod_stamp;  /* TOD clock for xtime */
        __u64 xtime_clock_sec;  /* Kernel time second */
        __u64 xtime_clock_nsec; /* Kernel time nanosecond */
        __u64 wtom_clock_sec;   /* Wall to monotonic clock second */
        __u64 wtom_clock_nsec;  /* Wall to monotonic clock nanosecond */
        __u32 mult;             /* Cycle to nanosecond multiplier */
        __u32 shift;            /* Cycle to nanosecond divisor (power of two) */
        __u32 tz_minuteswest;   /* Minutes west of Greenwich */
        __u32 tz_dsttime;       /* Type of dst correction */
};

extern struct vdso_data *vdso_data;

/* __vdso_rt_sigreturn is defined with the addresses in the vdso page. */
extern void __vdso_rt_sigreturn(void);

extern int setup_vdso_pages(void);

#endif /* __TILE_VDSO_H__ */
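
The two update counters support the usual lockless read-retry loop on the consumer side. A minimal sketch of how the vDSO's own gettimeofday-style reader might use tb_update_count (illustrative only; per the comment above, code outside the vDSO must never touch this layout, and smp_rmb() here stands in for whatever barrier the real implementation pairs with the writer):

static inline void vdso_read_time(const struct vdso_data *vd,
                                  u64 *sec, u64 *nsec)
{
        u64 count;

        do {
                count = vd->tb_update_count;  /* odd while an update is in flight */
                smp_rmb();
                *sec  = vd->xtime_clock_sec;
                *nsec = vd->xtime_clock_nsec;
                smp_rmb();
        } while ((count & 1) || count != vd->tb_update_count);
}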

@@ -30,6 +30,7 @@

#define GXIO_TRIO_OP_ALLOC_MEMORY_MAPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1404)

#define GXIO_TRIO_OP_ALLOC_SCATTER_QUEUES IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140e)
#define GXIO_TRIO_OP_ALLOC_PIO_REGIONS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1412)

#define GXIO_TRIO_OP_INIT_PIO_REGION_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1414)

@@ -54,6 +55,10 @@ int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context,
                                unsigned int flags);


int gxio_trio_alloc_scatter_queues(gxio_trio_context_t * context,
                                   unsigned int count, unsigned int first,
                                   unsigned int flags);

int gxio_trio_alloc_pio_regions(gxio_trio_context_t * context,
                                unsigned int count, unsigned int first,
                                unsigned int flags);

@@ -0,0 +1,40 @@
/*
 * Copyright 2013 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

/* This file is machine-generated; DO NOT EDIT! */
#ifndef __GXIO_UART_LINUX_RPC_H__
#define __GXIO_UART_LINUX_RPC_H__

#include <hv/iorpc.h>

#include <hv/drv_uart_intf.h>
#include <gxio/uart.h>
#include <gxio/kiorpc.h>
#include <linux/string.h>
#include <linux/module.h>
#include <asm/pgtable.h>

#define GXIO_UART_OP_CFG_INTERRUPT IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1900)
#define GXIO_UART_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
#define GXIO_UART_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)

int gxio_uart_cfg_interrupt(gxio_uart_context_t *context, int inter_x,
                            int inter_y, int inter_ipi, int inter_event);

int gxio_uart_get_mmio_base(gxio_uart_context_t *context, HV_PTE *base);

int gxio_uart_check_mmio_offset(gxio_uart_context_t *context,
                                unsigned long offset, unsigned long size);

#endif /* !__GXIO_UART_LINUX_RPC_H__ */

@@ -0,0 +1,105 @@
/*
 * Copyright 2013 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _GXIO_UART_H_
#define _GXIO_UART_H_

#include "common.h"

#include <hv/drv_uart_intf.h>
#include <hv/iorpc.h>

/*
 *
 * An API for manipulating the UART interface.
 */

/*
 *
 * The Rshim allows access to the processor's UART interface.
 */

/* A context object used to manage UART resources. */
typedef struct {

        /* File descriptor for calling up to the hypervisor. */
        int fd;

        /* The VA at which our MMIO registers are mapped. */
        char *mmio_base;

} gxio_uart_context_t;

/* Request UART interrupts.
 *
 * Request that interrupts be delivered to a tile when the UART's
 * Receive FIFO is written, or the Write FIFO is read.
 *
 * @param context Pointer to a properly initialized gxio_uart_context_t.
 * @param bind_cpu_x X coordinate of CPU to which interrupt will be delivered.
 * @param bind_cpu_y Y coordinate of CPU to which interrupt will be delivered.
 * @param bind_interrupt IPI interrupt number.
 * @param bind_event Sub-interrupt event bit number; a negative value can
 *   disable the interrupt.
 * @return Zero if all of the requested UART events were successfully
 *   configured to interrupt.
 */
extern int gxio_uart_cfg_interrupt(gxio_uart_context_t *context,
                                   int bind_cpu_x,
                                   int bind_cpu_y,
                                   int bind_interrupt, int bind_event);

/* Initialize a UART context.
 *
 * A properly initialized context must be obtained before any of the other
 * gxio_uart routines may be used.
 *
 * @param context Pointer to a gxio_uart_context_t, which will be initialized
 *   by this routine, if it succeeds.
 * @param uart_index Index of the UART to use.
 * @return Zero if the context was successfully initialized, else a
 *   GXIO_ERR_xxx error code.
 */
extern int gxio_uart_init(gxio_uart_context_t *context, int uart_index);

/* Destroy a UART context.
 *
 * Once destroyed, a context may not be used with any gxio_uart routines
 * other than gxio_uart_init().  After this routine returns, no further
 * interrupts requested on this context will be delivered.  The state and
 * configuration of the pins which had been attached to this context are
 * unchanged by this operation.
 *
 * @param context Pointer to a gxio_uart_context_t.
 * @return Zero if the context was successfully destroyed, else a
 *   GXIO_ERR_xxx error code.
 */
extern int gxio_uart_destroy(gxio_uart_context_t *context);

/* Write UART register.
 * @param context Pointer to a gxio_uart_context_t.
 * @param offset UART register offset.
 * @param word Data to be written to the UART register.
 */
extern void gxio_uart_write(gxio_uart_context_t *context, uint64_t offset,
                            uint64_t word);

/* Read UART register.
 * @param context Pointer to a gxio_uart_context_t.
 * @param offset UART register offset.
 * @return Data read from UART register.
 */
extern uint64_t gxio_uart_read(gxio_uart_context_t *context, uint64_t offset);

#endif /* _GXIO_UART_H_ */
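
A minimal caller of this API might look like the following sketch (the register offsets are hypothetical placeholders; real offsets come from <arch/uart.h>, and error handling is elided):

static int uart_smoke_test(void)
{
        gxio_uart_context_t ctx;
        uint64_t rx;

        if (gxio_uart_init(&ctx, 0) != 0)       /* attach to UART 0 */
                return -1;

        gxio_uart_write(&ctx, 0x0, 'A');        /* 0x0: hypothetical TX register */
        rx = gxio_uart_read(&ctx, 0x8);         /* 0x8: hypothetical RX register */
        (void)rx;

        return gxio_uart_destroy(&ctx);
}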

@@ -64,8 +64,9 @@ struct pcie_port_property
 * will not consider it an error if the link comes up as a x8 link. */
        uint8_t allow_x8: 1;

        /** Reserved. */
        uint8_t reserved: 1;
        /** If true, this link is connected to a device which may or may not
         * be present. */
        uint8_t removable: 1;

};

@@ -167,6 +168,9 @@ pcie_stream_intr_config_sel_t;
struct pcie_trio_ports_property
{
        struct pcie_port_property ports[TILEGX_TRIO_PCIES];

        /** Set if this TRIO belongs to a Gx72 device. */
        uint8_t is_gx72;
};

/* Flags indicating traffic class. */

@@ -0,0 +1,33 @@
/*
 * Copyright 2013 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

/**
 * Interface definitions for the UART driver.
 */

#ifndef _SYS_HV_DRV_UART_INTF_H
#define _SYS_HV_DRV_UART_INTF_H

#include <arch/uart.h>

/** Number of UART ports supported. */
#define TILEGX_UART_NR 2

/** The mmap file offset (PA) of the UART MMIO region. */
#define HV_UART_MMIO_OFFSET 0

/** The maximum size of the UARTs MMIO region (64K Bytes). */
#define HV_UART_MMIO_SIZE (1UL << 16)

#endif /* _SYS_HV_DRV_UART_INTF_H */

@@ -318,8 +318,11 @@
/** hv_set_pte_super_shift */
#define HV_DISPATCH_SET_PTE_SUPER_SHIFT 57

/** hv_console_set_ipi */
#define HV_DISPATCH_CONSOLE_SET_IPI 63

/** One more than the largest dispatch value */
#define _HV_DISPATCH_END 58
#define _HV_DISPATCH_END 64


#ifndef __ASSEMBLER__

@@ -541,14 +544,24 @@ typedef enum {
        HV_CONFSTR_CPUMOD_REV = 18,

        /** Human-readable CPU module description. */
        HV_CONFSTR_CPUMOD_DESC = 19
        HV_CONFSTR_CPUMOD_DESC = 19,

        /** Per-tile hypervisor statistics.  When this identifier is specified,
         * the hv_confstr call takes two extra arguments.  The first is the
         * HV_XY_TO_LOTAR of the target tile's coordinates.  The second is
         * a flag word.  The only current flag is the lowest bit, which means
         * "zero out the stats instead of retrieving them"; in this case the
         * buffer and buffer length are ignored. */
        HV_CONFSTR_HV_STATS = 20

} HV_ConfstrQuery;

/** Query a configuration string from the hypervisor.
 *
 * @param query Identifier for the specific string to be retrieved
 *   (HV_CONFSTR_xxx).
 *   (HV_CONFSTR_xxx).  Some strings may require or permit extra
 *   arguments to be appended which select specific objects to be
 *   described; see the string descriptions above.
 * @param buf Buffer in which to place the string.
 * @param len Length of the buffer.
 * @return If query is valid, then the length of the corresponding string,

@@ -556,21 +569,16 @@ typedef enum {
 *   was truncated.  If query is invalid, HV_EINVAL.  If the specified
 *   buffer is not writable by the client, HV_EFAULT.
 */
int hv_confstr(HV_ConfstrQuery query, HV_VirtAddr buf, int len);
int hv_confstr(HV_ConfstrQuery query, HV_VirtAddr buf, int len, ...);
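
Given the description of HV_CONFSTR_HV_STATS, a stats query under the new varargs signature would presumably look like this (a sketch; the buffer size and tile coordinates x, y are arbitrary):

char buf[4096];
int len;

/* Retrieve per-tile hypervisor statistics for the tile at (x, y). */
len = hv_confstr(HV_CONFSTR_HV_STATS, (HV_VirtAddr)buf, sizeof(buf),
                 HV_XY_TO_LOTAR(x, y), 0 /* flags: retrieve, don't zero */);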

/** Tile coordinate */
typedef struct
{
#ifndef __BIG_ENDIAN__
        /** X coordinate, relative to supervisor's top-left coordinate */
        int x;

        /** Y coordinate, relative to supervisor's top-left coordinate */
        int y;
#else
        int y;
        int x;
#endif
} HV_Coord;

@@ -585,6 +593,30 @@ typedef struct
 */
int hv_get_ipi_pte(HV_Coord tile, int pl, HV_PTE* pte);

/** Configure the console interrupt.
 *
 * When the console client interrupt is enabled, the hypervisor will
 * deliver the specified IPI to the client in the following situations:
 *
 * - The console has at least one character available for input.
 *
 * - The console can accept new characters for output, and the last call
 *   to hv_console_write() did not write all of the characters requested
 *   by the client.
 *
 * Note that in some system configurations, the console interrupt will not
 * be available; clients should be prepared for this routine to fail and
 * to fall back to periodic console polling in that case.
 *
 * @param ipi Index of the IPI register which will receive the interrupt.
 * @param event IPI event number for console interrupt. If less than 0,
 *   disable the console IPI interrupt.
 * @param coord Tile to be targeted for console interrupt.
 * @return 0 on success; otherwise, HV_EINVAL if given an illegal parameter,
 *   or HV_ENOTSUP if console interrupts are not available.
 */
int hv_console_set_ipi(int ipi, int event, HV_Coord coord);
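
A client enabling the interrupt-driven console might do something like the following (a sketch; the IPI register index and event number are placeholders, not values mandated by this interface):

static void console_irq_setup(void)
{
        HV_Coord tile = { .x = 0, .y = 0 };  /* deliver console IPIs to tile (0,0) */

        if (hv_console_set_ipi(0, 1, tile) != 0) {
                /* e.g. HV_ENOTSUP: fall back to periodic console polling */
        }
}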

#else /* !CHIP_HAS_IPI() */

/** A set of interrupts. */

@@ -1092,13 +1124,8 @@ HV_VirtAddrRange hv_inquire_virtual(int idx);
/** A range of ASID values. */
typedef struct
{
#ifndef __BIG_ENDIAN__
        HV_ASID start;      /**< First ASID in the range. */
        unsigned int size;  /**< Number of ASIDs. Zero for an invalid range. */
#else
        unsigned int size;  /**< Number of ASIDs. Zero for an invalid range. */
        HV_ASID start;      /**< First ASID in the range. */
#endif
} HV_ASIDRange;

/** Returns information about a range of ASIDs.

@@ -1422,7 +1449,6 @@ typedef enum
/** Message recipient. */
typedef struct
{
#ifndef __BIG_ENDIAN__
        /** X coordinate, relative to supervisor's top-left coordinate */
        unsigned int x:11;

@@ -1431,11 +1457,6 @@ typedef struct

        /** Status of this recipient */
        HV_Recip_State state:10;
#else //__BIG_ENDIAN__
        HV_Recip_State state:10;
        unsigned int y:11;
        unsigned int x:11;
#endif
} HV_Recipient;

/** Send a message to a set of recipients.
@@ -1,7 +1,6 @@
# UAPI Header export list
header-y += abi.h
header-y += chip.h
header-y += chip_tile64.h
header-y += chip_tilegx.h
header-y += chip_tilepro.h
header-y += icache.h

@@ -12,9 +12,7 @@
 * more details.
 */

#if __tile_chip__ == 0
#include <arch/chip_tile64.h>
#elif __tile_chip__ == 1
#if __tile_chip__ == 1
#include <arch/chip_tilepro.h>
#elif defined(__tilegx__)
#include <arch/chip_tilegx.h>
@ -1,258 +0,0 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
/*
|
||||
* @file
|
||||
* Global header file.
|
||||
* This header file specifies defines for TILE64.
|
||||
*/
|
||||
|
||||
#ifndef __ARCH_CHIP_H__
|
||||
#define __ARCH_CHIP_H__
|
||||
|
||||
/** Specify chip version.
|
||||
* When possible, prefer the CHIP_xxx symbols below for future-proofing.
|
||||
* This is intended for cross-compiling; native compilation should
|
||||
* use the predefined __tile_chip__ symbol.
|
||||
*/
|
||||
#define TILE_CHIP 0
|
||||
|
||||
/** Specify chip revision.
|
||||
* This provides for the case of a respin of a particular chip type;
|
||||
* the normal value for this symbol is "0".
|
||||
* This is intended for cross-compiling; native compilation should
|
||||
* use the predefined __tile_chip_rev__ symbol.
|
||||
*/
|
||||
#define TILE_CHIP_REV 0
|
||||
|
||||
/** The name of this architecture. */
|
||||
#define CHIP_ARCH_NAME "tile64"
|
||||
|
||||
/** The ELF e_machine type for binaries for this chip. */
|
||||
#define CHIP_ELF_TYPE() EM_TILE64
|
||||
|
||||
/** The alternate ELF e_machine type for binaries for this chip. */
|
||||
#define CHIP_COMPAT_ELF_TYPE() 0x2506
|
||||
|
||||
/** What is the native word size of the machine? */
|
||||
#define CHIP_WORD_SIZE() 32
|
||||
|
||||
/** How many bits of a virtual address are used. Extra bits must be
|
||||
* the sign extension of the low bits.
|
||||
*/
|
||||
#define CHIP_VA_WIDTH() 32
|
||||
|
||||
/** How many bits are in a physical address? */
|
||||
#define CHIP_PA_WIDTH() 36
|
||||
|
||||
/** Size of the L2 cache, in bytes. */
|
||||
#define CHIP_L2_CACHE_SIZE() 65536
|
||||
|
||||
/** Log size of an L2 cache line in bytes. */
|
||||
#define CHIP_L2_LOG_LINE_SIZE() 6
|
||||
|
||||
/** Size of an L2 cache line, in bytes. */
|
||||
#define CHIP_L2_LINE_SIZE() (1 << CHIP_L2_LOG_LINE_SIZE())
|
||||
|
||||
/** Associativity of the L2 cache. */
|
||||
#define CHIP_L2_ASSOC() 2
|
||||
|
||||
/** Size of the L1 data cache, in bytes. */
|
||||
#define CHIP_L1D_CACHE_SIZE() 8192
|
||||
|
||||
/** Log size of an L1 data cache line in bytes. */
|
||||
#define CHIP_L1D_LOG_LINE_SIZE() 4
|
||||
|
||||
/** Size of an L1 data cache line, in bytes. */
|
||||
#define CHIP_L1D_LINE_SIZE() (1 << CHIP_L1D_LOG_LINE_SIZE())
|
||||
|
||||
/** Associativity of the L1 data cache. */
|
||||
#define CHIP_L1D_ASSOC() 2
|
||||
|
||||
/** Size of the L1 instruction cache, in bytes. */
|
||||
#define CHIP_L1I_CACHE_SIZE() 8192
|
||||
|
||||
/** Log size of an L1 instruction cache line in bytes. */
|
||||
#define CHIP_L1I_LOG_LINE_SIZE() 6
|
||||
|
||||
/** Size of an L1 instruction cache line, in bytes. */
|
||||
#define CHIP_L1I_LINE_SIZE() (1 << CHIP_L1I_LOG_LINE_SIZE())
|
||||
|
||||
/** Associativity of the L1 instruction cache. */
|
||||
#define CHIP_L1I_ASSOC() 1
|
||||
|
||||
/** Stride with which flush instructions must be issued. */
|
||||
#define CHIP_FLUSH_STRIDE() CHIP_L2_LINE_SIZE()
|
||||
|
||||
/** Stride with which inv instructions must be issued. */
|
||||
#define CHIP_INV_STRIDE() CHIP_L1D_LINE_SIZE()
|
||||
|
||||
/** Stride with which finv instructions must be issued. */
|
||||
#define CHIP_FINV_STRIDE() CHIP_L1D_LINE_SIZE()
|
||||
|
||||
/** Can the local cache coherently cache data that is homed elsewhere? */
|
||||
#define CHIP_HAS_COHERENT_LOCAL_CACHE() 0
|
||||
|
||||
/** How many simultaneous outstanding victims can the L2 cache have? */
|
||||
#define CHIP_MAX_OUTSTANDING_VICTIMS() 2
|
||||
|
||||
/** Does the TLB support the NC and NOALLOC bits? */
|
||||
#define CHIP_HAS_NC_AND_NOALLOC_BITS() 0
|
||||
|
||||
/** Does the chip support hash-for-home caching? */
|
||||
#define CHIP_HAS_CBOX_HOME_MAP() 0
|
||||
|
||||
/** Number of entries in the chip's home map tables. */
|
||||
/* #define CHIP_CBOX_HOME_MAP_SIZE() -- does not apply to chip 0 */
|
||||
|
||||
/** Do uncacheable requests miss in the cache regardless of whether
|
||||
* there is matching data? */
|
||||
#define CHIP_HAS_ENFORCED_UNCACHEABLE_REQUESTS() 0
|
||||
|
||||
/** Does the mf instruction wait for victims? */
|
||||
#define CHIP_HAS_MF_WAITS_FOR_VICTIMS() 1
|
||||
|
||||
/** Does the chip have an "inv" instruction that doesn't also flush? */
|
||||
#define CHIP_HAS_INV() 0
|
||||
|
||||
/** Does the chip have a "wh64" instruction? */
|
||||
#define CHIP_HAS_WH64() 0
|
||||
|
||||
/** Does this chip have a 'dword_align' instruction? */
|
||||
#define CHIP_HAS_DWORD_ALIGN() 0
|
||||
|
||||
/** Number of performance counters. */
|
||||
#define CHIP_PERFORMANCE_COUNTERS() 2
|
||||
|
||||
/** Does this chip have auxiliary performance counters? */
|
||||
#define CHIP_HAS_AUX_PERF_COUNTERS() 0
|
||||
|
||||
/** Is the CBOX_MSR1 SPR supported? */
|
||||
#define CHIP_HAS_CBOX_MSR1() 0
|
||||
|
||||
/** Is the TILE_RTF_HWM SPR supported? */
|
||||
#define CHIP_HAS_TILE_RTF_HWM() 0
|
||||
|
||||
/** Is the TILE_WRITE_PENDING SPR supported? */
|
||||
#define CHIP_HAS_TILE_WRITE_PENDING() 0
|
||||
|
||||
/** Is the PROC_STATUS SPR supported? */
|
||||
#define CHIP_HAS_PROC_STATUS_SPR() 0
|
||||
|
||||
/** Is the DSTREAM_PF SPR supported? */
|
||||
#define CHIP_HAS_DSTREAM_PF() 0
|
||||
|
||||
/** Log of the number of mshims we have. */
|
||||
#define CHIP_LOG_NUM_MSHIMS() 2
|
||||
|
||||
/** Are the bases of the interrupt vector areas fixed? */
|
||||
#define CHIP_HAS_FIXED_INTVEC_BASE() 1
|
||||
|
||||
/** Are the interrupt masks split up into 2 SPRs? */
|
||||
#define CHIP_HAS_SPLIT_INTR_MASK() 1
|
||||
|
||||
/** Is the cycle count split up into 2 SPRs? */
|
||||
#define CHIP_HAS_SPLIT_CYCLE() 1
|
||||
|
||||
/** Does the chip have a static network? */
|
||||
#define CHIP_HAS_SN() 1
|
||||
|
||||
/** Does the chip have a static network processor? */
|
||||
#define CHIP_HAS_SN_PROC() 1
|
||||
|
||||
/** Size of the L1 static network processor instruction cache, in bytes. */
|
||||
#define CHIP_L1SNI_CACHE_SIZE() 2048
|
||||
|
||||
/** Does the chip have DMA support in each tile? */
|
||||
#define CHIP_HAS_TILE_DMA() 1
|
||||
|
||||
/** Does the chip have the second revision of the directly accessible
|
||||
* dynamic networks? This encapsulates a number of characteristics,
|
||||
* including the absence of the catch-all, the absence of inline message
|
||||
* tags, the absence of support for network context-switching, and so on.
|
||||
*/
|
||||
#define CHIP_HAS_REV1_XDN() 0
|
||||
|
||||
/** Does the chip have cmpexch and similar (fetchadd, exch, etc.)? */
|
||||
#define CHIP_HAS_CMPEXCH() 0
|
||||
|
||||
/** Does the chip have memory-mapped I/O support? */
|
||||
#define CHIP_HAS_MMIO() 0
|
||||
|
||||
/** Does the chip have post-completion interrupts? */
|
||||
#define CHIP_HAS_POST_COMPLETION_INTERRUPTS() 0
|
||||
|
||||
/** Does the chip have native single step support? */
|
||||
#define CHIP_HAS_SINGLE_STEP() 0
|
||||
|
||||
#ifndef __OPEN_SOURCE__ /* features only relevant to hypervisor-level code */
|
||||
|
||||
/** How many entries are present in the instruction TLB? */
|
||||
#define CHIP_ITLB_ENTRIES() 8
|
||||
|
||||
/** How many entries are present in the data TLB? */
|
||||
#define CHIP_DTLB_ENTRIES() 16
|
||||
|
||||
/** How many MAF entries does the XAUI shim have? */
|
||||
#define CHIP_XAUI_MAF_ENTRIES() 16
|
||||
|
||||
/** Does the memory shim have a source-id table? */
|
||||
#define CHIP_HAS_MSHIM_SRCID_TABLE() 1
|
||||
|
||||
/** Does the L1 instruction cache clear on reset? */
|
||||
#define CHIP_HAS_L1I_CLEAR_ON_RESET() 0
|
||||
|
||||
/** Does the chip come out of reset with valid coordinates on all tiles?
|
||||
* Note that if defined, this also implies that the upper left is 1,1.
|
||||
*/
|
||||
#define CHIP_HAS_VALID_TILE_COORD_RESET() 0
|
||||
|
||||
/** Does the chip have unified packet formats? */
|
||||
#define CHIP_HAS_UNIFIED_PACKET_FORMATS() 0
|
||||
|
||||
/** Does the chip support write reordering? */
|
||||
#define CHIP_HAS_WRITE_REORDERING() 0
|
||||
|
||||
/** Does the chip support Y-X routing as well as X-Y? */
|
||||
#define CHIP_HAS_Y_X_ROUTING() 0
|
||||
|
||||
/** Is INTCTRL_3 managed with the correct MPL? */
|
||||
#define CHIP_HAS_INTCTRL_3_STATUS_FIX() 0
|
||||
|
||||
/** Is it possible to configure the chip to be big-endian? */
|
||||
#define CHIP_HAS_BIG_ENDIAN_CONFIG() 0
|
||||
|
||||
/** Is the CACHE_RED_WAY_OVERRIDDEN SPR supported? */
|
||||
#define CHIP_HAS_CACHE_RED_WAY_OVERRIDDEN() 0
|
||||
|
||||
/** Is the DIAG_TRACE_WAY SPR supported? */
|
||||
#define CHIP_HAS_DIAG_TRACE_WAY() 0
|
||||
|
||||
/** Is the MEM_STRIPE_CONFIG SPR supported? */
|
||||
#define CHIP_HAS_MEM_STRIPE_CONFIG() 0
|
||||
|
||||
/** Are the TLB_PERF SPRs supported? */
|
||||
#define CHIP_HAS_TLB_PERF() 0
|
||||
|
||||
/** Is the VDN_SNOOP_SHIM_CTL SPR supported? */
|
||||
#define CHIP_HAS_VDN_SNOOP_SHIM_CTL() 0
|
||||
|
||||
/** Does the chip support rev1 DMA packets? */
|
||||
#define CHIP_HAS_REV1_DMA_PACKETS() 0
|
||||
|
||||
/** Does the chip have an IPI shim? */
|
||||
#define CHIP_HAS_IPI() 0
|
||||
|
||||
#endif /* !__OPEN_SOURCE__ */
|
||||
#endif /* __ARCH_CHIP_H__ */
|
|
@@ -61,6 +61,7 @@ typedef tilegx_bundle_bits tile_bundle_bits;
#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_BUNDLE_ALIGNMENT_IN_BYTES
#define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \
        TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES
#define TILE_BPT_BUNDLE TILEGX_BPT_BUNDLE

/* 64-bit pattern for a { bpt ; nop } bundle. */
#define TILEGX_BPT_BUNDLE 0x286a44ae51485000ULL

@@ -71,6 +71,7 @@ typedef tilepro_bundle_bits tile_bundle_bits;
#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEPRO_BUNDLE_ALIGNMENT_IN_BYTES
#define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \
        TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES
#define TILE_BPT_BUNDLE TILEPRO_BPT_BUNDLE

/* 64-bit pattern for a { bpt ; nop } bundle. */
#define TILEPRO_BPT_BUNDLE 0x400b3cae70166000ULL

@@ -200,8 +200,6 @@
#define SPR_SIM_CONTROL 0x4e0c
#define SPR_SNCTL 0x0805
#define SPR_SNCTL__FRZFABRIC_MASK 0x1
#define SPR_SNCTL__FRZPROC_MASK 0x2
#define SPR_SNPC 0x080b
#define SPR_SNSTATIC 0x080c
#define SPR_SYSTEM_SAVE_0_0 0x4b00
#define SPR_SYSTEM_SAVE_0_1 0x4b01

@@ -15,6 +15,7 @@
#ifndef _ASM_TILE_AUXVEC_H
#define _ASM_TILE_AUXVEC_H

/* No extensions to auxvec */
/* The vDSO location. */
#define AT_SYSINFO_EHDR 33

#endif /* _ASM_TILE_AUXVEC_H */
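
Userspace locates the mapped vDSO through this aux vector entry. For example, with the standard glibc helper (not part of this patch):

#include <sys/auxv.h>
#include <stdio.h>

int main(void)
{
        /* Base address of the vDSO ELF header the kernel mapped for us. */
        unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
        printf("vDSO mapped at %#lx\n", vdso);
        return 0;
}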

@@ -29,8 +29,8 @@
 * to honor the arguments at some point.)
 *
 * Flush and invalidation of memory can normally be performed with the
 * __insn_flush(), __insn_inv(), and __insn_finv() instructions from
 * userspace.  The DCACHE option to the system call allows userspace
 * __insn_flush() and __insn_finv() instructions from userspace.
 * The DCACHE option to the system call allows userspace
 * to flush the entire L1+L2 data cache from the core.  In this case,
 * the address and length arguments are not used.  The DCACHE flush is
 * restricted to the current core, not all cores in the address space.

@@ -3,11 +3,17 @@
#

extra-y := vmlinux.lds head_$(BITS).o
obj-y := backtrace.o entry.o irq.o messaging.o \
obj-y := backtrace.o entry.o hvglue.o irq.o messaging.o \
        pci-dma.o proc.o process.o ptrace.o reboot.o \
        setup.o signal.o single_step.o stack.o sys.o sysfs.o time.o traps.o \
        setup.o signal.o single_step.o stack.o sys.o \
        sysfs.o time.o traps.o unaligned.o vdso.o \
        intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o

ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_early_printk.o = -pg
endif

obj-$(CONFIG_HARDWALL) += hardwall.o
obj-$(CONFIG_COMPAT) += compat.o compat_signal.o
obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o

@@ -20,3 +26,9 @@ else
obj-$(CONFIG_PCI) += pci.o
endif
obj-$(CONFIG_TILE_USB) += usb.o
obj-$(CONFIG_TILE_HVGLUE_TRACE) += hvglue_trace.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount_64.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_KGDB) += kgdb.o

obj-y += vdso/
@@ -14,13 +14,6 @@
 * Generates definitions from c-type structures used by assembly sources.
 */

#include <linux/kbuild.h>
#include <linux/thread_info.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/ptrace.h>
#include <hv/hypervisor.h>

/* Check for compatible compiler early in the build. */
#ifdef CONFIG_TILEGX
# ifndef __tilegx__

@@ -31,46 +24,61 @@
# endif
#else
# ifdef __tilegx__
# error Can not build TILEPro/TILE64 configurations with tilegx compiler
# error Can not build TILEPro configurations with tilegx compiler
# endif
#endif

#include <linux/kbuild.h>
#include <linux/thread_info.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/ptrace.h>
#include <hv/hypervisor.h>

void foo(void)
{
        DEFINE(SINGLESTEP_STATE_BUFFER_OFFSET, \
        DEFINE(SINGLESTEP_STATE_BUFFER_OFFSET,
               offsetof(struct single_step_state, buffer));
        DEFINE(SINGLESTEP_STATE_FLAGS_OFFSET, \
        DEFINE(SINGLESTEP_STATE_FLAGS_OFFSET,
               offsetof(struct single_step_state, flags));
        DEFINE(SINGLESTEP_STATE_ORIG_PC_OFFSET, \
        DEFINE(SINGLESTEP_STATE_ORIG_PC_OFFSET,
               offsetof(struct single_step_state, orig_pc));
        DEFINE(SINGLESTEP_STATE_NEXT_PC_OFFSET, \
        DEFINE(SINGLESTEP_STATE_NEXT_PC_OFFSET,
               offsetof(struct single_step_state, next_pc));
        DEFINE(SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET, \
        DEFINE(SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET,
               offsetof(struct single_step_state, branch_next_pc));
        DEFINE(SINGLESTEP_STATE_UPDATE_VALUE_OFFSET, \
        DEFINE(SINGLESTEP_STATE_UPDATE_VALUE_OFFSET,
               offsetof(struct single_step_state, update_value));

        DEFINE(THREAD_INFO_TASK_OFFSET, \
        DEFINE(THREAD_INFO_TASK_OFFSET,
               offsetof(struct thread_info, task));
        DEFINE(THREAD_INFO_FLAGS_OFFSET, \
        DEFINE(THREAD_INFO_FLAGS_OFFSET,
               offsetof(struct thread_info, flags));
        DEFINE(THREAD_INFO_STATUS_OFFSET, \
        DEFINE(THREAD_INFO_STATUS_OFFSET,
               offsetof(struct thread_info, status));
        DEFINE(THREAD_INFO_HOMECACHE_CPU_OFFSET, \
        DEFINE(THREAD_INFO_HOMECACHE_CPU_OFFSET,
               offsetof(struct thread_info, homecache_cpu));
        DEFINE(THREAD_INFO_STEP_STATE_OFFSET, \
        DEFINE(THREAD_INFO_PREEMPT_COUNT_OFFSET,
               offsetof(struct thread_info, preempt_count));
        DEFINE(THREAD_INFO_STEP_STATE_OFFSET,
               offsetof(struct thread_info, step_state));
#ifdef __tilegx__
        DEFINE(THREAD_INFO_UNALIGN_JIT_BASE_OFFSET,
               offsetof(struct thread_info, unalign_jit_base));
        DEFINE(THREAD_INFO_UNALIGN_JIT_TMP_OFFSET,
               offsetof(struct thread_info, unalign_jit_tmp));
#endif

        DEFINE(TASK_STRUCT_THREAD_KSP_OFFSET,
               offsetof(struct task_struct, thread.ksp));
        DEFINE(TASK_STRUCT_THREAD_PC_OFFSET,
               offsetof(struct task_struct, thread.pc));

        DEFINE(HV_TOPOLOGY_WIDTH_OFFSET, \
        DEFINE(HV_TOPOLOGY_WIDTH_OFFSET,
               offsetof(HV_Topology, width));
        DEFINE(HV_TOPOLOGY_HEIGHT_OFFSET, \
        DEFINE(HV_TOPOLOGY_HEIGHT_OFFSET,
               offsetof(HV_Topology, height));

        DEFINE(IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET, \
        DEFINE(IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET,
               offsetof(irq_cpustat_t, irq_syscall_count));
}
@@ -32,6 +32,7 @@
#include <asm/ucontext.h>
#include <asm/sigframe.h>
#include <asm/syscalls.h>
#include <asm/vdso.h>
#include <arch/interrupts.h>

struct compat_ucontext {

@@ -227,7 +228,7 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        if (err)
                goto give_sigsegv;

        restorer = VDSO_BASE;
        restorer = VDSO_SYM(&__vdso_rt_sigreturn);
        if (ka->sa.sa_flags & SA_RESTORER)
                restorer = ptr_to_compat_reg(ka->sa.sa_restorer);

@@ -23,19 +23,24 @@

static void early_hv_write(struct console *con, const char *s, unsigned n)
{
        hv_console_write((HV_VirtAddr) s, n);
        tile_console_write(s, n);

        /*
         * Convert NL to NLCR (close enough to CRNL) during early boot.
         * We assume newlines are at the ends of strings, which turns out
         * to be good enough for early boot console output.
         */
        if (n && s[n-1] == '\n')
                tile_console_write("\r", 1);
}

static struct console early_hv_console = {
        .name = "earlyhv",
        .write = early_hv_write,
        .flags = CON_PRINTBUFFER,
        .flags = CON_PRINTBUFFER | CON_BOOT,
        .index = -1,
};

/* Direct interface for emergencies */
static int early_console_complete;

void early_panic(const char *fmt, ...)
{
        va_list ap;

@@ -43,51 +48,21 @@ void early_panic(const char *fmt, ...)
        va_start(ap, fmt);
        early_printk("Kernel panic - not syncing: ");
        early_vprintk(fmt, ap);
        early_console->write(early_console, "\n", 1);
        early_printk("\n");
        va_end(ap);
        dump_stack();
        hv_halt();
}

static int __initdata keep_early;

static int __init setup_early_printk(char *str)
{
        if (early_console)
                return 1;

        if (str != NULL && strncmp(str, "keep", 4) == 0)
                keep_early = 1;

        early_console = &early_hv_console;
        register_console(early_console);

        return 0;
}

void __init disable_early_printk(void)
{
        early_console_complete = 1;
        if (!early_console)
                return;
        if (!keep_early) {
                early_printk("disabling early console\n");
                unregister_console(early_console);
                early_console = NULL;
        } else {
                early_printk("keeping early console\n");
        }
}

void warn_early_printk(void)
{
        if (early_console_complete || early_console)
                return;
        early_printk("\
Machine shutting down before console output is fully initialized.\n\
You may wish to reboot and add the option 'earlyprintk' to your\n\
boot command line to see any diagnostic early console output.\n\
");
}

early_param("earlyprintk", setup_early_printk);

@@ -27,22 +27,6 @@ STD_ENTRY(current_text_addr)
        { move r0, lr; jrp lr }
        STD_ENDPROC(current_text_addr)

/*
 * We don't run this function directly, but instead copy it to a page
 * we map into every user process.  See vdso_setup().
 *
 * Note that libc has a copy of this function that it uses to compare
 * against the PC when a stack backtrace ends, so if this code is
 * changed, the libc implementation(s) should also be updated.
 */
.pushsection .data
ENTRY(__rt_sigreturn)
        moveli TREG_SYSCALL_NR_NAME,__NR_rt_sigreturn
        swint1
ENDPROC(__rt_sigreturn)
ENTRY(__rt_sigreturn_end)
.popsection

STD_ENTRY(dump_stack)
        { move r2, lr; lnk r1 }
        { move r4, r52; addli r1, r1, dump_stack - . }
@@ -0,0 +1,246 @@
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * TILE-Gx specific ftrace support
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/ftrace.h>
#include <asm/sections.h>

#include <arch/opcode.h>

#ifdef CONFIG_DYNAMIC_FTRACE

static inline tilegx_bundle_bits NOP(void)
{
        return create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
                create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
                create_Opcode_X0(RRR_0_OPCODE_X0) |
                create_UnaryOpcodeExtension_X1(NOP_UNARY_OPCODE_X1) |
                create_RRROpcodeExtension_X1(UNARY_RRR_0_OPCODE_X1) |
                create_Opcode_X1(RRR_0_OPCODE_X1);
}

static int machine_stopped __read_mostly;

int ftrace_arch_code_modify_prepare(void)
{
        machine_stopped = 1;
        return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
        flush_icache_range(0, CHIP_L1I_CACHE_SIZE());
        machine_stopped = 0;
        return 0;
}

/*
 * Put { move r10, lr; jal ftrace_caller } in a bundle; this lets the
 * dynamic tracer add just one cycle of overhead to every kernel function
 * when disabled.
 */
static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
                                       bool link)
{
        tilegx_bundle_bits opcode_x0, opcode_x1;
        long pcrel_by_instr = (addr - pc) >> TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES;

        if (link) {
                /* opcode: jal addr */
                opcode_x1 =
                        create_Opcode_X1(JUMP_OPCODE_X1) |
                        create_JumpOpcodeExtension_X1(JAL_JUMP_OPCODE_X1) |
                        create_JumpOff_X1(pcrel_by_instr);
        } else {
                /* opcode: j addr */
                opcode_x1 =
                        create_Opcode_X1(JUMP_OPCODE_X1) |
                        create_JumpOpcodeExtension_X1(J_JUMP_OPCODE_X1) |
                        create_JumpOff_X1(pcrel_by_instr);
        }

        if (addr == FTRACE_ADDR) {
                /* opcode: or r10, lr, zero */
                opcode_x0 =
                        create_Dest_X0(10) |
                        create_SrcA_X0(TREG_LR) |
                        create_SrcB_X0(TREG_ZERO) |
                        create_RRROpcodeExtension_X0(OR_RRR_0_OPCODE_X0) |
                        create_Opcode_X0(RRR_0_OPCODE_X0);
        } else {
                /* opcode: fnop */
                opcode_x0 =
                        create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
                        create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
                        create_Opcode_X0(RRR_0_OPCODE_X0);
        }

        return opcode_x1 | opcode_x0;
}

static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
        return NOP();
}

static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
        return ftrace_gen_branch(pc, addr, true);
}

static int ftrace_modify_code(unsigned long pc, unsigned long old,
                              unsigned long new)
{
        unsigned long pc_wr;

        /* Check if the address is in kernel text or module space. */
        if (!kernel_text_address(pc))
                return -EINVAL;

        /* Operate on writable kernel text mapping. */
        pc_wr = pc - MEM_SV_START + PAGE_OFFSET;

        if (probe_kernel_write((void *)pc_wr, &new, MCOUNT_INSN_SIZE))
                return -EPERM;

        smp_wmb();

        if (!machine_stopped && num_online_cpus() > 1)
                flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);

        return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long pc, old;
        unsigned long new;
        int ret;

        pc = (unsigned long)&ftrace_call;
        memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE);
        new = ftrace_call_replace(pc, (unsigned long)func);

        ret = ftrace_modify_code(pc, old, new);

        return ret;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long new, old;
        unsigned long ip = rec->ip;

        old = ftrace_nop_replace(rec);
        new = ftrace_call_replace(ip, addr);

        return ftrace_modify_code(rec->ip, old, new);
}

int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        unsigned long old;
        unsigned long new;
        int ret;

        old = ftrace_call_replace(ip, addr);
        new = ftrace_nop_replace(rec);
        ret = ftrace_modify_code(ip, old, new);

        return ret;
}

int __init ftrace_dyn_arch_init(void *data)
{
        *(unsigned long *)data = 0;

        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
                           unsigned long frame_pointer)
{
        unsigned long return_hooker = (unsigned long) &return_to_handler;
        struct ftrace_graph_ent trace;
        unsigned long old;
        int err;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        old = *parent;
        *parent = return_hooker;

        err = ftrace_push_return_trace(old, self_addr, &trace.depth,
                                       frame_pointer);
        if (err == -EBUSY) {
                *parent = old;
                return;
        }

        trace.func = self_addr;

        /* Only trace if the calling function expects to */
        if (!ftrace_graph_entry(&trace)) {
                current->curr_ret_stack--;
                *parent = old;
        }
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;

static int __ftrace_modify_caller(unsigned long *callsite,
                                  void (*func) (void), bool enable)
{
        unsigned long caller_fn = (unsigned long) func;
        unsigned long pc = (unsigned long) callsite;
        unsigned long branch = ftrace_gen_branch(pc, caller_fn, false);
        unsigned long nop = NOP();
        unsigned long old = enable ? nop : branch;
        unsigned long new = enable ? branch : nop;

        return ftrace_modify_code(pc, old, new);
}

static int ftrace_modify_graph_caller(bool enable)
{
        int ret;

        ret = __ftrace_modify_caller(&ftrace_graph_call,
                                     ftrace_graph_caller,
                                     enable);

        return ret;
}

int ftrace_enable_ftrace_graph_caller(void)
{
        return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
|
@ -272,9 +272,9 @@ static void hardwall_setup_func(void *info)
|
|||
struct hardwall_info *r = info;
|
||||
struct hardwall_type *hwt = r->type;
|
||||
|
||||
int cpu = smp_processor_id();
|
||||
int x = cpu % smp_width;
|
||||
int y = cpu / smp_width;
|
||||
int cpu = smp_processor_id(); /* on_each_cpu disables preemption */
|
||||
int x = cpu_x(cpu);
|
||||
int y = cpu_y(cpu);
|
||||
int bits = 0;
|
||||
if (x == r->ulhc_x)
|
||||
bits |= W_PROTECT;
|
||||
|
@ -317,6 +317,7 @@ static void hardwall_protect_rectangle(struct hardwall_info *r)
|
|||
on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1);
|
||||
}
|
||||
|
||||
/* Entered from INT_xDN_FIREWALL interrupt vector with irqs disabled. */
|
||||
void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
|
||||
{
|
||||
struct hardwall_info *rect;
|
||||
|
@ -325,7 +326,6 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
|
|||
struct siginfo info;
|
||||
int cpu = smp_processor_id();
|
||||
int found_processes;
|
||||
unsigned long flags;
|
||||
struct pt_regs *old_regs = set_irq_regs(regs);
|
||||
|
||||
irq_enter();
|
||||
|
@ -346,7 +346,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
|
|||
BUG_ON(hwt->disabled);
|
||||
|
||||
/* This tile trapped a network access; find the rectangle. */
|
||||
spin_lock_irqsave(&hwt->lock, flags);
|
||||
spin_lock(&hwt->lock);
|
||||
list_for_each_entry(rect, &hwt->list, list) {
|
||||
if (cpumask_test_cpu(cpu, &rect->cpumask))
|
||||
break;
|
||||
|
@ -401,7 +401,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
|
|||
pr_notice("hardwall: no associated processes!\n");
|
||||
|
||||
done:
|
||||
spin_unlock_irqrestore(&hwt->lock, flags);
|
||||
spin_unlock(&hwt->lock);
|
||||
|
||||
/*
|
||||
* We have to disable firewall interrupts now, or else when we
|
||||
|
@ -540,6 +540,14 @@ static struct hardwall_info *hardwall_create(struct hardwall_type *hwt,
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Eliminate cpus that are not part of this Linux client.
|
||||
* Note that this allows for configurations that we might not want to
|
||||
* support, such as one client on every even cpu, another client on
|
||||
* every odd cpu.
|
||||
*/
|
||||
cpumask_and(&info->cpumask, &info->cpumask, cpu_online_mask);
|
||||
|
||||
/* Confirm it doesn't overlap and add it to the list. */
|
||||
spin_lock_irqsave(&hwt->lock, flags);
|
||||
list_for_each_entry(iter, &hwt->list, list) {
|
||||
|
@ -612,7 +620,7 @@ static int hardwall_activate(struct hardwall_info *info)
|
|||
|
||||
/*
|
||||
* Deactivate a task's hardwall. Must hold lock for hardwall_type.
|
||||
* This method may be called from free_task(), so we don't want to
|
||||
* This method may be called from exit_thread(), so we don't want to
|
||||
* rely on too many fields of struct task_struct still being valid.
|
||||
* We assume the cpus_allowed, pid, and comm fields are still valid.
|
||||
*/
|
||||
|
@ -653,7 +661,7 @@ static int hardwall_deactivate(struct hardwall_type *hwt,
|
|||
return -EINVAL;
|
||||
|
||||
printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n",
|
||||
task->pid, task->comm, hwt->name, smp_processor_id());
|
||||
task->pid, task->comm, hwt->name, raw_smp_processor_id());
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -795,8 +803,8 @@ static void reset_xdn_network_state(struct hardwall_type *hwt)
|
|||
/* Reset UDN coordinates to their standard value */
|
||||
{
|
||||
unsigned int cpu = smp_processor_id();
|
||||
unsigned int x = cpu % smp_width;
|
||||
unsigned int y = cpu / smp_width;
|
||||
unsigned int x = cpu_x(cpu);
|
||||
unsigned int y = cpu_y(cpu);
|
||||
__insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
|
||||
}
|
||||
|
||||
|
|
|
@@ -39,12 +39,12 @@ ENTRY(_start)
}
{
moveli r0, _HV_VERSION_OLD_HV_INIT
jal hv_init
jal _hv_init
}
/* Get a reasonable default ASID in r0 */
{
move r0, zero
jal hv_inquire_asid
jal _hv_inquire_asid
}
/* Install the default page table */
{

@@ -64,7 +64,7 @@ ENTRY(_start)
auli r0, r0, ha16(swapper_pg_dir - PAGE_OFFSET)
}
{
inv r6
finv r6
move r1, zero /* high 32 bits of CPA is zero */
}
{

@@ -73,12 +73,12 @@ ENTRY(_start)
}
{
auli lr, lr, ha16(1f)
j hv_install_context
j _hv_install_context
}
1:

/* Get our processor number and save it away in SAVE_K_0. */
jal hv_inquire_topology
jal _hv_inquire_topology
mulll_uu r4, r1, r2 /* r1 == y, r2 == width */
add r4, r4, r0 /* r0 == x, so r4 == cpu == y*width + x */

@@ -86,7 +86,7 @@ ENTRY(_start)
/*
* Load up our per-cpu offset. When the first (master) tile
* boots, this value is still zero, so we will load boot_pc
* with start_kernel, and boot_sp with init_stack + THREAD_SIZE.
* with start_kernel, and boot_sp at the top of init_stack.
* The master tile initializes the per-cpu offset array, so that
* when subsequent (secondary) tiles boot, they will instead load
* from their per-cpu versions of boot_sp and boot_pc.

@@ -126,7 +126,6 @@ ENTRY(_start)
lw sp, r1
or r4, sp, r4
mtspr SPR_SYSTEM_SAVE_K_0, r4 /* save ksp0 + cpu */
addi sp, sp, -STACK_TOP_DELTA
{
move lr, zero /* stop backtraces in the called function */
jr r0

@@ -163,8 +162,8 @@ ENTRY(swapper_pg_dir)
.set addr, addr + PGDIR_SIZE
.endr

/* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */
PTE MEM_SV_INTRPT, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
/* The true text VAs are mapped as VA = PA + MEM_SV_START */
PTE MEM_SV_START, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
(1 << (HV_PTE_INDEX_EXECUTABLE - 32))
.org swapper_pg_dir + PGDIR_SIZE
END(swapper_pg_dir)

@@ -25,6 +25,15 @@
#include <arch/chip.h>
#include <arch/spr_def.h>

/* Extract two 32-bit bit values that were read into one register. */
#ifdef __BIG_ENDIAN__
#define GET_FIRST_INT(rd, rs) shrsi rd, rs, 32
#define GET_SECOND_INT(rd, rs) addxi rd, rs, 0
#else
#define GET_FIRST_INT(rd, rs) addxi rd, rs, 0
#define GET_SECOND_INT(rd, rs) shrsi rd, rs, 32
#endif

/*
* This module contains the entry code for kernel images. It performs the
* minimal setup needed to call the generic C routines.

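A side note on the GET_FIRST_INT/GET_SECOND_INT macros added above: they pick which half of a 64-bit register holds each of two packed 32-bit values, and both variants sign-extend the result (shrsi is an arithmetic shift; addxi sign-extends the low 32 bits). A minimal C sketch of the same selection, purely illustrative and not code from this patch:

#include <stdint.h>

/* Split two 32-bit values that were read into one 64-bit register. */
static int64_t get_first_int(uint64_t rs)
{
#ifdef __BIG_ENDIAN__
	return (int64_t)rs >> 32;		/* like shrsi rd, rs, 32 */
#else
	return (int32_t)(rs & 0xffffffff);	/* like addxi rd, rs, 0 */
#endif
}

static int64_t get_second_int(uint64_t rs)
{
#ifdef __BIG_ENDIAN__
	return (int32_t)(rs & 0xffffffff);
#else
	return (int64_t)rs >> 32;
#endif
}
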
@@ -46,11 +55,11 @@ ENTRY(_start)
movei r2, TILE_CHIP_REV
movei r3, KERNEL_PL
}
jal hv_init
jal _hv_init
/* Get a reasonable default ASID in r0 */
{
move r0, zero
jal hv_inquire_asid
jal _hv_inquire_asid
}

/*

@@ -61,7 +70,7 @@ ENTRY(_start)
* other CPUs should see a properly-constructed page table.
*/
{
v4int_l r2, zero, r0 /* ASID for hv_install_context */
GET_FIRST_INT(r2, r0) /* ASID for hv_install_context */
moveli r4, hw1_last(swapper_pgprot - PAGE_OFFSET)
}
{

@@ -77,7 +86,7 @@ ENTRY(_start)
{
/* After initializing swapper_pgprot, HV_PTE_GLOBAL is set. */
bfextu r7, r1, HV_PTE_INDEX_GLOBAL, HV_PTE_INDEX_GLOBAL
inv r4
finv r4
}
bnez r7, .Lno_write
{

@@ -121,29 +130,24 @@ ENTRY(_start)
}
{
moveli r3, CTX_PAGE_FLAG
j hv_install_context
j _hv_install_context
}
1:

/* Install the interrupt base. */
moveli r0, hw2_last(MEM_SV_START)
shl16insli r0, r0, hw1(MEM_SV_START)
shl16insli r0, r0, hw0(MEM_SV_START)
moveli r0, hw2_last(intrpt_start)
shl16insli r0, r0, hw1(intrpt_start)
shl16insli r0, r0, hw0(intrpt_start)
mtspr SPR_INTERRUPT_VECTOR_BASE_K, r0

/*
* Get our processor number and save it away in SAVE_K_0.
* Extract stuff from the topology structure: r4 = y, r6 = x,
* r5 = width. FIXME: consider whether we want to just make these
* 64-bit values (and if so fix smp_topology write below, too).
*/
jal hv_inquire_topology
/* Get our processor number and save it away in SAVE_K_0. */
jal _hv_inquire_topology
{
v4int_l r5, zero, r1 /* r5 = width */
shrui r4, r0, 32 /* r4 = y */
GET_FIRST_INT(r5, r1) /* r5 = width */
GET_SECOND_INT(r4, r0) /* r4 = y */
}
{
v4int_l r6, zero, r0 /* r6 = x */
GET_FIRST_INT(r6, r0) /* r6 = x */
mul_lu_lu r4, r4, r5
}
{

@@ -154,7 +158,7 @@ ENTRY(_start)
/*
* Load up our per-cpu offset. When the first (master) tile
* boots, this value is still zero, so we will load boot_pc
* with start_kernel, and boot_sp with init_stack + THREAD_SIZE.
* with start_kernel, and boot_sp at the top of init_stack.
* The master tile initializes the per-cpu offset array, so that
* when subsequent (secondary) tiles boot, they will instead load
* from their per-cpu versions of boot_sp and boot_pc.

@@ -198,9 +202,9 @@ ENTRY(_start)
}
ld r0, r0
ld sp, r1
or r4, sp, r4
shli r4, r4, CPU_SHIFT
bfins r4, sp, 0, CPU_SHIFT-1
mtspr SPR_SYSTEM_SAVE_K_0, r4 /* save ksp0 + cpu */
addi sp, sp, -STACK_TOP_DELTA
{
move lr, zero /* stop backtraces in the called function */
jr r0

@@ -0,0 +1,74 @@
/* Hypervisor call vector addresses; see <hv/hypervisor.h> */
.macro gensym sym, val, size
.org \val
.global _\sym
.type _\sym,function
_\sym:
.size _\sym,\size
#ifndef CONFIG_TILE_HVGLUE_TRACE
.globl \sym
.set \sym,_\sym
#endif
.endm

.section .hvglue,"x",@nobits
.align 8
gensym hv_init, 0x20, 32
gensym hv_install_context, 0x40, 32
gensym hv_sysconf, 0x60, 32
gensym hv_get_rtc, 0x80, 32
gensym hv_set_rtc, 0xa0, 32
gensym hv_flush_asid, 0xc0, 32
gensym hv_flush_page, 0xe0, 32
gensym hv_flush_pages, 0x100, 32
gensym hv_restart, 0x120, 32
gensym hv_halt, 0x140, 32
gensym hv_power_off, 0x160, 32
gensym hv_inquire_physical, 0x180, 32
gensym hv_inquire_memory_controller, 0x1a0, 32
gensym hv_inquire_virtual, 0x1c0, 32
gensym hv_inquire_asid, 0x1e0, 32
gensym hv_nanosleep, 0x200, 32
gensym hv_console_read_if_ready, 0x220, 32
gensym hv_console_write, 0x240, 32
gensym hv_downcall_dispatch, 0x260, 32
gensym hv_inquire_topology, 0x280, 32
gensym hv_fs_findfile, 0x2a0, 32
gensym hv_fs_fstat, 0x2c0, 32
gensym hv_fs_pread, 0x2e0, 32
gensym hv_physaddr_read64, 0x300, 32
gensym hv_physaddr_write64, 0x320, 32
gensym hv_get_command_line, 0x340, 32
gensym hv_set_caching, 0x360, 32
gensym hv_bzero_page, 0x380, 32
gensym hv_register_message_state, 0x3a0, 32
gensym hv_send_message, 0x3c0, 32
gensym hv_receive_message, 0x3e0, 32
gensym hv_inquire_context, 0x400, 32
gensym hv_start_all_tiles, 0x420, 32
gensym hv_dev_open, 0x440, 32
gensym hv_dev_close, 0x460, 32
gensym hv_dev_pread, 0x480, 32
gensym hv_dev_pwrite, 0x4a0, 32
gensym hv_dev_poll, 0x4c0, 32
gensym hv_dev_poll_cancel, 0x4e0, 32
gensym hv_dev_preada, 0x500, 32
gensym hv_dev_pwritea, 0x520, 32
gensym hv_flush_remote, 0x540, 32
gensym hv_console_putc, 0x560, 32
gensym hv_inquire_tiles, 0x580, 32
gensym hv_confstr, 0x5a0, 32
gensym hv_reexec, 0x5c0, 32
gensym hv_set_command_line, 0x5e0, 32
gensym hv_clear_intr, 0x600, 32
gensym hv_enable_intr, 0x620, 32
gensym hv_disable_intr, 0x640, 32
gensym hv_raise_intr, 0x660, 32
gensym hv_trigger_ipi, 0x680, 32
gensym hv_store_mapping, 0x6a0, 32
gensym hv_inquire_realpa, 0x6c0, 32
gensym hv_flush_all, 0x6e0, 32
gensym hv_get_ipi_pte, 0x700, 32
gensym hv_set_pte_super_shift, 0x720, 32
gensym hv_console_set_ipi, 0x7e0, 32
gensym hv_glue_internals, 0x800, 30720

@@ -1,59 +0,0 @@
/* Hypervisor call vector addresses; see <hv/hypervisor.h> */
hv_init = TEXT_OFFSET + 0x10020;
hv_install_context = TEXT_OFFSET + 0x10040;
hv_sysconf = TEXT_OFFSET + 0x10060;
hv_get_rtc = TEXT_OFFSET + 0x10080;
hv_set_rtc = TEXT_OFFSET + 0x100a0;
hv_flush_asid = TEXT_OFFSET + 0x100c0;
hv_flush_page = TEXT_OFFSET + 0x100e0;
hv_flush_pages = TEXT_OFFSET + 0x10100;
hv_restart = TEXT_OFFSET + 0x10120;
hv_halt = TEXT_OFFSET + 0x10140;
hv_power_off = TEXT_OFFSET + 0x10160;
hv_inquire_physical = TEXT_OFFSET + 0x10180;
hv_inquire_memory_controller = TEXT_OFFSET + 0x101a0;
hv_inquire_virtual = TEXT_OFFSET + 0x101c0;
hv_inquire_asid = TEXT_OFFSET + 0x101e0;
hv_nanosleep = TEXT_OFFSET + 0x10200;
hv_console_read_if_ready = TEXT_OFFSET + 0x10220;
hv_console_write = TEXT_OFFSET + 0x10240;
hv_downcall_dispatch = TEXT_OFFSET + 0x10260;
hv_inquire_topology = TEXT_OFFSET + 0x10280;
hv_fs_findfile = TEXT_OFFSET + 0x102a0;
hv_fs_fstat = TEXT_OFFSET + 0x102c0;
hv_fs_pread = TEXT_OFFSET + 0x102e0;
hv_physaddr_read64 = TEXT_OFFSET + 0x10300;
hv_physaddr_write64 = TEXT_OFFSET + 0x10320;
hv_get_command_line = TEXT_OFFSET + 0x10340;
hv_set_caching = TEXT_OFFSET + 0x10360;
hv_bzero_page = TEXT_OFFSET + 0x10380;
hv_register_message_state = TEXT_OFFSET + 0x103a0;
hv_send_message = TEXT_OFFSET + 0x103c0;
hv_receive_message = TEXT_OFFSET + 0x103e0;
hv_inquire_context = TEXT_OFFSET + 0x10400;
hv_start_all_tiles = TEXT_OFFSET + 0x10420;
hv_dev_open = TEXT_OFFSET + 0x10440;
hv_dev_close = TEXT_OFFSET + 0x10460;
hv_dev_pread = TEXT_OFFSET + 0x10480;
hv_dev_pwrite = TEXT_OFFSET + 0x104a0;
hv_dev_poll = TEXT_OFFSET + 0x104c0;
hv_dev_poll_cancel = TEXT_OFFSET + 0x104e0;
hv_dev_preada = TEXT_OFFSET + 0x10500;
hv_dev_pwritea = TEXT_OFFSET + 0x10520;
hv_flush_remote = TEXT_OFFSET + 0x10540;
hv_console_putc = TEXT_OFFSET + 0x10560;
hv_inquire_tiles = TEXT_OFFSET + 0x10580;
hv_confstr = TEXT_OFFSET + 0x105a0;
hv_reexec = TEXT_OFFSET + 0x105c0;
hv_set_command_line = TEXT_OFFSET + 0x105e0;
hv_clear_intr = TEXT_OFFSET + 0x10600;
hv_enable_intr = TEXT_OFFSET + 0x10620;
hv_disable_intr = TEXT_OFFSET + 0x10640;
hv_raise_intr = TEXT_OFFSET + 0x10660;
hv_trigger_ipi = TEXT_OFFSET + 0x10680;
hv_store_mapping = TEXT_OFFSET + 0x106a0;
hv_inquire_realpa = TEXT_OFFSET + 0x106c0;
hv_flush_all = TEXT_OFFSET + 0x106e0;
hv_get_ipi_pte = TEXT_OFFSET + 0x10700;
hv_set_pte_super_shift = TEXT_OFFSET + 0x10720;
hv_glue_internals = TEXT_OFFSET + 0x10740;

@@ -0,0 +1,266 @@
/*
* Copyright 2013 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/

/*
* Pull in the hypervisor header so we declare all the ABI functions
* with the underscore versions, then undef the names so that we can
* provide our own wrapper versions.
*/
#define hv_init _hv_init
#define hv_install_context _hv_install_context
#define hv_sysconf _hv_sysconf
#define hv_get_rtc _hv_get_rtc
#define hv_set_rtc _hv_set_rtc
#define hv_flush_asid _hv_flush_asid
#define hv_flush_page _hv_flush_page
#define hv_flush_pages _hv_flush_pages
#define hv_restart _hv_restart
#define hv_halt _hv_halt
#define hv_power_off _hv_power_off
#define hv_inquire_physical _hv_inquire_physical
#define hv_inquire_memory_controller _hv_inquire_memory_controller
#define hv_inquire_virtual _hv_inquire_virtual
#define hv_inquire_asid _hv_inquire_asid
#define hv_nanosleep _hv_nanosleep
#define hv_console_read_if_ready _hv_console_read_if_ready
#define hv_console_write _hv_console_write
#define hv_downcall_dispatch _hv_downcall_dispatch
#define hv_inquire_topology _hv_inquire_topology
#define hv_fs_findfile _hv_fs_findfile
#define hv_fs_fstat _hv_fs_fstat
#define hv_fs_pread _hv_fs_pread
#define hv_physaddr_read64 _hv_physaddr_read64
#define hv_physaddr_write64 _hv_physaddr_write64
#define hv_get_command_line _hv_get_command_line
#define hv_set_caching _hv_set_caching
#define hv_bzero_page _hv_bzero_page
#define hv_register_message_state _hv_register_message_state
#define hv_send_message _hv_send_message
#define hv_receive_message _hv_receive_message
#define hv_inquire_context _hv_inquire_context
#define hv_start_all_tiles _hv_start_all_tiles
#define hv_dev_open _hv_dev_open
#define hv_dev_close _hv_dev_close
#define hv_dev_pread _hv_dev_pread
#define hv_dev_pwrite _hv_dev_pwrite
#define hv_dev_poll _hv_dev_poll
#define hv_dev_poll_cancel _hv_dev_poll_cancel
#define hv_dev_preada _hv_dev_preada
#define hv_dev_pwritea _hv_dev_pwritea
#define hv_flush_remote _hv_flush_remote
#define hv_console_putc _hv_console_putc
#define hv_inquire_tiles _hv_inquire_tiles
#define hv_confstr _hv_confstr
#define hv_reexec _hv_reexec
#define hv_set_command_line _hv_set_command_line
#define hv_clear_intr _hv_clear_intr
#define hv_enable_intr _hv_enable_intr
#define hv_disable_intr _hv_disable_intr
#define hv_raise_intr _hv_raise_intr
#define hv_trigger_ipi _hv_trigger_ipi
#define hv_store_mapping _hv_store_mapping
#define hv_inquire_realpa _hv_inquire_realpa
#define hv_flush_all _hv_flush_all
#define hv_get_ipi_pte _hv_get_ipi_pte
#define hv_set_pte_super_shift _hv_set_pte_super_shift
#define hv_console_set_ipi _hv_console_set_ipi
#include <hv/hypervisor.h>
#undef hv_init
#undef hv_install_context
#undef hv_sysconf
#undef hv_get_rtc
#undef hv_set_rtc
#undef hv_flush_asid
#undef hv_flush_page
#undef hv_flush_pages
#undef hv_restart
#undef hv_halt
#undef hv_power_off
#undef hv_inquire_physical
#undef hv_inquire_memory_controller
#undef hv_inquire_virtual
#undef hv_inquire_asid
#undef hv_nanosleep
#undef hv_console_read_if_ready
#undef hv_console_write
#undef hv_downcall_dispatch
#undef hv_inquire_topology
#undef hv_fs_findfile
#undef hv_fs_fstat
#undef hv_fs_pread
#undef hv_physaddr_read64
#undef hv_physaddr_write64
#undef hv_get_command_line
#undef hv_set_caching
#undef hv_bzero_page
#undef hv_register_message_state
#undef hv_send_message
#undef hv_receive_message
#undef hv_inquire_context
#undef hv_start_all_tiles
#undef hv_dev_open
#undef hv_dev_close
#undef hv_dev_pread
#undef hv_dev_pwrite
#undef hv_dev_poll
#undef hv_dev_poll_cancel
#undef hv_dev_preada
#undef hv_dev_pwritea
#undef hv_flush_remote
#undef hv_console_putc
#undef hv_inquire_tiles
#undef hv_confstr
#undef hv_reexec
#undef hv_set_command_line
#undef hv_clear_intr
#undef hv_enable_intr
#undef hv_disable_intr
#undef hv_raise_intr
#undef hv_trigger_ipi
#undef hv_store_mapping
#undef hv_inquire_realpa
#undef hv_flush_all
#undef hv_get_ipi_pte
#undef hv_set_pte_super_shift
#undef hv_console_set_ipi

/*
* Provide macros based on <linux/syscalls.h> to provide a wrapper
* function that invokes the same function with an underscore prefix.
* We can't use the existing __SC_xxx macros because we need to
* support up to nine arguments rather than up to six, and also this
* way the file stands alone from possible changes in the
* implementation of <linux/syscalls.h>.
*/
#define HV_WRAP0(type, name) \
type name(void); \
type name(void) \
{ \
return _##name(); \
}
#define __HV_DECL1(t1, a1) t1 a1
#define __HV_DECL2(t2, a2, ...) t2 a2, __HV_DECL1(__VA_ARGS__)
#define __HV_DECL3(t3, a3, ...) t3 a3, __HV_DECL2(__VA_ARGS__)
#define __HV_DECL4(t4, a4, ...) t4 a4, __HV_DECL3(__VA_ARGS__)
#define __HV_DECL5(t5, a5, ...) t5 a5, __HV_DECL4(__VA_ARGS__)
#define __HV_DECL6(t6, a6, ...) t6 a6, __HV_DECL5(__VA_ARGS__)
#define __HV_DECL7(t7, a7, ...) t7 a7, __HV_DECL6(__VA_ARGS__)
#define __HV_DECL8(t8, a8, ...) t8 a8, __HV_DECL7(__VA_ARGS__)
#define __HV_DECL9(t9, a9, ...) t9 a9, __HV_DECL8(__VA_ARGS__)
#define __HV_PASS1(t1, a1) a1
#define __HV_PASS2(t2, a2, ...) a2, __HV_PASS1(__VA_ARGS__)
#define __HV_PASS3(t3, a3, ...) a3, __HV_PASS2(__VA_ARGS__)
#define __HV_PASS4(t4, a4, ...) a4, __HV_PASS3(__VA_ARGS__)
#define __HV_PASS5(t5, a5, ...) a5, __HV_PASS4(__VA_ARGS__)
#define __HV_PASS6(t6, a6, ...) a6, __HV_PASS5(__VA_ARGS__)
#define __HV_PASS7(t7, a7, ...) a7, __HV_PASS6(__VA_ARGS__)
#define __HV_PASS8(t8, a8, ...) a8, __HV_PASS7(__VA_ARGS__)
#define __HV_PASS9(t9, a9, ...) a9, __HV_PASS8(__VA_ARGS__)
#define HV_WRAPx(x, type, name, ...) \
type name(__HV_DECL##x(__VA_ARGS__)); \
type name(__HV_DECL##x(__VA_ARGS__)) \
{ \
return _##name(__HV_PASS##x(__VA_ARGS__)); \
}
#define HV_WRAP1(type, name, ...) HV_WRAPx(1, type, name, __VA_ARGS__)
#define HV_WRAP2(type, name, ...) HV_WRAPx(2, type, name, __VA_ARGS__)
#define HV_WRAP3(type, name, ...) HV_WRAPx(3, type, name, __VA_ARGS__)
#define HV_WRAP4(type, name, ...) HV_WRAPx(4, type, name, __VA_ARGS__)
#define HV_WRAP5(type, name, ...) HV_WRAPx(5, type, name, __VA_ARGS__)
#define HV_WRAP6(type, name, ...) HV_WRAPx(6, type, name, __VA_ARGS__)
#define HV_WRAP7(type, name, ...) HV_WRAPx(7, type, name, __VA_ARGS__)
#define HV_WRAP8(type, name, ...) HV_WRAPx(8, type, name, __VA_ARGS__)
#define HV_WRAP9(type, name, ...) HV_WRAPx(9, type, name, __VA_ARGS__)

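To make the macro machinery above concrete, here is what one of the wrappers declared later in this file expands to, worked out by hand from the definitions (an illustration, not literal preprocessor output):

/* HV_WRAP2(int, hv_console_write, HV_VirtAddr, bytes, int, len)
 * becomes, after __HV_DECL2/__HV_PASS2 peel off the type/name pairs:
 */
int hv_console_write(HV_VirtAddr bytes, int len);
int hv_console_write(HV_VirtAddr bytes, int len)
{
	return _hv_console_write(bytes, len);
}
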
/* List all the hypervisor API functions. */
HV_WRAP4(void, hv_init, HV_VersionNumber, interface_version_number,
int, chip_num, int, chip_rev_num, int, client_pl)
HV_WRAP1(long, hv_sysconf, HV_SysconfQuery, query)
HV_WRAP3(int, hv_confstr, HV_ConfstrQuery, query, HV_VirtAddr, buf, int, len)
#if CHIP_HAS_IPI()
HV_WRAP3(int, hv_get_ipi_pte, HV_Coord, tile, int, pl, HV_PTE*, pte)
HV_WRAP3(int, hv_console_set_ipi, int, ipi, int, event, HV_Coord, coord);
#else
HV_WRAP1(void, hv_enable_intr, HV_IntrMask, enab_mask)
HV_WRAP1(void, hv_disable_intr, HV_IntrMask, disab_mask)
HV_WRAP1(void, hv_clear_intr, HV_IntrMask, clear_mask)
HV_WRAP1(void, hv_raise_intr, HV_IntrMask, raise_mask)
HV_WRAP2(HV_Errno, hv_trigger_ipi, HV_Coord, tile, int, interrupt)
#endif /* !CHIP_HAS_IPI() */
HV_WRAP3(int, hv_store_mapping, HV_VirtAddr, va, unsigned int, len,
HV_PhysAddr, pa)
HV_WRAP2(HV_PhysAddr, hv_inquire_realpa, HV_PhysAddr, cpa, unsigned int, len)
HV_WRAP0(HV_RTCTime, hv_get_rtc)
HV_WRAP1(void, hv_set_rtc, HV_RTCTime, time)
HV_WRAP4(int, hv_install_context, HV_PhysAddr, page_table, HV_PTE, access,
HV_ASID, asid, __hv32, flags)
HV_WRAP2(int, hv_set_pte_super_shift, int, level, int, log2_count)
HV_WRAP0(HV_Context, hv_inquire_context)
HV_WRAP1(int, hv_flush_asid, HV_ASID, asid)
HV_WRAP2(int, hv_flush_page, HV_VirtAddr, address, HV_PageSize, page_size)
HV_WRAP3(int, hv_flush_pages, HV_VirtAddr, start, HV_PageSize, page_size,
unsigned long, size)
HV_WRAP1(int, hv_flush_all, int, preserve_global)
HV_WRAP2(void, hv_restart, HV_VirtAddr, cmd, HV_VirtAddr, args)
HV_WRAP0(void, hv_halt)
HV_WRAP0(void, hv_power_off)
HV_WRAP1(int, hv_reexec, HV_PhysAddr, entry)
HV_WRAP0(HV_Topology, hv_inquire_topology)
HV_WRAP3(HV_Errno, hv_inquire_tiles, HV_InqTileSet, set, HV_VirtAddr, cpumask,
int, length)
HV_WRAP1(HV_PhysAddrRange, hv_inquire_physical, int, idx)
HV_WRAP2(HV_MemoryControllerInfo, hv_inquire_memory_controller, HV_Coord, coord,
int, controller)
HV_WRAP1(HV_VirtAddrRange, hv_inquire_virtual, int, idx)
HV_WRAP1(HV_ASIDRange, hv_inquire_asid, int, idx)
HV_WRAP1(void, hv_nanosleep, int, nanosecs)
HV_WRAP0(int, hv_console_read_if_ready)
HV_WRAP1(void, hv_console_putc, int, byte)
HV_WRAP2(int, hv_console_write, HV_VirtAddr, bytes, int, len)
HV_WRAP0(void, hv_downcall_dispatch)
HV_WRAP1(int, hv_fs_findfile, HV_VirtAddr, filename)
HV_WRAP1(HV_FS_StatInfo, hv_fs_fstat, int, inode)
HV_WRAP4(int, hv_fs_pread, int, inode, HV_VirtAddr, buf,
int, length, int, offset)
HV_WRAP2(unsigned long long, hv_physaddr_read64, HV_PhysAddr, addr,
HV_PTE, access)
HV_WRAP3(void, hv_physaddr_write64, HV_PhysAddr, addr, HV_PTE, access,
unsigned long long, val)
HV_WRAP2(int, hv_get_command_line, HV_VirtAddr, buf, int, length)
HV_WRAP2(HV_Errno, hv_set_command_line, HV_VirtAddr, buf, int, length)
HV_WRAP1(void, hv_set_caching, unsigned long, bitmask)
HV_WRAP2(void, hv_bzero_page, HV_VirtAddr, va, unsigned int, size)
HV_WRAP1(HV_Errno, hv_register_message_state, HV_MsgState*, msgstate)
HV_WRAP4(int, hv_send_message, HV_Recipient *, recips, int, nrecip,
HV_VirtAddr, buf, int, buflen)
HV_WRAP3(HV_RcvMsgInfo, hv_receive_message, HV_MsgState, msgstate,
HV_VirtAddr, buf, int, buflen)
HV_WRAP0(void, hv_start_all_tiles)
HV_WRAP2(int, hv_dev_open, HV_VirtAddr, name, __hv32, flags)
HV_WRAP1(int, hv_dev_close, int, devhdl)
HV_WRAP5(int, hv_dev_pread, int, devhdl, __hv32, flags, HV_VirtAddr, va,
__hv32, len, __hv64, offset)
HV_WRAP5(int, hv_dev_pwrite, int, devhdl, __hv32, flags, HV_VirtAddr, va,
__hv32, len, __hv64, offset)
HV_WRAP3(int, hv_dev_poll, int, devhdl, __hv32, events, HV_IntArg, intarg)
HV_WRAP1(int, hv_dev_poll_cancel, int, devhdl)
HV_WRAP6(int, hv_dev_preada, int, devhdl, __hv32, flags, __hv32, sgl_len,
HV_SGL *, sglp, __hv64, offset, HV_IntArg, intarg)
HV_WRAP6(int, hv_dev_pwritea, int, devhdl, __hv32, flags, __hv32, sgl_len,
HV_SGL *, sglp, __hv64, offset, HV_IntArg, intarg)
HV_WRAP9(int, hv_flush_remote, HV_PhysAddr, cache_pa,
unsigned long, cache_control, unsigned long*, cache_cpumask,
HV_VirtAddr, tlb_va, unsigned long, tlb_length,
unsigned long, tlb_pgsize, unsigned long*, tlb_cpumask,
HV_Remote_ASID*, asids, int, asidcount)

@@ -28,20 +28,10 @@
#include <arch/interrupts.h>
#include <arch/spr_def.h>

#ifdef CONFIG_PREEMPT
# error "No support for kernel preemption currently"
#endif

#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)

#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)

#if !CHIP_HAS_WH64()
/* By making this an empty macro, we can use wh64 in the code. */
.macro wh64 reg
.endm
#endif

.macro push_reg reg, ptr=sp, delta=-4
{
sw \ptr, \reg

@@ -189,7 +179,7 @@ intvec_\vecname:
* point sp at the top aligned address on the actual stack page.
*/
mfspr r0, SPR_SYSTEM_SAVE_K_0
mm r0, r0, zero, LOG2_THREAD_SIZE, 31
mm r0, r0, zero, LOG2_NR_CPU_IDS, 31

0:
/*

@@ -207,6 +197,9 @@ intvec_\vecname:
* cache line 1: r14...r29
* cache line 0: 2 x frame, r0..r13
*/
#if STACK_TOP_DELTA != 64
#error STACK_TOP_DELTA must be 64 for assumptions here and in task_pt_regs()
#endif
andi r0, r0, -64

/*

@@ -326,18 +319,14 @@ intvec_\vecname:
movei r3, -1 /* not used, but set for consistency */
}
.else
#if CHIP_HAS_AUX_PERF_COUNTERS()
.ifc \c_routine, op_handle_aux_perf_interrupt
{
mfspr r2, AUX_PERF_COUNT_STS
movei r3, -1 /* not used, but set for consistency */
}
.else
#endif
movei r3, 0
#if CHIP_HAS_AUX_PERF_COUNTERS()
.endif
#endif
.endif
.endif
.endif

@@ -354,7 +343,7 @@ intvec_\vecname:
#ifdef __COLLECT_LINKER_FEEDBACK__
.pushsection .text.intvec_feedback,"ax"
.org (\vecnum << 5)
FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt1, 1 << 8)
FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt, 1 << 8)
jrp lr
.popsection
#endif

@@ -468,7 +457,7 @@ intvec_\vecname:
}
{
auli r21, r21, ha16(__per_cpu_offset)
mm r20, r20, zero, 0, LOG2_THREAD_SIZE-1
mm r20, r20, zero, 0, LOG2_NR_CPU_IDS-1
}
s2a r20, r20, r21
lw tp, r20

@@ -562,7 +551,6 @@ intvec_\vecname:
.endif
mtspr INTERRUPT_CRITICAL_SECTION, zero

#if CHIP_HAS_WH64()
/*
* Prepare the first 256 stack bytes to be rapidly accessible
* without having to fetch the background data. We don't really

@@ -583,7 +571,6 @@ intvec_\vecname:
addi r52, r52, -64
}
wh64 r52
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
.ifnc \function,handle_nmi

@@ -762,7 +749,7 @@ intvec_\vecname:
.macro dc_dispatch vecnum, vecname
.org (\vecnum << 8)
intvec_\vecname:
j hv_downcall_dispatch
j _hv_downcall_dispatch
ENDPROC(intvec_\vecname)
.endm

@@ -812,17 +799,34 @@ STD_ENTRY(interrupt_return)
}
lw r29, r29
andi r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
bzt r29, .Lresume_userspace

#ifdef CONFIG_PREEMPT
/* Returning to kernel space. Check if we need preemption. */
GET_THREAD_INFO(r29)
addli r28, r29, THREAD_INFO_FLAGS_OFFSET
{
bzt r29, .Lresume_userspace
PTREGS_PTR(r29, PTREGS_OFFSET_PC)
lw r28, r28
addli r29, r29, THREAD_INFO_PREEMPT_COUNT_OFFSET
}
{
andi r28, r28, _TIF_NEED_RESCHED
lw r29, r29
}
bzt r28, 1f
bnz r29, 1f
jal preempt_schedule_irq
FEEDBACK_REENTER(interrupt_return)
1:
#endif

/* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
{
lw r28, r29
PTREGS_PTR(r29, PTREGS_OFFSET_PC)
moveli r27, lo16(_cpu_idle_nap)
}
{
lw r28, r29
auli r27, r27, ha16(_cpu_idle_nap)
}
{

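The new CONFIG_PREEMPT block in interrupt_return above is easier to read as C. Roughly, and assuming the usual thread_info layout (a sketch, not the generated code):

/* Only preempt if we are returning to the kernel, a reschedule
 * is pending, and preemption is not currently disabled.
 */
if (!user_mode(regs)) {
	struct thread_info *ti = current_thread_info();

	if ((ti->flags & _TIF_NEED_RESCHED) && ti->preempt_count == 0)
		preempt_schedule_irq();
}
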
@@ -1420,7 +1424,6 @@ handle_ill:
{
lw r0, r0 /* indirect thru thread_info to get task_info*/
addi r1, sp, C_ABI_SAVE_AREA_SIZE /* put ptregs pointer into r1 */
move r2, zero /* load error code into r2 */
}

jal send_sigtrap /* issue a SIGTRAP */

@@ -1518,12 +1521,10 @@ STD_ENTRY(_sys_clone)
__HEAD
.align 64
/* Align much later jump on the start of a cache line. */
#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
nop
#if PAGE_SIZE >= 0x10000
nop
#endif
#endif
ENTRY(sys_cmpxchg)

/*

@@ -1557,45 +1558,6 @@ ENTRY(sys_cmpxchg)
# error Code here assumes PAGE_OFFSET can be loaded with just hi16()
#endif

#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
{
/* Check for unaligned input. */
bnz sp, .Lcmpxchg_badaddr
mm r25, r0, zero, 3, PAGE_SHIFT-1
}
{
crc32_32 r25, zero, r25
moveli r21, lo16(atomic_lock_ptr)
}
{
auli r21, r21, ha16(atomic_lock_ptr)
auli r23, zero, hi16(PAGE_OFFSET) /* hugepage-aligned */
}
{
shri r20, r25, 32 - ATOMIC_HASH_L1_SHIFT
slt_u r23, r0, r23
lw r26, r0 /* see comment in the "#else" for the "lw r26". */
}
{
s2a r21, r20, r21
bbns r23, .Lcmpxchg_badaddr
}
{
lw r21, r21
seqi r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64
andi r25, r25, ATOMIC_HASH_L2_SIZE - 1
}
{
/* Branch away at this point if we're doing a 64-bit cmpxchg. */
bbs r23, .Lcmpxchg64
andi r23, r0, 7 /* Precompute alignment for cmpxchg64. */
}
{
s2a ATOMIC_LOCK_REG_NAME, r25, r21
j .Lcmpxchg32_tns /* see comment in the #else for the jump. */
}

#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
{
/* Check for unaligned input. */
bnz sp, .Lcmpxchg_badaddr

@@ -1609,7 +1571,7 @@ ENTRY(sys_cmpxchg)
* Because of C pointer arithmetic, we want to compute this:
*
* ((char*)atomic_locks +
* (((r0 >> 3) & (1 << (ATOMIC_HASH_SIZE - 1))) << 2))
* (((r0 >> 3) & ((1 << ATOMIC_HASH_SHIFT) - 1)) << 2))
*
* Instead of two shifts we just ">> 1", and use 'mm'
* to ignore the low and high bits we don't want.

@@ -1620,12 +1582,9 @@ ENTRY(sys_cmpxchg)

/*
* Ensure that the TLB is loaded before we take out the lock.
* On tilepro, this will start fetching the value all the way
* into our L1 as well (and if it gets modified before we
* grab the lock, it will be invalidated from our cache
* before we reload it). On tile64, we'll start fetching it
* into our L1 if we're the home, and if we're not, we'll
* still at least start fetching it into the home's L2.
* This will start fetching the value all the way into our L1
* as well (and if it gets modified before we grab the lock,
* it will be invalidated from our cache before we reload it).
*/
lw r26, r0
}

@@ -1668,8 +1627,6 @@ ENTRY(sys_cmpxchg)
j .Lcmpxchg32_tns
}

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/* Symbol for do_page_fault_ics() to use to compare against the PC. */
.global __sys_cmpxchg_grab_lock
__sys_cmpxchg_grab_lock:

@@ -1807,9 +1764,6 @@ __sys_cmpxchg_grab_lock:
.align 64
.Lcmpxchg64:
{
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
s2a ATOMIC_LOCK_REG_NAME, r25, r21
#endif
bzt r23, .Lcmpxchg64_tns
}
j .Lcmpxchg_badaddr

@@ -1875,8 +1829,8 @@ int_unalign:
push_extra_callee_saves r0
j do_trap

/* Include .intrpt1 array of interrupt vectors */
.section ".intrpt1", "ax"
/* Include .intrpt array of interrupt vectors */
.section ".intrpt", "ax"

#define op_handle_perf_interrupt bad_intr
#define op_handle_aux_perf_interrupt bad_intr

@@ -1944,10 +1898,8 @@ int_unalign:
do_page_fault
int_hand INT_SN_CPL, SN_CPL, bad_intr
int_hand INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
#if CHIP_HAS_AUX_PERF_COUNTERS()
int_hand INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
op_handle_aux_perf_interrupt, handle_nmi
#endif

/* Synthetic interrupt delivered only by the simulator */
int_hand INT_BREAKPOINT, BREAKPOINT, do_breakpoint

@@ -17,25 +17,33 @@
#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/init.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/asm-offsets.h>
#include <asm/types.h>
#include <asm/traps.h>
#include <asm/signal.h>
#include <hv/hypervisor.h>
#include <arch/abi.h>
#include <arch/interrupts.h>
#include <arch/spr_def.h>

#ifdef CONFIG_PREEMPT
# error "No support for kernel preemption currently"
#endif

#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)

#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)

#if CONFIG_KERNEL_PL == 1 || CONFIG_KERNEL_PL == 2
/*
* Set "result" non-zero if ex1 holds the PL of the kernel
* (with or without ICS being set). Note this works only
* because we never find the PL at level 3.
*/
# define IS_KERNEL_EX1(result, ex1) andi result, ex1, CONFIG_KERNEL_PL
#else
# error Recode IS_KERNEL_EX1 for CONFIG_KERNEL_PL
#endif

.macro push_reg reg, ptr=sp, delta=-8
{

@@ -98,6 +106,185 @@
}
.endm

/*
* Unalign data exception fast handling: In order to handle
* unaligned data access, a fast JIT version is generated and stored
* in a specific area in user space. We first need to do a quick poke
* to see if the JIT is available. We use certain bits in the fault
* PC (3 to 9 is used for 16KB page size) as index to address the JIT
* code area. The first 64-bit word is the fault PC, and the 2nd one is
* the fault bundle itself. If these 2 words both match, then we
* directly "iret" to JIT code. If not, a slow path is invoked to
* generate new JIT code. Note: the current JIT code WILL be
* overwritten if it existed. So, ideally we can handle 128 unalign
* fixups via JIT. For lookup efficiency and to effectively support
* tight loops with multiple unaligned references, a simple
* direct-mapped cache is used.
*
* SPR_EX_CONTEXT_K_0 is modified to return to JIT code.
* SPR_EX_CONTEXT_K_1 has ICS set.
* SPR_EX_CONTEXT_0_0 is setup to user program's next PC.
* SPR_EX_CONTEXT_0_1 = 0.
*/
.macro int_hand_unalign_fast vecnum, vecname
.org (\vecnum << 8)
intvec_\vecname:
/* Put r3 in SPR_SYSTEM_SAVE_K_1. */
mtspr SPR_SYSTEM_SAVE_K_1, r3

mfspr r3, SPR_EX_CONTEXT_K_1
/*
* Examine if exception comes from user without ICS set.
* If not, just go directly to the slow path.
*/
bnez r3, hand_unalign_slow_nonuser

mfspr r3, SPR_SYSTEM_SAVE_K_0

/* Get &thread_info->unalign_jit_tmp[0] in r3. */
bfexts r3, r3, 0, CPU_SHIFT-1
mm r3, zero, LOG2_THREAD_SIZE, 63
addli r3, r3, THREAD_INFO_UNALIGN_JIT_TMP_OFFSET

/*
* Save r0, r1, r2 into thread_info array r3 points to
* from low to high memory in order.
*/
st_add r3, r0, 8
st_add r3, r1, 8
{
st_add r3, r2, 8
andi r2, sp, 7
}

/* Save stored r3 value so we can revert it on a page fault. */
mfspr r1, SPR_SYSTEM_SAVE_K_1
st r3, r1

{
/* Generate a SIGBUS if sp is not 8-byte aligned. */
bnez r2, hand_unalign_slow_badsp
}

/*
* Get the thread_info in r0; load r1 with pc. Set the low bit of sp
* as an indicator to the page fault code in case we fault.
*/
{
ori sp, sp, 1
mfspr r1, SPR_EX_CONTEXT_K_0
}

/* Add the jit_info offset in thread_info; extract r1 [3:9] into r2. */
{
addli r0, r3, THREAD_INFO_UNALIGN_JIT_BASE_OFFSET - \
(THREAD_INFO_UNALIGN_JIT_TMP_OFFSET + (3 * 8))
bfextu r2, r1, 3, (2 + PAGE_SHIFT - UNALIGN_JIT_SHIFT)
}

/* Load the jit_info; multiply r2 by 128. */
{
ld r0, r0
shli r2, r2, UNALIGN_JIT_SHIFT
}

/*
* If r0 is NULL, the JIT page is not mapped, so go to slow path;
* add offset r2 to r0 at the same time.
*/
{
beqz r0, hand_unalign_slow
add r2, r0, r2
}

/*
* We are loading from userspace (both the JIT info PC and
* instruction word, and the instruction word we executed)
* and since either could fault while holding the interrupt
* critical section, we must tag this region and check it in
* do_page_fault() to handle it properly.
*/
ENTRY(__start_unalign_asm_code)

/* Load first word of JIT in r0 and increment r2 by 8. */
ld_add r0, r2, 8

/*
* Compare the PC with the 1st word in JIT; load the fault bundle
* into r1.
*/
{
cmpeq r0, r0, r1
ld r1, r1
}

/* Go to slow path if PC doesn't match. */
beqz r0, hand_unalign_slow

/*
* Load the 2nd word of JIT, which is supposed to be the fault
* bundle for a cache hit. Increment r2; after this bundle r2 will
* point to the potential start of the JIT code we want to run.
*/
ld_add r0, r2, 8

/* No further accesses to userspace are done after this point. */
ENTRY(__end_unalign_asm_code)

/* Compare the real bundle with what is saved in the JIT area. */
{
cmpeq r0, r1, r0
mtspr SPR_EX_CONTEXT_0_1, zero
}

/* Go to slow path if the fault bundle does not match. */
beqz r0, hand_unalign_slow

/*
* A cache hit is found.
* r2 points to start of JIT code (3rd word).
* r0 is the fault pc.
* r1 is the fault bundle.
* Reset the low bit of sp.
*/
{
mfspr r0, SPR_EX_CONTEXT_K_0
andi sp, sp, ~1
}

/* Write r2 into EX_CONTEXT_K_0 and increment PC. */
{
mtspr SPR_EX_CONTEXT_K_0, r2
addi r0, r0, 8
}

/*
* Set ICS on kernel EX_CONTEXT_K_1 in order to "iret" to
* user with ICS set. This way, if the JIT fixup causes another
* unalign exception (which shouldn't be possible) the user
* process will be terminated with SIGBUS. Also, our fixup will
* run without interleaving with external interrupts.
* Each fixup is at most 14 bundles, so it won't hold ICS for long.
*/
{
movei r1, PL_ICS_EX1(USER_PL, 1)
mtspr SPR_EX_CONTEXT_0_0, r0
}

{
mtspr SPR_EX_CONTEXT_K_1, r1
addi r3, r3, -(3 * 8)
}

/* Restore r0..r3. */
ld_add r0, r3, 8
ld_add r1, r3, 8
ld_add r2, r3, 8
ld r3, r3

iret
ENDPROC(intvec_\vecname)
.endm

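The fastpath above is easier to follow as data-structure code. A minimal C sketch of the direct-mapped JIT cache it probes, reconstructed from the comments and the bfextu/shli arithmetic (the struct and function names are invented for illustration, and the 16KB page size is an assumption from the comment):

#define JIT_PAGE_SIZE		16384	/* assumes the 16KB page case */
#define UNALIGN_JIT_SHIFT	7	/* 128-byte entries */

struct unalign_jit_entry {
	unsigned long pc;	/* word 0: fault PC this entry was built for */
	unsigned long bundle;	/* word 1: the faulting instruction bundle */
	unsigned long code[14];	/* up to 14 fixup bundles */
};

static struct unalign_jit_entry *
unalign_jit_lookup(struct unalign_jit_entry *base,
		   unsigned long pc, unsigned long bundle)
{
	/* PC bits [3..9] index 128 direct-mapped entries. */
	unsigned long idx =
		(pc >> 3) & ((JIT_PAGE_SIZE >> UNALIGN_JIT_SHIFT) - 1);
	struct unalign_jit_entry *e = &base[idx];

	/* Hit only if both tag words match; a miss regenerates the
	 * entry, overwriting whatever lived in this slot before.
	 */
	return (e->pc == pc && e->bundle == bundle) ? e : NULL;
}
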
#ifdef __COLLECT_LINKER_FEEDBACK__
.pushsection .text.intvec_feedback,"ax"

@@ -118,15 +305,21 @@ intvec_feedback:
* The "processing" argument specifies the code for processing
* the interrupt. Defaults to "handle_interrupt".
*/
.macro int_hand vecnum, vecname, c_routine, processing=handle_interrupt
.org (\vecnum << 8)
.macro __int_hand vecnum, vecname, c_routine,processing=handle_interrupt
intvec_\vecname:
/* Temporarily save a register so we have somewhere to work. */

mtspr SPR_SYSTEM_SAVE_K_1, r0
mfspr r0, SPR_EX_CONTEXT_K_1

andi r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
/*
* The unalign data fastpath code sets the low bit in sp to
* force us to reset it here on fault.
*/
{
blbs sp, 2f
IS_KERNEL_EX1(r0, r0)
}

.ifc \vecnum, INT_DOUBLE_FAULT
/*

@@ -176,15 +369,15 @@ intvec_\vecname:
}
.endif

2:
/*
* SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
* the current stack top in the higher bits. So we recover
* our stack top by just masking off the low bits, then
* SYSTEM_SAVE_K_0 holds the cpu number in the high bits, and
* the current stack top in the lower bits. So we recover
* our starting stack value by sign-extending the low bits, then
* point sp at the top aligned address on the actual stack page.
*/
mfspr r0, SPR_SYSTEM_SAVE_K_0
mm r0, zero, LOG2_THREAD_SIZE, 63
bfexts r0, r0, 0, CPU_SHIFT-1

0:
/*

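The revised SYSTEM_SAVE_K_0 layout described in the comment above packs the cpu number into the high bits and relies on kernel VAs being sign-extended, so the stack value survives in the low CPU_SHIFT bits. A C sketch of the encode/decode (illustrative helpers; the CPU_SHIFT value here is an assumption, the real one comes from the kernel headers):

#include <stdint.h>

#define CPU_SHIFT 50	/* assumed value for illustration only */

static uint64_t k0_pack(uint64_t cpu, uint64_t ksp0)
{
	/* cpu in bits [CPU_SHIFT..63], stack value in the low bits */
	return (cpu << CPU_SHIFT) | (ksp0 & ((1UL << CPU_SHIFT) - 1));
}

static uint64_t k0_cpu(uint64_t v)
{
	return v >> CPU_SHIFT;		/* like bfextu v, CPU_SHIFT, 63 */
}

static int64_t k0_sp(uint64_t v)
{
	/* sign-extend the low CPU_SHIFT bits, as bfexts 0, CPU_SHIFT-1 does */
	return (int64_t)(v << (64 - CPU_SHIFT)) >> (64 - CPU_SHIFT);
}
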
@@ -206,6 +399,9 @@ intvec_\vecname:
* cache line 1: r6...r13
* cache line 0: 2 x frame, r0..r5
*/
#if STACK_TOP_DELTA != 64
#error STACK_TOP_DELTA must be 64 for assumptions here and in task_pt_regs()
#endif
andi r0, r0, -64

/*

@@ -305,7 +501,7 @@ intvec_\vecname:
mfspr r3, SPR_SYSTEM_SAVE_K_2 /* info about page fault */
.else
.ifc \vecnum, INT_ILL_TRANS
mfspr r2, ILL_TRANS_REASON
mfspr r2, ILL_VA_PC
.else
.ifc \vecnum, INT_DOUBLE_FAULT
mfspr r2, SPR_SYSTEM_SAVE_K_2 /* double fault info from HV */

@@ -315,12 +511,10 @@ intvec_\vecname:
.else
.ifc \c_routine, op_handle_perf_interrupt
mfspr r2, PERF_COUNT_STS
#if CHIP_HAS_AUX_PERF_COUNTERS()
.else
.ifc \c_routine, op_handle_aux_perf_interrupt
mfspr r2, AUX_PERF_COUNT_STS
.endif
#endif
.endif
.endif
.endif

@@ -339,7 +533,7 @@ intvec_\vecname:
#ifdef __COLLECT_LINKER_FEEDBACK__
.pushsection .text.intvec_feedback,"ax"
.org (\vecnum << 5)
FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt1, 1 << 8)
FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt, 1 << 8)
jrp lr
.popsection
#endif

@@ -455,11 +649,12 @@ intvec_\vecname:
/*
* If we will be returning to the kernel, we will need to
* reset the interrupt masks to the state they had before.
* Set DISABLE_IRQ in flags iff we came from PL1 with irqs disabled.
* Set DISABLE_IRQ in flags iff we came from kernel pl with
* irqs disabled.
*/
mfspr r32, SPR_EX_CONTEXT_K_1
{
andi r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
IS_KERNEL_EX1(r22, r22)
PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
}
beqzt r32, 1f /* zero if from user space */

@@ -503,7 +698,7 @@ intvec_\vecname:
}
{
shl16insli r21, r21, hw1(__per_cpu_offset)
bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
bfextu r20, r20, CPU_SHIFT, 63
}
shl16insli r21, r21, hw0(__per_cpu_offset)
shl3add r20, r20, r21

@@ -585,7 +780,7 @@ intvec_\vecname:
.macro dc_dispatch vecnum, vecname
.org (\vecnum << 8)
intvec_\vecname:
j hv_downcall_dispatch
j _hv_downcall_dispatch
ENDPROC(intvec_\vecname)
.endm

@@ -626,14 +821,36 @@ STD_ENTRY(interrupt_return)
PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
}
ld r29, r29
andi r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
IS_KERNEL_EX1(r29, r29)
{
beqzt r29, .Lresume_userspace
PTREGS_PTR(r29, PTREGS_OFFSET_PC)
move r29, sp
}

#ifdef CONFIG_PREEMPT
/* Returning to kernel space. Check if we need preemption. */
EXTRACT_THREAD_INFO(r29)
addli r28, r29, THREAD_INFO_FLAGS_OFFSET
{
ld r28, r28
addli r29, r29, THREAD_INFO_PREEMPT_COUNT_OFFSET
}
{
andi r28, r28, _TIF_NEED_RESCHED
ld4s r29, r29
}
beqzt r28, 1f
bnez r29, 1f
jal preempt_schedule_irq
FEEDBACK_REENTER(interrupt_return)
1:
#endif

/* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
moveli r27, hw2_last(_cpu_idle_nap)
{
moveli r27, hw2_last(_cpu_idle_nap)
PTREGS_PTR(r29, PTREGS_OFFSET_PC)
}
{
ld r28, r29
shl16insli r27, r27, hw1(_cpu_idle_nap)

@@ -728,7 +945,7 @@ STD_ENTRY(interrupt_return)
PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS)
}
{
andi r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK
IS_KERNEL_EX1(r0, r0)
ld r32, r32
}
bnez r0, 1f

@@ -799,7 +1016,7 @@ STD_ENTRY(interrupt_return)
pop_reg r21, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_PC
{
mtspr SPR_EX_CONTEXT_K_1, lr
andi lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
IS_KERNEL_EX1(lr, lr)
}
{
mtspr SPR_EX_CONTEXT_K_0, r21

@@ -1223,10 +1440,31 @@ STD_ENTRY(_sys_clone)
j sys_clone
STD_ENDPROC(_sys_clone)

/* The single-step support may need to read all the registers. */
/*
* Recover r3, r2, r1 and r0 here saved by the unalign fast vector.
* The vector area limit is 32 bundles, so we handle the reload here.
* r0, r1, r2 are in thread_info from low to high memory in order.
* r3 points to the location where the original r3 was saved.
* We put this code in the __HEAD section so it can be reached
* via a conditional branch from the fast path.
*/
__HEAD
hand_unalign_slow:
andi sp, sp, ~1
hand_unalign_slow_badsp:
addi r3, r3, -(3 * 8)
ld_add r0, r3, 8
ld_add r1, r3, 8
ld r2, r3
hand_unalign_slow_nonuser:
mfspr r3, SPR_SYSTEM_SAVE_K_1
__int_hand INT_UNALIGN_DATA, UNALIGN_DATA_SLOW, int_unalign

/* The unaligned data support needs to read all the registers. */
int_unalign:
push_extra_callee_saves r0
j do_trap
j do_unaligned
ENDPROC(hand_unalign_slow)

/* Fill the return address stack with nonzero entries. */
STD_ENTRY(fill_ra_stack)

@@ -1240,8 +1478,15 @@ STD_ENTRY(fill_ra_stack)
4: jrp r0
STD_ENDPROC(fill_ra_stack)

/* Include .intrpt1 array of interrupt vectors */
.section ".intrpt1", "ax"
.macro int_hand vecnum, vecname, c_routine, processing=handle_interrupt
.org (\vecnum << 8)
__int_hand \vecnum, \vecname, \c_routine, \processing
.endm

/* Include .intrpt array of interrupt vectors */
.section ".intrpt", "ax"
.global intrpt_start
intrpt_start:

#define op_handle_perf_interrupt bad_intr
#define op_handle_aux_perf_interrupt bad_intr

@@ -1272,7 +1517,7 @@ STD_ENTRY(fill_ra_stack)
int_hand INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
int_hand INT_SWINT_0, SWINT_0, do_trap
int_hand INT_ILL_TRANS, ILL_TRANS, do_trap
int_hand INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
int_hand_unalign_fast INT_UNALIGN_DATA, UNALIGN_DATA
int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault
int_hand INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
int_hand INT_IDN_FIREWALL, IDN_FIREWALL, do_hardwall_trap

@@ -55,7 +55,8 @@ static DEFINE_PER_CPU(int, irq_depth);

/* State for allocating IRQs on Gx. */
#if CHIP_HAS_IPI()
static unsigned long available_irqs = ~(1UL << IRQ_RESCHEDULE);
static unsigned long available_irqs = ((1UL << NR_IRQS) - 1) &
(~(1UL << IRQ_RESCHEDULE));
static DEFINE_SPINLOCK(available_irqs_lock);
#endif

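A quick worked example of what the new initializer buys (hypothetical values, assuming NR_IRQS is 32 and IRQ_RESCHEDULE is 0):

/* old: ~(1UL << 0)                      = 0xfffffffffffffffe
 *      (also offers bit positions >= NR_IRQS, which are not real IRQs)
 * new: ((1UL << 32) - 1) & ~(1UL << 0)  = 0x00000000fffffffe
 *      (exactly IRQs 1..31 start out available)
 */
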
@@ -73,7 +74,8 @@ static DEFINE_SPINLOCK(available_irqs_lock);

/*
* The interrupt handling path, implemented in terms of HV interrupt
* emulation on TILE64 and TILEPro, and IPI hardware on TILE-Gx.
* emulation on TILEPro, and IPI hardware on TILE-Gx.
* Entered with interrupts disabled.
*/
void tile_dev_intr(struct pt_regs *regs, int intnum)
{

@@ -233,7 +235,7 @@ void tile_irq_activate(unsigned int irq, int tile_irq_type)
{
/*
* We use handle_level_irq() by default because the pending
* interrupt vector (whether modeled by the HV on TILE64 and
* interrupt vector (whether modeled by the HV on
* TILEPro or implemented in hardware on TILE-Gx) has
* level-style semantics for each bit. An interrupt fires
* whenever a bit is high, not just at edges.

@@ -0,0 +1,499 @@
/*
* Copyright 2013 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* TILE-Gx KGDB support.
*/

#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <asm/cacheflush.h>

static tile_bundle_bits singlestep_insn = TILEGX_BPT_BUNDLE | DIE_SSTEPBP;
static unsigned long stepped_addr;
static tile_bundle_bits stepped_instr;

struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
{ "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0])},
{ "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1])},
{ "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2])},
{ "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3])},
{ "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4])},
{ "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5])},
{ "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6])},
{ "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7])},
{ "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8])},
{ "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9])},
{ "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10])},
{ "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11])},
{ "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12])},
{ "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13])},
{ "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14])},
{ "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15])},
{ "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16])},
{ "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17])},
{ "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18])},
{ "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19])},
{ "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20])},
{ "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21])},
{ "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22])},
{ "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23])},
{ "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24])},
{ "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25])},
{ "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26])},
{ "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27])},
{ "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28])},
{ "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29])},
{ "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30])},
{ "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31])},
{ "r32", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[32])},
{ "r33", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[33])},
{ "r34", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[34])},
{ "r35", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[35])},
{ "r36", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[36])},
{ "r37", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[37])},
{ "r38", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[38])},
{ "r39", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[39])},
{ "r40", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[40])},
{ "r41", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[41])},
{ "r42", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[42])},
{ "r43", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[43])},
{ "r44", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[44])},
{ "r45", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[45])},
{ "r46", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[46])},
{ "r47", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[47])},
{ "r48", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[48])},
{ "r49", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[49])},
{ "r50", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[50])},
{ "r51", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[51])},
{ "r52", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[52])},
{ "tp", GDB_SIZEOF_REG, offsetof(struct pt_regs, tp)},
{ "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, sp)},
{ "lr", GDB_SIZEOF_REG, offsetof(struct pt_regs, lr)},
{ "sn", GDB_SIZEOF_REG, -1},
{ "idn0", GDB_SIZEOF_REG, -1},
{ "idn1", GDB_SIZEOF_REG, -1},
{ "udn0", GDB_SIZEOF_REG, -1},
{ "udn1", GDB_SIZEOF_REG, -1},
{ "udn2", GDB_SIZEOF_REG, -1},
{ "udn3", GDB_SIZEOF_REG, -1},
{ "zero", GDB_SIZEOF_REG, -1},
{ "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, pc)},
{ "faultnum", GDB_SIZEOF_REG, offsetof(struct pt_regs, faultnum)},
};

char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
if (regno >= DBG_MAX_REG_NUM || regno < 0)
return NULL;

if (dbg_reg_def[regno].offset != -1)
memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
dbg_reg_def[regno].size);
else
memset(mem, 0, dbg_reg_def[regno].size);
return dbg_reg_def[regno].name;
}

int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
if (regno >= DBG_MAX_REG_NUM || regno < 0)
return -EINVAL;

if (dbg_reg_def[regno].offset != -1)
memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
dbg_reg_def[regno].size);
return 0;
}
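A short usage sketch for the two accessors above (illustrative; TILEGX_PC_REGNUM is the index used for the pc slot later in this file):

/* Read the saved pc out of a pt_regs snapshot by register number. */
unsigned long pc;
char *name = dbg_get_reg(TILEGX_PC_REGNUM, &pc, regs);
/* name is "pc" on success, or NULL for an out-of-range regno. */
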
|
||||
/*
|
||||
* Similar to pt_regs_to_gdb_regs() except that process is sleeping and so
|
||||
* we may not be able to get all the info.
|
||||
*/
|
||||
void
|
||||
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
|
||||
{
|
||||
int reg;
|
||||
struct pt_regs *thread_regs;
|
||||
unsigned long *ptr = gdb_regs;
|
||||
|
||||
if (task == NULL)
|
||||
return;
|
||||
|
||||
/* Initialize to zero. */
|
||||
memset(gdb_regs, 0, NUMREGBYTES);
|
||||
|
||||
thread_regs = task_pt_regs(task);
|
||||
for (reg = 0; reg <= TREG_LAST_GPR; reg++)
|
||||
*(ptr++) = thread_regs->regs[reg];
|
||||
|
||||
gdb_regs[TILEGX_PC_REGNUM] = thread_regs->pc;
|
||||
gdb_regs[TILEGX_FAULTNUM_REGNUM] = thread_regs->faultnum;
|
||||
}
|
||||
|
||||
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
|
||||
{
|
||||
regs->pc = pc;
|
||||
}
|
||||
|
||||
static void kgdb_call_nmi_hook(void *ignored)
|
||||
{
|
||||
kgdb_nmicallback(raw_smp_processor_id(), NULL);
|
||||
}
|
||||
|
||||
void kgdb_roundup_cpus(unsigned long flags)
|
||||
{
|
||||
local_irq_enable();
|
||||
smp_call_function(kgdb_call_nmi_hook, NULL, 0);
|
||||
local_irq_disable();
|
||||
}
|
||||
|
||||
/*
|
||||
* Convert a kernel address to the writable kernel text mapping.
|
||||
*/
|
||||
static unsigned long writable_address(unsigned long addr)
|
||||
{
|
||||
unsigned long ret = 0;
|
||||
|
||||
if (core_kernel_text(addr))
|
||||
ret = addr - MEM_SV_START + PAGE_OFFSET;
|
||||
else if (is_module_text_address(addr))
|
||||
ret = addr;
|
||||
else
|
||||
pr_err("Unknown virtual address 0x%lx\n", addr);
|
||||
|
||||
return ret;
|
||||
}
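
The translation above relies on the tile kernel mapping its text twice:
read-only (and executable) at MEM_SV_START, and writable through the linear
map at PAGE_OFFSET, so rebasing by a constant yields a patchable alias. A
sketch of that arithmetic with placeholder constants (the real values come
from the tile memory-layout headers, not these):

/* Hypothetical layout constants, standing in for <asm/page.h> values. */
#define MEM_SV_START	0xfffffff700000000UL
#define PAGE_OFFSET	0xfffffe0000000000UL

/* Rebase a read-only kernel-text address into the writable alias. */
static inline unsigned long text_to_writable(unsigned long text_addr)
{
	return text_addr - MEM_SV_START + PAGE_OFFSET;
}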

/*
 * Calculate the new address for after a step.
 */
static unsigned long get_step_address(struct pt_regs *regs)
{
	int src_reg;
	int jump_off;
	int br_off;
	unsigned long addr;
	unsigned int opcode;
	tile_bundle_bits bundle;

	/* Move to the next instruction by default. */
	addr = regs->pc + TILEGX_BUNDLE_SIZE_IN_BYTES;
	bundle = *(unsigned long *)instruction_pointer(regs);

	/* 0: X mode, Otherwise: Y mode. */
	if (bundle & TILEGX_BUNDLE_MODE_MASK) {
		if (get_Opcode_Y1(bundle) == RRR_1_OPCODE_Y1 &&
		    get_RRROpcodeExtension_Y1(bundle) ==
		    UNARY_RRR_1_OPCODE_Y1) {
			opcode = get_UnaryOpcodeExtension_Y1(bundle);

			switch (opcode) {
			case JALR_UNARY_OPCODE_Y1:
			case JALRP_UNARY_OPCODE_Y1:
			case JR_UNARY_OPCODE_Y1:
			case JRP_UNARY_OPCODE_Y1:
				src_reg = get_SrcA_Y1(bundle);
				dbg_get_reg(src_reg, &addr, regs);
				break;
			}
		}
	} else if (get_Opcode_X1(bundle) == RRR_0_OPCODE_X1) {
		if (get_RRROpcodeExtension_X1(bundle) ==
		    UNARY_RRR_0_OPCODE_X1) {
			opcode = get_UnaryOpcodeExtension_X1(bundle);

			switch (opcode) {
			case JALR_UNARY_OPCODE_X1:
			case JALRP_UNARY_OPCODE_X1:
			case JR_UNARY_OPCODE_X1:
			case JRP_UNARY_OPCODE_X1:
				src_reg = get_SrcA_X1(bundle);
				dbg_get_reg(src_reg, &addr, regs);
				break;
			}
		}
	} else if (get_Opcode_X1(bundle) == JUMP_OPCODE_X1) {
		opcode = get_JumpOpcodeExtension_X1(bundle);

		switch (opcode) {
		case JAL_JUMP_OPCODE_X1:
		case J_JUMP_OPCODE_X1:
			jump_off = sign_extend(get_JumpOff_X1(bundle), 27);
			addr = regs->pc +
				(jump_off << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES);
			break;
		}
	} else if (get_Opcode_X1(bundle) == BRANCH_OPCODE_X1) {
		br_off = 0;
		opcode = get_BrType_X1(bundle);

		switch (opcode) {
		case BEQZT_BRANCH_OPCODE_X1:
		case BEQZ_BRANCH_OPCODE_X1:
			if (get_SrcA_X1(bundle) == 0)
				br_off = get_BrOff_X1(bundle);
			break;
		case BGEZT_BRANCH_OPCODE_X1:
		case BGEZ_BRANCH_OPCODE_X1:
			if (get_SrcA_X1(bundle) >= 0)
				br_off = get_BrOff_X1(bundle);
			break;
		case BGTZT_BRANCH_OPCODE_X1:
		case BGTZ_BRANCH_OPCODE_X1:
			if (get_SrcA_X1(bundle) > 0)
				br_off = get_BrOff_X1(bundle);
			break;
		case BLBCT_BRANCH_OPCODE_X1:
		case BLBC_BRANCH_OPCODE_X1:
			if (!(get_SrcA_X1(bundle) & 1))
				br_off = get_BrOff_X1(bundle);
			break;
		case BLBST_BRANCH_OPCODE_X1:
		case BLBS_BRANCH_OPCODE_X1:
			if (get_SrcA_X1(bundle) & 1)
				br_off = get_BrOff_X1(bundle);
			break;
		case BLEZT_BRANCH_OPCODE_X1:
		case BLEZ_BRANCH_OPCODE_X1:
			if (get_SrcA_X1(bundle) <= 0)
				br_off = get_BrOff_X1(bundle);
			break;
		case BLTZT_BRANCH_OPCODE_X1:
		case BLTZ_BRANCH_OPCODE_X1:
			if (get_SrcA_X1(bundle) < 0)
				br_off = get_BrOff_X1(bundle);
			break;
		case BNEZT_BRANCH_OPCODE_X1:
		case BNEZ_BRANCH_OPCODE_X1:
			if (get_SrcA_X1(bundle) != 0)
				br_off = get_BrOff_X1(bundle);
			break;
		}

		if (br_off != 0) {
			br_off = sign_extend(br_off, 17);
			addr = regs->pc +
				(br_off << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES);
		}
	}

	return addr;
}
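
The branch and jump targets above come from 17- and 27-bit signed offset
fields counted in 8-byte bundles. A standalone sketch of that arithmetic
(the sign_extend() shown is one plausible definition; the kernel's own lives
in the tile headers):

#include <stdio.h>

/* Keep the low 'bits' bits of val and propagate the sign bit upward. */
static long sign_extend(unsigned long val, int bits)
{
	return (long)(val << (64 - bits)) >> (64 - bits);
}

int main(void)
{
	unsigned long pc = 0x10000;
	long br_off = sign_extend(0x1ffff, 17);	/* all-ones field == -1 */

	/* Offsets count 8-byte bundles, hence the scale by 8 (the shift by
	 * TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES in the code above). */
	printf("target = %#lx\n", pc + br_off * 8);	/* prints 0xfff8 */
	return 0;
}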

/*
 * Replace the next instruction after the current instruction with a
 * breakpoint instruction.
 */
static void do_single_step(struct pt_regs *regs)
{
	unsigned long addr_wr;

	/* Determine where the target instruction will send us to. */
	stepped_addr = get_step_address(regs);
	probe_kernel_read((char *)&stepped_instr, (char *)stepped_addr,
			  BREAK_INSTR_SIZE);

	addr_wr = writable_address(stepped_addr);
	probe_kernel_write((char *)addr_wr, (char *)&singlestep_insn,
			   BREAK_INSTR_SIZE);
	smp_wmb();
	flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE);
}

static void undo_single_step(struct pt_regs *regs)
{
	unsigned long addr_wr;

	if (stepped_instr == 0)
		return;

	addr_wr = writable_address(stepped_addr);
	probe_kernel_write((char *)addr_wr, (char *)&stepped_instr,
			   BREAK_INSTR_SIZE);
	stepped_instr = 0;
	smp_wmb();
	flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE);
}

/*
 * Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
 * then try to fall into the debugger.
 */
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
	int ret;
	unsigned long flags;
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;

#ifdef CONFIG_KPROBES
	/*
	 * Return immediately if the kprobes fault notifier has set
	 * DIE_PAGE_FAULT.
	 */
	if (cmd == DIE_PAGE_FAULT)
		return NOTIFY_DONE;
#endif /* CONFIG_KPROBES */

	switch (cmd) {
	case DIE_BREAK:
	case DIE_COMPILED_BPT:
		break;
	case DIE_SSTEPBP:
		local_irq_save(flags);
		kgdb_handle_exception(0, SIGTRAP, 0, regs);
		local_irq_restore(flags);
		return NOTIFY_STOP;
	default:
		/* Userspace events, ignore. */
		if (user_mode(regs))
			return NOTIFY_DONE;
	}

	local_irq_save(flags);
	ret = kgdb_handle_exception(args->trapnr, args->signr, args->err, regs);
	local_irq_restore(flags);
	if (ret)
		return NOTIFY_DONE;

	return NOTIFY_STOP;
}

static struct notifier_block kgdb_notifier = {
	.notifier_call = kgdb_notify,
};

/*
 * kgdb_arch_handle_exception - Handle architecture specific GDB packets.
 * @vector: The error vector of the exception that happened.
 * @signo: The signal number of the exception that happened.
 * @err_code: The error code of the exception that happened.
 * @remcom_in_buffer: The buffer of the packet we have read.
 * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
 * @regs: The &struct pt_regs of the current process.
 *
 * This function MUST handle the 'c' and 's' command packets,
 * as well as packets to set or remove a hardware breakpoint, if used.
 * If there are additional packets which the hardware needs to handle,
 * they are handled here. The code should return -1 if it wants to
 * process more packets, and a %0 or %1 if it wants to exit from the
 * kgdb callback.
 */
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
			       char *remcom_in_buffer, char *remcom_out_buffer,
			       struct pt_regs *regs)
{
	char *ptr;
	unsigned long address;

	/* Undo any stepping we may have done. */
	undo_single_step(regs);

	switch (remcom_in_buffer[0]) {
	case 'c':
	case 's':
	case 'D':
	case 'k':
		/*
		 * Try to read the optional parameter; the pc is unchanged
		 * if there is none. If this was a compiled-in breakpoint,
		 * we need to move to the next instruction or we will just
		 * breakpoint over and over again.
		 */
		ptr = &remcom_in_buffer[1];
		if (kgdb_hex2long(&ptr, &address))
			regs->pc = address;
		else if (*(unsigned long *)regs->pc == compiled_bpt)
			regs->pc += BREAK_INSTR_SIZE;

		if (remcom_in_buffer[0] == 's') {
			do_single_step(regs);
			kgdb_single_step = 1;
			atomic_set(&kgdb_cpu_doing_single_step,
				   raw_smp_processor_id());
		} else
			atomic_set(&kgdb_cpu_doing_single_step, -1);

		return 0;
	}

	return -1; /* this means that we do not want to exit from the handler */
}

struct kgdb_arch arch_kgdb_ops;

/*
 * kgdb_arch_init - Perform any architecture specific initialization.
 *
 * This function will handle the initialization of any architecture
 * specific callbacks.
 */
int kgdb_arch_init(void)
{
	tile_bundle_bits bundle = TILEGX_BPT_BUNDLE;

	memcpy(arch_kgdb_ops.gdb_bpt_instr, &bundle, BREAK_INSTR_SIZE);
	return register_die_notifier(&kgdb_notifier);
}

/*
 * kgdb_arch_exit - Perform any architecture specific uninitialization.
 *
 * This function will handle the uninitialization of any architecture
 * specific callbacks, for dynamic registration and unregistration.
 */
void kgdb_arch_exit(void)
{
	unregister_die_notifier(&kgdb_notifier);
}

int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;
	unsigned long addr_wr = writable_address(bpt->bpt_addr);

	if (addr_wr == 0)
		return -1;

	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (err)
		return err;

	err = probe_kernel_write((char *)addr_wr, arch_kgdb_ops.gdb_bpt_instr,
				 BREAK_INSTR_SIZE);
	smp_wmb();
	flush_icache_range((unsigned long)bpt->bpt_addr,
			   (unsigned long)bpt->bpt_addr + BREAK_INSTR_SIZE);
	return err;
}

int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;
	unsigned long addr_wr = writable_address(bpt->bpt_addr);

	if (addr_wr == 0)
		return -1;

	err = probe_kernel_write((char *)addr_wr, (char *)bpt->saved_instr,
				 BREAK_INSTR_SIZE);
	smp_wmb();
	flush_icache_range((unsigned long)bpt->bpt_addr,
			   (unsigned long)bpt->bpt_addr + BREAK_INSTR_SIZE);
	return err;
}

@@ -0,0 +1,528 @@
/*
 * arch/tile/kernel/kprobes.c
 * Kprobes on TILE-Gx
 *
 * Some portions copied from the MIPS version.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright 2006 Sony Corp.
 * Copyright 2010 Cavium Networks
 *
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

#include <arch/opcode.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

tile_bundle_bits breakpoint_insn = TILEGX_BPT_BUNDLE;
tile_bundle_bits breakpoint2_insn = TILEGX_BPT_BUNDLE | DIE_SSTEPBP;

/*
 * Check whether the instruction is a branch or jump, or whether executing
 * it has different results depending on where it is executed (e.g. lnk).
 */
static int __kprobes insn_has_control(kprobe_opcode_t insn)
{
	if (get_Mode(insn) != 0) {	/* Y-format bundle */
		if (get_Opcode_Y1(insn) != RRR_1_OPCODE_Y1 ||
		    get_RRROpcodeExtension_Y1(insn) != UNARY_RRR_1_OPCODE_Y1)
			return 0;

		switch (get_UnaryOpcodeExtension_Y1(insn)) {
		case JALRP_UNARY_OPCODE_Y1:
		case JALR_UNARY_OPCODE_Y1:
		case JRP_UNARY_OPCODE_Y1:
		case JR_UNARY_OPCODE_Y1:
		case LNK_UNARY_OPCODE_Y1:
			return 1;
		default:
			return 0;
		}
	}

	switch (get_Opcode_X1(insn)) {
	case BRANCH_OPCODE_X1:	/* branch instructions */
	case JUMP_OPCODE_X1:	/* jump instructions: j and jal */
		return 1;

	case RRR_0_OPCODE_X1:	/* other jump instructions */
		if (get_RRROpcodeExtension_X1(insn) != UNARY_RRR_0_OPCODE_X1)
			return 0;
		switch (get_UnaryOpcodeExtension_X1(insn)) {
		case JALRP_UNARY_OPCODE_X1:
		case JALR_UNARY_OPCODE_X1:
		case JRP_UNARY_OPCODE_X1:
		case JR_UNARY_OPCODE_X1:
		case LNK_UNARY_OPCODE_X1:
			return 1;
		default:
			return 0;
		}
	default:
		return 0;
	}
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long addr = (unsigned long)p->addr;

	if (addr & (sizeof(kprobe_opcode_t) - 1))
		return -EINVAL;

	if (insn_has_control(*p->addr)) {
		pr_notice("Kprobes for control instructions are not "
			  "supported\n");
		return -EINVAL;
	}

	/* insn: must be on special executable page on tile. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	/*
	 * In the kprobe->ainsn.insn[] array we store the original
	 * instruction at index zero and a break trap instruction at
	 * index one.
	 */
	memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
	p->ainsn.insn[1] = breakpoint2_insn;
	p->opcode = *p->addr;

	return 0;
}
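
For context, this is what a minimal kprobes client exercising the arch hooks
above could look like; the API calls are the standard kprobes ones, and the
probed symbol is an arbitrary example, not anything this commit prescribes:

#include <linux/module.h>
#include <linux/kprobes.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %s, pc %#lx\n", p->symbol_name, regs->pc);
	return 0;	/* let the single-step machinery above proceed */
}

static struct kprobe example_kp = {
	.symbol_name = "do_fork",	/* arbitrary example target */
	.pre_handler = example_pre,
};

static int __init example_init(void)
{
	/* Fails with -EINVAL if the target is a control instruction. */
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");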

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	unsigned long addr_wr;

	/* Operate on writable kernel text mapping. */
	addr_wr = (unsigned long)p->addr - MEM_SV_START + PAGE_OFFSET;

	if (probe_kernel_write((void *)addr_wr, &breakpoint_insn,
			       sizeof(breakpoint_insn)))
		pr_err("%s: failed to enable kprobe\n", __func__);

	smp_wmb();
	flush_insn_slot(p);
}

void __kprobes arch_disarm_kprobe(struct kprobe *kp)
{
	unsigned long addr_wr;

	/* Operate on writable kernel text mapping. */
	addr_wr = (unsigned long)kp->addr - MEM_SV_START + PAGE_OFFSET;

	if (probe_kernel_write((void *)addr_wr, &kp->opcode,
			       sizeof(kp->opcode)))
		pr_err("%s: failed to disable kprobe\n", __func__);

	smp_wmb();
	flush_insn_slot(kp);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_pc = kcb->kprobe_saved_pc;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_pc = kcb->prev_kprobe.saved_pc;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
					 struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_pc = regs->pc;
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	/* Single step inline if the instruction is a break. */
	if (p->opcode == breakpoint_insn ||
	    p->opcode == breakpoint2_insn)
		regs->pc = (unsigned long)p->addr;
	else
		regs->pc = (unsigned long)&p->ainsn.insn[0];
}

static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *)regs->pc;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing. */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    p->ainsn.insn[0] == breakpoint_insn) {
				goto no_kprobe;
			}
			/*
			 * We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (*addr != breakpoint_insn) {
				/*
				 * The breakpoint instruction was removed by
				 * another cpu right after we hit, no further
				 * handling of this interrupt is appropriate.
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __this_cpu_read(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != breakpoint_insn) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it. */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs)) {
		/* Handler has already set things up, so skip ss setup. */
		return 1;
	}

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/*
 * Called after single-stepping. p->addr is the address of the
 * instruction that has been replaced by the breakpoint. To avoid the
 * SMP problems that can occur when we temporarily put back the
 * original opcode to single-step, we single-stepped a copy of the
 * instruction. The address of this copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap.
 */
static void __kprobes resume_execution(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb)
{
	unsigned long orig_pc = kcb->kprobe_saved_pc;
	regs->pc = orig_pc + 8;
}

static inline int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	return 1;
}

static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
		return 1;

	if (kcb->kprobe_status & KPROBE_HIT_SS) {
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		resume_execution(cur, regs, kcb);
		reset_current_kprobe();
		preempt_enable_no_resched();
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_BREAK:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEPBP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id(). */
		preempt_disable();

		if (kprobe_running()
		    && kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = regs->sp;

	memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
	       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));

	regs->pc = (unsigned long)(jp->entry);

	return 1;
}

/* Defined in the inline asm below. */
void jprobe_return_end(void);

void __kprobes jprobe_return(void)
{
	asm volatile(
		"bpt\n\t"
		".globl jprobe_return_end\n"
		"jprobe_return_end:\n");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (regs->pc >= (unsigned long)jprobe_return &&
	    regs->pc <= (unsigned long)jprobe_return_end) {
		*regs = kcb->jprobe_saved_regs;
		memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
		       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
		preempt_enable_no_resched();

		return 1;
	}
	return 0;
}

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe causes the
 *	  handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(
		"nop\n\t"
		".global kretprobe_trampoline\n"
		"kretprobe_trampoline:\n\t"
		"nop\n\t"
		: : : "memory");
}

void kretprobe_trampoline(void);

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->lr;

	/* Replace the return addr with trampoline addr */
	regs->lr = (unsigned long)kretprobe_trampoline;
}

/*
 * Called when the probe at kretprobe trampoline is hit.
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *	- instances are always inserted at the head of the list
	 *	- when multiple return probes are registered for the same
	 *	  function, the first instance's ret_addr will point to the
	 *	  real return address, and all the rest will point to
	 *	  kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address) {
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
		}
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	instruction_pointer(regs) = orig_ret_address;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
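
A sketch of the kretprobe client side that drives this trampoline (standard
kretprobes API; the target symbol and maxactive value are arbitrary examples):

#include <linux/module.h>
#include <linux/kprobes.h>

static int example_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* On tile the function's return value comes back in r0. */
	pr_info("probed function returned %ld\n", (long)regs->regs[0]);
	return 0;
}

static struct kretprobe example_krp = {
	.handler	= example_ret,
	.maxactive	= 16,		/* concurrent instances to track */
	.kp.symbol_name	= "dput",	/* arbitrary example target */
};

static int __init example_init(void)
{
	return register_kretprobe(&example_krp);
}

static void __exit example_exit(void)
{
	unregister_kretprobe(&example_krp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");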

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
		return 1;

	return 0;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *)kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	register_kprobe(&trampoline_p);
	return 0;
}

@@ -0,0 +1,224 @@
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * TILE-Gx specific __mcount support
 */

#include <linux/linkage.h>
#include <asm/ftrace.h>

#define REGSIZE 8

	.text
	.global __mcount

	.macro	MCOUNT_SAVE_REGS
	addli	sp, sp, -REGSIZE
	{
	 st	sp, lr
	 addli	r29, sp, - (12 * REGSIZE)
	}
	{
	 addli	sp, sp, - (13 * REGSIZE)
	 st	r29, sp
	}
	addli	r29, r29, REGSIZE
	{ st	r29, r0; addli	r29, r29, REGSIZE }
	{ st	r29, r1; addli	r29, r29, REGSIZE }
	{ st	r29, r2; addli	r29, r29, REGSIZE }
	{ st	r29, r3; addli	r29, r29, REGSIZE }
	{ st	r29, r4; addli	r29, r29, REGSIZE }
	{ st	r29, r5; addli	r29, r29, REGSIZE }
	{ st	r29, r6; addli	r29, r29, REGSIZE }
	{ st	r29, r7; addli	r29, r29, REGSIZE }
	{ st	r29, r8; addli	r29, r29, REGSIZE }
	{ st	r29, r9; addli	r29, r29, REGSIZE }
	{ st	r29, r10; addli	r29, r29, REGSIZE }
	.endm

	.macro	MCOUNT_RESTORE_REGS
	addli	r29, sp, (2 * REGSIZE)
	{ ld	r0, r29; addli	r29, r29, REGSIZE }
	{ ld	r1, r29; addli	r29, r29, REGSIZE }
	{ ld	r2, r29; addli	r29, r29, REGSIZE }
	{ ld	r3, r29; addli	r29, r29, REGSIZE }
	{ ld	r4, r29; addli	r29, r29, REGSIZE }
	{ ld	r5, r29; addli	r29, r29, REGSIZE }
	{ ld	r6, r29; addli	r29, r29, REGSIZE }
	{ ld	r7, r29; addli	r29, r29, REGSIZE }
	{ ld	r8, r29; addli	r29, r29, REGSIZE }
	{ ld	r9, r29; addli	r29, r29, REGSIZE }
	{ ld	r10, r29; addli	lr, sp, (13 * REGSIZE) }
	{ ld	lr, lr; addli	sp, sp, (14 * REGSIZE) }
	.endm

	.macro	RETURN_BACK
	{ move	r12, lr; move	lr, r10 }
	jrp	r12
	.endm

#ifdef CONFIG_DYNAMIC_FTRACE

	.align	64
STD_ENTRY(__mcount)
__mcount:
	j	ftrace_stub
STD_ENDPROC(__mcount)

	.align	64
STD_ENTRY(ftrace_caller)
	moveli	r11, hw2_last(function_trace_stop)
	{ shl16insli	r11, r11, hw1(function_trace_stop); move	r12, lr }
	{ shl16insli	r11, r11, hw0(function_trace_stop); move	lr, r10 }
	ld	r11, r11
	beqz	r11, 1f
	jrp	r12

1:
	{ move	r10, lr; move	lr, r12 }
	MCOUNT_SAVE_REGS

	/* arg1: self return address */
	/* arg2: parent's return address */
	{ move	r0, lr; move	r1, r10 }

	.global	ftrace_call
ftrace_call:
	/*
	 * a placeholder for the call to a real tracing function, i.e.
	 * ftrace_trace_function()
	 */
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.global	ftrace_graph_call
ftrace_graph_call:
	/*
	 * a placeholder for the call to a real tracing function, i.e.
	 * ftrace_graph_caller()
	 */
	nop
#endif
	MCOUNT_RESTORE_REGS
	.global	ftrace_stub
ftrace_stub:
	RETURN_BACK
STD_ENDPROC(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */

	.align	64
STD_ENTRY(__mcount)
	moveli	r11, hw2_last(function_trace_stop)
	{ shl16insli	r11, r11, hw1(function_trace_stop); move	r12, lr }
	{ shl16insli	r11, r11, hw0(function_trace_stop); move	lr, r10 }
	ld	r11, r11
	beqz	r11, 1f
	jrp	r12

1:
	{ move	r10, lr; move	lr, r12 }
	{
	 moveli	r11, hw2_last(ftrace_trace_function)
	 moveli	r13, hw2_last(ftrace_stub)
	}
	{
	 shl16insli	r11, r11, hw1(ftrace_trace_function)
	 shl16insli	r13, r13, hw1(ftrace_stub)
	}
	{
	 shl16insli	r11, r11, hw0(ftrace_trace_function)
	 shl16insli	r13, r13, hw0(ftrace_stub)
	}

	ld	r11, r11
	sub	r14, r13, r11
	bnez	r14, static_trace

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	moveli	r15, hw2_last(ftrace_graph_return)
	shl16insli	r15, r15, hw1(ftrace_graph_return)
	shl16insli	r15, r15, hw0(ftrace_graph_return)
	ld	r15, r15
	sub	r15, r15, r13
	bnez	r15, ftrace_graph_caller

	{
	 moveli	r16, hw2_last(ftrace_graph_entry)
	 moveli	r17, hw2_last(ftrace_graph_entry_stub)
	}
	{
	 shl16insli	r16, r16, hw1(ftrace_graph_entry)
	 shl16insli	r17, r17, hw1(ftrace_graph_entry_stub)
	}
	{
	 shl16insli	r16, r16, hw0(ftrace_graph_entry)
	 shl16insli	r17, r17, hw0(ftrace_graph_entry_stub)
	}
	ld	r16, r16
	sub	r17, r16, r17
	bnez	r17, ftrace_graph_caller

#endif
	RETURN_BACK

static_trace:
	MCOUNT_SAVE_REGS

	/* arg1: self return address */
	/* arg2: parent's return address */
	{ move	r0, lr; move	r1, r10 }

	/* call ftrace_trace_function() */
	jalr	r11

	MCOUNT_RESTORE_REGS

	.global	ftrace_stub
ftrace_stub:
	RETURN_BACK
STD_ENDPROC(__mcount)

#endif /* ! CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

STD_ENTRY(ftrace_graph_caller)
ftrace_graph_caller:
#ifndef CONFIG_DYNAMIC_FTRACE
	MCOUNT_SAVE_REGS
#endif

	/* arg1: Get the location of the parent's return address */
	addi	r0, sp, 12 * REGSIZE
	/* arg2: Get self return address */
	move	r1, lr

	jal	prepare_ftrace_return

	MCOUNT_RESTORE_REGS
	RETURN_BACK
STD_ENDPROC(ftrace_graph_caller)

	.global	return_to_handler
return_to_handler:
	MCOUNT_SAVE_REGS

	jal	ftrace_return_to_handler
	/* restore the real parent address */
	move	r11, r0

	MCOUNT_RESTORE_REGS
	jr	r11

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
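
The moveli/shl16insli sequences above build a 64-bit symbol address 16 bits at
a time: hw2_last() is the sign-extending top chunk, hw1() and hw0() the middle
and low ones. The same arithmetic expressed in C, with a made-up address
standing in for a linker symbol:

#include <stdio.h>

int main(void)
{
	unsigned long sym = 0xfffffff700123456UL;	/* hypothetical */
	unsigned long r11;

	/* moveli r11, hw2_last(sym): bits 47..32, sign-extended. */
	r11 = (unsigned long)(long)(short)(sym >> 32);
	/* shl16insli r11, r11, hw1(sym): shift 16, insert bits 31..16. */
	r11 = (r11 << 16) | ((sym >> 16) & 0xffff);
	/* shl16insli r11, r11, hw0(sym): shift 16, insert bits 15..0. */
	r11 = (r11 << 16) | (sym & 0xffff);

	printf("%d\n", r11 == sym);	/* prints 1 */
	return 0;
}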

@@ -36,8 +36,9 @@ static void *tile_dma_alloc_coherent(struct device *dev, size_t size,
				     dma_addr_t *dma_handle, gfp_t gfp,
				     struct dma_attrs *attrs)
{
	u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32);
	int node = dev_to_node(dev);
	u64 dma_mask = (dev && dev->coherent_dma_mask) ?
		dev->coherent_dma_mask : DMA_BIT_MASK(32);
	int node = dev ? dev_to_node(dev) : 0;
	int order = get_order(size);
	struct page *pg;
	dma_addr_t addr;

@@ -256,7 +257,7 @@ static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
	BUG_ON(!valid_dma_direction(direction));

	__dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
			    dma_address & PAGE_OFFSET, size, direction);
			    dma_address & (PAGE_SIZE - 1), size, direction);
}

static void tile_dma_sync_single_for_cpu(struct device *dev,
@@ -357,7 +358,7 @@ static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size,

	addr = page_to_phys(pg);

	*dma_handle = phys_to_dma(dev, addr);
	*dma_handle = addr + get_dma_offset(dev);

	return page_address(pg);
}

@@ -387,7 +388,7 @@ static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		sg->dma_address = sg_phys(sg);
		__dma_prep_pa_range(sg->dma_address, sg->length, direction);

		sg->dma_address = phys_to_dma(dev, sg->dma_address);
		sg->dma_address = sg->dma_address + get_dma_offset(dev);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
@@ -422,7 +423,7 @@ static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page,
	BUG_ON(offset + size > PAGE_SIZE);
	__dma_prep_page(page, offset, size, direction);

	return phys_to_dma(dev, page_to_pa(page) + offset);
	return page_to_pa(page) + offset + get_dma_offset(dev);
}

static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
@@ -432,10 +433,10 @@ static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
{
	BUG_ON(!valid_dma_direction(direction));

	dma_address = dma_to_phys(dev, dma_address);
	dma_address -= get_dma_offset(dev);

	__dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
			    dma_address & PAGE_OFFSET, size, direction);
			    dma_address & (PAGE_SIZE - 1), size, direction);
}

static void tile_pci_dma_sync_single_for_cpu(struct device *dev,
@@ -445,7 +446,7 @@ static void tile_pci_dma_sync_single_for_cpu(struct device *dev,
{
	BUG_ON(!valid_dma_direction(direction));

	dma_handle = dma_to_phys(dev, dma_handle);
	dma_handle -= get_dma_offset(dev);

	__dma_complete_pa_range(dma_handle, size, direction);
}

@@ -456,7 +457,7 @@ static void tile_pci_dma_sync_single_for_device(struct device *dev,
						enum dma_data_direction
						direction)
{
	dma_handle = dma_to_phys(dev, dma_handle);
	dma_handle -= get_dma_offset(dev);

	__dma_prep_pa_range(dma_handle, size, direction);
}

@@ -558,22 +559,47 @@ static struct dma_map_ops pci_swiotlb_dma_ops = {
	.mapping_error = swiotlb_dma_mapping_error,
};

static struct dma_map_ops pci_hybrid_dma_ops = {
	.alloc = tile_swiotlb_alloc_coherent,
	.free = tile_swiotlb_free_coherent,
	.map_page = tile_pci_dma_map_page,
	.unmap_page = tile_pci_dma_unmap_page,
	.map_sg = tile_pci_dma_map_sg,
	.unmap_sg = tile_pci_dma_unmap_sg,
	.sync_single_for_cpu = tile_pci_dma_sync_single_for_cpu,
	.sync_single_for_device = tile_pci_dma_sync_single_for_device,
	.sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
	.sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
	.mapping_error = tile_pci_dma_mapping_error,
	.dma_supported = tile_pci_dma_supported
};

struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
#else
struct dma_map_ops *gx_legacy_pci_dma_map_ops;
struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
#endif
EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	/* Handle legacy PCI devices with limited memory addressability. */
	if (((dma_ops == gx_pci_dma_map_ops) ||
	     (dma_ops == gx_legacy_pci_dma_map_ops)) &&
	    (mask <= DMA_BIT_MASK(32))) {
		if (mask > dev->archdata.max_direct_dma_addr)
	/*
	 * For PCI devices with 64-bit DMA addressing capability, promote
	 * the dma_ops to full capability for both streams and consistent
	 * memory access. For 32-bit capable devices, limit the consistent
	 * memory DMA range to max_direct_dma_addr.
	 */
	if (dma_ops == gx_pci_dma_map_ops ||
	    dma_ops == gx_hybrid_pci_dma_map_ops ||
	    dma_ops == gx_legacy_pci_dma_map_ops) {
		if (mask == DMA_BIT_MASK(64))
			set_dma_ops(dev, gx_pci_dma_map_ops);
		else if (mask > dev->archdata.max_direct_dma_addr)
			mask = dev->archdata.max_direct_dma_addr;
	}

@@ -584,3 +610,21 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif
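
A sketch of the driver-side sequence that reaches this promotion logic
(hypothetical probe function; the DMA-mapping calls are the standard kernel
API):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* Ask for full 64-bit DMA first; the mask == DMA_BIT_MASK(64) case
	 * above promotes the device to gx_pci_dma_map_ops. */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)))
		return 0;

	/* Otherwise fall back to 32-bit DMA; coherent allocations are then
	 * clamped to max_direct_dma_addr by the code above. */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;
	return 0;
}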

#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
/*
 * The generic dma_get_required_mask() uses the highest physical address
 * (max_pfn) to provide the hint to the PCI drivers regarding 32-bit or
 * 64-bit DMA configuration. Since TILE-Gx has an I/O TLB/MMU that lets
 * DMA use the full 64-bit PCI address space without being limited by
 * the physical memory space, we always let the PCI devices use
 * 64-bit DMA if they have that capability, by returning the 64-bit
 * DMA mask here. The device driver has the option to use 32-bit DMA if
 * the device is not capable of 64-bit DMA.
 */
u64 dma_get_required_mask(struct device *dev)
{
	return DMA_BIT_MASK(64);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
#endif
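
Driver-side, that hint is typically consumed like this (illustrative helper,
not from the tree):

#include <linux/dma-mapping.h>

static bool example_wants_64bit_descriptors(struct device *dev)
{
	/* On TILE-Gx this always reports a 64-bit mask, steering capable
	 * devices toward 64-bit DMA addressing. */
	return dma_get_required_mask(dev) > DMA_BIT_MASK(32);
}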

@@ -20,7 +20,6 @@
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/uaccess.h>
@@ -52,6 +51,8 @@
 *
 */

static int pci_probe = 1;

/*
 * This flag tells if the platform is TILEmpower that needs
 * special configuration for the PLX switch chip.
@@ -144,6 +145,11 @@ int __init tile_pci_init(void)
{
	int i;

	if (!pci_probe) {
		pr_info("PCI: disabled by boot argument\n");
		return 0;
	}

	pr_info("PCI: Searching for controllers...\n");

	/* Re-init number of PCIe controllers to support hot-plug feature. */
@@ -192,7 +198,6 @@ int __init tile_pci_init(void)
		controller->hv_cfg_fd[0] = hv_cfg_fd0;
		controller->hv_cfg_fd[1] = hv_cfg_fd1;
		controller->hv_mem_fd = hv_mem_fd;
		controller->first_busno = 0;
		controller->last_busno = 0xff;
		controller->ops = &tile_cfg_ops;

@@ -283,7 +288,7 @@ int __init pcibios_init(void)
	 * known to require at least 20ms here, but we use a more
	 * conservative value.
	 */
	mdelay(250);
	msleep(250);

	/* Scan all of the recorded PCI controllers. */
	for (i = 0; i < TILE_NUM_PCIE; i++) {
@@ -304,18 +309,10 @@ int __init pcibios_init(void)

			pr_info("PCI: initializing controller #%d\n", i);

			/*
			 * This comes from the generic Linux PCI driver.
			 *
			 * It reads the PCI tree for this bus into the Linux
			 * data structures.
			 *
			 * This is inlined in linux/pci.h and calls into
			 * pci_scan_bus_parented() in probe.c.
			 */
			pci_add_resource(&resources, &ioport_resource);
			pci_add_resource(&resources, &iomem_resource);
			bus = pci_scan_root_bus(NULL, 0, controller->ops, controller, &resources);
			bus = pci_scan_root_bus(NULL, 0, controller->ops,
						controller, &resources);
			controller->root_bus = bus;
			controller->last_busno = bus->busn_res.end;
		}

@@ -388,6 +385,16 @@ void pcibios_set_master(struct pci_dev *dev)
	/* No special bus mastering setup handling. */
}

/* Process any "pci=" kernel boot arguments. */
char *__init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		pci_probe = 0;
		return NULL;
	}
	return str;
}
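
With pcibios_setup() wired up, root-complex probing can be suppressed from the
boot loader by appending the standard argument to the kernel command line:

	pci=off

which sets pci_probe to 0 and makes tile_pci_init() return early with the
"PCI: disabled by boot argument" message above.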

/*
 * Enable memory and/or address decoding, as appropriate, for the
 * device described by the 'dev' struct.

(Diff for one file not shown because of its large size.)
@@ -113,7 +113,6 @@ arch_initcall(proc_tile_init);
 * Support /proc/sys/tile directory
 */

#ifndef __tilegx__ /* FIXME: GX: no support for unaligned access yet */
static ctl_table unaligned_subtable[] = {
	{
		.procname = "enabled",
@@ -160,4 +159,3 @@ static int __init proc_sys_tile_init(void)
}

arch_initcall(proc_sys_tile_init);
#endif

@@ -33,6 +33,7 @@
#include <asm/syscalls.h>
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#ifdef CONFIG_HARDWALL
#include <asm/hardwall.h>
#endif
@@ -74,19 +75,6 @@ void arch_release_thread_info(struct thread_info *info)
{
	struct single_step_state *step_state = info->step_state;

#ifdef CONFIG_HARDWALL
	/*
	 * We free a thread_info from the context of the task that has
	 * been scheduled next, so the original task is already dead.
	 * Calling deactivate here just frees up the data structures.
	 * If the task we're freeing held the last reference to a
	 * hardwall fd, it would have been released prior to this point
	 * anyway via exit_files(), and the hardwall_task.info pointers
	 * would be NULL by now.
	 */
	hardwall_deactivate_all(info->task);
#endif

	if (step_state) {

		/*
@@ -160,6 +148,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
	 */
	task_thread_info(p)->step_state = NULL;

#ifdef __tilegx__
	/*
	 * Do not clone unalign jit fixup from the parent; each thread
	 * must allocate its own on demand.
	 */
	task_thread_info(p)->unalign_jit_base = NULL;
#endif

	/*
	 * Copy the registers onto the kernel stack so the
	 * return-from-interrupt code will reload it into registers.
@@ -191,16 +187,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
	memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_SN_PROC()
	/* Likewise, the new thread is not running static processor code. */
	p->thread.sn_proc_running = 0;
	memset(&p->thread.sn_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_PROC_STATUS_SPR()
	/* New thread has its miscellaneous processor state bits clear. */
	p->thread.proc_status = 0;
#endif

#ifdef CONFIG_HARDWALL
	/* New thread does not own any networks. */
@@ -218,19 +206,32 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
	return 0;
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	task_thread_info(tsk)->align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(task_thread_info(tsk)->align_ctl,
			(unsigned int __user *)adr);
}

static struct task_struct corrupt_current = { .comm = "<corrupt>" };

/*
 * Return "current" if it looks plausible, or else a pointer to a dummy.
 * This can be helpful if we are just trying to emit a clean panic.
 */
struct task_struct *validate_current(void)
{
	static struct task_struct corrupt = { .comm = "<corrupt>" };
	struct task_struct *tsk = current;
	if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
		     (high_memory && (void *)tsk > high_memory) ||
		     ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
		pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
		tsk = &corrupt;
		tsk = &corrupt_current;
	}
	return tsk;
}

@@ -369,15 +370,11 @@ static void save_arch_state(struct thread_struct *t)
	t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2);
	t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3);
	t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS);
#if CHIP_HAS_PROC_STATUS_SPR()
	t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
#endif
#if !CHIP_HAS_FIXED_INTVEC_BASE()
	t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
#endif
#if CHIP_HAS_TILE_RTF_HWM()
	t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
#endif
#if CHIP_HAS_DSTREAM_PF()
	t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
#endif

@@ -398,15 +395,11 @@ static void restore_arch_state(const struct thread_struct *t)
	__insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]);
	__insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0);
#if CHIP_HAS_PROC_STATUS_SPR()
	__insn_mtspr(SPR_PROC_STATUS, t->proc_status);
#endif
#if !CHIP_HAS_FIXED_INTVEC_BASE()
	__insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
#endif
#if CHIP_HAS_TILE_RTF_HWM()
	__insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
#endif
#if CHIP_HAS_DSTREAM_PF()
	__insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
#endif

@@ -415,26 +408,11 @@ static void restore_arch_state(const struct thread_struct *t)

void _prepare_arch_switch(struct task_struct *next)
{
#if CHIP_HAS_SN_PROC()
	int snctl;
#endif
#if CHIP_HAS_TILE_DMA()
	struct tile_dma_state *dma = &current->thread.tile_dma_state;
	if (dma->enabled)
		save_tile_dma_state(dma);
#endif
#if CHIP_HAS_SN_PROC()
	/*
	 * Suspend the static network processor if it was running.
	 * We do not suspend the fabric itself, just like we don't
	 * try to suspend the UDN.
	 */
	snctl = __insn_mfspr(SPR_SNCTL);
	current->thread.sn_proc_running =
		(snctl & SPR_SNCTL__FRZPROC_MASK) == 0;
	if (current->thread.sn_proc_running)
		__insn_mtspr(SPR_SNCTL, snctl | SPR_SNCTL__FRZPROC_MASK);
#endif
}


@@ -462,17 +440,6 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
	/* Restore other arch state. */
	restore_arch_state(&next->thread);

#if CHIP_HAS_SN_PROC()
	/*
	 * Restart static network processor in the new process
	 * if it was running before.
	 */
	if (next->thread.sn_proc_running) {
		int snctl = __insn_mfspr(SPR_SNCTL);
		__insn_mtspr(SPR_SNCTL, snctl & ~SPR_SNCTL__FRZPROC_MASK);
	}
#endif

#ifdef CONFIG_HARDWALL
	/* Enable or disable access to the network registers appropriately. */
	hardwall_switch_tasks(prev, next);
@@ -514,7 +481,7 @@ int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
		schedule();
		return 1;
	}
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
#if CHIP_HAS_TILE_DMA()
	if (thread_info_flags & _TIF_ASYNC_TLB) {
		do_async_page_fault(regs);
		return 1;
@@ -564,7 +531,15 @@ void flush_thread(void)
 */
void exit_thread(void)
{
	/* Nothing */
#ifdef CONFIG_HARDWALL
	/*
	 * Remove the task from the list of tasks that are associated
	 * with any live hardwalls. (If the task that is exiting held
	 * the last reference to a hardwall fd, it would already have
	 * been released and deactivated at this point.)
	 */
	hardwall_deactivate_all(current);
#endif
}

void show_regs(struct pt_regs *regs)
@@ -573,23 +548,24 @@ void show_regs(struct pt_regs *regs)
	int i;

	pr_err("\n");
	show_regs_print_info(KERN_ERR);
	if (tsk != &corrupt_current)
		show_regs_print_info(KERN_ERR);
#ifdef __tilegx__
	for (i = 0; i < 51; i += 3)
	for (i = 0; i < 17; i++)
		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
		       i, regs->regs[i], i+1, regs->regs[i+1],
		       i+2, regs->regs[i+2]);
	pr_err(" r51: "REGFMT" r52: "REGFMT" tp : "REGFMT"\n",
	       regs->regs[51], regs->regs[52], regs->tp);
		       i, regs->regs[i], i+18, regs->regs[i+18],
		       i+36, regs->regs[i+36]);
	pr_err(" r17: "REGFMT" r35: "REGFMT" tp : "REGFMT"\n",
	       regs->regs[17], regs->regs[35], regs->tp);
	pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
#else
	for (i = 0; i < 52; i += 4)
	for (i = 0; i < 13; i++)
		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
		       " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
		       i, regs->regs[i], i+1, regs->regs[i+1],
		       i+2, regs->regs[i+2], i+3, regs->regs[i+3]);
	pr_err(" r52: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
	       regs->regs[52], regs->tp, regs->sp, regs->lr);
		       i, regs->regs[i], i+14, regs->regs[i+14],
		       i+27, regs->regs[i+27], i+40, regs->regs[i+40]);
	pr_err(" r13: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
	       regs->regs[13], regs->tp, regs->sp, regs->lr);
#endif
	pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld\n",
	       regs->pc, regs->ex1, regs->faultnum);

@@ -265,6 +265,21 @@ int do_syscall_trace_enter(struct pt_regs *regs)

void do_syscall_trace_exit(struct pt_regs *regs)
{
	long errno;

	/*
	 * The standard tile calling convention returns the value (or negative
	 * errno) in r0, and zero (or positive errno) in r1.
	 * It saves a couple of cycles on the hot path to do this work in
	 * registers only as we return, rather than updating the in-memory
	 * struct pt_regs.
	 */
	errno = (long) regs->regs[0];
	if (errno < 0 && errno > -4096)
		regs->regs[1] = -errno;
	else
		regs->regs[1] = 0;

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);

@@ -272,7 +287,7 @@ void do_syscall_trace_exit(struct pt_regs *regs)
		trace_sys_exit(regs, regs->regs[0]);
}
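
The r0/r1 convention documented above is what user space unwraps after a
syscall; in C terms the caller-side decoding is roughly this (illustrative,
not the actual libc code):

#include <errno.h>

/* r0 holds the result or the negative errno; r1 is zero on success and the
 * positive errno on failure, so callers can branch on r1 alone. */
static long unwrap_tile_syscall(long r0, long r1)
{
	if (r1 != 0) {
		errno = (int)r1;
		return -1;
	}
	return r0;
}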

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs)
{
	struct siginfo info;

@@ -288,5 +303,5 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
/* Handle synthetic interrupt delivered only by the simulator. */
void __kprobes do_breakpoint(struct pt_regs* regs, int fault_num)
{
	send_sigtrap(current, regs, fault_num);
	send_sigtrap(current, regs);
}

@@ -27,7 +27,6 @@

void machine_halt(void)
{
	warn_early_printk();
	arch_local_irq_disable_all();
	smp_send_stop();
	hv_halt();
@@ -35,7 +34,6 @@ void machine_halt(void)

void machine_power_off(void)
{
	warn_early_printk();
	arch_local_irq_disable_all();
	smp_send_stop();
	hv_power_off();

@@ -20,7 +20,7 @@
#include <asm/switch_to.h>

/*
 * See <asm/system.h>; called with prev and next task_struct pointers.
 * See <asm/switch_to.h>; called with prev and next task_struct pointers.
 * "prev" is returned in r0 for _switch_to and also for ret_from_fork.
 *
 * We want to save pc/sp in "prev", and get the new pc/sp from "next".
@@ -39,7 +39,7 @@
 */

#if CALLEE_SAVED_REGS_COUNT != 24
# error Mismatch between <asm/system.h> and kernel/entry.S
# error Mismatch between <asm/switch_to.h> and kernel/entry.S
#endif
#define FRAME_SIZE ((2 + CALLEE_SAVED_REGS_COUNT) * 4)

@@ -20,7 +20,7 @@
#include <asm/switch_to.h>

/*
 * See <asm/system.h>; called with prev and next task_struct pointers.
 * See <asm/switch_to.h>; called with prev and next task_struct pointers.
 * "prev" is returned in r0 for _switch_to and also for ret_from_fork.
 *
 * We want to save pc/sp in "prev", and get the new pc/sp from "next".
@@ -39,7 +39,7 @@
 */

#if CALLEE_SAVED_REGS_COUNT != 24
# error Mismatch between <asm/system.h> and kernel/entry.S
# error Mismatch between <asm/switch_to.h> and kernel/entry.S
#endif
#define FRAME_SIZE ((2 + CALLEE_SAVED_REGS_COUNT) * 8)

@@ -20,15 +20,6 @@
 #include <asm/page.h>
 #include <hv/hypervisor.h>

-#define ___hvb	MEM_SV_INTRPT + HV_GLUE_START_CPA
-
-#define ___hv_dispatch(f) (___hvb + (HV_DISPATCH_ENTRY_SIZE * f))
-
-#define ___hv_console_putc ___hv_dispatch(HV_DISPATCH_CONSOLE_PUTC)
-#define ___hv_halt         ___hv_dispatch(HV_DISPATCH_HALT)
-#define ___hv_reexec       ___hv_dispatch(HV_DISPATCH_REEXEC)
-#define ___hv_flush_remote ___hv_dispatch(HV_DISPATCH_FLUSH_REMOTE)
-
 #undef RELOCATE_NEW_KERNEL_VERBOSE

 STD_ENTRY(relocate_new_kernel)

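The deleted macros computed hypervisor entry points by hand from the dispatch-table layout; the replacement relies on the hv_* symbols resolving directly. A C rendering of what the old macros expanded to (illustrative only; the constants come from <hv/hypervisor.h>):

    #include <stdint.h>

    /* e.g. hv_entry(HV_DISPATCH_HALT) is the address ___hv_halt produced */
    static uintptr_t hv_entry(int dispatch_slot)
    {
            uintptr_t base = MEM_SV_INTRPT + HV_GLUE_START_CPA;  /* ___hvb */
            return base + (uintptr_t)dispatch_slot * HV_DISPATCH_ENTRY_SIZE;
    }
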
@@ -43,8 +34,8 @@ STD_ENTRY(relocate_new_kernel)
 	addi	sp, sp, -8
 	/* we now have a stack (whether we need one or not) */

-	moveli	r40, lo16(___hv_console_putc)
-	auli	r40, r40, ha16(___hv_console_putc)
+	moveli	r40, lo16(hv_console_putc)
+	auli	r40, r40, ha16(hv_console_putc)

 #ifdef RELOCATE_NEW_KERNEL_VERBOSE
 	moveli	r0, 'r'

@@ -86,7 +77,6 @@ STD_ENTRY(relocate_new_kernel)
 	move	r30, sp
 	addi	sp, sp, -8

-#if CHIP_HAS_CBOX_HOME_MAP()
 	/*
 	 * On TILEPro, we need to flush all tiles' caches, since we may
 	 * have been doing hash-for-home caching there.  Note that we

@@ -114,15 +104,14 @@ STD_ENTRY(relocate_new_kernel)
 	}
 	{
 	 move	r8, zero	/* asids */
-	 moveli	r20, lo16(___hv_flush_remote)
+	 moveli	r20, lo16(hv_flush_remote)
 	}
 	{
 	 move	r9, zero	/* asidcount */
-	 auli	r20, r20, ha16(___hv_flush_remote)
+	 auli	r20, r20, ha16(hv_flush_remote)
 	}

 	jalr	r20
-#endif

 	/* r33 is destination pointer, default to zero */

@@ -175,8 +164,8 @@ STD_ENTRY(relocate_new_kernel)
 	move	r0, r32
 	moveli	r1, 0	/* arg to hv_reexec is 64 bits */

-	moveli	r41, lo16(___hv_reexec)
-	auli	r41, r41, ha16(___hv_reexec)
+	moveli	r41, lo16(hv_reexec)
+	auli	r41, r41, ha16(hv_reexec)

 	jalr	r41

@@ -267,8 +256,8 @@ STD_ENTRY(relocate_new_kernel)
 	moveli	r0, '\n'
 	jalr	r40
 .Lhalt:
-	moveli	r41, lo16(___hv_halt)
-	auli	r41, r41, ha16(___hv_halt)
+	moveli	r41, lo16(hv_halt)
+	auli	r41, r41, ha16(hv_halt)

 	jalr	r41
 	STD_ENDPROC(relocate_new_kernel)

@@ -34,11 +34,11 @@ STD_ENTRY(relocate_new_kernel)
 	addi	sp, sp, -8
 	/* we now have a stack (whether we need one or not) */

+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
 	moveli	r40, hw2_last(hv_console_putc)
 	shl16insli r40, r40, hw1(hv_console_putc)
 	shl16insli r40, r40, hw0(hv_console_putc)

-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
 	moveli	r0, 'r'
 	jalr	r40

@@ -78,7 +78,6 @@ STD_ENTRY(relocate_new_kernel)
 	move	r30, sp
 	addi	sp, sp, -16

-#if CHIP_HAS_CBOX_HOME_MAP()
 	/*
 	 * On TILE-GX, we need to flush all tiles' caches, since we may
 	 * have been doing hash-for-home caching there.  Note that we

@@ -116,7 +115,6 @@ STD_ENTRY(relocate_new_kernel)
 	shl16insli r20, r20, hw0(hv_flush_remote)

 	jalr	r20
-#endif

 	/* r33 is destination pointer, default to zero */

@@ -176,10 +174,12 @@ STD_ENTRY(relocate_new_kernel)

 	/* we should not get here */

+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
 	moveli	r0, '?'
 	jalr	r40
 	moveli	r0, '\n'
 	jalr	r40
+#endif

 	j	.Lhalt

@@ -237,7 +237,9 @@ STD_ENTRY(relocate_new_kernel)
 	j	.Lloop


-.Lerr:	moveli r0, 'e'
+.Lerr:
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+	moveli	r0, 'e'
 	jalr	r40
 	moveli	r0, 'r'
 	jalr	r40

@@ -245,6 +247,7 @@ STD_ENTRY(relocate_new_kernel)
 	jalr	r40
 	moveli	r0, '\n'
 	jalr	r40
+#endif
 .Lhalt:
 	moveli	r41, hw2_last(hv_halt)
 	shl16insli r41, r41, hw1(hv_halt)

@@ -154,6 +154,65 @@ static int __init setup_maxnodemem(char *str)
 }
 early_param("maxnodemem", setup_maxnodemem);

+struct memmap_entry {
+	u64 addr;	/* start of memory segment */
+	u64 size;	/* size of memory segment */
+};
+static struct memmap_entry memmap_map[64];
+static int memmap_nr;
+
+static void add_memmap_region(u64 addr, u64 size)
+{
+	if (memmap_nr >= ARRAY_SIZE(memmap_map)) {
+		pr_err("Ooops! Too many entries in the memory map!\n");
+		return;
+	}
+	memmap_map[memmap_nr].addr = addr;
+	memmap_map[memmap_nr].size = size;
+	memmap_nr++;
+}
+
+static int __init setup_memmap(char *p)
+{
+	char *oldp;
+	u64 start_at, mem_size;
+
+	if (!p)
+		return -EINVAL;
+
+	if (!strncmp(p, "exactmap", 8)) {
+		pr_err("\"memmap=exactmap\" not valid on tile\n");
+		return 0;
+	}
+
+	oldp = p;
+	mem_size = memparse(p, &p);
+	if (p == oldp)
+		return -EINVAL;
+
+	if (*p == '@') {
+		pr_err("\"memmap=nn@ss\" (force RAM) invalid on tile\n");
+	} else if (*p == '#') {
+		pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on tile\n");
+	} else if (*p == '$') {
+		start_at = memparse(p+1, &p);
+		add_memmap_region(start_at, mem_size);
+	} else {
+		if (mem_size == 0)
+			return -EINVAL;
+		maxmem_pfn = (mem_size >> HPAGE_SHIFT) <<
+			(HPAGE_SHIFT - PAGE_SHIFT);
+	}
+	return *p == '\0' ? 0 : -EINVAL;
+}
+early_param("memmap", setup_memmap);
+
 static int __init setup_mem(char *str)
 {
 	return setup_maxmem(str);
 }
 early_param("mem", setup_mem);	/* compatibility with x86 */

 static int __init setup_isolnodes(char *str)
 {
 	char buf[MAX_NUMNODES * 5];

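In practice only the `$` form of the new boot argument does anything the other parameters cannot: for example, `memmap=4M$0xf8000000` excludes 4MB starting at physical address 0xf8000000 (memparse accepts K/M/G suffixes), while a bare `memmap=512M` just caps memory the way `mem=512M` does. The x86 `@` (force RAM) and `#` (force ACPI data) forms are explicitly rejected. Regions recorded by add_memmap_region() are actually reserved later, in setup_bootmem_allocator() below.
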
@@ -209,7 +268,7 @@ early_param("vmalloc", parse_vmalloc);
 /*
  * Determine for each controller where its lowmem is mapped and how much of
  * it is mapped there.  On controller zero, the first few megabytes are
- * already mapped in as code at MEM_SV_INTRPT, so in principle we could
+ * already mapped in as code at MEM_SV_START, so in principle we could
  * start our data mappings higher up, but for now we don't bother, to avoid
  * additional confusion.
  *

@@ -614,11 +673,12 @@ static void __init setup_bootmem_allocator_node(int i)
 	/*
 	 * Throw away any memory aliased by the PCI region.
 	 */
-	if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start)
-		reserve_bootmem(PFN_PHYS(pci_reserve_start_pfn),
-				PFN_PHYS(pci_reserve_end_pfn -
-					 pci_reserve_start_pfn),
+	if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start) {
+		start = max(pci_reserve_start_pfn, start);
+		end = min(pci_reserve_end_pfn, end);
+		reserve_bootmem(PFN_PHYS(start), PFN_PHYS(end - start),
 				BOOTMEM_EXCLUSIVE);
+	}
 #endif
 }

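The old call always reserved the full PCI window, even when only part of it overlapped this node's [start, end) range, so it could touch pages the node does not own. With the clamp, if the PCI hole spans PFNs 0x40000-0x80000 and the node covers PFNs 0x60000-0xa0000, only the overlap 0x60000-0x80000 is reserved.
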
@@ -628,6 +688,31 @@ static void __init setup_bootmem_allocator(void)
 	for (i = 0; i < MAX_NUMNODES; ++i)
 		setup_bootmem_allocator_node(i);

+	/* Reserve any memory excluded by "memmap" arguments. */
+	for (i = 0; i < memmap_nr; ++i) {
+		struct memmap_entry *m = &memmap_map[i];
+		reserve_bootmem(m->addr, m->size, 0);
+	}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (initrd_start) {
+		/* Make sure the initrd memory region is not modified. */
+		if (reserve_bootmem(initrd_start, initrd_end - initrd_start,
+				    BOOTMEM_EXCLUSIVE)) {
+			pr_crit("The initrd memory region has been polluted. Disabling it.\n");
+			initrd_start = 0;
+			initrd_end = 0;
+		} else {
+			/*
+			 * Translate initrd_start & initrd_end from PA to VA for
+			 * future access.
+			 */
+			initrd_start += PAGE_OFFSET;
+			initrd_end += PAGE_OFFSET;
+		}
+	}
+#endif
+
 #ifdef CONFIG_KEXEC
 	if (crashk_res.start != crashk_res.end)
 		reserve_bootmem(crashk_res.start, resource_size(&crashk_res), 0);

@@ -961,9 +1046,6 @@ void setup_cpu(int boot)
 	arch_local_irq_unmask(INT_DMATLB_MISS);
 	arch_local_irq_unmask(INT_DMATLB_ACCESS);
 #endif
-#if CHIP_HAS_SN_PROC()
-	arch_local_irq_unmask(INT_SNITLB_MISS);
-#endif
 #ifdef __tilegx__
 	arch_local_irq_unmask(INT_SINGLE_STEP_K);
 #endif

@@ -978,10 +1060,6 @@ void setup_cpu(int boot)
 	/* Static network is not restricted. */
 	__insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1);
 #endif
-#if CHIP_HAS_SN_PROC()
-	__insn_mtspr(SPR_MPL_SN_NOTIFY_SET_0, 1);
-	__insn_mtspr(SPR_MPL_SN_CPL_SET_0, 1);
-#endif

 	/*
 	 * Set the MPL for interrupt control 0 & 1 to the corresponding

@@ -1029,6 +1107,10 @@ static void __init load_hv_initrd(void)
 	int fd, rc;
 	void *initrd;

+	/* If initrd has already been set, skip initramfs file in hvfs. */
+	if (initrd_start)
+		return;
+
 	fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
 	if (fd == HV_ENOENT) {
 		if (set_initramfs_file) {

@@ -1067,6 +1149,25 @@ void __init free_initrd_mem(unsigned long begin, unsigned long end)
 	free_bootmem(__pa(begin), end - begin);
 }

+static int __init setup_initrd(char *str)
+{
+	char *endp;
+	unsigned long initrd_size;
+
+	initrd_size = str ? simple_strtoul(str, &endp, 0) : 0;
+	if (initrd_size == 0 || *endp != '@')
+		return -EINVAL;
+
+	initrd_start = simple_strtoul(endp+1, &endp, 0);
+	if (initrd_start == 0)
+		return -EINVAL;
+
+	initrd_end = initrd_start + initrd_size;
+
+	return 0;
+}
+early_param("initrd", setup_initrd);
+
 #else
 static inline void load_hv_initrd(void) {}
 #endif	/* CONFIG_BLK_DEV_INITRD */

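The argument format is `initrd=size@physical-address`, e.g. `initrd=0x400000@0x80000000` for a 4MB image preloaded at PA 0x80000000. Because simple_strtoul() is called with base 0, the numbers may be decimal, octal, or 0x-prefixed hex, but the K/M/G suffixes that memparse understands are not accepted here. When this parameter is given, the early return added to load_hv_initrd() above makes the kernel skip the initramfs file in the hypervisor filesystem.
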
@@ -1134,7 +1235,7 @@ static void __init validate_va(void)
 #ifndef __tilegx__   /* FIXME: GX: probably some validation relevant here */
 	/*
 	 * Similarly, make sure we're only using allowed VAs.
-	 * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_INTRPT,
+	 * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_START,
 	 * and 0 .. KERNEL_HIGH_VADDR.
 	 * In addition, make sure we CAN'T use the end of memory, since
 	 * we use the last chunk of each pgd for the pgd_list.

@@ -1149,7 +1250,7 @@ static void __init validate_va(void)
 		if (range.size == 0)
 			break;
 		if (range.start <= MEM_USER_INTRPT &&
-		    range.start + range.size >= MEM_HV_INTRPT)
+		    range.start + range.size >= MEM_HV_START)
 			user_kernel_ok = 1;
 		if (range.start == 0)
 			max_va = range.size;

@@ -1183,7 +1284,6 @@ static void __init validate_va(void)
 struct cpumask __write_once cpu_lotar_map;
 EXPORT_SYMBOL(cpu_lotar_map);

-#if CHIP_HAS_CBOX_HOME_MAP()
 /*
  * hash_for_home_map lists all the tiles that hash-for-home data
  * will be cached on.  Note that this may includes tiles that are not

@@ -1193,7 +1293,6 @@ EXPORT_SYMBOL(cpu_lotar_map);
 */
 struct cpumask hash_for_home_map;
 EXPORT_SYMBOL(hash_for_home_map);
-#endif

 /*
  * cpu_cacheable_map lists all the cpus whose caches the hypervisor can

@@ -1286,7 +1385,6 @@ static void __init setup_cpu_maps(void)
 		cpu_lotar_map = *cpu_possible_mask;
 	}

-#if CHIP_HAS_CBOX_HOME_MAP()
 	/* Retrieve set of CPUs used for hash-for-home caching */
 	rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
 			      (HV_VirtAddr) hash_for_home_map.bits,

@@ -1294,9 +1392,6 @@ static void __init setup_cpu_maps(void)
 	if (rc < 0)
 		early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
 	cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);
-#else
-	cpu_cacheable_map = *cpu_possible_mask;
-#endif
 }

@@ -1492,7 +1587,7 @@ void __init setup_per_cpu_areas(void)

 		/* Update the vmalloc mapping and page home. */
 		unsigned long addr = (unsigned long)ptr + i;
-		pte_t *ptep = virt_to_pte(NULL, addr);
+		pte_t *ptep = virt_to_kpte(addr);
 		pte_t pte = *ptep;
 		BUG_ON(pfn != pte_pfn(pte));
 		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);

@@ -1501,12 +1596,12 @@ void __init setup_per_cpu_areas(void)

 		/* Update the lowmem mapping for consistency. */
 		lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
-		ptep = virt_to_pte(NULL, lowmem_va);
+		ptep = virt_to_kpte(lowmem_va);
 		if (pte_huge(*ptep)) {
 			printk(KERN_DEBUG "early shatter of huge page"
 			       " at %#lx\n", lowmem_va);
 			shatter_pmd((pmd_t *)ptep);
-			ptep = virt_to_pte(NULL, lowmem_va);
+			ptep = virt_to_kpte(lowmem_va);
 			BUG_ON(pte_huge(*ptep));
 		}
 		BUG_ON(pfn != pte_pfn(*ptep));

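Judging from these call sites, virt_to_kpte() is the kernel-address case of virt_to_pte() with the NULL task pointer folded in. A plausible sketch of the helper (assumed shape, not quoted from this merge):

    /* Sketch: look up the kernel PTE for a kernel virtual address. */
    pte_t *virt_to_kpte(unsigned long kaddr)
    {
            BUG_ON(kaddr < PAGE_OFFSET);  /* kernel VAs only */
            return virt_to_pte(NULL, kaddr);
    }
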
@@ -1548,6 +1643,8 @@ insert_non_bus_resource(void)
 {
 	struct resource *res =
 		kzalloc(sizeof(struct resource), GFP_ATOMIC);
+	if (!res)
+		return NULL;
 	res->name = "Non-Bus Physical Address Space";
 	res->start = (1ULL << 32);
 	res->end = -1LL;

@@ -1561,11 +1658,13 @@ insert_non_bus_resource(void)
 #endif

 static struct resource* __init
-insert_ram_resource(u64 start_pfn, u64 end_pfn)
+insert_ram_resource(u64 start_pfn, u64 end_pfn, bool reserved)
 {
 	struct resource *res =
 		kzalloc(sizeof(struct resource), GFP_ATOMIC);
-	res->name = "System RAM";
+	if (!res)
+		return NULL;
+	res->name = reserved ? "Reserved" : "System RAM";
 	res->start = start_pfn << PAGE_SHIFT;
 	res->end = (end_pfn << PAGE_SHIFT) - 1;
 	res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;

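The new `reserved` flag only changes how the range is labeled in the resource tree, so memmap-excluded regions become distinguishable in /proc/iomem; illustratively (addresses invented):

    00000000-7fffffff : System RAM
    80000000-803fffff : Reserved
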
@@ -1585,7 +1684,7 @@ insert_ram_resource(u64 start_pfn, u64 end_pfn)
 static int __init request_standard_resources(void)
 {
 	int i;
-	enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };
+	enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET };

 #if defined(CONFIG_PCI) && !defined(__tilegx__)
 	insert_non_bus_resource();

@@ -1600,11 +1699,11 @@ static int __init request_standard_resources(void)
 		    end_pfn > pci_reserve_start_pfn) {
 			if (end_pfn > pci_reserve_end_pfn)
 				insert_ram_resource(pci_reserve_end_pfn,
-						    end_pfn);
+						    end_pfn, 0);
 			end_pfn = pci_reserve_start_pfn;
 		}
 #endif
-		insert_ram_resource(start_pfn, end_pfn);
+		insert_ram_resource(start_pfn, end_pfn, 0);
 	}

 	code_resource.start = __pa(_text - CODE_DELTA);

@@ -1615,6 +1714,13 @@ static int __init request_standard_resources(void)
 	insert_resource(&iomem_resource, &code_resource);
 	insert_resource(&iomem_resource, &data_resource);

+	/* Mark any "memmap" regions busy for the resource manager. */
+	for (i = 0; i < memmap_nr; ++i) {
+		struct memmap_entry *m = &memmap_map[i];
+		insert_ram_resource(PFN_DOWN(m->addr),
+				    PFN_UP(m->addr + m->size - 1), 1);
+	}
+
 #ifdef CONFIG_KEXEC
 	insert_resource(&iomem_resource, &crashk_res);
 #endif
Some files were not shown because too many files changed in this diff.