ARC updates for 4.6-rc1
- Big Endian io accessors fix [Lada]
- Spellos fixes [Adam]
- Fix for DW GMAC breakage [Alexey]
- Making DMA API 64-bit ready
- Shutting up -Wmaybe-uninitialized noise for ARC
- Other minor fixes here and there, comments update

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJW738lAAoJEGnX8d3iisJeProP/icm32aIHY0QXmJCBXCmQfLa
HHzfBeJ2KsG8pIRgrvraK3FJkmFr+WxZ7x6b5hPNYeHIT3c179/GZ3DlssM1md0u
sa50o5jmwd/J4o5jCKpUB/hx7wiAjpC2CYb6qIg39A2Nq5JhOFJV30XMbCscXkLI
ae/o8oATi1502cf1OQ2EqNWKfME4ogG1KsEUNrSzcd+1P8LZxsnEVBmXuPHVdHLw
kTHVgmCELsEchaV/QY9pY+uHkm9Y4vV18v0vqbklwED+cHkjmXQ2UysP3/J8KXKN
PVSqmtUJIS2vxDGK5mWvz6jkWmU8gRXoT14ZqdmMARmhVhp3+JTm2fQ53NUwZ+b2
JpPNGWVQRi86AaiUE8Fm+eWjC242CAm+lsBfx+mvqWpEvFGMlnRKw8oZiyeJhhIw
3M1yrulQG7QbTSuQrgQwfGqtrhl2nnq+X0uoMJXYHupNDQ42QK8wmJ9bT7cmutD0
K3Tmi84qoiSnN/HhWK/D9d60bLGvUY4RKiLjAcJz7lbMjtRhT/rpFFcFYCIhJyZs
y//jOZK67o1ecDXBTaUcvT+edOrQVsmatn3w0p9VwATe8OiKHsLA/0UD34gwiECy
o9g/i4tc2GfOLFoLv66czXTU9IuoKDh3HrTJgET7r1Re/+FKgJ+2+GX6AbiJzbhY
9jsAAI/ZpsS6qMhvSz3d
=n0fk
-----END PGP SIGNATURE-----

Merge tag 'arc-4.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC architecture updates from Vineet Gupta:

 - Big Endian io accessors fix [Lada]
 - Spellos fixes [Adam]
 - Fix for DW GMAC breakage [Alexey]
 - Making DMA API 64-bit ready
 - Shutting up -Wmaybe-uninitialized noise for ARC
 - Other minor fixes here and there, comments update

* tag 'arc-4.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc: (21 commits)
  ARCv2: ioremap: Support dynamic peripheral address space
  ARC: dma: reintroduce platform specific dma<->phys
  ARC: dma: ioremap: use phys_addr_t consistenctly in code paths
  ARC: dma: pass_phys() not sg_virt() to cache ops
  ARC: dma: non-coherent pages need V-P mapping if in HIGHMEM
  ARC: dma: Use struct page based page allocator helpers
  ARC: build: Turn off -Wmaybe-uninitialized for ARC gcc 4.8
  ARC: [plat-axs10x] add Ethernet PHY description in .dts
  arc: use of_platform_default_populate() to populate default bus
  ARC: thp: unbork !CONFIG_TRANSPARENT_HUGEPAGE build
  arc: [plat-nsimosci*] use ezchip network driver
  ARCv2: LLSC: software backoff is NOT needed starting HS2.1c
  ARC: mm: Use virt_to_pfn() for addr >> PAGE_SHIFT pattern
  ARC: [plat-nsim] document ranges
  ARC: build: Better way to detect ISA compatible toolchain
  ARCv2: Allow enabling PAE40 w/o HIGHMEM
  ARC: [BE] readl()/writel() to work in Big Endian CPU configuration
  ARC: [*defconfig] No need to specify CONFIG_CROSS_COMPILE
  ARC: [BE] Select correct CROSS_COMPILE prefix
  ARC: bitops: Remove non relevant comments
  ...
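The "Big Endian io accessors fix" bullet corresponds to the readl()/writel() relaxed-accessor change in the io.h hunk further down: device registers stay little endian, so the accessors byte-swap on a big-endian CPU instead of returning the raw load. A minimal host-side sketch of that idea (plain C, not kernel code; le32_to_host() only stands in for le32_to_cpu()):

/*
 * Stand-alone illustration of why the relaxed accessors need a swap on
 * a big-endian CPU.  A 32-bit device register stored in little-endian
 * byte order is read back correctly on any host once it is assembled
 * from explicit byte positions.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

static uint32_t le32_to_host(const uint8_t b[4])
{
	/* assemble from explicit byte positions: endian-neutral */
	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
	/* a device register holding 0x11223344, stored little endian */
	uint8_t reg[4] = { 0x44, 0x33, 0x22, 0x11 };
	uint32_t raw;

	memcpy(&raw, reg, 4);	/* raw load: value depends on host byte order */
	printf("raw load     : 0x%08" PRIx32 "\n", raw);
	printf("relaxed read : 0x%08" PRIx32 "\n", le32_to_host(reg));
	return 0;
}

On a little-endian host both lines print 0x11223344; on a big-endian host only the second one does, which is the behaviour the patched readl_relaxed() provides.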
@@ -391,7 +391,7 @@ config ARC_HAS_LLSC
 
 config ARC_STAR_9000923308
 	bool "Workaround for llock/scond livelock"
-	default y
+	default n
 	depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
 
 config ARC_HAS_SWAPE

@@ -462,7 +462,6 @@ config ARC_HAS_PAE40
 	bool "Support for the 40-bit Physical Address Extension"
 	default n
 	depends on ISA_ARCV2
-	select HIGHMEM
 	help
 	  Enable access to physical memory beyond 4G, only supported on
 	  ARC cores with 40 bit Physical Addressing support

@@ -473,6 +472,9 @@ config ARCH_PHYS_ADDR_T_64BIT
 config ARCH_DMA_ADDR_T_64BIT
 	bool
 
+config ARC_PLAT_NEEDS_PHYS_TO_DMA
+	bool
+
 config ARC_CURR_IN_REG
 	bool "Dedicate Register r25 for current_task pointer"
 	default y
@@ -9,7 +9,11 @@
 UTS_MACHINE := arc
 
 ifeq ($(CROSS_COMPILE),)
+ifndef CONFIG_CPU_BIG_ENDIAN
 CROSS_COMPILE := arc-linux-
+else
+CROSS_COMPILE := arceb-linux-
+endif
 endif
 
 KBUILD_DEFCONFIG := nsim_700_defconfig

@@ -18,6 +22,20 @@ cflags-y += -fno-common -pipe -fno-builtin -D__linux__
 cflags-$(CONFIG_ISA_ARCOMPACT)	+= -mA7
 cflags-$(CONFIG_ISA_ARCV2)	+= -mcpu=archs
 
+is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
+
+ifdef CONFIG_ISA_ARCOMPACT
+ifeq ($(is_700), 0)
+$(error Toolchain not configured for ARCompact builds)
+endif
+endif
+
+ifdef CONFIG_ISA_ARCV2
+ifeq ($(is_700), 1)
+$(error Toolchain not configured for ARCv2 builds)
+endif
+endif
+
 ifdef CONFIG_ARC_CURR_IN_REG
 # For a global register defintion, make sure it gets passed to every file
 # We had a customer reported bug where some code built in kernel was NOT using

@@ -58,7 +76,9 @@ endif
 ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
 # Generic build system uses -O2, we want -O3
 # Note: No need to add to cflags-y as that happens anyways
-ARCH_CFLAGS += -O3
+#
+# Disable the false maybe-uninitialized warings gcc spits out at -O3
+ARCH_CFLAGS += -O3 $(call cc-disable-warning,maybe-uninitialized,)
 endif
 
 # small data is default for elf32 tool-chain. If not usable, disable it
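The hunk above silences gcc's maybe-uninitialized warning at -O3. A reduced, hypothetical illustration of the class of false positive this targets (plain C, not taken from the kernel sources): the variable is only read on the path where it was written, but the optimizer cannot always prove that.

/*
 * 'v' is assigned only when lookup() reports success, and read only in
 * that same case, yet at higher optimization levels gcc may still emit
 * a maybe-uninitialized warning for patterns of this shape.
 */
#include <stdio.h>

static int lookup(int key, int *out)
{
	if (key & 1) {
		*out = key * 2;
		return 1;	/* found */
	}
	return 0;		/* not found */
}

int main(void)
{
	int v;			/* written only on the 'found' path */
	int ok = lookup(3, &v);

	if (ok)			/* read only when it was written */
		printf("%d\n", v);
	return 0;
}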
@@ -47,6 +47,14 @@
 			clocks = <&apbclk>;
 			clock-names = "stmmaceth";
 			max-speed = <100>;
+			mdio0 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "snps,dwmac-mdio";
+				phy1: ethernet-phy@1 {
+					reg = <1>;
+				};
+			};
 		};
 
 		ehci@0x40000 {
@@ -35,7 +35,8 @@
 		#address-cells = <1>;
 		#size-cells = <1>;
 
-		/* only perip space at end of low mem accessible */
+		/* only perip space at end of low mem accessible
+		   bus addr, parent bus addr, size */
 		ranges = <0x80000000 0x0 0x80000000 0x80000000>;
 
 		core_intc: core-interrupt-controller {
@@ -65,10 +65,9 @@
 			};
 
 			eth0: ethernet@f0003000 {
-				compatible = "snps,oscilan";
+				compatible = "ezchip,nps-mgt-enet";
 				reg = <0xf0003000 0x44>;
-				interrupts = <7>, <8>;
-				interrupt-names = "rx", "tx";
+				interrupts = <7>;
 			};
 		};
 	};
@@ -65,10 +65,9 @@
 			};
 
 			eth0: ethernet@f0003000 {
-				compatible = "snps,oscilan";
+				compatible = "ezchip,nps-mgt-enet";
 				reg = <0xf0003000 0x44>;
-				interrupts = <25>, <26>;
-				interrupt-names = "rx", "tx";
+				interrupts = <25>;
 			};
 
 			arcpct0: pct {
@@ -85,11 +85,10 @@
 			};
 
 			eth0: ethernet@f0003000 {
-				compatible = "snps,oscilan";
+				compatible = "ezchip,nps-mgt-enet";
 				reg = <0xf0003000 0x44>;
 				interrupt-parent = <&idu_intc>;
-				interrupts = <1 2>, <2 2>;
-				interrupt-names = "rx", "tx";
+				interrupts = <1 2>;
 			};
 
 			arcpct0: pct {
@@ -1,4 +1,3 @@
-CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y

@@ -1,4 +1,3 @@
-CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y

@@ -1,4 +1,3 @@
-CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y

@@ -1,4 +1,3 @@
-CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set

@@ -1,4 +1,3 @@
-CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set

@@ -1,4 +1,3 @@
-CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set

@@ -1,4 +1,3 @@
-CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set

@@ -39,6 +38,7 @@ CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
 # CONFIG_BLK_DEV is not set
 CONFIG_NETDEVICES=y
+CONFIG_EZCHIP_NPS_MANAGEMENT_ENET=y
 # CONFIG_INPUT_MOUSEDEV is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_MOUSE_PS2_ALPS is not set

@@ -1,4 +1,3 @@
-CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set

@@ -40,6 +39,7 @@ CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
 # CONFIG_BLK_DEV is not set
 CONFIG_NETDEVICES=y
+CONFIG_EZCHIP_NPS_MANAGEMENT_ENET=y
 CONFIG_INPUT_EVDEV=y
 # CONFIG_MOUSE_PS2_ALPS is not set
 # CONFIG_MOUSE_PS2_LOGIPS2PP is not set

@@ -1,4 +1,3 @@
-CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
@@ -21,6 +20,7 @@ CONFIG_MODULES=y
 CONFIG_ARC_PLAT_SIM=y
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
+# CONFIG_ARC_HAS_GFRC is not set
 CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs_idu"
 CONFIG_PREEMPT=y
 # CONFIG_COMPACTION is not set

@@ -46,6 +46,7 @@ CONFIG_NETDEVICES=y
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+CONFIG_EZCHIP_NPS_MANAGEMENT_ENET=y
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set

@@ -1,4 +1,3 @@
-CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="tb10x"
 CONFIG_SYSVIPC=y

@@ -1,4 +1,3 @@
-CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_CROSS_MEMORY_ATTACH is not set

@@ -1,4 +1,3 @@
-CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -381,12 +381,6 @@ static inline int is_isa_arcompact(void)
 	return IS_ENABLED(CONFIG_ISA_ARCOMPACT);
 }
 
-#if defined(CONFIG_ISA_ARCOMPACT) && !defined(_CPU_DEFAULT_A7)
-#error "Toolchain not configured for ARCompact builds"
-#elif defined(CONFIG_ISA_ARCV2) && !defined(_CPU_DEFAULT_HS)
-#error "Toolchain not configured for ARCv2 builds"
-#endif
-
 #endif /* __ASEMBLY__ */
 
 #endif /* _ASM_ARC_ARCREGS_H */
@@ -35,21 +35,6 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
 									\
 	m += nr >> 5;							\
 									\
-	/*								\
-	 * ARC ISA micro-optimization:					\
-	 *								\
-	 * Instructions dealing with bitpos only consider lower 5 bits	\
-	 * e.g (x << 33) is handled like (x << 1) by ASL instruction	\
-	 *  (mem pointer still needs adjustment to point to next word)	\
-	 *								\
-	 * Hence the masking to clamp @nr arg can be elided in general.	\
-	 *								\
-	 * However if @nr is a constant (above assumed in a register),	\
-	 * and greater than 31, gcc can optimize away (x << 33) to 0,	\
-	 * as overflow, given the 32-bit ISA. Thus masking needs to be	\
-	 * done for const @nr, but no code is generated due to gcc	\
-	 * const prop.							\
-	 */								\
 	nr &= 0x1f;							\
 									\
 	__asm__ __volatile__(						\
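The comment removed above argued that the ARC shifter only looks at the low 5 bits of the shift amount, while the compiler folds an over-wide constant shift to 0, so the bit position must be masked for constants. A stand-alone sketch of that reasoning (not kernel code):

/*
 * In C, shifting a 32-bit value by >= 32 is undefined and a constant
 * over-shift is simply folded away, whereas the hardware would only use
 * the low 5 bits of the amount.  Masking the position makes both views
 * agree; for a constant the mask costs nothing after const propagation.
 */
#include <stdio.h>

int main(void)
{
	volatile unsigned int pos = 33;	/* runtime value: hw would use 33 & 31 = 1 */
	unsigned int masked_const = 1u << (33 & 0x1f);	/* well defined: 1 << 1 */

	printf("masked constant shift: 0x%x\n", masked_const);
	printf("masked runtime shift : 0x%x\n", 1u << (pos & 0x1f));
	return 0;
}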
@@ -54,6 +54,7 @@ extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
 extern void read_decode_cache_bcr(void);
 
 extern int ioc_exists;
+extern unsigned long perip_base;
 
 #endif	/* !__ASSEMBLY__ */
 
@@ -40,9 +40,9 @@ void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
 
 void flush_dcache_page(struct page *page);
 
-void dma_cache_wback_inv(unsigned long start, unsigned long sz);
-void dma_cache_inv(unsigned long start, unsigned long sz);
-void dma_cache_wback(unsigned long start, unsigned long sz);
+void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
+void dma_cache_inv(phys_addr_t start, unsigned long sz);
+void dma_cache_wback(phys_addr_t start, unsigned long sz);
 
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
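The prototypes above move from unsigned long to phys_addr_t because, with PAE40, a physical address can exceed 32 bits while unsigned long stays 32-bit on ARC; passing it through unsigned long silently truncates. A stand-alone model with stdint types (not kernel code):

/*
 * Demonstrates the truncation hazard the phys_addr_t conversion avoids:
 * a 40-bit physical address loses its high bits when squeezed into a
 * 32-bit integer.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t paddr = (0x1ULL << 32) | 0x1000;	/* 40-bit physical address */
	uint32_t as_ulong = (uint32_t)paddr;		/* what a 32-bit ulong keeps */

	printf("phys_addr_t   : 0x%010" PRIx64 "\n", paddr);
	printf("unsigned long : 0x%010" PRIx32 " (high bits lost)\n", as_ulong);
	return 0;
}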
@@ -149,7 +149,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
  * Since xchg() doesn't always do that, it would seem that following defintion
  * is incorrect. But here's the rationale:
  *  SMP : Even xchg() takes the atomic_ops_lock, so OK.
- * LLSC: atomic_ops_lock are not relevent at all (even if SMP, since LLSC
+ * LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC
  *       is natively "SMP safe", no serialization required).
  * UP  : other atomics disable IRQ, so no way a difft ctxt atomic_xchg()
  *       could clobber them. atomic_xchg() itself would be 1 insn, so it
@@ -11,6 +11,13 @@
 #ifndef ASM_ARC_DMA_MAPPING_H
 #define ASM_ARC_DMA_MAPPING_H
 
+#ifndef CONFIG_ARC_PLAT_NEEDS_PHYS_TO_DMA
+#define plat_dma_to_phys(dev, dma_handle) ((phys_addr_t)(dma_handle))
+#define plat_phys_to_dma(dev, paddr) ((dma_addr_t)(paddr))
+#else
+#include <plat/dma.h>
+#endif
+
 extern struct dma_map_ops arc_dma_ops;
 
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
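The default plat_phys_to_dma()/plat_dma_to_phys() above are 1:1 casts; a platform that selects ARC_PLAT_NEEDS_PHYS_TO_DMA supplies its own pair via <plat/dma.h>. A compilable sketch of such a pair under assumptions: the 1 GB bus offset is purely hypothetical, the struct device argument of the real hooks is dropped, and the typedefs only stand in for the kernel types.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

typedef uint64_t phys_addr_t;	/* stand-in for the kernel type */
typedef uint32_t dma_addr_t;	/* stand-in for the kernel type */

#define PLAT_BUS_OFFSET	0x40000000ULL	/* hypothetical phys<->bus offset */

static dma_addr_t plat_phys_to_dma(phys_addr_t paddr)
{
	return (dma_addr_t)(paddr - PLAT_BUS_OFFSET);
}

static phys_addr_t plat_dma_to_phys(dma_addr_t handle)
{
	return (phys_addr_t)handle + PLAT_BUS_OFFSET;
}

int main(void)
{
	phys_addr_t paddr = 0x80001000ULL;
	dma_addr_t handle = plat_phys_to_dma(paddr);

	printf("phys 0x%" PRIx64 " -> bus 0x%" PRIx32 " -> phys 0x%" PRIx64 "\n",
	       paddr, handle, plat_dma_to_phys(handle));
	return 0;
}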
@@ -231,7 +231,7 @@
 	/* free up r9 as scratchpad */
 	PROLOG_FREEUP_REG	r9, @int\LVL\()_saved_reg
 
-	/* Which mode (user/kernel) was the system in when intr occured */
+	/* Which mode (user/kernel) was the system in when intr occurred */
 	lr  r9, [status32_l\LVL\()]
 
 	SWITCH_TO_KERNEL_STK
@@ -13,8 +13,8 @@
 #include <asm/byteorder.h>
 #include <asm/page.h>
 
-extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
-extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
+extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 				  unsigned long flags);
 static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
 {

@@ -138,15 +138,23 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
 #define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })
 
 /*
- * Relaxed API for drivers which can handle any ordering themselves
+ * Relaxed API for drivers which can handle barrier ordering themselves
+ *
+ * Also these are defined to perform little endian accesses.
+ * To provide the typical device register semantics of fixed endian,
+ * swap the byte order for Big Endian
+ *
+ * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
  */
 #define readb_relaxed(c)	__raw_readb(c)
-#define readw_relaxed(c)	__raw_readw(c)
-#define readl_relaxed(c)	__raw_readl(c)
+#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
+					__raw_readw(c)); __r; })
+#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
+					__raw_readl(c)); __r; })
 
 #define writeb_relaxed(v,c)	__raw_writeb(v,c)
-#define writew_relaxed(v,c)	__raw_writew(v,c)
-#define writel_relaxed(v,c)	__raw_writel(v,c)
+#define writew_relaxed(v,c)	__raw_writew((__force u16) cpu_to_le16(v),c)
+#define writel_relaxed(v,c)	__raw_writel((__force u32) cpu_to_le32(v),c)
 
 #include <asm-generic/io.h>
 
@@ -10,7 +10,6 @@
 
 #include <uapi/asm/page.h>
 
-
 #ifndef __ASSEMBLY__
 
 #define get_user_page(vaddr) __get_free_page(GFP_KERNEL)

@@ -76,30 +75,26 @@ typedef unsigned long pgprot_t;
 
 typedef pte_t * pgtable_t;
 
-#define ARCH_PFN_OFFSET		(CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT)
+#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
 
-#define pfn_valid(pfn)		(((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+#define ARCH_PFN_OFFSET		virt_to_pfn(CONFIG_LINUX_LINK_BASE)
+
+#define pfn_valid(pfn)		(((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
 
 /*
  * __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
  *
  * These macros have historically been misnamed
  * virt here means link-address/program-address as embedded in object code.
- * So if kernel img is linked at 0x8000_0000 onwards, 0x8010_0000 will be
- * 128th page, and virt_to_page( ) will return the struct page corresp to it.
- * mem_map[ ] is an array of struct page for each page frame in the system
- *
- * Independent of where linux is linked at, link-addr = physical address
- * So the old macro  __pa = vaddr + PAGE_OFFSET - CONFIG_LINUX_LINK_BASE
- * would have been wrong in case kernel is not at 0x8zs
+ * And for ARC, link-addr = physical address
  */
 #define __pa(vaddr)  ((unsigned long)vaddr)
 #define __va(paddr)  ((void *)((unsigned long)(paddr)))
 
 #define virt_to_page(kaddr)	\
-	(mem_map + ((__pa(kaddr) - CONFIG_LINUX_LINK_BASE) >> PAGE_SHIFT))
+	(mem_map + virt_to_pfn((kaddr) - CONFIG_LINUX_LINK_BASE))
 
-#define virt_addr_valid(kaddr)  pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_addr_valid(kaddr)  pfn_valid(virt_to_pfn(kaddr))
 
 /* Default Permissions for stack/heaps pages (Non Executable) */
 #define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
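The hunk above wraps the recurring "__pa(x) >> PAGE_SHIFT" idiom in virt_to_pfn(). A stand-alone model showing the two spellings agree, under assumptions: identity __pa() as on ARC, 8 KB pages, and an arbitrary example address (not kernel code, constants purely illustrative).

#include <stdio.h>

#define PAGE_SHIFT	13				/* 8 KB pages */
#define __pa(vaddr)	((unsigned long)(vaddr))	/* ARC: link addr == phys addr */
#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)

int main(void)
{
	unsigned long kaddr = 0x80102000UL;	/* some linear kernel address */

	printf("open coded : %lu\n", __pa(kaddr) >> PAGE_SHIFT);
	printf("virt_to_pfn: %lu\n", virt_to_pfn(kaddr));
	return 0;
}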
@@ -12,7 +12,7 @@
  *  - Utilise some unused free bits to confine PTE flags to 12 bits
  *     This is a must for 4k pg-sz
  *
- * vineetg: Mar 2011 - changes to accomodate MMU TLB Page Descriptor mods
+ * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
  *  -TLB Locking never really existed, except for initial specs
  *  -SILENT_xxx not needed for our port
  *  -Per my request, MMU V3 changes the layout of some of the bits

@@ -278,15 +278,14 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 #define pmd_present(x)			(pmd_val(x))
 #define pmd_clear(xp)			do { pmd_val(*(xp)) = 0; } while (0)
 
-#define pte_page(x) (mem_map + \
-		(unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
-				PAGE_SHIFT)))
+#define pte_page(pte)	\
+	(mem_map + virt_to_pfn(pte_val(pte) - CONFIG_LINUX_LINK_BASE))
 
 #define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
-#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
+#define pte_pfn(pte)		virt_to_pfn(pte_val(pte))
 #define pfn_pte(pfn, prot)	(__pte(((pte_t)(pfn) << PAGE_SHIFT) | \
 				 pgprot_val(prot)))
-#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define __pte_index(addr)	(virt_to_pfn(addr) & (PTRS_PER_PTE - 1))
 
 /*
  * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
@@ -17,8 +17,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
 void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 void local_flush_tlb_range(struct vm_area_struct *vma,
 			   unsigned long start, unsigned long end);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			       unsigned long end);
+#endif
 
 #ifndef CONFIG_SMP
 #define flush_tlb_range(vma, s, e)	local_flush_tlb_range(vma, s, e)

@@ -26,7 +28,9 @@ void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
 #define flush_tlb_kernel_range(s, e)	local_flush_tlb_kernel_range(s, e)
 #define flush_tlb_all()			local_flush_tlb_all()
 #define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define flush_pmd_tlb_range(vma, s, e)	local_flush_pmd_tlb_range(vma, s, e)
+#endif
 #else
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end);

@@ -34,7 +38,8 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm(struct mm_struct *mm);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
-
+#endif
 #endif /* CONFIG_SMP */
 #endif
@@ -91,11 +91,9 @@ static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
 
 static void read_arc_build_cfg_regs(void)
 {
-	struct bcr_perip uncached_space;
 	struct bcr_timer timer;
 	struct bcr_generic bcr;
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
-	unsigned long perip_space;
 	FIX_PTR(cpu);
 
 	READ_BCR(AUX_IDENTITY, cpu->core);

@@ -108,14 +106,6 @@ static void read_arc_build_cfg_regs(void)
 
 	cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
 
-	READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
-	if (uncached_space.ver < 3)
-		perip_space = uncached_space.start << 24;
-	else
-		perip_space = read_aux_reg(AUX_NON_VOL) & 0xF0000000;
-
-	BUG_ON(perip_space != ARC_UNCACHED_ADDR_SPACE);
-
 	READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
 
 	cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR) > 1 ? 1 : 0;	/* 2,3 */

@@ -288,8 +278,8 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
 	FIX_PTR(cpu);
 
 	n += scnprintf(buf + n, len - n,
-		       "Vector Table\t: %#x\nUncached Base\t: %#x\n",
-		       cpu->vec_base, ARC_UNCACHED_ADDR_SPACE);
+		       "Vector Table\t: %#x\nUncached Base\t: %#lx\n",
+		       cpu->vec_base, perip_base);
 
 	if (cpu->extn.fpu_sp || cpu->extn.fpu_dp)
 		n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",

@@ -357,11 +347,6 @@ static void arc_chk_core_config(void)
 		pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n");
 	else if (!cpu->extn.fpu_dp && fpu_enabled)
 		panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
-
-	if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
-	    IS_ENABLED(CONFIG_ARC_HAS_LLSC) &&
-	    !IS_ENABLED(CONFIG_ARC_STAR_9000923308))
-		panic("llock/scond livelock workaround missing\n");
 }
 
 /*

@@ -464,7 +449,7 @@ static int __init customize_machine(void)
 	 * Traverses flattened DeviceTree - registering platform devices
 	 * (if any) complete with their resources
 	 */
-	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+	of_platform_default_populate(NULL, NULL, NULL);
 
 	if (machine_desc->init_machine)
 		machine_desc->init_machine();
@@ -232,7 +232,7 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
 }
 
 /* Another API expected by schedular, shows up in "ps" as Wait Channel
- * Ofcourse just returning schedule( ) would be pointless so unwind until
+ * Of course just returning schedule( ) would be pointless so unwind until
  * the function is not in schedular code
  */
 unsigned int get_wchan(struct task_struct *tsk)
@@ -55,8 +55,8 @@
 #define ARC_REG_TIMER1_CTRL	0x101	/* timer 1 control */
 #define ARC_REG_TIMER1_CNT	0x100	/* timer 1 count */
 
-#define TIMER_CTRL_IE		(1 << 0) /* Interupt when Count reachs limit */
-#define TIMER_CTRL_NH		(1 << 1) /* Count only when CPU NOT halted */
+#define TIMER_CTRL_IE		(1 << 0) /* Interrupt when Count reaches limit */
+#define TIMER_CTRL_NH		(1 << 1) /* Count only when CPU NOT halted */
 
 #define ARC_TIMER_MAX		0xFFFFFFFF
 
@@ -24,13 +24,14 @@
 static int l2_line_sz;
 int ioc_exists;
 volatile int slc_enable = 1, ioc_enable = 1;
+unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
 
 void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
 			       unsigned long sz, const int cacheop);
 
-void (*__dma_cache_wback_inv)(unsigned long start, unsigned long sz);
-void (*__dma_cache_inv)(unsigned long start, unsigned long sz);
-void (*__dma_cache_wback)(unsigned long start, unsigned long sz);
+void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
+void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
+void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);
 
 char *arc_cache_mumbojumbo(int c, char *buf, int len)
 {

@@ -75,6 +76,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
 static void read_decode_cache_bcr_arcv2(int cpu)
 {
 	struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
+	struct bcr_generic uncached_space;
 	struct bcr_generic sbcr;
 
 	struct bcr_slc_cfg {

@@ -104,6 +106,11 @@ static void read_decode_cache_bcr_arcv2(int cpu)
 	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
 	if (cbcr.c && ioc_enable)
 		ioc_exists = 1;
+
+	/* Legacy Data Uncached BCR is deprecated from v3 onwards */
+	READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
+	if (uncached_space.ver > 2)
+		perip_base = read_aux_reg(AUX_NON_VOL) & 0xF0000000;
 }
 
 void read_decode_cache_bcr(void)
@@ -633,38 +640,38 @@ EXPORT_SYMBOL(flush_dcache_page);
  * DMA ops for systems with L1 cache only
  * Make memory coherent with L1 cache by flushing/invalidating L1 lines
  */
-static void __dma_cache_wback_inv_l1(unsigned long start, unsigned long sz)
+static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
 }
 
-static void __dma_cache_inv_l1(unsigned long start, unsigned long sz)
+static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_INV);
 }
 
-static void __dma_cache_wback_l1(unsigned long start, unsigned long sz)
+static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_FLUSH);
 }
 
 /*
  * DMA ops for systems with both L1 and L2 caches, but without IOC
- * Both L1 and L2 lines need to be explicity flushed/invalidated
+ * Both L1 and L2 lines need to be explicitly flushed/invalidated
  */
-static void __dma_cache_wback_inv_slc(unsigned long start, unsigned long sz)
+static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
 	slc_op(start, sz, OP_FLUSH_N_INV);
 }
 
-static void __dma_cache_inv_slc(unsigned long start, unsigned long sz)
+static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_INV);
 	slc_op(start, sz, OP_INV);
 }
 
-static void __dma_cache_wback_slc(unsigned long start, unsigned long sz)
+static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_FLUSH);
 	slc_op(start, sz, OP_FLUSH);
@@ -675,26 +682,26 @@ static void __dma_cache_wback_slc(unsigned long start, unsigned long sz)
  * IOC hardware snoops all DMA traffic keeping the caches consistent with
  * memory - eliding need for any explicit cache maintenance of DMA buffers
  */
-static void __dma_cache_wback_inv_ioc(unsigned long start, unsigned long sz) {}
-static void __dma_cache_inv_ioc(unsigned long start, unsigned long sz) {}
-static void __dma_cache_wback_ioc(unsigned long start, unsigned long sz) {}
+static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}
+static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}
+static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}
 
 /*
  * Exported DMA API
 */
-void dma_cache_wback_inv(unsigned long start, unsigned long sz)
+void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
 {
 	__dma_cache_wback_inv(start, sz);
 }
 EXPORT_SYMBOL(dma_cache_wback_inv);
 
-void dma_cache_inv(unsigned long start, unsigned long sz)
+void dma_cache_inv(phys_addr_t start, unsigned long sz)
 {
 	__dma_cache_inv(start, sz);
 }
 EXPORT_SYMBOL(dma_cache_inv);
 
-void dma_cache_wback(unsigned long start, unsigned long sz)
+void dma_cache_wback(phys_addr_t start, unsigned long sz)
 {
 	__dma_cache_wback(start, sz);
 }
@@ -24,22 +24,22 @@
 static void *arc_dma_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
 {
-	void *paddr, *kvaddr;
+	unsigned long order = get_order(size);
+	struct page *page;
+	phys_addr_t paddr;
+	void *kvaddr;
+	int need_coh = 1, need_kvaddr = 0;
 
-	/* This is linear addr (0x8000_0000 based) */
-	paddr = alloc_pages_exact(size, gfp);
-	if (!paddr)
+	page = alloc_pages(gfp, order);
+	if (!page)
 		return NULL;
 
-	/* This is bus address, platform dependent */
-	*dma_handle = (dma_addr_t)paddr;
-
 	/*
 	 * IOC relies on all data (even coherent DMA data) being in cache
 	 * Thus allocate normal cached memory
 	 *
 	 * The gains with IOC are two pronged:
-	 *   -For streaming data, elides needs for cache maintenance, saving
+	 *   -For streaming data, elides need for cache maintenance, saving
 	 *      cycles in flush code, and bus bandwidth as all the lines of a
 	 *      buffer need to be flushed out to memory
 	 *   -For coherent data, Read/Write to buffers terminate early in cache
@@ -47,12 +47,31 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 	 */
 	if ((is_isa_arcv2() && ioc_exists) ||
 	    dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
-		return paddr;
+		need_coh = 0;
+
+	/*
+	 * - A coherent buffer needs MMU mapping to enforce non-cachability
+	 * - A highmem page needs a virtual handle (hence MMU mapping)
+	 *   independent of cachability
+	 */
+	if (PageHighMem(page) || need_coh)
+		need_kvaddr = 1;
+
+	/* This is linear addr (0x8000_0000 based) */
+	paddr = page_to_phys(page);
+
+	*dma_handle = plat_phys_to_dma(dev, paddr);
 
 	/* This is kernel Virtual address (0x7000_0000 based) */
-	kvaddr = ioremap_nocache((unsigned long)paddr, size);
-	if (kvaddr == NULL)
-		return NULL;
+	if (need_kvaddr) {
+		kvaddr = ioremap_nocache(paddr, size);
+		if (kvaddr == NULL) {
+			__free_pages(page, order);
+			return NULL;
+		}
+	} else {
+		kvaddr = (void *)(u32)paddr;
+	}
 
 	/*
 	 * Evict any existing L1 and/or L2 lines for the backing page
@@ -64,7 +83,8 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 	 * Currently flush_cache_vmap nukes the L1 cache completely which
 	 * will be optimized as a separate commit
 	 */
-	dma_cache_wback_inv((unsigned long)paddr, size);
+	if (need_coh)
+		dma_cache_wback_inv(paddr, size);
 
 	return kvaddr;
 }
@@ -72,11 +92,16 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
-	if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) &&
-	    !(is_isa_arcv2() && ioc_exists))
+	struct page *page = virt_to_page(dma_handle);
+	int is_non_coh = 1;
+
+	is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) ||
+			(is_isa_arcv2() && ioc_exists);
+
+	if (PageHighMem(page) || !is_non_coh)
 		iounmap((void __force __iomem *)vaddr);
 
-	free_pages_exact((void *)dma_handle, size);
+	__free_pages(page, get_order(size));
 }
 
 /*
@@ -84,7 +109,7 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
  * CPU accesses page via normal paddr, thus needs to explicitly made
  * consistent before each use
  */
-static void _dma_cache_sync(unsigned long paddr, size_t size,
+static void _dma_cache_sync(phys_addr_t paddr, size_t size,
 		enum dma_data_direction dir)
 {
 	switch (dir) {

@@ -98,7 +123,7 @@ static void _dma_cache_sync(unsigned long paddr, size_t size,
 		dma_cache_wback_inv(paddr, size);
 		break;
 	default:
-		pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
+		pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr);
 	}
 }
 
@@ -106,9 +131,9 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	unsigned long paddr = page_to_phys(page) + offset;
+	phys_addr_t paddr = page_to_phys(page) + offset;
 	_dma_cache_sync(paddr, size, dir);
-	return (dma_addr_t)paddr;
+	return plat_phys_to_dma(dev, paddr);
 }
 
 static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,

@@ -127,13 +152,13 @@ static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
 static void arc_dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
 {
-	_dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
+	_dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_FROM_DEVICE);
 }
 
 static void arc_dma_sync_single_for_device(struct device *dev,
 		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
 {
-	_dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
+	_dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_TO_DEVICE);
 }
 
 static void arc_dma_sync_sg_for_cpu(struct device *dev,
@@ -144,7 +169,7 @@ static void arc_dma_sync_sg_for_cpu(struct device *dev,
 	struct scatterlist *sg;
 
 	for_each_sg(sglist, sg, nelems, i)
-		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+		_dma_cache_sync(sg_phys(sg), sg->length, dir);
 }
 
 static void arc_dma_sync_sg_for_device(struct device *dev,

@@ -155,7 +180,7 @@ static void arc_dma_sync_sg_for_device(struct device *dev,
 	struct scatterlist *sg;
 
 	for_each_sg(sglist, sg, nelems, i)
-		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+		_dma_cache_sync(sg_phys(sg), sg->length, dir);
 }
 
 static int arc_dma_supported(struct device *dev, u64 dma_mask)
@@ -18,7 +18,7 @@
 /*
  * HIGHMEM API:
  *
- * kmap() API provides sleep semantics hence refered to as "permanent maps"
+ * kmap() API provides sleep semantics hence referred to as "permanent maps"
  * It allows mapping LAST_PKMAP pages, using @last_pkmap_nr as the cursor
  * for book-keeping
 *
@@ -14,18 +14,33 @@
 #include <linux/slab.h>
 #include <linux/cache.h>
 
-void __iomem *ioremap(unsigned long paddr, unsigned long size)
+static inline bool arc_uncached_addr_space(phys_addr_t paddr)
 {
-	unsigned long end;
+	if (is_isa_arcompact()) {
+		if (paddr >= ARC_UNCACHED_ADDR_SPACE)
+			return true;
+	} else if (paddr >= perip_base && paddr <= 0xFFFFFFFF) {
+		return true;
+	}
+
+	return false;
+}
+
+void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
+{
+	phys_addr_t end;
 
 	/* Don't allow wraparound or zero size */
 	end = paddr + size - 1;
 	if (!size || (end < paddr))
 		return NULL;
 
-	/* If the region is h/w uncached, avoid MMU mappings */
-	if (paddr >= ARC_UNCACHED_ADDR_SPACE)
-		return (void __iomem *)paddr;
+	/*
+	 * If the region is h/w uncached, MMU mapping can be elided as optim
+	 * The cast to u32 is fine as this region can only be inside 4GB
+	 */
+	if (arc_uncached_addr_space(paddr))
+		return (void __iomem *)(u32)paddr;
 
 	return ioremap_prot(paddr, size, PAGE_KERNEL_NO_CACHE);
 }
@@ -41,9 +56,9 @@ EXPORT_SYMBOL(ioremap);
 void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 			   unsigned long flags)
 {
-	void __iomem *vaddr;
+	unsigned long vaddr;
 	struct vm_struct *area;
-	unsigned long off, end;
+	phys_addr_t off, end;
 	pgprot_t prot = __pgprot(flags);
 
 	/* Don't allow wraparound, zero size */

@@ -70,9 +85,8 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 	if (!area)
 		return NULL;
 	area->phys_addr = paddr;
-	vaddr = (void __iomem *)area->addr;
-	if (ioremap_page_range((unsigned long)vaddr,
-			       (unsigned long)vaddr + size, paddr, prot)) {
+	vaddr = (unsigned long)area->addr;
+	if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
 		vunmap((void __force *)vaddr);
 		return NULL;
 	}
@@ -83,7 +97,8 @@ EXPORT_SYMBOL(ioremap_prot);
 
 void iounmap(const void __iomem *addr)
 {
-	if (addr >= (void __force __iomem *)ARC_UNCACHED_ADDR_SPACE)
+	/* weird double cast to handle phys_addr_t > 32 bits */
+	if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
 		return;
 
 	vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
@@ -45,7 +45,7 @@
  * in interrupt-safe region.
  *
  * Vineetg: April 23rd Bug #93131
- *    Problem: tlb_flush_kernel_range() doesnt do anything if the range to
+ *    Problem: tlb_flush_kernel_range() doesn't do anything if the range to
  *    flush is more than the size of TLB itself.
  *
  * Rahul Trivedi : Codito Technologies 2004

@@ -167,7 +167,7 @@ static void utlb_invalidate(void)
 	/* MMU v2 introduced the uTLB Flush command.
 	 * There was however an obscure hardware bug, where uTLB flush would
 	 * fail when a prior probe for J-TLB (both totally unrelated) would
-	 * return lkup err - because the entry didnt exist in MMU.
+	 * return lkup err - because the entry didn't exist in MMU.
 	 * The Workround was to set Index reg with some valid value, prior to
 	 * flush. This was fixed in MMU v3 hence not needed any more
 	 */

@@ -210,7 +210,7 @@ static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
 
 	/*
 	 * Commit the Entry to MMU
-	 * It doesnt sound safe to use the TLBWriteNI cmd here
+	 * It doesn't sound safe to use the TLBWriteNI cmd here
 	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
 	 */
 	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);

@@ -636,7 +636,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
 * support.
 *
 * Normal and Super pages can co-exist (ofcourse not overlap) in TLB with a
- * new bit "SZ" in TLB page desciptor to distinguish between them.
+ * new bit "SZ" in TLB page descriptor to distinguish between them.
 * Super Page size is configurable in hardware (4K to 16M), but fixed once
 * RTL builds.
 *