Merge branch 'for-linus' into for-next
This commit is contained in:
Commit 3fb42daaf1
@@ -587,7 +587,7 @@ used to control it:

modprobe ipmi_watchdog timeout=<t> pretimeout=<t> action=<action type>
preaction=<preaction type> preop=<preop type> start_now=x
nowayout=x ifnum_to_use=n
nowayout=x ifnum_to_use=n panic_wdt_timeout=<t>

ifnum_to_use specifies which interface the watchdog timer should use.
The default is -1, which means to pick the first one registered.

@@ -597,7 +597,9 @@ is the amount of seconds before the reset that the pre-timeout panic will
occur (if pretimeout is zero, then pretimeout will not be enabled). Note
that the pretimeout is the time before the final timeout. So if the
timeout is 50 seconds and the pretimeout is 10 seconds, then the pretimeout
will occur in 40 second (10 seconds before the timeout).
will occur in 40 second (10 seconds before the timeout). The panic_wdt_timeout
is the value of timeout which is set on kernel panic, in order to let actions
such as kdump to occur during panic.

The action may be "reset", "power_cycle", or "power_off", and
specifies what to do when the timer times out, and defaults to

@@ -634,6 +636,7 @@ for configuring the watchdog:
ipmi_watchdog.preop=<preop type>
ipmi_watchdog.start_now=x
ipmi_watchdog.nowayout=x
ipmi_watchdog.panic_wdt_timeout=<t>

The options are the same as the module parameter options.
@@ -32,6 +32,7 @@ Supported adapters:
* Intel Sunrise Point-LP (PCH)
* Intel DNV (SOC)
* Intel Broxton (SOC)
* Intel Lewisburg (PCH)
Datasheets: Publicly available at the Intel website

On Intel Patsburg and later chipsets, both the normal host SMBus controller
@@ -1583,9 +1583,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
hwp_only
Only load intel_pstate on systems which support
hardware P state control (HWP) if available.
no_acpi
Don't use ACPI processor performance control objects
_PSS and _PPC specified limits.

intremap= [X86-64, Intel-IOMMU]
on enable Interrupt Remapping (default)
40 MAINTAINERS
@@ -2449,7 +2449,9 @@ F: drivers/firmware/broadcom/*

BROADCOM STB NAND FLASH DRIVER
M: Brian Norris <computersforpeace@gmail.com>
M: Kamal Dasu <kdasu.kdev@gmail.com>
L: linux-mtd@lists.infradead.org
L: bcm-kernel-feedback-list@broadcom.com
S: Maintained
F: drivers/mtd/nand/brcmnand/

@@ -2546,7 +2548,7 @@ F: arch/c6x/

CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS
M: David Howells <dhowells@redhat.com>
L: linux-cachefs@redhat.com
L: linux-cachefs@redhat.com (moderated for non-subscribers)
S: Supported
F: Documentation/filesystems/caching/cachefiles.txt
F: fs/cachefiles/

@@ -2929,10 +2931,9 @@ S: Maintained
F: drivers/platform/x86/compal-laptop.c

CONEXANT ACCESSRUNNER USB DRIVER
M: Simon Arlott <cxacru@fire.lp0.eu>
L: accessrunner-general@lists.sourceforge.net
W: http://accessrunner.sourceforge.net/
S: Maintained
S: Orphan
F: drivers/usb/atm/cxacru.c

CONFIGFS

@@ -4409,6 +4410,7 @@ K: fmc_d.*register

FPGA MANAGER FRAMEWORK
M: Alan Tull <atull@opensource.altera.com>
R: Moritz Fischer <moritz.fischer@ettus.com>
S: Maintained
F: drivers/fpga/
F: include/linux/fpga/fpga-mgr.h

@@ -4559,7 +4561,7 @@ F: include/linux/frontswap.h

FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS
M: David Howells <dhowells@redhat.com>
L: linux-cachefs@redhat.com
L: linux-cachefs@redhat.com (moderated for non-subscribers)
S: Supported
F: Documentation/filesystems/caching/
F: fs/fscache/

@@ -5711,13 +5713,6 @@ M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
S: Maintained
F: net/ipv4/netfilter/ipt_MASQUERADE.c

IP1000A 10/100/1000 GIGABIT ETHERNET DRIVER
M: Francois Romieu <romieu@fr.zoreil.com>
M: Sorbica Shieh <sorbica@icplus.com.tw>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/icplus/ipg.*

IPATH DRIVER
M: Mike Marciniszyn <infinipath@intel.com>
L: linux-rdma@vger.kernel.org

@@ -6923,13 +6918,21 @@ F: drivers/scsi/megaraid.*
F: drivers/scsi/megaraid/

MELLANOX ETHERNET DRIVER (mlx4_en)
M: Amir Vadai <amirv@mellanox.com>
M: Eugenia Emantayev <eugenia@mellanox.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/mellanox/mlx4/en_*

MELLANOX ETHERNET DRIVER (mlx5e)
M: Saeed Mahameed <saeedm@mellanox.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/mellanox/mlx5/core/en_*

MELLANOX ETHERNET SWITCH DRIVERS
M: Jiri Pirko <jiri@mellanox.com>
M: Ido Schimmel <idosch@mellanox.com>

@@ -7901,6 +7904,18 @@ S: Maintained
F: net/openvswitch/
F: include/uapi/linux/openvswitch.h

OPERATING PERFORMANCE POINTS (OPP)
M: Viresh Kumar <vireshk@kernel.org>
M: Nishanth Menon <nm@ti.com>
M: Stephen Boyd <sboyd@codeaurora.org>
L: linux-pm@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git
F: drivers/base/power/opp/
F: include/linux/pm_opp.h
F: Documentation/power/opp.txt
F: Documentation/devicetree/bindings/opp/

OPL4 DRIVER
M: Clemens Ladisch <clemens@ladisch.de>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)

@@ -9314,7 +9329,6 @@ F: drivers/i2c/busses/i2c-designware-*
F: include/linux/platform_data/i2c-designware.h

SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
M: Seungwon Jeon <tgih.jun@samsung.com>
M: Jaehoon Chung <jh80.chung@samsung.com>
L: linux-mmc@vger.kernel.org
S: Maintained
2 Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Blurry Fish Butt

# *DOCUMENTATION*
@@ -486,7 +486,10 @@
compatible = "fsl,imx27-usb";
reg = <0x10024000 0x200>;
interrupts = <56>;
clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
<&clks IMX27_CLK_USB_AHB_GATE>,
<&clks IMX27_CLK_USB_DIV>;
clock-names = "ipg", "ahb", "per";
fsl,usbmisc = <&usbmisc 0>;
status = "disabled";
};

@@ -495,7 +498,10 @@
compatible = "fsl,imx27-usb";
reg = <0x10024200 0x200>;
interrupts = <54>;
clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
<&clks IMX27_CLK_USB_AHB_GATE>,
<&clks IMX27_CLK_USB_DIV>;
clock-names = "ipg", "ahb", "per";
fsl,usbmisc = <&usbmisc 1>;
dr_mode = "host";
status = "disabled";

@@ -505,7 +511,10 @@
compatible = "fsl,imx27-usb";
reg = <0x10024400 0x200>;
interrupts = <55>;
clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
<&clks IMX27_CLK_USB_AHB_GATE>,
<&clks IMX27_CLK_USB_DIV>;
clock-names = "ipg", "ahb", "per";
fsl,usbmisc = <&usbmisc 2>;
dr_mode = "host";
status = "disabled";

@@ -515,7 +524,6 @@
#index-cells = <1>;
compatible = "fsl,imx27-usbmisc";
reg = <0x10024600 0x200>;
clocks = <&clks IMX27_CLK_USB_AHB_GATE>;
};

sahara2: sahara@10025000 {
@@ -1061,7 +1061,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
}
build_epilogue(&ctx);

flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));
flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));

#if __LINUX_ARM_ARCH__ < 7
if (ctx.imm_count)
@@ -237,7 +237,7 @@ EXPORT_SYMBOL(ce_aes_setkey);
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-ce",
.cra_priority = 300,
.cra_priority = 250,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
@@ -64,27 +64,31 @@ do { \

#define smp_load_acquire(p) \
({ \
typeof(*p) ___p1; \
union { typeof(*p) __val; char __c[1]; } __u; \
compiletime_assert_atomic_type(*p); \
switch (sizeof(*p)) { \
case 1: \
asm volatile ("ldarb %w0, %1" \
: "=r" (___p1) : "Q" (*p) : "memory"); \
: "=r" (*(__u8 *)__u.__c) \
: "Q" (*p) : "memory"); \
break; \
case 2: \
asm volatile ("ldarh %w0, %1" \
: "=r" (___p1) : "Q" (*p) : "memory"); \
: "=r" (*(__u16 *)__u.__c) \
: "Q" (*p) : "memory"); \
break; \
case 4: \
asm volatile ("ldar %w0, %1" \
: "=r" (___p1) : "Q" (*p) : "memory"); \
: "=r" (*(__u32 *)__u.__c) \
: "Q" (*p) : "memory"); \
break; \
case 8: \
asm volatile ("ldar %0, %1" \
: "=r" (___p1) : "Q" (*p) : "memory"); \
: "=r" (*(__u64 *)__u.__c) \
: "Q" (*p) : "memory"); \
break; \
} \
___p1; \
__u.__val; \
})

#define read_barrier_depends() do { } while(0)
@ -23,7 +23,6 @@
|
|||
*/
|
||||
#include <linux/types.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/ptrace.h>
|
||||
|
||||
#define COMPAT_USER_HZ 100
|
||||
#ifdef __AARCH64EB__
|
||||
|
@ -234,7 +233,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
|
|||
return (u32)(unsigned long)uptr;
|
||||
}
|
||||
|
||||
#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs()))
|
||||
#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
|
||||
|
||||
static inline void __user *arch_compat_alloc_user_space(long len)
|
||||
{
|
||||
|
|
|
@ -18,7 +18,6 @@
|
|||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
|
@ -26,22 +25,16 @@
|
|||
#include <asm/xen/hypervisor.h>
|
||||
|
||||
#define DMA_ERROR_CODE (~(dma_addr_t)0)
|
||||
extern struct dma_map_ops *dma_ops;
|
||||
extern struct dma_map_ops dummy_dma_ops;
|
||||
|
||||
static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
|
||||
{
|
||||
if (unlikely(!dev))
|
||||
return dma_ops;
|
||||
else if (dev->archdata.dma_ops)
|
||||
if (dev && dev->archdata.dma_ops)
|
||||
return dev->archdata.dma_ops;
|
||||
else if (acpi_disabled)
|
||||
return dma_ops;
|
||||
|
||||
/*
|
||||
* When ACPI is enabled, if arch_set_dma_ops is not called,
|
||||
* we will disable device DMA capability by setting it
|
||||
* to dummy_dma_ops.
|
||||
* We expect no ISA devices, and all other DMA masters are expected to
|
||||
* have someone call arch_setup_dma_ops at device creation time.
|
||||
*/
|
||||
return &dummy_dma_ops;
|
||||
}
|
||||
|
|
|
@ -101,7 +101,7 @@ static inline void cpu_set_default_tcr_t0sz(void)
|
|||
#define destroy_context(mm) do { } while(0)
|
||||
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
|
||||
|
||||
#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
|
||||
#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; })
|
||||
|
||||
/*
|
||||
* This is called when "tsk" is about to enter lazy TLB mode.
|
||||
|
|
|
@ -81,6 +81,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
|
|||
|
||||
#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
|
||||
#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
|
||||
#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
|
||||
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
|
||||
#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
|
||||
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
#include <linux/seq_file.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/delay.h>
|
||||
|
||||
/*
|
||||
* In case the boot CPU is hotpluggable, we record its initial state and
|
||||
|
@ -112,6 +113,10 @@ static int c_show(struct seq_file *m, void *v)
|
|||
*/
|
||||
seq_printf(m, "processor\t: %d\n", i);
|
||||
|
||||
seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
|
||||
loops_per_jiffy / (500000UL/HZ),
|
||||
loops_per_jiffy / (5000UL/HZ) % 100);
|
||||
|
||||
/*
|
||||
* Dump out the common processor features in a single line.
|
||||
* Userspace should read the hwcaps with getauxval(AT_HWCAP)
|
||||
|
|
|
@ -224,6 +224,8 @@ static bool __init efi_virtmap_init(void)
|
|||
{
|
||||
efi_memory_desc_t *md;
|
||||
|
||||
init_new_context(NULL, &efi_mm);
|
||||
|
||||
for_each_efi_memory_desc(&memmap, md) {
|
||||
u64 paddr, npages, size;
|
||||
pgprot_t prot;
|
||||
|
@ -254,7 +256,8 @@ static bool __init efi_virtmap_init(void)
|
|||
else
|
||||
prot = PAGE_KERNEL;
|
||||
|
||||
create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
|
||||
create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size,
|
||||
__pgprot(pgprot_val(prot) | PTE_NG));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
@ -329,14 +332,7 @@ core_initcall(arm64_dmi_init);
|
|||
|
||||
static void efi_set_pgd(struct mm_struct *mm)
|
||||
{
|
||||
if (mm == &init_mm)
|
||||
cpu_set_reserved_ttbr0();
|
||||
else
|
||||
cpu_switch_mm(mm->pgd, mm);
|
||||
|
||||
local_flush_tlb_all();
|
||||
if (icache_is_aivivt())
|
||||
__local_flush_icache_all();
|
||||
switch_mm(NULL, mm, NULL);
|
||||
}
|
||||
|
||||
void efi_virtmap_load(void)
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
#include <linux/ftrace.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/slab.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
@ -70,6 +71,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
|
|||
*/
|
||||
local_dbg_save(flags);
|
||||
|
||||
/*
|
||||
* Function graph tracer state gets incosistent when the kernel
|
||||
* calls functions that never return (aka suspend finishers) hence
|
||||
* disable graph tracing during their execution.
|
||||
*/
|
||||
pause_graph_tracing();
|
||||
|
||||
/*
|
||||
* mm context saved on the stack, it will be restored when
|
||||
* the cpu comes out of reset through the identity mapped
|
||||
|
@ -111,6 +119,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
|
|||
hw_breakpoint_restore(NULL);
|
||||
}
|
||||
|
||||
unpause_graph_tracing();
|
||||
|
||||
/*
|
||||
* Restore pstate flags. OS lock and mdscr have been already
|
||||
* restored, so from this point onwards, debugging is fully
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
*/
|
||||
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/genalloc.h>
|
||||
|
@ -28,9 +29,6 @@
|
|||
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
struct dma_map_ops *dma_ops;
|
||||
EXPORT_SYMBOL(dma_ops);
|
||||
|
||||
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
|
||||
bool coherent)
|
||||
{
|
||||
|
@ -515,13 +513,7 @@ EXPORT_SYMBOL(dummy_dma_ops);
|
|||
|
||||
static int __init arm64_dma_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
dma_ops = &swiotlb_dma_ops;
|
||||
|
||||
ret = atomic_pool_init();
|
||||
|
||||
return ret;
|
||||
return atomic_pool_init();
|
||||
}
|
||||
arch_initcall(arm64_dma_init);
|
||||
|
||||
|
@ -552,10 +544,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
|
|||
{
|
||||
bool coherent = is_device_dma_coherent(dev);
|
||||
int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
|
||||
size_t iosize = size;
|
||||
void *addr;
|
||||
|
||||
if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
|
||||
return NULL;
|
||||
|
||||
size = PAGE_ALIGN(size);
|
||||
|
||||
/*
|
||||
* Some drivers rely on this, and we probably don't want the
|
||||
* possibility of stale kernel data being read by devices anyway.
|
||||
|
@ -566,7 +562,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
|
|||
struct page **pages;
|
||||
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
|
||||
|
||||
pages = iommu_dma_alloc(dev, size, gfp, ioprot, handle,
|
||||
pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle,
|
||||
flush_page);
|
||||
if (!pages)
|
||||
return NULL;
|
||||
|
@ -574,7 +570,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
|
|||
addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
|
||||
__builtin_return_address(0));
|
||||
if (!addr)
|
||||
iommu_dma_free(dev, pages, size, handle);
|
||||
iommu_dma_free(dev, pages, iosize, handle);
|
||||
} else {
|
||||
struct page *page;
|
||||
/*
|
||||
|
@ -591,7 +587,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
|
|||
if (!addr)
|
||||
return NULL;
|
||||
|
||||
*handle = iommu_dma_map_page(dev, page, 0, size, ioprot);
|
||||
*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
|
||||
if (iommu_dma_mapping_error(dev, *handle)) {
|
||||
if (coherent)
|
||||
__free_pages(page, get_order(size));
|
||||
|
@ -606,6 +602,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
|
|||
static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
|
||||
dma_addr_t handle, struct dma_attrs *attrs)
|
||||
{
|
||||
size_t iosize = size;
|
||||
|
||||
size = PAGE_ALIGN(size);
|
||||
/*
|
||||
* @cpu_addr will be one of 3 things depending on how it was allocated:
|
||||
* - A remapped array of pages from iommu_dma_alloc(), for all
|
||||
|
@ -617,17 +616,17 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
|
|||
* Hence how dodgy the below logic looks...
|
||||
*/
|
||||
if (__in_atomic_pool(cpu_addr, size)) {
|
||||
iommu_dma_unmap_page(dev, handle, size, 0, NULL);
|
||||
iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
|
||||
__free_from_pool(cpu_addr, size);
|
||||
} else if (is_vmalloc_addr(cpu_addr)){
|
||||
struct vm_struct *area = find_vm_area(cpu_addr);
|
||||
|
||||
if (WARN_ON(!area || !area->pages))
|
||||
return;
|
||||
iommu_dma_free(dev, area->pages, size, &handle);
|
||||
iommu_dma_free(dev, area->pages, iosize, &handle);
|
||||
dma_common_free_remap(cpu_addr, size, VM_USERMAP);
|
||||
} else {
|
||||
iommu_dma_unmap_page(dev, handle, size, 0, NULL);
|
||||
iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
|
||||
__free_pages(virt_to_page(cpu_addr), get_order(size));
|
||||
}
|
||||
}
|
||||
|
@ -984,8 +983,8 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
|||
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
||||
struct iommu_ops *iommu, bool coherent)
|
||||
{
|
||||
if (!acpi_disabled && !dev->archdata.dma_ops)
|
||||
dev->archdata.dma_ops = dma_ops;
|
||||
if (!dev->archdata.dma_ops)
|
||||
dev->archdata.dma_ops = &swiotlb_dma_ops;
|
||||
|
||||
dev->archdata.dma_coherent = coherent;
|
||||
__iommu_setup_dma_ops(dev, dma_base, size, iommu);
|
||||
|
|
|
@ -362,8 +362,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
|
|||
* for now. This will get more fine grained later once all memory
|
||||
* is mapped
|
||||
*/
|
||||
unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
|
||||
unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
|
||||
unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
|
||||
unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
|
||||
|
||||
if (end < kernel_x_start) {
|
||||
create_mapping(start, __phys_to_virt(start),
|
||||
|
@ -451,18 +451,18 @@ static void __init fixup_executable(void)
|
|||
{
|
||||
#ifdef CONFIG_DEBUG_RODATA
|
||||
/* now that we are actually fully mapped, make the start/end more fine grained */
|
||||
if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
|
||||
if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
|
||||
unsigned long aligned_start = round_down(__pa(_stext),
|
||||
SECTION_SIZE);
|
||||
SWAPPER_BLOCK_SIZE);
|
||||
|
||||
create_mapping(aligned_start, __phys_to_virt(aligned_start),
|
||||
__pa(_stext) - aligned_start,
|
||||
PAGE_KERNEL);
|
||||
}
|
||||
|
||||
if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
|
||||
if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
|
||||
unsigned long aligned_end = round_up(__pa(__init_end),
|
||||
SECTION_SIZE);
|
||||
SWAPPER_BLOCK_SIZE);
|
||||
create_mapping(__pa(__init_end), (unsigned long)__init_end,
|
||||
aligned_end - __pa(__init_end),
|
||||
PAGE_KERNEL);
|
||||
|
@ -475,7 +475,7 @@ void mark_rodata_ro(void)
|
|||
{
|
||||
create_mapping_late(__pa(_stext), (unsigned long)_stext,
|
||||
(unsigned long)_etext - (unsigned long)_stext,
|
||||
PAGE_KERNEL_EXEC | PTE_RDONLY);
|
||||
PAGE_KERNEL_ROX);
|
||||
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -50,7 +50,7 @@ static const int bpf2a64[] = {
|
|||
[BPF_REG_8] = A64_R(21),
|
||||
[BPF_REG_9] = A64_R(22),
|
||||
/* read-only frame pointer to access stack */
|
||||
[BPF_REG_FP] = A64_FP,
|
||||
[BPF_REG_FP] = A64_R(25),
|
||||
/* temporary register for internal BPF JIT */
|
||||
[TMP_REG_1] = A64_R(23),
|
||||
[TMP_REG_2] = A64_R(24),
|
||||
|
@ -155,18 +155,49 @@ static void build_prologue(struct jit_ctx *ctx)
|
|||
stack_size += 4; /* extra for skb_copy_bits buffer */
|
||||
stack_size = STACK_ALIGN(stack_size);
|
||||
|
||||
/*
|
||||
* BPF prog stack layout
|
||||
*
|
||||
* high
|
||||
* original A64_SP => 0:+-----+ BPF prologue
|
||||
* |FP/LR|
|
||||
* current A64_FP => -16:+-----+
|
||||
* | ... | callee saved registers
|
||||
* +-----+
|
||||
* | | x25/x26
|
||||
* BPF fp register => -80:+-----+
|
||||
* | |
|
||||
* | ... | BPF prog stack
|
||||
* | |
|
||||
* | |
|
||||
* current A64_SP => +-----+
|
||||
* | |
|
||||
* | ... | Function call stack
|
||||
* | |
|
||||
* +-----+
|
||||
* low
|
||||
*
|
||||
*/
|
||||
|
||||
/* Save FP and LR registers to stay align with ARM64 AAPCS */
|
||||
emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
|
||||
emit(A64_MOV(1, A64_FP, A64_SP), ctx);
|
||||
|
||||
/* Save callee-saved register */
|
||||
emit(A64_PUSH(r6, r7, A64_SP), ctx);
|
||||
emit(A64_PUSH(r8, r9, A64_SP), ctx);
|
||||
if (ctx->tmp_used)
|
||||
emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);
|
||||
|
||||
/* Set up BPF stack */
|
||||
emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);
|
||||
/* Save fp (x25) and x26. SP requires 16 bytes alignment */
|
||||
emit(A64_PUSH(fp, A64_R(26), A64_SP), ctx);
|
||||
|
||||
/* Set up frame pointer */
|
||||
/* Set up BPF prog stack base register (x25) */
|
||||
emit(A64_MOV(1, fp, A64_SP), ctx);
|
||||
|
||||
/* Set up function call stack */
|
||||
emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);
|
||||
|
||||
/* Clear registers A and X */
|
||||
emit_a64_mov_i64(ra, 0, ctx);
|
||||
emit_a64_mov_i64(rx, 0, ctx);
|
||||
|
@ -190,14 +221,17 @@ static void build_epilogue(struct jit_ctx *ctx)
|
|||
/* We're done with BPF stack */
|
||||
emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx);
|
||||
|
||||
/* Restore fs (x25) and x26 */
|
||||
emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
|
||||
|
||||
/* Restore callee-saved register */
|
||||
if (ctx->tmp_used)
|
||||
emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
|
||||
emit(A64_POP(r8, r9, A64_SP), ctx);
|
||||
emit(A64_POP(r6, r7, A64_SP), ctx);
|
||||
|
||||
/* Restore frame pointer */
|
||||
emit(A64_MOV(1, fp, A64_SP), ctx);
|
||||
/* Restore FP/LR registers */
|
||||
emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
|
||||
|
||||
/* Set return value */
|
||||
emit(A64_MOV(1, A64_R(0), r0), ctx);
|
||||
|
@ -758,7 +792,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
if (bpf_jit_enable > 1)
|
||||
bpf_jit_dump(prog->len, image_size, 2, ctx.image);
|
||||
|
||||
bpf_flush_icache(ctx.image, ctx.image + ctx.idx);
|
||||
bpf_flush_icache(header, ctx.image + ctx.idx);
|
||||
|
||||
set_memory_ro((unsigned long)header, header->pages);
|
||||
prog->bpf_func = (void *)ctx.image;
|
||||
|
|
|
@ -216,9 +216,9 @@ void __init plat_mem_setup(void)
|
|||
AR71XX_RESET_SIZE);
|
||||
ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE,
|
||||
AR71XX_PLL_SIZE);
|
||||
ath79_detect_sys_type();
|
||||
ath79_ddr_ctrl_init();
|
||||
|
||||
ath79_detect_sys_type();
|
||||
if (mips_machtype != ATH79_MACH_GENERIC_OF)
|
||||
detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
|
||||
|
||||
|
@ -281,3 +281,8 @@ MIPS_MACHINE(ATH79_MACH_GENERIC,
|
|||
"Generic",
|
||||
"Generic AR71XX/AR724X/AR913X based board",
|
||||
ath79_generic_init);
|
||||
|
||||
MIPS_MACHINE(ATH79_MACH_GENERIC_OF,
|
||||
"DTB",
|
||||
"Generic AR71XX/AR724X/AR913X based board (DT)",
|
||||
NULL);
|
||||
|
|
|
@ -107,7 +107,7 @@
|
|||
miscintc: interrupt-controller@18060010 {
|
||||
compatible = "qca,ar9132-misc-intc",
|
||||
"qca,ar7100-misc-intc";
|
||||
reg = <0x18060010 0x4>;
|
||||
reg = <0x18060010 0x8>;
|
||||
|
||||
interrupt-parent = <&cpuintc>;
|
||||
interrupts = <6>;
|
||||
|
|
|
@ -200,8 +200,9 @@ static inline int pfn_valid(unsigned long pfn)
|
|||
{
|
||||
/* avoid <linux/mm.h> include hell */
|
||||
extern unsigned long max_mapnr;
|
||||
unsigned long pfn_offset = ARCH_PFN_OFFSET;
|
||||
|
||||
return pfn >= ARCH_PFN_OFFSET && pfn < max_mapnr;
|
||||
return pfn >= pfn_offset && pfn < max_mapnr;
|
||||
}
|
||||
|
||||
#elif defined(CONFIG_SPARSEMEM)
|
||||
|
|
|
@ -108,6 +108,9 @@ config PGTABLE_LEVELS
|
|||
default 3 if 64BIT && PARISC_PAGE_SIZE_4KB
|
||||
default 2
|
||||
|
||||
config SYS_SUPPORTS_HUGETLBFS
|
||||
def_bool y if PA20
|
||||
|
||||
source "init/Kconfig"
|
||||
|
||||
source "kernel/Kconfig.freezer"
|
||||
|
|
|
@ -0,0 +1,85 @@
|
|||
#ifndef _ASM_PARISC64_HUGETLB_H
|
||||
#define _ASM_PARISC64_HUGETLB_H
|
||||
|
||||
#include <asm/page.h>
|
||||
#include <asm-generic/hugetlb.h>
|
||||
|
||||
|
||||
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep, pte_t pte);
|
||||
|
||||
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep);
|
||||
|
||||
static inline int is_hugepage_only_range(struct mm_struct *mm,
|
||||
unsigned long addr,
|
||||
unsigned long len) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the arch doesn't supply something else, assume that hugepage
|
||||
* size aligned regions are ok without further preparation.
|
||||
*/
|
||||
static inline int prepare_hugepage_range(struct file *file,
|
||||
unsigned long addr, unsigned long len)
|
||||
{
|
||||
if (len & ~HPAGE_MASK)
|
||||
return -EINVAL;
|
||||
if (addr & ~HPAGE_MASK)
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
|
||||
unsigned long addr, unsigned long end,
|
||||
unsigned long floor,
|
||||
unsigned long ceiling)
|
||||
{
|
||||
free_pgd_range(tlb, addr, end, floor, ceiling);
|
||||
}
|
||||
|
||||
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int huge_pte_none(pte_t pte)
|
||||
{
|
||||
return pte_none(pte);
|
||||
}
|
||||
|
||||
static inline pte_t huge_pte_wrprotect(pte_t pte)
|
||||
{
|
||||
return pte_wrprotect(pte);
|
||||
}
|
||||
|
||||
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
{
|
||||
pte_t old_pte = *ptep;
|
||||
set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
|
||||
}
|
||||
|
||||
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
|
||||
unsigned long addr, pte_t *ptep,
|
||||
pte_t pte, int dirty)
|
||||
{
|
||||
int changed = !pte_same(*ptep, pte);
|
||||
if (changed) {
|
||||
set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
|
||||
flush_tlb_page(vma, addr);
|
||||
}
|
||||
return changed;
|
||||
}
|
||||
|
||||
static inline pte_t huge_ptep_get(pte_t *ptep)
|
||||
{
|
||||
return *ptep;
|
||||
}
|
||||
|
||||
static inline void arch_clear_hugepage_flags(struct page *page)
|
||||
{
|
||||
}
|
||||
|
||||
#endif /* _ASM_PARISC64_HUGETLB_H */
|
|
@ -145,11 +145,22 @@ extern int npmem_ranges;
|
|||
#endif /* CONFIG_DISCONTIGMEM */
|
||||
|
||||
#ifdef CONFIG_HUGETLB_PAGE
|
||||
#define HPAGE_SHIFT 22 /* 4MB (is this fixed?) */
|
||||
#define HPAGE_SHIFT PMD_SHIFT /* fixed for transparent huge pages */
|
||||
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
|
||||
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
|
||||
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
|
||||
|
||||
#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
|
||||
# define REAL_HPAGE_SHIFT 20 /* 20 = 1MB */
|
||||
# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_1M
|
||||
#elif !defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
|
||||
# define REAL_HPAGE_SHIFT 22 /* 22 = 4MB */
|
||||
# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4M
|
||||
#else
|
||||
# define REAL_HPAGE_SHIFT 24 /* 24 = 16MB */
|
||||
# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16M
|
||||
#endif
|
||||
#endif /* CONFIG_HUGETLB_PAGE */
|
||||
|
||||
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
|
||||
|
||||
|
|
|
@ -35,7 +35,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
|
|||
PxD_FLAG_VALID |
|
||||
PxD_FLAG_ATTACHED)
|
||||
+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
|
||||
/* The first pmd entry also is marked with _PAGE_GATEWAY as
|
||||
/* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
|
||||
* a signal that this pmd may not be freed */
|
||||
__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
|
||||
#endif
|
||||
|
|
|
@ -83,7 +83,11 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
|
|||
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
|
||||
|
||||
/* This is the size of the initially mapped kernel memory */
|
||||
#define KERNEL_INITIAL_ORDER 24 /* 0 to 1<<24 = 16MB */
|
||||
#ifdef CONFIG_64BIT
|
||||
#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
|
||||
#else
|
||||
#define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */
|
||||
#endif
|
||||
#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
|
||||
|
||||
#if CONFIG_PGTABLE_LEVELS == 3
|
||||
|
@ -167,7 +171,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
|
|||
#define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */
|
||||
#define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */
|
||||
#define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */
|
||||
/* bit 21 was formerly the FLUSH bit but is now unused */
|
||||
#define _PAGE_HPAGE_BIT 21 /* (0x400) Software: Huge Page */
|
||||
#define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */
|
||||
|
||||
/* N.B. The bits are defined in terms of a 32 bit word above, so the */
|
||||
|
@ -194,6 +198,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
|
|||
#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
|
||||
#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
|
||||
#define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT))
|
||||
#define _PAGE_HUGE (1 << xlate_pabit(_PAGE_HPAGE_BIT))
|
||||
#define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT))
|
||||
|
||||
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
|
||||
|
@ -217,7 +222,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
|
|||
#define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT))
|
||||
#define PxD_FLAG_MASK (0xf)
|
||||
#define PxD_FLAG_SHIFT (4)
|
||||
#define PxD_VALUE_SHIFT (8) /* (PAGE_SHIFT-PxD_FLAG_SHIFT) */
|
||||
#define PxD_VALUE_SHIFT (PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
|
@ -362,6 +367,18 @@ static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; ret
|
|||
static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; }
|
||||
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
|
||||
|
||||
/*
|
||||
* Huge pte definitions.
|
||||
*/
|
||||
#ifdef CONFIG_HUGETLB_PAGE
|
||||
#define pte_huge(pte) (pte_val(pte) & _PAGE_HUGE)
|
||||
#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_HUGE))
|
||||
#else
|
||||
#define pte_huge(pte) (0)
|
||||
#define pte_mkhuge(pte) (pte)
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* Conversion functions: convert a page and protection to a page entry,
|
||||
* and a page entry and page directory to the page they refer to.
|
||||
|
@ -410,8 +427,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
|
|||
/* Find an entry in the second-level page table.. */
|
||||
|
||||
#if CONFIG_PGTABLE_LEVELS == 3
|
||||
#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
|
||||
#define pmd_offset(dir,address) \
|
||||
((pmd_t *) pgd_page_vaddr(*(dir)) + (((address)>>PMD_SHIFT) & (PTRS_PER_PMD-1)))
|
||||
((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address))
|
||||
#else
|
||||
#define pmd_offset(dir,addr) ((pmd_t *) dir)
|
||||
#endif
|
||||
|
|
|
@ -192,33 +192,6 @@ void show_trace(struct task_struct *task, unsigned long *stack);
|
|||
*/
|
||||
typedef unsigned int elf_caddr_t;
|
||||
|
||||
#define start_thread_som(regs, new_pc, new_sp) do { \
|
||||
unsigned long *sp = (unsigned long *)new_sp; \
|
||||
__u32 spaceid = (__u32)current->mm->context; \
|
||||
unsigned long pc = (unsigned long)new_pc; \
|
||||
/* offset pc for priv. level */ \
|
||||
pc |= 3; \
|
||||
\
|
||||
regs->iasq[0] = spaceid; \
|
||||
regs->iasq[1] = spaceid; \
|
||||
regs->iaoq[0] = pc; \
|
||||
regs->iaoq[1] = pc + 4; \
|
||||
regs->sr[2] = LINUX_GATEWAY_SPACE; \
|
||||
regs->sr[3] = 0xffff; \
|
||||
regs->sr[4] = spaceid; \
|
||||
regs->sr[5] = spaceid; \
|
||||
regs->sr[6] = spaceid; \
|
||||
regs->sr[7] = spaceid; \
|
||||
regs->gr[ 0] = USER_PSW; \
|
||||
regs->gr[30] = ((new_sp)+63)&~63; \
|
||||
regs->gr[31] = pc; \
|
||||
\
|
||||
get_user(regs->gr[26],&sp[0]); \
|
||||
get_user(regs->gr[25],&sp[-1]); \
|
||||
get_user(regs->gr[24],&sp[-2]); \
|
||||
get_user(regs->gr[23],&sp[-3]); \
|
||||
} while(0)
|
||||
|
||||
/* The ELF abi wants things done a "wee bit" differently than
|
||||
* som does. Supporting this behavior here avoids
|
||||
* having our own version of create_elf_tables.
|
||||
|
|
|
@ -49,16 +49,6 @@
|
|||
#define MADV_DONTFORK 10 /* don't inherit across fork */
|
||||
#define MADV_DOFORK 11 /* do inherit across fork */
|
||||
|
||||
/* The range 12-64 is reserved for page size specification. */
|
||||
#define MADV_4K_PAGES 12 /* Use 4K pages */
|
||||
#define MADV_16K_PAGES 14 /* Use 16K pages */
|
||||
#define MADV_64K_PAGES 16 /* Use 64K pages */
|
||||
#define MADV_256K_PAGES 18 /* Use 256K pages */
|
||||
#define MADV_1M_PAGES 20 /* Use 1 Megabyte pages */
|
||||
#define MADV_4M_PAGES 22 /* Use 4 Megabyte pages */
|
||||
#define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */
|
||||
#define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */
|
||||
|
||||
#define MADV_MERGEABLE 65 /* KSM may merge identical pages */
|
||||
#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */
|
||||
|
||||
|
|
|
@ -289,6 +289,14 @@ int main(void)
|
|||
DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
|
||||
DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
|
||||
DEFINE(ASM_PT_INITIAL, PT_INITIAL);
|
||||
BLANK();
|
||||
/* HUGEPAGE_SIZE is only used in vmlinux.lds.S to align kernel text
|
||||
* and kernel data on physical huge pages */
|
||||
#ifdef CONFIG_HUGETLB_PAGE
|
||||
DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT);
|
||||
#else
|
||||
DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
|
||||
#endif
|
||||
BLANK();
|
||||
DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
|
||||
DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
|
||||
|
|
|
@ -502,21 +502,38 @@
|
|||
STREG \pte,0(\ptp)
|
||||
.endm
|
||||
|
||||
/* We have (depending on the page size):
|
||||
* - 38 to 52-bit Physical Page Number
|
||||
* - 12 to 26-bit page offset
|
||||
*/
|
||||
/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
|
||||
* to a CPU TLB 4k PFN (4k => 12 bits to shift) */
|
||||
#define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
|
||||
#define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
|
||||
#define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
|
||||
|
||||
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
|
||||
.macro convert_for_tlb_insert20 pte
|
||||
.macro convert_for_tlb_insert20 pte,tmp
|
||||
#ifdef CONFIG_HUGETLB_PAGE
|
||||
copy \pte,\tmp
|
||||
extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
|
||||
64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
|
||||
|
||||
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
|
||||
(63-58)+PAGE_ADD_SHIFT,\pte
|
||||
extrd,u,*= \tmp,_PAGE_HPAGE_BIT+32,1,%r0
|
||||
depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
|
||||
(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
|
||||
#else /* Huge pages disabled */
|
||||
extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
|
||||
64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
|
||||
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
|
||||
(63-58)+PAGE_ADD_SHIFT,\pte
|
||||
#endif
|
||||
.endm
|
||||
|
||||
/* Convert the pte and prot to tlb insertion values. How
|
||||
* this happens is quite subtle, read below */
|
||||
.macro make_insert_tlb spc,pte,prot
|
||||
.macro make_insert_tlb spc,pte,prot,tmp
|
||||
space_to_prot \spc \prot /* create prot id from space */
|
||||
/* The following is the real subtlety. This is depositing
|
||||
* T <-> _PAGE_REFTRAP
|
||||
|
@ -553,7 +570,7 @@
|
|||
depdi 1,12,1,\prot
|
||||
|
||||
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
|
||||
convert_for_tlb_insert20 \pte
|
||||
convert_for_tlb_insert20 \pte \tmp
|
||||
.endm
|
||||
|
||||
/* Identical macro to make_insert_tlb above, except it
|
||||
|
@ -646,17 +663,12 @@
|
|||
|
||||
|
||||
/*
|
||||
* Align fault_vector_20 on 4K boundary so that both
|
||||
* fault_vector_11 and fault_vector_20 are on the
|
||||
* same page. This is only necessary as long as we
|
||||
* write protect the kernel text, which we may stop
|
||||
* doing once we use large page translations to cover
|
||||
* the static part of the kernel address space.
|
||||
* Fault_vectors are architecturally required to be aligned on a 2K
|
||||
* boundary
|
||||
*/
|
||||
|
||||
.text
|
||||
|
||||
.align 4096
|
||||
.align 2048
|
||||
|
||||
ENTRY(fault_vector_20)
|
||||
/* First vector is invalid (0) */
|
||||
|
@ -1147,7 +1159,7 @@ dtlb_miss_20w:
|
|||
tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w
|
||||
update_accessed ptp,pte,t0,t1
|
||||
|
||||
make_insert_tlb spc,pte,prot
|
||||
make_insert_tlb spc,pte,prot,t1
|
||||
|
||||
idtlbt pte,prot
|
||||
|
||||
|
@ -1173,7 +1185,7 @@ nadtlb_miss_20w:
|
|||
tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
|
||||
update_accessed ptp,pte,t0,t1
|
||||
|
||||
make_insert_tlb spc,pte,prot
|
||||
make_insert_tlb spc,pte,prot,t1
|
||||
|
||||
idtlbt pte,prot
|
||||
|
||||
|
@ -1267,7 +1279,7 @@ dtlb_miss_20:
|
|||
tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20
|
||||
update_accessed ptp,pte,t0,t1
|
||||
|
||||
make_insert_tlb spc,pte,prot
|
||||
make_insert_tlb spc,pte,prot,t1
|
||||
|
||||
f_extend pte,t1
|
||||
|
||||
|
@ -1295,7 +1307,7 @@ nadtlb_miss_20:
|
|||
tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20
|
||||
update_accessed ptp,pte,t0,t1
|
||||
|
||||
make_insert_tlb spc,pte,prot
|
||||
make_insert_tlb spc,pte,prot,t1
|
||||
|
||||
f_extend pte,t1
|
||||
|
||||
|
@ -1404,7 +1416,7 @@ itlb_miss_20w:
|
|||
tlb_lock spc,ptp,pte,t0,t1,itlb_fault
|
||||
update_accessed ptp,pte,t0,t1
|
||||
|
||||
make_insert_tlb spc,pte,prot
|
||||
make_insert_tlb spc,pte,prot,t1
|
||||
|
||||
iitlbt pte,prot
|
||||
|
||||
|
@ -1428,7 +1440,7 @@ naitlb_miss_20w:
|
|||
tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
|
||||
update_accessed ptp,pte,t0,t1
|
||||
|
||||
make_insert_tlb spc,pte,prot
|
||||
make_insert_tlb spc,pte,prot,t1
|
||||
|
||||
iitlbt pte,prot
|
||||
|
||||
|
@ -1514,7 +1526,7 @@ itlb_miss_20:
|
|||
tlb_lock spc,ptp,pte,t0,t1,itlb_fault
|
||||
update_accessed ptp,pte,t0,t1
|
||||
|
||||
make_insert_tlb spc,pte,prot
|
||||
make_insert_tlb spc,pte,prot,t1
|
||||
|
||||
f_extend pte,t1
|
||||
|
||||
|
@ -1534,7 +1546,7 @@ naitlb_miss_20:
|
|||
tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
|
||||
update_accessed ptp,pte,t0,t1
|
||||
|
||||
make_insert_tlb spc,pte,prot
|
||||
make_insert_tlb spc,pte,prot,t1
|
||||
|
||||
f_extend pte,t1
|
||||
|
||||
|
@ -1566,7 +1578,7 @@ dbit_trap_20w:
|
|||
tlb_lock spc,ptp,pte,t0,t1,dbit_fault
|
||||
update_dirty ptp,pte,t1
|
||||
|
||||
make_insert_tlb spc,pte,prot
|
||||
make_insert_tlb spc,pte,prot,t1
|
||||
|
||||
idtlbt pte,prot
|
||||
|
||||
|
@ -1610,7 +1622,7 @@ dbit_trap_20:
|
|||
tlb_lock spc,ptp,pte,t0,t1,dbit_fault
|
||||
update_dirty ptp,pte,t1
|
||||
|
||||
make_insert_tlb spc,pte,prot
|
||||
make_insert_tlb spc,pte,prot,t1
|
||||
|
||||
f_extend pte,t1
|
||||
|
||||
|
|
|
@ -69,7 +69,7 @@ $bss_loop:
|
|||
stw,ma %arg2,4(%r1)
|
||||
stw,ma %arg3,4(%r1)
|
||||
|
||||
/* Initialize startup VM. Just map first 8/16 MB of memory */
|
||||
/* Initialize startup VM. Just map first 16/32 MB of memory */
|
||||
load32 PA(swapper_pg_dir),%r4
|
||||
mtctl %r4,%cr24 /* Initialize kernel root pointer */
|
||||
mtctl %r4,%cr25 /* Initialize user root pointer */
|
||||
|
@ -107,7 +107,7 @@ $bss_loop:
|
|||
/* Now initialize the PTEs themselves. We use RWX for
|
||||
* everything ... it will get remapped correctly later */
|
||||
ldo 0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
|
||||
ldi (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
|
||||
load32 (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
|
||||
load32 PA(pg0),%r1
|
||||
|
||||
$pgt_fill_loop:
|
||||
|
|
|
@ -130,7 +130,16 @@ void __init setup_arch(char **cmdline_p)
|
|||
printk(KERN_INFO "The 32-bit Kernel has started...\n");
|
||||
#endif
|
||||
|
||||
printk(KERN_INFO "Default page size is %dKB.\n", (int)(PAGE_SIZE / 1024));
|
||||
printk(KERN_INFO "Kernel default page size is %d KB. Huge pages ",
|
||||
(int)(PAGE_SIZE / 1024));
|
||||
#ifdef CONFIG_HUGETLB_PAGE
|
||||
printk(KERN_CONT "enabled with %d MB physical and %d MB virtual size",
|
||||
1 << (REAL_HPAGE_SHIFT - 20), 1 << (HPAGE_SHIFT - 20));
|
||||
#else
|
||||
printk(KERN_CONT "disabled");
|
||||
#endif
|
||||
printk(KERN_CONT ".\n");
|
||||
|
||||
|
||||
pdc_console_init();
|
||||
|
||||
|
@ -377,6 +386,7 @@ arch_initcall(parisc_init);
|
|||
void start_parisc(void)
|
||||
{
|
||||
extern void start_kernel(void);
|
||||
extern void early_trap_init(void);
|
||||
|
||||
int ret, cpunum;
|
||||
struct pdc_coproc_cfg coproc_cfg;
|
||||
|
@ -397,6 +407,8 @@ void start_parisc(void)
|
|||
panic("must have an fpu to boot linux");
|
||||
}
|
||||
|
||||
early_trap_init(); /* initialize checksum of fault_vector */
|
||||
|
||||
start_kernel();
|
||||
// not reached
|
||||
}
|
||||
|
|
|
@ -369,7 +369,7 @@ tracesys_exit:
|
|||
ldo -16(%r30),%r29 /* Reference param save area */
|
||||
#endif
|
||||
ldo TASK_REGS(%r1),%r26
|
||||
bl do_syscall_trace_exit,%r2
|
||||
BL do_syscall_trace_exit,%r2
|
||||
STREG %r28,TASK_PT_GR28(%r1) /* save return value now */
|
||||
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
|
||||
LDREG TI_TASK(%r1), %r1
|
||||
|
@ -390,7 +390,7 @@ tracesys_sigexit:
|
|||
#ifdef CONFIG_64BIT
|
||||
ldo -16(%r30),%r29 /* Reference param save area */
|
||||
#endif
|
||||
bl do_syscall_trace_exit,%r2
|
||||
BL do_syscall_trace_exit,%r2
|
||||
ldo TASK_REGS(%r1),%r26
|
||||
|
||||
ldil L%syscall_exit_rfi,%r1
|
||||
|
|
|
@ -807,7 +807,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
|
|||
}
|
||||
|
||||
|
||||
int __init check_ivt(void *iva)
|
||||
void __init initialize_ivt(const void *iva)
|
||||
{
|
||||
extern u32 os_hpmc_size;
|
||||
extern const u32 os_hpmc[];
|
||||
|
@ -818,8 +818,8 @@ int __init check_ivt(void *iva)
|
|||
u32 *hpmcp;
|
||||
u32 length;
|
||||
|
||||
if (strcmp((char *)iva, "cows can fly"))
|
||||
return -1;
|
||||
if (strcmp((const char *)iva, "cows can fly"))
|
||||
panic("IVT invalid");
|
||||
|
||||
ivap = (u32 *)iva;
|
||||
|
||||
|
@ -839,28 +839,23 @@ int __init check_ivt(void *iva)
|
|||
check += ivap[i];
|
||||
|
||||
ivap[5] = -check;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* early_trap_init() is called before we set up kernel mappings and
|
||||
* write-protect the kernel */
|
||||
void __init early_trap_init(void)
|
||||
{
|
||||
extern const void fault_vector_20;
|
||||
|
||||
#ifndef CONFIG_64BIT
|
||||
extern const void fault_vector_11;
|
||||
extern const void fault_vector_11;
|
||||
initialize_ivt(&fault_vector_11);
|
||||
#endif
|
||||
extern const void fault_vector_20;
|
||||
|
||||
initialize_ivt(&fault_vector_20);
|
||||
}
|
||||
|
||||
void __init trap_init(void)
|
||||
{
|
||||
void *iva;
|
||||
|
||||
if (boot_cpu_data.cpu_type >= pcxu)
|
||||
iva = (void *) &fault_vector_20;
|
||||
else
|
||||
#ifdef CONFIG_64BIT
|
||||
panic("Can't boot 64-bit OS on PA1.1 processor!");
|
||||
#else
|
||||
iva = (void *) &fault_vector_11;
|
||||
#endif
|
||||
|
||||
if (check_ivt(iva))
|
||||
panic("IVT invalid");
|
||||
}
|
||||
|
|
|
@ -60,7 +60,7 @@ SECTIONS
|
|||
EXIT_DATA
|
||||
}
|
||||
PERCPU_SECTION(8)
|
||||
. = ALIGN(PAGE_SIZE);
|
||||
. = ALIGN(HUGEPAGE_SIZE);
|
||||
__init_end = .;
|
||||
/* freed after init ends here */
|
||||
|
||||
|
@ -116,7 +116,7 @@ SECTIONS
|
|||
* that we can properly leave these
|
||||
* as writable
|
||||
*/
|
||||
. = ALIGN(PAGE_SIZE);
|
||||
. = ALIGN(HUGEPAGE_SIZE);
|
||||
data_start = .;
|
||||
|
||||
EXCEPTION_TABLE(8)
|
||||
|
@ -135,8 +135,11 @@ SECTIONS
|
|||
_edata = .;
|
||||
|
||||
/* BSS */
|
||||
BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8)
|
||||
BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE)
|
||||
|
||||
/* bootmap is allocated in setup_bootmem() directly behind bss. */
|
||||
|
||||
. = ALIGN(HUGEPAGE_SIZE);
|
||||
_end = . ;
|
||||
|
||||
STABS_DEBUG
|
||||
|
|
|
@ -3,3 +3,4 @@
|
|||
#
|
||||
|
||||
obj-y := init.o fault.o ioremap.o
|
||||
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
|
||||
|
|
|
@ -0,0 +1,161 @@
|
|||
/*
|
||||
* PARISC64 Huge TLB page support.
|
||||
*
|
||||
* This parisc implementation is heavily based on the SPARC and x86 code.
|
||||
*
|
||||
* Copyright (C) 2015 Helge Deller <deller@gmx.de>
|
||||
*/
|
||||
|
||||
#include <linux/fs.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/hugetlb.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/sysctl.h>
|
||||
|
||||
#include <asm/mman.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
||||
|
||||
unsigned long
|
||||
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
|
||||
unsigned long len, unsigned long pgoff, unsigned long flags)
|
||||
{
|
||||
struct hstate *h = hstate_file(file);
|
||||
|
||||
if (len & ~huge_page_mask(h))
|
||||
return -EINVAL;
|
||||
if (len > TASK_SIZE)
|
||||
return -ENOMEM;
|
||||
|
||||
if (flags & MAP_FIXED)
|
||||
if (prepare_hugepage_range(file, addr, len))
|
||||
return -EINVAL;
|
||||
|
||||
if (addr)
|
||||
addr = ALIGN(addr, huge_page_size(h));
|
||||
|
||||
/* we need to make sure the colouring is OK */
|
||||
return arch_get_unmapped_area(file, addr, len, pgoff, flags);
|
||||
}
|
||||
|
||||
|
||||
pte_t *huge_pte_alloc(struct mm_struct *mm,
|
||||
unsigned long addr, unsigned long sz)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte = NULL;
|
||||
|
||||
/* We must align the address, because our caller will run
|
||||
* set_huge_pte_at() on whatever we return, which writes out
|
||||
* all of the sub-ptes for the hugepage range. So we have
|
||||
* to give it the first such sub-pte.
|
||||
*/
|
||||
addr &= HPAGE_MASK;
|
||||
|
||||
pgd = pgd_offset(mm, addr);
|
||||
pud = pud_alloc(mm, pgd, addr);
|
||||
if (pud) {
|
||||
pmd = pmd_alloc(mm, pud, addr);
|
||||
if (pmd)
|
||||
pte = pte_alloc_map(mm, NULL, pmd, addr);
|
||||
}
|
||||
return pte;
|
||||
}
|
||||
|
||||
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte = NULL;
|
||||
|
||||
addr &= HPAGE_MASK;
|
||||
|
||||
pgd = pgd_offset(mm, addr);
|
||||
if (!pgd_none(*pgd)) {
|
||||
pud = pud_offset(pgd, addr);
|
||||
if (!pud_none(*pud)) {
|
||||
pmd = pmd_offset(pud, addr);
|
||||
if (!pmd_none(*pmd))
|
||||
pte = pte_offset_map(pmd, addr);
|
||||
}
|
||||
}
|
||||
return pte;
|
||||
}
|
||||
|
||||
/* Purge data and instruction TLB entries. Must be called holding
|
||||
* the pa_tlb_lock. The TLB purge instructions are slow on SMP
|
||||
* machines since the purge must be broadcast to all CPUs.
|
||||
*/
|
||||
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
|
||||
{
|
||||
int i;
|
||||
|
||||
/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
|
||||
* Linux standard huge pages (e.g. 2 MB) */
|
||||
BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);
|
||||
|
||||
addr &= HPAGE_MASK;
|
||||
addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;
|
||||
|
||||
for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
|
||||
mtsp(mm->context, 1);
|
||||
pdtlb(addr);
|
||||
if (unlikely(split_tlb))
|
||||
pitlb(addr);
|
||||
addr += (1UL << REAL_HPAGE_SHIFT);
|
||||
}
|
||||
}
|
||||
|
||||
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep, pte_t entry)
|
||||
{
|
||||
unsigned long addr_start;
|
||||
int i;
|
||||
|
||||
addr &= HPAGE_MASK;
|
||||
addr_start = addr;
|
||||
|
||||
for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
|
||||
/* Directly write pte entry. We could call set_pte_at(mm, addr, ptep, entry)
|
||||
* instead, but then we get double locking on pa_tlb_lock. */
|
||||
*ptep = entry;
|
||||
ptep++;
|
||||
|
||||
/* Drop the PAGE_SIZE/non-huge tlb entry */
|
||||
purge_tlb_entries(mm, addr);
|
||||
|
||||
addr += PAGE_SIZE;
|
||||
pte_val(entry) += PAGE_SIZE;
|
||||
}
|
||||
|
||||
purge_tlb_entries_huge(mm, addr_start);
|
||||
}
|
||||
|
||||
|
||||
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep)
|
||||
{
|
||||
pte_t entry;
|
||||
|
||||
entry = *ptep;
|
||||
set_huge_pte_at(mm, addr, ptep, __pte(0));
|
||||
|
||||
return entry;
|
||||
}
|
||||
|
||||
int pmd_huge(pmd_t pmd)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pud_huge(pud_t pud)
|
||||
{
|
||||
return 0;
|
||||
}
|
|
@ -409,15 +409,11 @@ static void __init map_pages(unsigned long start_vaddr,
|
|||
unsigned long vaddr;
|
||||
unsigned long ro_start;
|
||||
unsigned long ro_end;
|
||||
unsigned long fv_addr;
|
||||
unsigned long gw_addr;
|
||||
extern const unsigned long fault_vector_20;
|
||||
extern void * const linux_gateway_page;
|
||||
unsigned long kernel_end;
|
||||
|
||||
ro_start = __pa((unsigned long)_text);
|
||||
ro_end = __pa((unsigned long)&data_start);
|
||||
fv_addr = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
|
||||
gw_addr = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
|
||||
kernel_end = __pa((unsigned long)&_end);
|
||||
|
||||
end_paddr = start_paddr + size;
|
||||
|
||||
|
@ -475,24 +471,25 @@ static void __init map_pages(unsigned long start_vaddr,
|
|||
for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
|
||||
pte_t pte;
|
||||
|
||||
/*
|
||||
* Map the fault vector writable so we can
|
||||
* write the HPMC checksum.
|
||||
*/
|
||||
if (force)
|
||||
pte = __mk_pte(address, pgprot);
|
||||
else if (parisc_text_address(vaddr) &&
|
||||
address != fv_addr)
|
||||
else if (parisc_text_address(vaddr)) {
|
||||
pte = __mk_pte(address, PAGE_KERNEL_EXEC);
|
||||
if (address >= ro_start && address < kernel_end)
|
||||
pte = pte_mkhuge(pte);
|
||||
}
|
||||
else
|
||||
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
|
||||
if (address >= ro_start && address < ro_end
|
||||
&& address != fv_addr
|
||||
&& address != gw_addr)
|
||||
pte = __mk_pte(address, PAGE_KERNEL_RO);
|
||||
else
|
||||
if (address >= ro_start && address < ro_end) {
|
||||
pte = __mk_pte(address, PAGE_KERNEL_EXEC);
|
||||
pte = pte_mkhuge(pte);
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
pte = __mk_pte(address, pgprot);
|
||||
if (address >= ro_start && address < kernel_end)
|
||||
pte = pte_mkhuge(pte);
|
||||
}
|
||||
|
||||
if (address >= end_paddr) {
|
||||
if (force)
|
||||
|
@ -536,15 +533,12 @@ void free_initmem(void)
|
|||
|
||||
/* force the kernel to see the new TLB entries */
|
||||
__flush_tlb_range(0, init_begin, init_end);
|
||||
/* Attempt to catch anyone trying to execute code here
|
||||
* by filling the page with BRK insns.
|
||||
*/
|
||||
memset((void *)init_begin, 0x00, init_end - init_begin);
|
||||
|
||||
/* finally dump all the instructions which were cached, since the
|
||||
* pages are no-longer executable */
|
||||
flush_icache_range(init_begin, init_end);
|
||||
|
||||
free_initmem_default(-1);
|
||||
free_initmem_default(POISON_FREE_INITMEM);
|
||||
|
||||
/* set up a new led state on systems shipped LED State panel */
|
||||
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
|
||||
|
@ -728,8 +722,8 @@ static void __init pagetable_init(void)
|
|||
unsigned long size;
|
||||
|
||||
start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
|
||||
end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
|
||||
size = pmem_ranges[range].pages << PAGE_SHIFT;
|
||||
end_paddr = start_paddr + size;
|
||||
|
||||
map_pages((unsigned long)__va(start_paddr), start_paddr,
|
||||
size, PAGE_KERNEL, 0);
|
||||
|
|
|
@ -382,3 +382,4 @@ COMPAT_SYS(shmat)
|
|||
SYSCALL(shmdt)
|
||||
SYSCALL(shmget)
|
||||
COMPAT_SYS(shmctl)
|
||||
SYSCALL(mlock2)
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
#include <uapi/asm/unistd.h>
|
||||
|
||||
|
||||
#define __NR_syscalls 378
|
||||
#define __NR_syscalls 379
|
||||
|
||||
#define __NR__exit __NR_exit
|
||||
#define NR_syscalls __NR_syscalls
|
||||
|
|
|
@ -400,5 +400,6 @@
|
|||
#define __NR_shmdt 375
|
||||
#define __NR_shmget 376
|
||||
#define __NR_shmctl 377
|
||||
#define __NR_mlock2 378
|
||||
|
||||
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
|
||||
|
|
|
@ -312,6 +312,7 @@ extern void css_schedule_reprobe(void);
|
|||
extern void reipl_ccw_dev(struct ccw_dev_id *id);
|
||||
|
||||
struct cio_iplinfo {
|
||||
u8 ssid;
|
||||
u16 devno;
|
||||
int is_qdio;
|
||||
};
|
||||
|
|
|
@ -206,9 +206,16 @@ do { \
|
|||
} while (0)
|
||||
#endif /* CONFIG_COMPAT */
|
||||
|
||||
extern unsigned long mmap_rnd_mask;
|
||||
|
||||
#define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask)
|
||||
/*
|
||||
* Cache aliasing on the latest machines calls for a mapping granularity
|
||||
* of 512KB. For 64-bit processes use a 512KB alignment and a randomization
|
||||
* of up to 1GB. For 31-bit processes the virtual address space is limited,
|
||||
* use no alignment and limit the randomization to 8MB.
|
||||
*/
|
||||
#define BRK_RND_MASK (is_32bit_task() ? 0x7ffUL : 0x3ffffUL)
|
||||
#define MMAP_RND_MASK (is_32bit_task() ? 0x7ffUL : 0x3ff80UL)
|
||||
#define MMAP_ALIGN_MASK (is_32bit_task() ? 0 : 0x7fUL)
|
||||
#define STACK_RND_MASK MMAP_RND_MASK
|
||||
|
||||
#define ARCH_DLINFO \
|
||||
do { \
|
||||
|
|
|
@ -64,7 +64,8 @@ struct ipl_block_fcp {
|
|||
|
||||
struct ipl_block_ccw {
|
||||
u8 reserved1[84];
|
||||
u8 reserved2[2];
|
||||
u16 reserved2 : 13;
|
||||
u8 ssid : 3;
|
||||
u16 devno;
|
||||
u8 vm_flags;
|
||||
u8 reserved3[3];
|
||||
|
|
|
@ -195,5 +195,7 @@ void zpci_dma_exit_device(struct zpci_dev *);
|
|||
void dma_free_seg_table(unsigned long);
|
||||
unsigned long *dma_alloc_cpu_table(void);
|
||||
void dma_cleanup_tables(unsigned long *);
|
||||
void dma_update_cpu_trans(unsigned long *, void *, dma_addr_t, int);
|
||||
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr);
|
||||
void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags);
|
||||
|
||||
#endif
|
||||
|
|
|
@@ -19,7 +19,7 @@
#define TRACE_INCLUDE_PATH asm/trace
#define TRACE_INCLUDE_FILE diag

TRACE_EVENT(diagnose,
TRACE_EVENT(s390_diagnose,
TP_PROTO(unsigned short nr),
TP_ARGS(nr),
TP_STRUCT__entry(

@@ -32,9 +32,9 @@ TRACE_EVENT(diagnose,
);

#ifdef CONFIG_TRACEPOINTS
void trace_diagnose_norecursion(int diag_nr);
void trace_s390_diagnose_norecursion(int diag_nr);
#else
static inline void trace_diagnose_norecursion(int diag_nr) { }
static inline void trace_s390_diagnose_norecursion(int diag_nr) { }
#endif

#endif /* _TRACE_S390_DIAG_H */
@ -192,14 +192,14 @@
|
|||
#define __NR_set_tid_address 252
|
||||
#define __NR_fadvise64 253
|
||||
#define __NR_timer_create 254
|
||||
#define __NR_timer_settime (__NR_timer_create+1)
|
||||
#define __NR_timer_gettime (__NR_timer_create+2)
|
||||
#define __NR_timer_getoverrun (__NR_timer_create+3)
|
||||
#define __NR_timer_delete (__NR_timer_create+4)
|
||||
#define __NR_clock_settime (__NR_timer_create+5)
|
||||
#define __NR_clock_gettime (__NR_timer_create+6)
|
||||
#define __NR_clock_getres (__NR_timer_create+7)
|
||||
#define __NR_clock_nanosleep (__NR_timer_create+8)
|
||||
#define __NR_timer_settime 255
|
||||
#define __NR_timer_gettime 256
|
||||
#define __NR_timer_getoverrun 257
|
||||
#define __NR_timer_delete 258
|
||||
#define __NR_clock_settime 259
|
||||
#define __NR_clock_gettime 260
|
||||
#define __NR_clock_getres 261
|
||||
#define __NR_clock_nanosleep 262
|
||||
/* Number 263 is reserved for vserver */
|
||||
#define __NR_statfs64 265
|
||||
#define __NR_fstatfs64 266
|
||||
|
@ -309,7 +309,8 @@
|
|||
#define __NR_recvfrom 371
|
||||
#define __NR_recvmsg 372
|
||||
#define __NR_shutdown 373
|
||||
#define NR_syscalls 374
|
||||
#define __NR_mlock2 374
|
||||
#define NR_syscalls 375
|
||||
|
||||
/*
|
||||
* There are some system calls that are not present on 64 bit, some
|
||||
|
|
|
@@ -176,3 +176,4 @@ COMPAT_SYSCALL_WRAP4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len);
COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags);
@@ -121,14 +121,14 @@ device_initcall(show_diag_stat_init);
void diag_stat_inc(enum diag_stat_enum nr)
{
this_cpu_inc(diag_stat.counter[nr]);
trace_diagnose(diag_map[nr].code);
trace_s390_diagnose(diag_map[nr].code);
}
EXPORT_SYMBOL(diag_stat_inc);

void diag_stat_inc_norecursion(enum diag_stat_enum nr)
{
this_cpu_inc(diag_stat.counter[nr]);
trace_diagnose_norecursion(diag_map[nr].code);
trace_s390_diagnose_norecursion(diag_map[nr].code);
}
EXPORT_SYMBOL(diag_stat_inc_norecursion);
@ -26,6 +26,7 @@
|
|||
#include <asm/asm-offsets.h>
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
#define ARCH_OFFSET 4
|
||||
|
||||
|
@ -59,19 +60,6 @@ __HEAD
|
|||
.long 0x020006e0,0x20000050
|
||||
|
||||
.org 0x200
|
||||
#
|
||||
# subroutine to set architecture mode
|
||||
#
|
||||
.Lsetmode:
|
||||
mvi __LC_AR_MODE_ID,1 # set esame flag
|
||||
slr %r0,%r0 # set cpuid to zero
|
||||
lhi %r1,2 # mode 2 = esame (dump)
|
||||
sigp %r1,%r0,0x12 # switch to esame mode
|
||||
bras %r13,0f
|
||||
.fill 16,4,0x0
|
||||
0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
|
||||
sam31 # switch to 31 bit addressing mode
|
||||
br %r14
|
||||
|
||||
#
|
||||
# subroutine to wait for end I/O
|
||||
|
@ -159,7 +147,14 @@ __HEAD
|
|||
.long 0x02200050,0x00000000
|
||||
|
||||
iplstart:
|
||||
bas %r14,.Lsetmode # Immediately switch to 64 bit mode
|
||||
mvi __LC_AR_MODE_ID,1 # set esame flag
|
||||
slr %r0,%r0 # set cpuid to zero
|
||||
lhi %r1,2 # mode 2 = esame (dump)
|
||||
sigp %r1,%r0,0x12 # switch to esame mode
|
||||
bras %r13,0f
|
||||
.fill 16,4,0x0
|
||||
0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
|
||||
sam31 # switch to 31 bit addressing mode
|
||||
lh %r1,0xb8 # test if subchannel number
|
||||
bct %r1,.Lnoload # is valid
|
||||
l %r1,0xb8 # load ipl subchannel number
|
||||
|
@ -268,71 +263,6 @@ iplstart:
|
|||
.align 8
|
||||
.Lcpuid:.fill 8,1,0
|
||||
|
||||
#
|
||||
# SALIPL loader support. Based on a patch by Rob van der Heij.
|
||||
# This entry point is called directly from the SALIPL loader and
|
||||
# doesn't need a builtin ipl record.
|
||||
#
|
||||
.org 0x800
|
||||
ENTRY(start)
|
||||
stm %r0,%r15,0x07b0 # store registers
|
||||
bas %r14,.Lsetmode # Immediately switch to 64 bit mode
|
||||
basr %r12,%r0
|
||||
.base:
|
||||
l %r11,.parm
|
||||
l %r8,.cmd # pointer to command buffer
|
||||
|
||||
ltr %r9,%r9 # do we have SALIPL parameters?
|
||||
bp .sk8x8
|
||||
|
||||
mvc 0(64,%r8),0x00b0 # copy saved registers
|
||||
xc 64(240-64,%r8),0(%r8) # remainder of buffer
|
||||
tr 0(64,%r8),.lowcase
|
||||
b .gotr
|
||||
.sk8x8:
|
||||
mvc 0(240,%r8),0(%r9) # copy iplparms into buffer
|
||||
.gotr:
|
||||
slr %r0,%r0
|
||||
st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11)
|
||||
st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11)
|
||||
j startup # continue with startup
|
||||
.cmd: .long COMMAND_LINE # address of command line buffer
|
||||
.parm: .long PARMAREA
|
||||
.lowcase:
|
||||
.byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07
|
||||
.byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f
|
||||
.byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17
|
||||
.byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f
|
||||
.byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27
|
||||
.byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f
|
||||
.byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37
|
||||
.byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f
|
||||
.byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47
|
||||
.byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f
|
||||
.byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57
|
||||
.byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f
|
||||
.byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67
|
||||
.byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f
|
||||
.byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77
|
||||
.byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f
|
||||
|
||||
.byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87
|
||||
.byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f
|
||||
.byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97
|
||||
.byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f
|
||||
.byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7
|
||||
.byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf
|
||||
.byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7
|
||||
.byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf
|
||||
.byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87 # .abcdefg
|
||||
.byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf # hi
|
||||
.byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 # .jklmnop
|
||||
.byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf # qr
|
||||
.byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 # ..stuvwx
|
||||
.byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef # yz
|
||||
.byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7
|
||||
.byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
|
||||
|
||||
#
|
||||
# startup-code at 0x10000, running in absolute addressing mode
|
||||
# this is called either by the ipl loader or directly by PSW restart
|
||||
|
@ -364,7 +294,7 @@ ENTRY(startup_kdump)
|
|||
bras %r13,0f
|
||||
.fill 16,4,0x0
|
||||
0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
|
||||
sam31 # switch to 31 bit addressing mode
|
||||
sam64 # switch to 64 bit addressing mode
|
||||
basr %r13,0 # get base
|
||||
.LPG0:
|
||||
xc 0x200(256),0x200 # partially clear lowcore
|
||||
|
@ -395,7 +325,7 @@ ENTRY(startup_kdump)
|
|||
jnz 1b
|
||||
j 4f
|
||||
2: l %r15,.Lstack-.LPG0(%r13)
|
||||
ahi %r15,-96
|
||||
ahi %r15,-STACK_FRAME_OVERHEAD
|
||||
la %r2,.Lals_string-.LPG0(%r13)
|
||||
l %r3,.Lsclp_print-.LPG0(%r13)
|
||||
basr %r14,%r3
|
||||
|
@ -429,8 +359,7 @@ ENTRY(startup_kdump)
|
|||
.long 1, 0xc0000000
|
||||
#endif
|
||||
4:
|
||||
/* Continue with 64bit startup code in head64.S */
|
||||
sam64 # switch to 64 bit mode
|
||||
/* Continue with startup code in head64.S */
|
||||
jg startup_continue
|
||||
|
||||
.align 8
|
||||
|
|
|
@@ -121,6 +121,7 @@ static char *dump_type_str(enum dump_type type)
* Must be in data section since the bss section
* is not cleared when these are accessed.
*/
static u8 ipl_ssid __attribute__((__section__(".data"))) = 0;
static u16 ipl_devno __attribute__((__section__(".data"))) = 0;
u32 ipl_flags __attribute__((__section__(".data"))) = 0;
@ -197,6 +198,33 @@ static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \
|
|||
return snprintf(page, PAGE_SIZE, _format, ##args); \
|
||||
}
|
||||
|
||||
#define IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk) \
|
||||
static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
|
||||
struct kobj_attribute *attr, \
|
||||
const char *buf, size_t len) \
|
||||
{ \
|
||||
unsigned long long ssid, devno; \
|
||||
\
|
||||
if (sscanf(buf, "0.%llx.%llx\n", &ssid, &devno) != 2) \
|
||||
return -EINVAL; \
|
||||
\
|
||||
if (ssid > __MAX_SSID || devno > __MAX_SUBCHANNEL) \
|
||||
return -EINVAL; \
|
||||
\
|
||||
_ipl_blk.ssid = ssid; \
|
||||
_ipl_blk.devno = devno; \
|
||||
return len; \
|
||||
}
|
||||
|
||||
#define DEFINE_IPL_CCW_ATTR_RW(_prefix, _name, _ipl_blk) \
|
||||
IPL_ATTR_SHOW_FN(_prefix, _name, "0.%x.%04x\n", \
|
||||
_ipl_blk.ssid, _ipl_blk.devno); \
|
||||
IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk); \
|
||||
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
|
||||
__ATTR(_name, (S_IRUGO | S_IWUSR), \
|
||||
sys_##_prefix##_##_name##_show, \
|
||||
sys_##_prefix##_##_name##_store) \
|
||||
|
||||
#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \
|
||||
IPL_ATTR_SHOW_FN(_prefix, _name, _format, _value) \
|
||||
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
|
||||
|
@ -395,7 +423,7 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj,
|
|||
|
||||
switch (ipl_info.type) {
|
||||
case IPL_TYPE_CCW:
|
||||
return sprintf(page, "0.0.%04x\n", ipl_devno);
|
||||
return sprintf(page, "0.%x.%04x\n", ipl_ssid, ipl_devno);
|
||||
case IPL_TYPE_FCP:
|
||||
case IPL_TYPE_FCP_DUMP:
|
||||
return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno);
|
||||
|
@ -687,21 +715,14 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
|
|||
struct bin_attribute *attr,
|
||||
char *buf, loff_t off, size_t count)
|
||||
{
|
||||
size_t scpdata_len = count;
|
||||
size_t padding;
|
||||
size_t scpdata_len;
|
||||
|
||||
if (off < 0)
|
||||
|
||||
if (off)
|
||||
return -EINVAL;
|
||||
|
||||
if (off >= DIAG308_SCPDATA_SIZE)
|
||||
return -ENOSPC;
|
||||
|
||||
if (count > DIAG308_SCPDATA_SIZE - off)
|
||||
count = DIAG308_SCPDATA_SIZE - off;
|
||||
|
||||
memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf + off, count);
|
||||
scpdata_len = off + count;
|
||||
|
||||
memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf, count);
|
||||
if (scpdata_len % 8) {
|
||||
padding = 8 - (scpdata_len % 8);
|
||||
memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len,
|
||||
|
@ -717,7 +738,7 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
|
|||
}
|
||||
static struct bin_attribute sys_reipl_fcp_scp_data_attr =
|
||||
__BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_fcp_scpdata_read,
|
||||
reipl_fcp_scpdata_write, PAGE_SIZE);
|
||||
reipl_fcp_scpdata_write, DIAG308_SCPDATA_SIZE);
|
||||
|
||||
static struct bin_attribute *reipl_fcp_bin_attrs[] = {
|
||||
&sys_reipl_fcp_scp_data_attr,
|
||||
|
@ -814,9 +835,7 @@ static struct attribute_group reipl_fcp_attr_group = {
|
|||
};
|
||||
|
||||
/* CCW reipl device attributes */
|
||||
|
||||
DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
|
||||
reipl_block_ccw->ipl_info.ccw.devno);
|
||||
DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ipl_info.ccw);
|
||||
|
||||
/* NSS wrapper */
|
||||
static ssize_t reipl_nss_loadparm_show(struct kobject *kobj,
|
||||
|
@@ -1056,8 +1075,8 @@ static void __reipl_run(void *unused)
switch (reipl_method) {
case REIPL_METHOD_CCW_CIO:
devid.ssid = reipl_block_ccw->ipl_info.ccw.ssid;
devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
devid.ssid = 0;
reipl_ccw_dev(&devid);
break;
case REIPL_METHOD_CCW_VM:
@ -1192,6 +1211,7 @@ static int __init reipl_ccw_init(void)
|
|||
|
||||
reipl_block_ccw_init(reipl_block_ccw);
|
||||
if (ipl_info.type == IPL_TYPE_CCW) {
|
||||
reipl_block_ccw->ipl_info.ccw.ssid = ipl_ssid;
|
||||
reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
|
||||
reipl_block_ccw_fill_parms(reipl_block_ccw);
|
||||
}
|
||||
|
@ -1336,9 +1356,7 @@ static struct attribute_group dump_fcp_attr_group = {
|
|||
};
|
||||
|
||||
/* CCW dump device attributes */
|
||||
|
||||
DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
|
||||
dump_block_ccw->ipl_info.ccw.devno);
|
||||
DEFINE_IPL_CCW_ATTR_RW(dump_ccw, device, dump_block_ccw->ipl_info.ccw);
|
||||
|
||||
static struct attribute *dump_ccw_attrs[] = {
|
||||
&sys_dump_ccw_device_attr.attr,
|
||||
|
@ -1418,8 +1436,8 @@ static void __dump_run(void *unused)
|
|||
|
||||
switch (dump_method) {
|
||||
case DUMP_METHOD_CCW_CIO:
|
||||
devid.ssid = dump_block_ccw->ipl_info.ccw.ssid;
|
||||
devid.devno = dump_block_ccw->ipl_info.ccw.devno;
|
||||
devid.ssid = 0;
|
||||
reipl_ccw_dev(&devid);
|
||||
break;
|
||||
case DUMP_METHOD_CCW_VM:
|
||||
|
@ -1939,14 +1957,14 @@ void __init setup_ipl(void)
|
|||
ipl_info.type = get_ipl_type();
|
||||
switch (ipl_info.type) {
|
||||
case IPL_TYPE_CCW:
|
||||
ipl_info.data.ccw.dev_id.ssid = ipl_ssid;
|
||||
ipl_info.data.ccw.dev_id.devno = ipl_devno;
|
||||
ipl_info.data.ccw.dev_id.ssid = 0;
|
||||
break;
|
||||
case IPL_TYPE_FCP:
|
||||
case IPL_TYPE_FCP_DUMP:
|
||||
ipl_info.data.fcp.dev_id.ssid = 0;
|
||||
ipl_info.data.fcp.dev_id.devno =
|
||||
IPL_PARMBLOCK_START->ipl_info.fcp.devno;
|
||||
ipl_info.data.fcp.dev_id.ssid = 0;
|
||||
ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
|
||||
ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
|
||||
break;
|
||||
|
@@ -1978,6 +1996,7 @@ void __init ipl_save_parameters(void)
if (cio_get_iplinfo(&iplinfo))
return;

ipl_ssid = iplinfo.ssid;
ipl_devno = iplinfo.devno;
ipl_flags |= IPL_DEVNO_VALID;
if (!iplinfo.is_qdio)
@@ -243,11 +243,7 @@ unsigned long arch_align_stack(unsigned long sp)
static inline unsigned long brk_rnd(void)
{
/* 8MB for 32bit, 1GB for 64bit */
if (is_32bit_task())
return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
else
return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
@@ -21,7 +21,7 @@ static void _sclp_wait_int(void)
__ctl_load(cr0_new, 0, 0);

psw_ext_save = S390_lowcore.external_new_psw;
psw_mask = __extract_psw() & (PSW_MASK_EA | PSW_MASK_BA);
psw_mask = __extract_psw();
S390_lowcore.external_new_psw.mask = psw_mask;
psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT;
S390_lowcore.ext_int_code = 0;
@@ -764,9 +764,6 @@ static int __init setup_hwcaps(void)
get_cpu_id(&cpu_id);
add_device_randomness(&cpu_id, sizeof(cpu_id));
switch (cpu_id.machine) {
case 0x9672:
strcpy(elf_platform, "g5");
break;
case 0x2064:
case 0x2066:
default: /* Use "z900" as default for 64 bit kernels. */
@@ -382,3 +382,4 @@ SYSCALL(sys_sendmsg,compat_sys_sendmsg) /* 370 */
SYSCALL(sys_recvfrom,compat_sys_recvfrom)
SYSCALL(sys_recvmsg,compat_sys_recvmsg)
SYSCALL(sys_shutdown,sys_shutdown)
SYSCALL(sys_mlock2,compat_sys_mlock2)
@@ -9,11 +9,11 @@
#define CREATE_TRACE_POINTS
#include <asm/trace/diag.h>

EXPORT_TRACEPOINT_SYMBOL(diagnose);
EXPORT_TRACEPOINT_SYMBOL(s390_diagnose);

static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth);

void trace_diagnose_norecursion(int diag_nr)
void trace_s390_diagnose_norecursion(int diag_nr)
{
unsigned long flags;
unsigned int *depth;

@@ -22,7 +22,7 @@ void trace_diagnose_norecursion(int diag_nr)
depth = this_cpu_ptr(&diagnose_trace_depth);
if (*depth == 0) {
(*depth)++;
trace_diagnose(diag_nr);
trace_s390_diagnose(diag_nr);
(*depth)--;
}
local_irq_restore(flags);
@ -48,37 +48,13 @@ EXPORT_SYMBOL(zero_page_mask);
|
|||
|
||||
static void __init setup_zero_pages(void)
|
||||
{
|
||||
struct cpuid cpu_id;
|
||||
unsigned int order;
|
||||
struct page *page;
|
||||
int i;
|
||||
|
||||
get_cpu_id(&cpu_id);
|
||||
switch (cpu_id.machine) {
|
||||
case 0x9672: /* g5 */
|
||||
case 0x2064: /* z900 */
|
||||
case 0x2066: /* z900 */
|
||||
case 0x2084: /* z990 */
|
||||
case 0x2086: /* z990 */
|
||||
case 0x2094: /* z9-109 */
|
||||
case 0x2096: /* z9-109 */
|
||||
order = 0;
|
||||
break;
|
||||
case 0x2097: /* z10 */
|
||||
case 0x2098: /* z10 */
|
||||
case 0x2817: /* z196 */
|
||||
case 0x2818: /* z196 */
|
||||
order = 2;
|
||||
break;
|
||||
case 0x2827: /* zEC12 */
|
||||
case 0x2828: /* zEC12 */
|
||||
order = 5;
|
||||
break;
|
||||
case 0x2964: /* z13 */
|
||||
default:
|
||||
order = 7;
|
||||
break;
|
||||
}
|
||||
/* Latest machines require a mapping granularity of 512KB */
|
||||
order = 7;
|
||||
|
||||
/* Limit number of empty zero pages for small memory sizes */
|
||||
while (order > 2 && (totalram_pages >> 10) < (1UL << order))
|
||||
order--;
|
||||
|
|
|
@ -31,9 +31,6 @@
|
|||
#include <linux/security.h>
|
||||
#include <asm/pgalloc.h>
|
||||
|
||||
unsigned long mmap_rnd_mask;
|
||||
static unsigned long mmap_align_mask;
|
||||
|
||||
static unsigned long stack_maxrandom_size(void)
|
||||
{
|
||||
if (!(current->flags & PF_RANDOMIZE))
|
||||
|
@ -62,10 +59,7 @@ static inline int mmap_is_legacy(void)
|
|||
|
||||
unsigned long arch_mmap_rnd(void)
|
||||
{
|
||||
if (is_32bit_task())
|
||||
return (get_random_int() & 0x7ff) << PAGE_SHIFT;
|
||||
else
|
||||
return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
|
||||
return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
|
||||
}
|
||||
|
||||
static unsigned long mmap_base_legacy(unsigned long rnd)
|
||||
|
@ -92,7 +86,6 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
|||
struct mm_struct *mm = current->mm;
|
||||
struct vm_area_struct *vma;
|
||||
struct vm_unmapped_area_info info;
|
||||
int do_color_align;
|
||||
|
||||
if (len > TASK_SIZE - mmap_min_addr)
|
||||
return -ENOMEM;
|
||||
|
@ -108,15 +101,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
|||
return addr;
|
||||
}
|
||||
|
||||
do_color_align = 0;
|
||||
if (filp || (flags & MAP_SHARED))
|
||||
do_color_align = !is_32bit_task();
|
||||
|
||||
info.flags = 0;
|
||||
info.length = len;
|
||||
info.low_limit = mm->mmap_base;
|
||||
info.high_limit = TASK_SIZE;
|
||||
info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
|
||||
if (filp || (flags & MAP_SHARED))
|
||||
info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
|
||||
else
|
||||
info.align_mask = 0;
|
||||
info.align_offset = pgoff << PAGE_SHIFT;
|
||||
return vm_unmapped_area(&info);
|
||||
}
|
||||
|
@ -130,7 +122,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|||
struct mm_struct *mm = current->mm;
|
||||
unsigned long addr = addr0;
|
||||
struct vm_unmapped_area_info info;
|
||||
int do_color_align;
|
||||
|
||||
/* requested length too big for entire address space */
|
||||
if (len > TASK_SIZE - mmap_min_addr)
|
||||
|
@ -148,15 +139,14 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|||
return addr;
|
||||
}
|
||||
|
||||
do_color_align = 0;
|
||||
if (filp || (flags & MAP_SHARED))
|
||||
do_color_align = !is_32bit_task();
|
||||
|
||||
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
|
||||
info.length = len;
|
||||
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
|
||||
info.high_limit = mm->mmap_base;
|
||||
info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
|
||||
if (filp || (flags & MAP_SHARED))
|
||||
info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
|
||||
else
|
||||
info.align_mask = 0;
|
||||
info.align_offset = pgoff << PAGE_SHIFT;
|
||||
addr = vm_unmapped_area(&info);
|
||||
|
||||
|
@ -254,35 +244,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
|
|||
mm->get_unmapped_area = s390_get_unmapped_area_topdown;
|
||||
}
|
||||
}
|
||||
|
||||
static int __init setup_mmap_rnd(void)
|
||||
{
|
||||
struct cpuid cpu_id;
|
||||
|
||||
get_cpu_id(&cpu_id);
|
||||
switch (cpu_id.machine) {
|
||||
case 0x9672:
|
||||
case 0x2064:
|
||||
case 0x2066:
|
||||
case 0x2084:
|
||||
case 0x2086:
|
||||
case 0x2094:
|
||||
case 0x2096:
|
||||
case 0x2097:
|
||||
case 0x2098:
|
||||
case 0x2817:
|
||||
case 0x2818:
|
||||
case 0x2827:
|
||||
case 0x2828:
|
||||
mmap_rnd_mask = 0x7ffUL;
|
||||
mmap_align_mask = 0UL;
|
||||
break;
|
||||
case 0x2964: /* z13 */
|
||||
default:
|
||||
mmap_rnd_mask = 0x3ff80UL;
|
||||
mmap_align_mask = 0x7fUL;
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
early_initcall(setup_mmap_rnd);
|
||||
|
|
|
@@ -33,7 +33,7 @@ unsigned long *dma_alloc_cpu_table(void)
return NULL;

for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
*entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
*entry = ZPCI_TABLE_INVALID;
return table;
}

@@ -51,7 +51,7 @@ static unsigned long *dma_alloc_page_table(void)
return NULL;

for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
*entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
*entry = ZPCI_PTE_INVALID;
return table;
}
@ -95,7 +95,7 @@ static unsigned long *dma_get_page_table_origin(unsigned long *entry)
|
|||
return pto;
|
||||
}
|
||||
|
||||
static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
|
||||
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
|
||||
{
|
||||
unsigned long *sto, *pto;
|
||||
unsigned int rtx, sx, px;
|
||||
|
@ -114,20 +114,10 @@ static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr
|
|||
return &pto[px];
|
||||
}
|
||||
|
||||
void dma_update_cpu_trans(unsigned long *dma_table, void *page_addr,
|
||||
dma_addr_t dma_addr, int flags)
|
||||
void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
|
||||
{
|
||||
unsigned long *entry;
|
||||
|
||||
entry = dma_walk_cpu_trans(dma_table, dma_addr);
|
||||
if (!entry) {
|
||||
WARN_ON_ONCE(1);
|
||||
return;
|
||||
}
|
||||
|
||||
if (flags & ZPCI_PTE_INVALID) {
|
||||
invalidate_pt_entry(entry);
|
||||
return;
|
||||
} else {
|
||||
set_pt_pfaa(entry, page_addr);
|
||||
validate_pt_entry(entry);
|
||||
|
@ -146,18 +136,25 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
|
|||
u8 *page_addr = (u8 *) (pa & PAGE_MASK);
|
||||
dma_addr_t start_dma_addr = dma_addr;
|
||||
unsigned long irq_flags;
|
||||
unsigned long *entry;
|
||||
int i, rc = 0;
|
||||
|
||||
if (!nr_pages)
|
||||
return -EINVAL;
|
||||
|
||||
spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
|
||||
if (!zdev->dma_table)
|
||||
if (!zdev->dma_table) {
|
||||
rc = -EINVAL;
|
||||
goto no_refresh;
|
||||
}
|
||||
|
||||
for (i = 0; i < nr_pages; i++) {
|
||||
dma_update_cpu_trans(zdev->dma_table, page_addr, dma_addr,
|
||||
flags);
|
||||
entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
|
||||
if (!entry) {
|
||||
rc = -ENOMEM;
|
||||
goto undo_cpu_trans;
|
||||
}
|
||||
dma_update_cpu_trans(entry, page_addr, flags);
|
||||
page_addr += PAGE_SIZE;
|
||||
dma_addr += PAGE_SIZE;
|
||||
}
|
||||
|
@ -176,6 +173,18 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
|
|||
|
||||
rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
|
||||
nr_pages * PAGE_SIZE);
|
||||
undo_cpu_trans:
|
||||
if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
|
||||
flags = ZPCI_PTE_INVALID;
|
||||
while (i-- > 0) {
|
||||
page_addr -= PAGE_SIZE;
|
||||
dma_addr -= PAGE_SIZE;
|
||||
entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
|
||||
if (!entry)
|
||||
break;
|
||||
dma_update_cpu_trans(entry, page_addr, flags);
|
||||
}
|
||||
}
|
||||
|
||||
no_refresh:
|
||||
spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
|
||||
|
@ -260,6 +269,16 @@ out:
|
|||
spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
|
||||
}
|
||||
|
||||
static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
|
||||
{
|
||||
struct {
|
||||
unsigned long rc;
|
||||
unsigned long addr;
|
||||
} __packed data = {rc, addr};
|
||||
|
||||
zpci_err_hex(&data, sizeof(data));
|
||||
}
|
||||
|
||||
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t size,
|
||||
enum dma_data_direction direction,
|
||||
|
@ -270,33 +289,40 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
|
|||
unsigned long pa = page_to_phys(page) + offset;
|
||||
int flags = ZPCI_PTE_VALID;
|
||||
dma_addr_t dma_addr;
|
||||
int ret;
|
||||
|
||||
/* This rounds up number of pages based on size and offset */
|
||||
nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
|
||||
iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
|
||||
if (iommu_page_index == -1)
|
||||
if (iommu_page_index == -1) {
|
||||
ret = -ENOSPC;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
/* Use rounded up size */
|
||||
size = nr_pages * PAGE_SIZE;
|
||||
|
||||
dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
|
||||
if (dma_addr + size > zdev->end_dma)
|
||||
if (dma_addr + size > zdev->end_dma) {
|
||||
ret = -ERANGE;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
|
||||
flags |= ZPCI_TABLE_PROTECTED;
|
||||
|
||||
if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
|
||||
atomic64_add(nr_pages, &zdev->mapped_pages);
|
||||
return dma_addr + (offset & ~PAGE_MASK);
|
||||
}
|
||||
ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
|
||||
if (ret)
|
||||
goto out_free;
|
||||
|
||||
atomic64_add(nr_pages, &zdev->mapped_pages);
|
||||
return dma_addr + (offset & ~PAGE_MASK);
|
||||
|
||||
out_free:
|
||||
dma_free_iommu(zdev, iommu_page_index, nr_pages);
|
||||
out_err:
|
||||
zpci_err("map error:\n");
|
||||
zpci_err_hex(&pa, sizeof(pa));
|
||||
zpci_err_dma(ret, pa);
|
||||
return DMA_ERROR_CODE;
|
||||
}
|
||||
|
||||
|
@ -306,14 +332,16 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
|
|||
{
|
||||
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
|
||||
unsigned long iommu_page_index;
|
||||
int npages;
|
||||
int npages, ret;
|
||||
|
||||
npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
|
||||
dma_addr = dma_addr & PAGE_MASK;
|
||||
if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
|
||||
ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID)) {
|
||||
ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
|
||||
ZPCI_PTE_INVALID);
|
||||
if (ret) {
|
||||
zpci_err("unmap error:\n");
|
||||
zpci_err_hex(&dma_addr, sizeof(dma_addr));
|
||||
zpci_err_dma(ret, dma_addr);
|
||||
return;
|
||||
}
|
||||
|
||||
atomic64_add(npages, &zdev->unmapped_pages);
|
||||
|
|
|
@@ -35,7 +35,7 @@
#define MSR_IA32_PERFCTR0 0x000000c1
#define MSR_IA32_PERFCTR1 0x000000c2
#define MSR_FSB_FREQ 0x000000cd
#define MSR_NHM_PLATFORM_INFO 0x000000ce
#define MSR_PLATFORM_INFO 0x000000ce

#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
#define NHM_C3_AUTO_DEMOTE (1UL << 25)

@@ -44,7 +44,6 @@
#define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)

#define MSR_PLATFORM_INFO 0x000000ce
#define MSR_MTRRcap 0x000000fe
#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e
@@ -273,10 +273,9 @@ __setup("nosmap", setup_disable_smap);
static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
unsigned long eflags;
unsigned long eflags = native_save_fl();

/* This should have been cleared long ago */
raw_local_save_flags(eflags);
BUG_ON(eflags & X86_EFLAGS_AC);

if (cpu_has(c, X86_FEATURE_SMAP)) {
@@ -385,20 +385,19 @@ fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
*/
void fpu__init_prepare_fx_sw_frame(void)
{
int fsave_header_size = sizeof(struct fregs_state);
int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;

if (config_enabled(CONFIG_X86_32))
size += fsave_header_size;

fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
fx_sw_reserved.extended_size = size;
fx_sw_reserved.xfeatures = xfeatures_mask;
fx_sw_reserved.xstate_size = xstate_size;

if (config_enabled(CONFIG_IA32_EMULATION)) {
if (config_enabled(CONFIG_IA32_EMULATION) ||
config_enabled(CONFIG_X86_32)) {
int fsave_header_size = sizeof(struct fregs_state);

fx_sw_reserved_ia32 = fx_sw_reserved;
fx_sw_reserved_ia32.extended_size += fsave_header_size;
fx_sw_reserved_ia32.extended_size = size + fsave_header_size;
}
}
@@ -694,7 +694,6 @@ void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature)
if (!boot_cpu_has(X86_FEATURE_XSAVE))
return NULL;

xsave = &current->thread.fpu.state.xsave;
/*
* We should not ever be requesting features that we
* have not enabled. Remember that pcntxt_mask is
@@ -278,6 +278,12 @@ trace:
/* save_mcount_regs fills in first two parameters */
save_mcount_regs

/*
* When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not
* set (see include/asm/ftrace.h and include/linux/ftrace.h). Only the
* ip and parent ip are used and the list function is called when
* function tracing is enabled.
*/
call *ftrace_trace_function

restore_mcount_regs
@ -585,6 +585,29 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
|
|||
return bt_addr;
|
||||
}
|
||||
|
||||
/*
|
||||
* We only want to do a 4-byte get_user() on 32-bit. Otherwise,
|
||||
* we might run off the end of the bounds table if we are on
|
||||
* a 64-bit kernel and try to get 8 bytes.
|
||||
*/
|
||||
int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
|
||||
long __user *bd_entry_ptr)
|
||||
{
|
||||
u32 bd_entry_32;
|
||||
int ret;
|
||||
|
||||
if (is_64bit_mm(mm))
|
||||
return get_user(*bd_entry_ret, bd_entry_ptr);
|
||||
|
||||
/*
|
||||
* Note that get_user() uses the type of the *pointer* to
|
||||
* establish the size of the get, not the destination.
|
||||
*/
|
||||
ret = get_user(bd_entry_32, (u32 __user *)bd_entry_ptr);
|
||||
*bd_entry_ret = bd_entry_32;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get the base of bounds tables pointed by specific bounds
|
||||
* directory entry.
|
||||
|
@ -605,7 +628,7 @@ static int get_bt_addr(struct mm_struct *mm,
|
|||
int need_write = 0;
|
||||
|
||||
pagefault_disable();
|
||||
ret = get_user(bd_entry, bd_entry_ptr);
|
||||
ret = get_user_bd_entry(mm, &bd_entry, bd_entry_ptr);
|
||||
pagefault_enable();
|
||||
if (!ret)
|
||||
break;
|
||||
|
@ -700,11 +723,23 @@ static unsigned long mpx_get_bt_entry_offset_bytes(struct mm_struct *mm,
|
|||
*/
|
||||
static inline unsigned long bd_entry_virt_space(struct mm_struct *mm)
|
||||
{
|
||||
unsigned long long virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
|
||||
if (is_64bit_mm(mm))
|
||||
return virt_space / MPX_BD_NR_ENTRIES_64;
|
||||
else
|
||||
return virt_space / MPX_BD_NR_ENTRIES_32;
|
||||
unsigned long long virt_space;
|
||||
unsigned long long GB = (1ULL << 30);
|
||||
|
||||
/*
|
||||
* This covers 32-bit emulation as well as 32-bit kernels
|
||||
* running on 64-bit harware.
|
||||
*/
|
||||
if (!is_64bit_mm(mm))
|
||||
return (4ULL * GB) / MPX_BD_NR_ENTRIES_32;
|
||||
|
||||
/*
|
||||
* 'x86_virt_bits' returns what the hardware is capable
|
||||
* of, and returns the full >32-bit adddress space when
|
||||
* running 32-bit kernels on 64-bit hardware.
|
||||
*/
|
||||
virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
|
||||
return virt_space / MPX_BD_NR_ENTRIES_64;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@@ -72,8 +72,6 @@ void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
unsigned int nr_bytes, unsigned int bidi_bytes);
int blk_queue_enter(struct request_queue *q, gfp_t gfp);
void blk_queue_exit(struct request_queue *q);
void blk_freeze_queue(struct request_queue *q);

static inline void blk_queue_enter_live(struct request_queue *q)
@@ -304,7 +304,7 @@ EXPORT_SYMBOL_GPL(acpi_get_psd_map);
static int register_pcc_channel(int pcc_subspace_idx)
{
struct acpi_pcct_subspace *cppc_ss;
struct acpi_pcct_hw_reduced *cppc_ss;
unsigned int len;

if (pcc_subspace_idx >= 0) {
@@ -1103,7 +1103,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
}

err_exit:
if (result && q)
if (result)
acpi_ec_delete_query(q);
if (data)
*data = value;
@ -14,7 +14,6 @@
|
|||
#include <linux/delay.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/dmi.h>
|
||||
#include "sbshc.h"
|
||||
|
||||
#define PREFIX "ACPI: "
|
||||
|
@ -30,6 +29,7 @@ struct acpi_smb_hc {
|
|||
u8 query_bit;
|
||||
smbus_alarm_callback callback;
|
||||
void *context;
|
||||
bool done;
|
||||
};
|
||||
|
||||
static int acpi_smbus_hc_add(struct acpi_device *device);
|
||||
|
@ -88,8 +88,6 @@ enum acpi_smb_offset {
|
|||
ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */
|
||||
};
|
||||
|
||||
static bool macbook;
|
||||
|
||||
static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data)
|
||||
{
|
||||
return ec_read(hc->offset + address, data);
|
||||
|
@ -100,27 +98,11 @@ static inline int smb_hc_write(struct acpi_smb_hc *hc, u8 address, u8 data)
|
|||
return ec_write(hc->offset + address, data);
|
||||
}
|
||||
|
||||
static inline int smb_check_done(struct acpi_smb_hc *hc)
|
||||
{
|
||||
union acpi_smb_status status = {.raw = 0};
|
||||
smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw);
|
||||
return status.fields.done && (status.fields.status == SMBUS_OK);
|
||||
}
|
||||
|
||||
static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout)
|
||||
{
|
||||
if (wait_event_timeout(hc->wait, smb_check_done(hc),
|
||||
msecs_to_jiffies(timeout)))
|
||||
if (wait_event_timeout(hc->wait, hc->done, msecs_to_jiffies(timeout)))
|
||||
return 0;
|
||||
/*
|
||||
* After the timeout happens, OS will try to check the status of SMbus.
|
||||
* If the status is what OS expected, it will be regarded as the bogus
|
||||
* timeout.
|
||||
*/
|
||||
if (smb_check_done(hc))
|
||||
return 0;
|
||||
else
|
||||
return -ETIME;
|
||||
return -ETIME;
|
||||
}
|
||||
|
||||
static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
|
||||
|
@ -135,8 +117,7 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
|
|||
}
|
||||
|
||||
mutex_lock(&hc->lock);
|
||||
if (macbook)
|
||||
udelay(5);
|
||||
hc->done = false;
|
||||
if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
|
||||
goto end;
|
||||
if (temp) {
|
||||
|
@ -235,8 +216,10 @@ static int smbus_alarm(void *context)
|
|||
if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw))
|
||||
return 0;
|
||||
/* Check if it is only a completion notify */
|
||||
if (status.fields.done)
|
||||
if (status.fields.done && status.fields.status == SMBUS_OK) {
|
||||
hc->done = true;
|
||||
wake_up(&hc->wait);
|
||||
}
|
||||
if (!status.fields.alarm)
|
||||
return 0;
|
||||
mutex_lock(&hc->lock);
|
||||
|
@ -262,29 +245,12 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
|
|||
acpi_handle handle, acpi_ec_query_func func,
|
||||
void *data);
|
||||
|
||||
static int macbook_dmi_match(const struct dmi_system_id *d)
|
||||
{
|
||||
pr_debug("Detected MacBook, enabling workaround\n");
|
||||
macbook = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct dmi_system_id acpi_smbus_dmi_table[] = {
|
||||
{ macbook_dmi_match, "Apple MacBook", {
|
||||
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") },
|
||||
},
|
||||
{ },
|
||||
};
|
||||
|
||||
static int acpi_smbus_hc_add(struct acpi_device *device)
|
||||
{
|
||||
int status;
|
||||
unsigned long long val;
|
||||
struct acpi_smb_hc *hc;
|
||||
|
||||
dmi_check_system(acpi_smbus_dmi_table);
|
||||
|
||||
if (!device)
|
||||
return -EINVAL;
|
||||
|
||||
|
|
|
@@ -68,6 +68,9 @@ int dev_pm_set_wake_irq(struct device *dev, int irq)
struct wake_irq *wirq;
int err;

if (irq < 0)
return -EINVAL;

wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
if (!wirq)
return -ENOMEM;

@@ -167,6 +170,9 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
struct wake_irq *wirq;
int err;

if (irq < 0)
return -EINVAL;

wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
if (!wirq)
return -ENOMEM;
@ -412,18 +412,42 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
|
|||
return rv;
|
||||
}
|
||||
|
||||
static void start_check_enables(struct smi_info *smi_info)
|
||||
static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
|
||||
{
|
||||
smi_info->last_timeout_jiffies = jiffies;
|
||||
mod_timer(&smi_info->si_timer, new_val);
|
||||
smi_info->timer_running = true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Start a new message and (re)start the timer and thread.
|
||||
*/
|
||||
static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
|
||||
unsigned int size)
|
||||
{
|
||||
smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
|
||||
|
||||
if (smi_info->thread)
|
||||
wake_up_process(smi_info->thread);
|
||||
|
||||
smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
|
||||
}
|
||||
|
||||
static void start_check_enables(struct smi_info *smi_info, bool start_timer)
|
||||
{
|
||||
unsigned char msg[2];
|
||||
|
||||
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
|
||||
msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
|
||||
|
||||
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
|
||||
if (start_timer)
|
||||
start_new_msg(smi_info, msg, 2);
|
||||
else
|
||||
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
|
||||
smi_info->si_state = SI_CHECKING_ENABLES;
|
||||
}
|
||||
|
||||
static void start_clear_flags(struct smi_info *smi_info)
|
||||
static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
|
||||
{
|
||||
unsigned char msg[3];
|
||||
|
||||
|
@ -432,7 +456,10 @@ static void start_clear_flags(struct smi_info *smi_info)
|
|||
msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
|
||||
msg[2] = WDT_PRE_TIMEOUT_INT;
|
||||
|
||||
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
|
||||
if (start_timer)
|
||||
start_new_msg(smi_info, msg, 3);
|
||||
else
|
||||
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
|
||||
smi_info->si_state = SI_CLEARING_FLAGS;
|
||||
}
|
||||
|
||||
|
@ -442,10 +469,8 @@ static void start_getting_msg_queue(struct smi_info *smi_info)
|
|||
smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
|
||||
smi_info->curr_msg->data_size = 2;
|
||||
|
||||
smi_info->handlers->start_transaction(
|
||||
smi_info->si_sm,
|
||||
smi_info->curr_msg->data,
|
||||
smi_info->curr_msg->data_size);
|
||||
start_new_msg(smi_info, smi_info->curr_msg->data,
|
||||
smi_info->curr_msg->data_size);
|
||||
smi_info->si_state = SI_GETTING_MESSAGES;
|
||||
}
|
||||
|
||||
|
@ -455,20 +480,11 @@ static void start_getting_events(struct smi_info *smi_info)
|
|||
smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
|
||||
smi_info->curr_msg->data_size = 2;
|
||||
|
||||
smi_info->handlers->start_transaction(
|
||||
smi_info->si_sm,
|
||||
smi_info->curr_msg->data,
|
||||
smi_info->curr_msg->data_size);
|
||||
start_new_msg(smi_info, smi_info->curr_msg->data,
|
||||
smi_info->curr_msg->data_size);
|
||||
smi_info->si_state = SI_GETTING_EVENTS;
|
||||
}
|
||||
|
||||
static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
|
||||
{
|
||||
smi_info->last_timeout_jiffies = jiffies;
|
||||
mod_timer(&smi_info->si_timer, new_val);
|
||||
smi_info->timer_running = true;
|
||||
}
|
||||
|
||||
/*
|
||||
* When we have a situtaion where we run out of memory and cannot
|
||||
* allocate messages, we just leave them in the BMC and run the system
|
||||
|
@ -478,11 +494,11 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
|
|||
* Note that we cannot just use disable_irq(), since the interrupt may
|
||||
* be shared.
|
||||
*/
|
||||
static inline bool disable_si_irq(struct smi_info *smi_info)
|
||||
static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
|
||||
{
|
||||
if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
|
||||
smi_info->interrupt_disabled = true;
|
||||
start_check_enables(smi_info);
|
||||
start_check_enables(smi_info, start_timer);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
@ -492,7 +508,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
|
|||
{
|
||||
if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
|
||||
smi_info->interrupt_disabled = false;
|
||||
start_check_enables(smi_info);
|
||||
start_check_enables(smi_info, true);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
@ -510,7 +526,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
|
|||
|
||||
msg = ipmi_alloc_smi_msg();
|
||||
if (!msg) {
|
||||
if (!disable_si_irq(smi_info))
|
||||
if (!disable_si_irq(smi_info, true))
|
||||
smi_info->si_state = SI_NORMAL;
|
||||
} else if (enable_si_irq(smi_info)) {
|
||||
ipmi_free_smi_msg(msg);
|
||||
|
@ -526,7 +542,7 @@ static void handle_flags(struct smi_info *smi_info)
|
|||
/* Watchdog pre-timeout */
|
||||
smi_inc_stat(smi_info, watchdog_pretimeouts);
|
||||
|
||||
start_clear_flags(smi_info);
|
||||
start_clear_flags(smi_info, true);
|
||||
smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
|
||||
if (smi_info->intf)
|
||||
ipmi_smi_watchdog_pretimeout(smi_info->intf);
|
||||
|
@ -879,8 +895,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
|
|||
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
|
||||
msg[1] = IPMI_GET_MSG_FLAGS_CMD;
|
||||
|
||||
smi_info->handlers->start_transaction(
|
||||
smi_info->si_sm, msg, 2);
|
||||
start_new_msg(smi_info, msg, 2);
|
||||
smi_info->si_state = SI_GETTING_FLAGS;
|
||||
goto restart;
|
||||
}
|
||||
|
@ -910,7 +925,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
|
|||
* disable and messages disabled.
|
||||
*/
|
||||
if (smi_info->supports_event_msg_buff || smi_info->irq) {
|
||||
start_check_enables(smi_info);
|
||||
start_check_enables(smi_info, true);
|
||||
} else {
|
||||
smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
|
||||
if (!smi_info->curr_msg)
|
||||
|
@ -920,6 +935,13 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
|
|||
}
|
||||
goto restart;
|
||||
}
|
||||
|
||||
if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
|
||||
/* Ok it if fails, the timer will just go off. */
|
||||
if (del_timer(&smi_info->si_timer))
|
||||
smi_info->timer_running = false;
|
||||
}
|
||||
|
||||
out:
|
||||
return si_sm_result;
|
||||
}
|
||||
|
@ -2560,6 +2582,7 @@ static const struct of_device_id of_ipmi_match[] = {
|
|||
.data = (void *)(unsigned long) SI_BT },
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, of_ipmi_match);
|
||||
|
||||
static int of_ipmi_probe(struct platform_device *dev)
|
||||
{
|
||||
|
@ -2646,7 +2669,6 @@ static int of_ipmi_probe(struct platform_device *dev)
|
|||
}
|
||||
return 0;
|
||||
}
|
||||
MODULE_DEVICE_TABLE(of, of_ipmi_match);
|
||||
#else
|
||||
#define of_ipmi_match NULL
|
||||
static int of_ipmi_probe(struct platform_device *dev)
|
||||
|
@ -3613,7 +3635,7 @@ static int try_smi_init(struct smi_info *new_smi)
|
|||
* Start clearing the flags before we enable interrupts or the
|
||||
* timer to avoid racing with the timer.
|
||||
*/
|
||||
start_clear_flags(new_smi);
|
||||
start_clear_flags(new_smi, false);
|
||||
|
||||
/*
|
||||
* IRQ is defined to be set when non-zero. req_events will
|
||||
|
@ -3908,7 +3930,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
|
|||
poll(to_clean);
|
||||
schedule_timeout_uninterruptible(1);
|
||||
}
|
||||
disable_si_irq(to_clean);
|
||||
disable_si_irq(to_clean, false);
|
||||
while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
|
||||
poll(to_clean);
|
||||
schedule_timeout_uninterruptible(1);
|
||||
|
|
|
@@ -153,6 +153,9 @@ static int timeout = 10;
/* The pre-timeout is disabled by default. */
static int pretimeout;

/* Default timeout to set on panic */
static int panic_wdt_timeout = 255;

/* Default action is to reset the board on a timeout. */
static unsigned char action_val = WDOG_TIMEOUT_RESET;

@@ -293,6 +296,9 @@ MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
module_param(pretimeout, timeout, 0644);
MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");

module_param(panic_wdt_timeout, timeout, 0644);
MODULE_PARM_DESC(timeout, "Timeout value on kernel panic in seconds.");

module_param_cb(action, &param_ops_str, action_op, 0644);
MODULE_PARM_DESC(action, "Timeout action. One of: "
"reset, none, power_cycle, power_off.");

@@ -1189,7 +1195,7 @@ static int wdog_panic_handler(struct notifier_block *this,
/* Make sure we do this only once. */
panic_event_handled = 1;

timeout = 255;
timeout = panic_wdt_timeout;
pretimeout = 0;
panic_halt_ipmi_set_timeout();
}
@@ -1,4 +1,5 @@
menu "Clock Source drivers"
depends on !ARCH_USES_GETTIMEOFFSET

config CLKSRC_OF
bool

@@ -203,7 +203,7 @@ static int __init ftm_clockevent_init(unsigned long freq, int irq)
int err;

ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN);
ftm_writel(~0UL, priv->clkevt_base + FTM_MOD);
ftm_writel(~0u, priv->clkevt_base + FTM_MOD);

ftm_reset_counter(priv->clkevt_base);

@@ -230,7 +230,7 @@ static int __init ftm_clocksource_init(unsigned long freq)
int err;

ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN);
ftm_writel(~0UL, priv->clksrc_base + FTM_MOD);
ftm_writel(~0u, priv->clksrc_base + FTM_MOD);

ftm_reset_counter(priv->clksrc_base);
@@ -84,6 +84,7 @@ config ARM_KIRKWOOD_CPUFREQ
config ARM_MT8173_CPUFREQ
bool "Mediatek MT8173 CPUFreq support"
depends on ARCH_MEDIATEK && REGULATOR
depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST)
depends on !CPU_THERMAL || THERMAL=y
select PM_OPP
help

@@ -5,7 +5,6 @@
config X86_INTEL_PSTATE
bool "Intel P state control"
depends on X86
select ACPI_PROCESSOR if ACPI
help
This driver provides a P state for Intel core processors.
The driver implements an internal governor and will become
@ -34,14 +34,10 @@
|
|||
#include <asm/cpu_device_id.h>
|
||||
#include <asm/cpufeature.h>
|
||||
|
||||
#if IS_ENABLED(CONFIG_ACPI)
|
||||
#include <acpi/processor.h>
|
||||
#endif
|
||||
|
||||
#define BYT_RATIOS 0x66a
|
||||
#define BYT_VIDS 0x66b
|
||||
#define BYT_TURBO_RATIOS 0x66c
|
||||
#define BYT_TURBO_VIDS 0x66d
|
||||
#define ATOM_RATIOS 0x66a
|
||||
#define ATOM_VIDS 0x66b
|
||||
#define ATOM_TURBO_RATIOS 0x66c
|
||||
#define ATOM_TURBO_VIDS 0x66d
|
||||
|
||||
#define FRAC_BITS 8
|
||||
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
|
||||
|
@ -117,9 +113,6 @@ struct cpudata {
|
|||
u64 prev_mperf;
|
||||
u64 prev_tsc;
|
||||
struct sample sample;
|
||||
#if IS_ENABLED(CONFIG_ACPI)
|
||||
struct acpi_processor_performance acpi_perf_data;
|
||||
#endif
|
||||
};
|
||||
|
||||
static struct cpudata **all_cpu_data;
|
||||
|
@ -150,7 +143,6 @@ struct cpu_defaults {
|
|||
static struct pstate_adjust_policy pid_params;
|
||||
static struct pstate_funcs pstate_funcs;
|
||||
static int hwp_active;
|
||||
static int no_acpi_perf;
|
||||
|
||||
struct perf_limits {
|
||||
int no_turbo;
|
||||
|
@ -163,8 +155,6 @@ struct perf_limits {
|
|||
int max_sysfs_pct;
|
||||
int min_policy_pct;
|
||||
int min_sysfs_pct;
|
||||
int max_perf_ctl;
|
||||
int min_perf_ctl;
|
||||
};
|
||||
|
||||
static struct perf_limits performance_limits = {
|
||||
|
@ -191,8 +181,6 @@ static struct perf_limits powersave_limits = {
|
|||
.max_sysfs_pct = 100,
|
||||
.min_policy_pct = 0,
|
||||
.min_sysfs_pct = 0,
|
||||
.max_perf_ctl = 0,
|
||||
.min_perf_ctl = 0,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
|
||||
|
@ -201,153 +189,6 @@ static struct perf_limits *limits = &performance_limits;
|
|||
static struct perf_limits *limits = &powersave_limits;
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_ACPI)
|
||||
/*
|
||||
* The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and
|
||||
* in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and
|
||||
* max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state
|
||||
* ratio, out of it only high 8 bits are used. For example 0x1700 is setting
|
||||
* target ratio 0x17. The _PSS control value stores in a format which can be
|
||||
* directly written to PERF_CTL MSR. But in intel_pstate driver this shift
|
||||
* occurs during write to PERF_CTL (E.g. for cores core_set_pstate()).
|
||||
* This function converts the _PSS control value to intel pstate driver format
|
||||
* for comparison and assignment.
|
||||
*/
|
||||
static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
|
||||
{
|
||||
return cpu->acpi_perf_data.states[index].control >> 8;
|
||||
}
|
||||
|
||||
static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
|
||||
{
|
||||
struct cpudata *cpu;
|
||||
int ret;
|
||||
bool turbo_absent = false;
|
||||
int max_pstate_index;
|
||||
int min_pss_ctl, max_pss_ctl, turbo_pss_ctl;
|
||||
int i;
|
||||
|
||||
cpu = all_cpu_data[policy->cpu];
|
||||
|
||||
pr_debug("intel_pstate: default limits 0x%x 0x%x 0x%x\n",
|
||||
cpu->pstate.min_pstate, cpu->pstate.max_pstate,
|
||||
cpu->pstate.turbo_pstate);
|
||||
|
||||
if (!cpu->acpi_perf_data.shared_cpu_map &&
|
||||
zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map,
|
||||
GFP_KERNEL, cpu_to_node(policy->cpu))) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
|
||||
policy->cpu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* Check if the control value in _PSS is for PERF_CTL MSR, which should
|
||||
* guarantee that the states returned by it map to the states in our
|
||||
* list directly.
|
||||
*/
|
||||
if (cpu->acpi_perf_data.control_register.space_id !=
|
||||
ACPI_ADR_SPACE_FIXED_HARDWARE)
|
||||
return -EIO;
|
||||
|
||||
pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu);
|
||||
for (i = 0; i < cpu->acpi_perf_data.state_count; i++)
|
||||
pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n",
|
||||
(i == cpu->acpi_perf_data.state ? '*' : ' '), i,
|
||||
(u32) cpu->acpi_perf_data.states[i].core_frequency,
|
||||
(u32) cpu->acpi_perf_data.states[i].power,
|
||||
(u32) cpu->acpi_perf_data.states[i].control);
|
||||
|
||||
/*
|
||||
* If there is only one entry _PSS, simply ignore _PSS and continue as
|
||||
* usual without taking _PSS into account
|
||||
*/
|
||||
if (cpu->acpi_perf_data.state_count < 2)
|
||||
return 0;
|
||||
|
||||
turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
|
||||
min_pss_ctl = convert_to_native_pstate_format(cpu,
|
||||
cpu->acpi_perf_data.state_count - 1);
|
||||
/* Check if there is a turbo freq in _PSS */
|
||||
if (turbo_pss_ctl <= cpu->pstate.max_pstate &&
|
||||
turbo_pss_ctl > cpu->pstate.min_pstate) {
|
||||
pr_debug("intel_pstate: no turbo range exists in _PSS\n");
|
||||
limits->no_turbo = limits->turbo_disabled = 1;
|
||||
cpu->pstate.turbo_pstate = cpu->pstate.max_pstate;
|
||||
turbo_absent = true;
|
||||
}
|
||||
|
||||
/* Check if the max non turbo p state < Intel P state max */
|
||||
max_pstate_index = turbo_absent ? 0 : 1;
|
||||
max_pss_ctl = convert_to_native_pstate_format(cpu, max_pstate_index);
|
||||
if (max_pss_ctl < cpu->pstate.max_pstate &&
|
||||
max_pss_ctl > cpu->pstate.min_pstate)
|
||||
cpu->pstate.max_pstate = max_pss_ctl;
|
||||
|
||||
/* check If min perf > Intel P State min */
|
||||
if (min_pss_ctl > cpu->pstate.min_pstate &&
|
||||
min_pss_ctl < cpu->pstate.max_pstate) {
|
||||
cpu->pstate.min_pstate = min_pss_ctl;
|
||||
policy->cpuinfo.min_freq = min_pss_ctl * cpu->pstate.scaling;
|
||||
}
|
||||
|
||||
if (turbo_absent)
|
||||
policy->cpuinfo.max_freq = cpu->pstate.max_pstate *
|
||||
cpu->pstate.scaling;
|
||||
else {
|
||||
policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate *
|
||||
cpu->pstate.scaling;
|
||||
/*
|
||||
* The _PSS table doesn't contain whole turbo frequency range.
|
||||
* This just contains +1 MHZ above the max non turbo frequency,
|
||||
* with control value corresponding to max turbo ratio. But
|
||||
* when cpufreq set policy is called, it will call with this
|
||||
* max frequency, which will cause a reduced performance as
|
||||
* this driver uses real max turbo frequency as the max
|
||||
* frequeny. So correct this frequency in _PSS table to
|
||||
* correct max turbo frequency based on the turbo ratio.
|
||||
* Also need to convert to MHz as _PSS freq is in MHz.
|
||||
*/
|
||||
cpu->acpi_perf_data.states[0].core_frequency =
|
||||
turbo_pss_ctl * 100;
|
||||
}
|
||||
|
||||
pr_debug("intel_pstate: Updated limits using _PSS 0x%x 0x%x 0x%x\n",
|
||||
cpu->pstate.min_pstate, cpu->pstate.max_pstate,
|
||||
cpu->pstate.turbo_pstate);
|
||||
pr_debug("intel_pstate: policy max_freq=%d Khz min_freq = %d KHz\n",
|
||||
policy->cpuinfo.max_freq, policy->cpuinfo.min_freq);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
|
||||
{
|
||||
struct cpudata *cpu;
|
||||
|
||||
if (!no_acpi_perf)
|
||||
return 0;
|
||||
|
||||
cpu = all_cpu_data[policy->cpu];
|
||||
acpi_processor_unregister_performance(policy->cpu);
|
||||
return 0;
|
||||
}
|
||||
|
||||
#else
|
||||
static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
|
||||
int deadband, int integral) {
|
||||
pid->setpoint = setpoint;
|
||||
|
@@ -687,31 +528,31 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
    wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}

static int byt_get_min_pstate(void)
static int atom_get_min_pstate(void)
{
    u64 value;

    rdmsrl(BYT_RATIOS, value);
    rdmsrl(ATOM_RATIOS, value);
    return (value >> 8) & 0x7F;
}

static int byt_get_max_pstate(void)
static int atom_get_max_pstate(void)
{
    u64 value;

    rdmsrl(BYT_RATIOS, value);
    rdmsrl(ATOM_RATIOS, value);
    return (value >> 16) & 0x7F;
}

static int byt_get_turbo_pstate(void)
static int atom_get_turbo_pstate(void)
{
    u64 value;

    rdmsrl(BYT_TURBO_RATIOS, value);
    rdmsrl(ATOM_TURBO_RATIOS, value);
    return value & 0x7F;
}

static void byt_set_pstate(struct cpudata *cpudata, int pstate)
static void atom_set_pstate(struct cpudata *cpudata, int pstate)
{
    u64 val;
    int32_t vid_fp;

@@ -736,27 +577,42 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
    wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}

#define BYT_BCLK_FREQS 5
static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};

static int byt_get_scaling(void)
static int silvermont_get_scaling(void)
{
    u64 value;
    int i;
    /* Defined in Table 35-6 from SDM (Sept 2015) */
    static int silvermont_freq_table[] = {
        83300, 100000, 133300, 116700, 80000};

    rdmsrl(MSR_FSB_FREQ, value);
    i = value & 0x3;
    i = value & 0x7;
    WARN_ON(i > 4);

    BUG_ON(i > BYT_BCLK_FREQS);

    return byt_freq_table[i] * 100;
    return silvermont_freq_table[i];
}

static void byt_get_vid(struct cpudata *cpudata)
static int airmont_get_scaling(void)
{
    u64 value;
    int i;
    /* Defined in Table 35-10 from SDM (Sept 2015) */
    static int airmont_freq_table[] = {
        83300, 100000, 133300, 116700, 80000,
        93300, 90000, 88900, 87500};

    rdmsrl(MSR_FSB_FREQ, value);
    i = value & 0xF;
    WARN_ON(i > 8);

    return airmont_freq_table[i];
}

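The two get_scaling() helpers above read MSR_FSB_FREQ and use its low bits to index a table of bus-clock frequencies in kHz; the driver then multiplies a P-state ratio by that scaling value to obtain a frequency. A minimal standalone sketch of the same lookup follows, assuming the Silvermont table and mask shown in the hunk; effective_khz() and the use of plain stdio are illustrative additions, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Bus-clock candidates in kHz, mirroring the silvermont_freq_table above. */
static const int silvermont_bclk_khz[] = { 83300, 100000, 133300, 116700, 80000 };

/* Core frequency = P-state ratio * bus clock selected by the MSR value. */
static long effective_khz(uint64_t fsb_freq_msr, int pstate_ratio)
{
    unsigned int i = fsb_freq_msr & 0x7;    /* low bits select the bus clock */

    if (i >= sizeof(silvermont_bclk_khz) / sizeof(silvermont_bclk_khz[0]))
        return -1;                          /* reserved encoding */
    return (long)pstate_ratio * silvermont_bclk_khz[i];
}

int main(void)
{
    /* e.g. encoding 1 selects a 100 MHz bus; ratio 20 gives 2.0 GHz */
    printf("%ld kHz\n", effective_khz(0x1, 20));
    return 0;
}
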
static void atom_get_vid(struct cpudata *cpudata)
{
    u64 value;

    rdmsrl(BYT_VIDS, value);
    rdmsrl(ATOM_VIDS, value);
    cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
    cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
    cpudata->vid.ratio = div_fp(

@@ -764,7 +620,7 @@ static void byt_get_vid(struct cpudata *cpudata)
        int_tofp(cpudata->pstate.max_pstate -
             cpudata->pstate.min_pstate));

    rdmsrl(BYT_TURBO_VIDS, value);
    rdmsrl(ATOM_TURBO_VIDS, value);
    cpudata->vid.turbo = value & 0x7f;
}

@@ -885,7 +741,7 @@ static struct cpu_defaults core_params = {
    },
};

static struct cpu_defaults byt_params = {
static struct cpu_defaults silvermont_params = {
    .pid_policy = {
        .sample_rate_ms = 10,
        .deadband = 0,

@@ -895,13 +751,33 @@ static struct cpu_defaults byt_params = {
        .i_gain_pct = 4,
    },
    .funcs = {
        .get_max = byt_get_max_pstate,
        .get_max_physical = byt_get_max_pstate,
        .get_min = byt_get_min_pstate,
        .get_turbo = byt_get_turbo_pstate,
        .set = byt_set_pstate,
        .get_scaling = byt_get_scaling,
        .get_vid = byt_get_vid,
        .get_max = atom_get_max_pstate,
        .get_max_physical = atom_get_max_pstate,
        .get_min = atom_get_min_pstate,
        .get_turbo = atom_get_turbo_pstate,
        .set = atom_set_pstate,
        .get_scaling = silvermont_get_scaling,
        .get_vid = atom_get_vid,
    },
};

static struct cpu_defaults airmont_params = {
    .pid_policy = {
        .sample_rate_ms = 10,
        .deadband = 0,
        .setpoint = 60,
        .p_gain_pct = 14,
        .d_gain_pct = 0,
        .i_gain_pct = 4,
    },
    .funcs = {
        .get_max = atom_get_max_pstate,
        .get_max_physical = atom_get_max_pstate,
        .get_min = atom_get_min_pstate,
        .get_turbo = atom_get_turbo_pstate,
        .set = atom_set_pstate,
        .get_scaling = airmont_get_scaling,
        .get_vid = atom_get_vid,
    },
};

||||
|
@ -938,23 +814,12 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
|
|||
* policy, or by cpu specific default values determined through
|
||||
* experimentation.
|
||||
*/
|
||||
if (limits->max_perf_ctl && limits->max_sysfs_pct >=
|
||||
limits->max_policy_pct) {
|
||||
*max = limits->max_perf_ctl;
|
||||
} else {
|
||||
max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf),
|
||||
limits->max_perf));
|
||||
*max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate,
|
||||
cpu->pstate.turbo_pstate);
|
||||
}
|
||||
max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf));
|
||||
*max = clamp_t(int, max_perf_adj,
|
||||
cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
|
||||
|
||||
if (limits->min_perf_ctl) {
|
||||
*min = limits->min_perf_ctl;
|
||||
} else {
|
||||
min_perf = fp_toint(mul_fp(int_tofp(max_perf),
|
||||
limits->min_perf));
|
||||
*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
|
||||
}
|
||||
min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf));
|
||||
*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
|
||||
}
|
||||
|
||||
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
|
||||
|
@ -1153,7 +1018,7 @@ static void intel_pstate_timer_func(unsigned long __data)
|
|||
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
|
||||
ICPU(0x2a, core_params),
|
||||
ICPU(0x2d, core_params),
|
||||
ICPU(0x37, byt_params),
|
||||
ICPU(0x37, silvermont_params),
|
||||
ICPU(0x3a, core_params),
|
||||
ICPU(0x3c, core_params),
|
||||
ICPU(0x3d, core_params),
|
||||
|
@ -1162,7 +1027,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
|
|||
ICPU(0x45, core_params),
|
||||
ICPU(0x46, core_params),
|
||||
ICPU(0x47, core_params),
|
||||
ICPU(0x4c, byt_params),
|
||||
ICPU(0x4c, airmont_params),
|
||||
ICPU(0x4e, core_params),
|
||||
ICPU(0x4f, core_params),
|
||||
ICPU(0x5e, core_params),
|
||||
|
@ -1229,12 +1094,6 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
|
|||
|
||||
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
|
||||
{
|
||||
#if IS_ENABLED(CONFIG_ACPI)
|
||||
struct cpudata *cpu;
|
||||
int i;
|
||||
#endif
|
||||
pr_debug("intel_pstate: %s max %u policy->max %u\n", __func__,
|
||||
policy->cpuinfo.max_freq, policy->max);
|
||||
if (!policy->cpuinfo.max_freq)
|
||||
return -ENODEV;
|
||||
|
||||
|
@ -1270,23 +1129,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
|
|||
limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
|
||||
int_tofp(100));
|
||||
|
||||
#if IS_ENABLED(CONFIG_ACPI)
|
||||
cpu = all_cpu_data[policy->cpu];
|
||||
for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
|
||||
int control;
|
||||
|
||||
control = convert_to_native_pstate_format(cpu, i);
|
||||
if (control * cpu->pstate.scaling == policy->max)
|
||||
limits->max_perf_ctl = control;
|
||||
if (control * cpu->pstate.scaling == policy->min)
|
||||
limits->min_perf_ctl = control;
|
||||
}
|
||||
|
||||
pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n",
|
||||
policy->cpuinfo.max_freq, policy->max, limits->min_perf_ctl,
|
||||
limits->max_perf_ctl);
|
||||
#endif
|
||||
|
||||
if (hwp_active)
|
||||
intel_pstate_hwp_set();
|
||||
|
||||
|
@ -1341,30 +1183,18 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
|
|||
policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
|
||||
policy->cpuinfo.max_freq =
|
||||
cpu->pstate.turbo_pstate * cpu->pstate.scaling;
|
||||
if (!no_acpi_perf)
|
||||
intel_pstate_init_perf_limits(policy);
|
||||
/*
|
||||
* If there is no acpi perf data or error, we ignore and use Intel P
|
||||
* state calculated limits, So this is not fatal error.
|
||||
*/
|
||||
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
|
||||
cpumask_set_cpu(policy->cpu, policy->cpus);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
|
||||
{
|
||||
return intel_pstate_exit_perf_limits(policy);
|
||||
}
|
||||
|
||||
static struct cpufreq_driver intel_pstate_driver = {
|
||||
.flags = CPUFREQ_CONST_LOOPS,
|
||||
.verify = intel_pstate_verify_policy,
|
||||
.setpolicy = intel_pstate_set_policy,
|
||||
.get = intel_pstate_get,
|
||||
.init = intel_pstate_cpu_init,
|
||||
.exit = intel_pstate_cpu_exit,
|
||||
.stop_cpu = intel_pstate_stop_cpu,
|
||||
.name = "intel_pstate",
|
||||
};
|
||||
|
@ -1406,6 +1236,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
|
|||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_ACPI)
|
||||
#include <acpi/processor.h>
|
||||
|
||||
static bool intel_pstate_no_acpi_pss(void)
|
||||
{
|
||||
|
@@ -1601,9 +1432,6 @@ static int __init intel_pstate_setup(char *str)
        force_load = 1;
    if (!strcmp(str, "hwp_only"))
        hwp_only = 1;
    if (!strcmp(str, "no_acpi"))
        no_acpi_perf = 1;

    return 0;
}
early_param("intel_pstate", intel_pstate_setup);
|
|
|
@ -198,7 +198,7 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
|
|||
goto out_err;
|
||||
}
|
||||
|
||||
params_head = section_head->params;
|
||||
params_head = section.params;
|
||||
|
||||
while (params_head) {
|
||||
if (copy_from_user(&key_val, (void __user *)params_head,
|
||||
|
|
|
@ -729,8 +729,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
|
|||
return NULL;
|
||||
|
||||
dev_info(chan2dev(chan),
|
||||
"%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n",
|
||||
__func__, xt->src_start, xt->dst_start, xt->numf,
|
||||
"%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
|
||||
__func__, &xt->src_start, &xt->dst_start, xt->numf,
|
||||
xt->frame_size, flags);
|
||||
|
||||
/*
|
||||
|
@ -824,8 +824,8 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
|
|||
u32 ctrla;
|
||||
u32 ctrlb;
|
||||
|
||||
dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
|
||||
dest, src, len, flags);
|
||||
dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
|
||||
&dest, &src, len, flags);
|
||||
|
||||
if (unlikely(!len)) {
|
||||
dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
|
||||
|
@ -938,8 +938,8 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
|
|||
void __iomem *vaddr;
|
||||
dma_addr_t paddr;
|
||||
|
||||
dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__,
|
||||
dest, value, len, flags);
|
||||
dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
|
||||
&dest, value, len, flags);
|
||||
|
||||
if (unlikely(!len)) {
|
||||
dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
|
||||
|
@ -1022,8 +1022,8 @@ atc_prep_dma_memset_sg(struct dma_chan *chan,
|
|||
dma_addr_t dest = sg_dma_address(sg);
|
||||
size_t len = sg_dma_len(sg);
|
||||
|
||||
dev_vdbg(chan2dev(chan), "%s: d0x%08x, l0x%zx\n",
|
||||
__func__, dest, len);
|
||||
dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
|
||||
__func__, &dest, len);
|
||||
|
||||
if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
|
||||
dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
|
||||
|
@ -1439,9 +1439,9 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
|
|||
unsigned int periods = buf_len / period_len;
|
||||
unsigned int i;
|
||||
|
||||
dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
|
||||
dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n",
|
||||
direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
|
||||
buf_addr,
|
||||
&buf_addr,
|
||||
periods, buf_len, period_len);
|
||||
|
||||
if (unlikely(!atslave || !buf_len || !period_len)) {
|
||||
|
|
|
@ -385,9 +385,9 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
|
|||
static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
|
||||
{
|
||||
dev_crit(chan2dev(&atchan->chan_common),
|
||||
" desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
|
||||
lli->saddr, lli->daddr,
|
||||
lli->ctrla, lli->ctrlb, lli->dscr);
|
||||
" desc: s%pad d%pad ctrl0x%x:0x%x l0x%pad\n",
|
||||
&lli->saddr, &lli->daddr,
|
||||
lli->ctrla, lli->ctrlb, &lli->dscr);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -920,8 +920,8 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
|
|||
desc->lld.mbr_cfg = chan_cc;
|
||||
|
||||
dev_dbg(chan2dev(chan),
|
||||
"%s: lld: mbr_sa=0x%08x, mbr_da=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
|
||||
__func__, desc->lld.mbr_sa, desc->lld.mbr_da,
|
||||
"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
|
||||
__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
|
||||
desc->lld.mbr_ubc, desc->lld.mbr_cfg);
|
||||
|
||||
/* Chain lld. */
|
||||
|
@ -953,8 +953,8 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
|
|||
if ((xt->numf > 1) && (xt->frame_size > 1))
|
||||
return NULL;
|
||||
|
||||
dev_dbg(chan2dev(chan), "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n",
|
||||
__func__, xt->src_start, xt->dst_start, xt->numf,
|
||||
dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
|
||||
__func__, &xt->src_start, &xt->dst_start, xt->numf,
|
||||
xt->frame_size, flags);
|
||||
|
||||
src_addr = xt->src_start;
|
||||
|
@ -1179,8 +1179,8 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
|
|||
desc->lld.mbr_cfg = chan_cc;
|
||||
|
||||
dev_dbg(chan2dev(chan),
|
||||
"%s: lld: mbr_da=0x%08x, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
|
||||
__func__, desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
|
||||
"%s: lld: mbr_da=%pad, mbr_ds=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
|
||||
__func__, &desc->lld.mbr_da, &desc->lld.mbr_ds, desc->lld.mbr_ubc,
|
||||
desc->lld.mbr_cfg);
|
||||
|
||||
return desc;
|
||||
|
@ -1193,8 +1193,8 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
|
|||
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
|
||||
struct at_xdmac_desc *desc;
|
||||
|
||||
dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n",
|
||||
__func__, dest, len, value, flags);
|
||||
dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
|
||||
__func__, &dest, len, value, flags);
|
||||
|
||||
if (unlikely(!len))
|
||||
return NULL;
|
||||
|
@ -1229,8 +1229,8 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
|
||||
/* Prepare descriptors. */
|
||||
for_each_sg(sgl, sg, sg_len, i) {
|
||||
dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n",
|
||||
__func__, sg_dma_address(sg), sg_dma_len(sg),
|
||||
dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
|
||||
__func__, &sg_dma_address(sg), sg_dma_len(sg),
|
||||
value, flags);
|
||||
desc = at_xdmac_memset_create_desc(chan, atchan,
|
||||
sg_dma_address(sg),
|
||||
|
|
|
@ -107,7 +107,7 @@
|
|||
|
||||
/* CCCFG register */
|
||||
#define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */
|
||||
#define GET_NUM_QDMACH(x) (x & 0x70 >> 4) /* bits 4-6 */
|
||||
#define GET_NUM_QDMACH(x) ((x & 0x70) >> 4) /* bits 4-6 */
|
||||
#define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */
|
||||
#define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */
|
||||
#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */
|
||||
|
@ -1565,7 +1565,7 @@ static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
|
|||
struct platform_device *tc_pdev;
|
||||
int ret;
|
||||
|
||||
if (!tc)
|
||||
if (!IS_ENABLED(CONFIG_OF) || !tc)
|
||||
return;
|
||||
|
||||
tc_pdev = of_find_device_by_node(tc->node);
|
||||
|
|
|
@ -1462,7 +1462,7 @@ err_firmware:
|
|||
|
||||
#define EVENT_REMAP_CELLS 3
|
||||
|
||||
static int __init sdma_event_remap(struct sdma_engine *sdma)
|
||||
static int sdma_event_remap(struct sdma_engine *sdma)
|
||||
{
|
||||
struct device_node *np = sdma->dev->of_node;
|
||||
struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
|
||||
|
|
|
@ -679,8 +679,11 @@ static int usb_dmac_runtime_suspend(struct device *dev)
|
|||
struct usb_dmac *dmac = dev_get_drvdata(dev);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < dmac->n_channels; ++i)
|
||||
for (i = 0; i < dmac->n_channels; ++i) {
|
||||
if (!dmac->channels[i].iomem)
|
||||
break;
|
||||
usb_dmac_chan_halt(&dmac->channels[i]);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -799,11 +802,10 @@ static int usb_dmac_probe(struct platform_device *pdev)
|
|||
ret = pm_runtime_get_sync(&pdev->dev);
|
||||
if (ret < 0) {
|
||||
dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
|
||||
return ret;
|
||||
goto error_pm;
|
||||
}
|
||||
|
||||
ret = usb_dmac_init(dmac);
|
||||
pm_runtime_put(&pdev->dev);
|
||||
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "failed to reset device\n");
|
||||
|
@ -851,10 +853,13 @@ static int usb_dmac_probe(struct platform_device *pdev)
|
|||
if (ret < 0)
|
||||
goto error;
|
||||
|
||||
pm_runtime_put(&pdev->dev);
|
||||
return 0;
|
||||
|
||||
error:
|
||||
of_dma_controller_free(pdev->dev.of_node);
|
||||
pm_runtime_put(&pdev->dev);
|
||||
error_pm:
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -389,7 +389,6 @@ struct amdgpu_clock {
|
|||
* Fences.
|
||||
*/
|
||||
struct amdgpu_fence_driver {
|
||||
struct amdgpu_ring *ring;
|
||||
uint64_t gpu_addr;
|
||||
volatile uint32_t *cpu_addr;
|
||||
/* sync_seq is protected by ring emission lock */
|
||||
|
@ -398,7 +397,7 @@ struct amdgpu_fence_driver {
|
|||
bool initialized;
|
||||
struct amdgpu_irq_src *irq_src;
|
||||
unsigned irq_type;
|
||||
struct delayed_work lockup_work;
|
||||
struct timer_list fallback_timer;
|
||||
wait_queue_head_t fence_queue;
|
||||
};
|
||||
|
||||
|
@ -917,8 +916,8 @@ struct amdgpu_ring {
|
|||
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
|
||||
|
||||
struct amdgpu_vm_pt {
|
||||
struct amdgpu_bo *bo;
|
||||
uint64_t addr;
|
||||
struct amdgpu_bo *bo;
|
||||
uint64_t addr;
|
||||
};
|
||||
|
||||
struct amdgpu_vm_id {
|
||||
|
@ -926,8 +925,6 @@ struct amdgpu_vm_id {
|
|||
uint64_t pd_gpu_addr;
|
||||
/* last flushed PD/PT update */
|
||||
struct fence *flushed_updates;
|
||||
/* last use of vmid */
|
||||
struct fence *last_id_use;
|
||||
};
|
||||
|
||||
struct amdgpu_vm {
|
||||
|
@ -957,24 +954,70 @@ struct amdgpu_vm {
|
|||
|
||||
/* for id and flush management per ring */
|
||||
struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS];
|
||||
/* for interval tree */
|
||||
spinlock_t it_lock;
|
||||
};
|
||||
|
||||
struct amdgpu_vm_manager {
|
||||
struct fence *active[AMDGPU_NUM_VM];
|
||||
uint32_t max_pfn;
|
||||
struct {
|
||||
struct fence *active;
|
||||
atomic_long_t owner;
|
||||
} ids[AMDGPU_NUM_VM];
|
||||
|
||||
uint32_t max_pfn;
|
||||
/* number of VMIDs */
|
||||
unsigned nvm;
|
||||
unsigned nvm;
|
||||
/* vram base address for page table entry */
|
||||
u64 vram_base_offset;
|
||||
u64 vram_base_offset;
|
||||
/* is vm enabled? */
|
||||
bool enabled;
|
||||
/* for hw to save the PD addr on suspend/resume */
|
||||
uint32_t saved_table_addr[AMDGPU_NUM_VM];
|
||||
bool enabled;
|
||||
/* vm pte handling */
|
||||
const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
|
||||
struct amdgpu_ring *vm_pte_funcs_ring;
|
||||
};
|
||||
|
||||
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
|
||||
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
|
||||
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
|
||||
struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm,
|
||||
struct list_head *head);
|
||||
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
|
||||
struct amdgpu_sync *sync);
|
||||
void amdgpu_vm_flush(struct amdgpu_ring *ring,
|
||||
struct amdgpu_vm *vm,
|
||||
struct fence *updates);
|
||||
void amdgpu_vm_fence(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm,
|
||||
struct fence *fence);
|
||||
uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
|
||||
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm);
|
||||
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm);
|
||||
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||
struct amdgpu_sync *sync);
|
||||
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
|
||||
struct amdgpu_bo_va *bo_va,
|
||||
struct ttm_mem_reg *mem);
|
||||
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
|
||||
struct amdgpu_bo *bo);
|
||||
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
|
||||
struct amdgpu_bo *bo);
|
||||
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm,
|
||||
struct amdgpu_bo *bo);
|
||||
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
|
||||
struct amdgpu_bo_va *bo_va,
|
||||
uint64_t addr, uint64_t offset,
|
||||
uint64_t size, uint32_t flags);
|
||||
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
|
||||
struct amdgpu_bo_va *bo_va,
|
||||
uint64_t addr);
|
||||
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
|
||||
struct amdgpu_bo_va *bo_va);
|
||||
int amdgpu_vm_free_job(struct amdgpu_job *job);
|
||||
|
||||
/*
|
||||
* context related structures
|
||||
*/
|
||||
|
@ -1211,6 +1254,7 @@ struct amdgpu_cs_parser {
|
|||
/* relocations */
|
||||
struct amdgpu_bo_list_entry *vm_bos;
|
||||
struct list_head validated;
|
||||
struct fence *fence;
|
||||
|
||||
struct amdgpu_ib *ibs;
|
||||
uint32_t num_ibs;
|
||||
|
@ -1226,7 +1270,7 @@ struct amdgpu_job {
|
|||
struct amdgpu_device *adev;
|
||||
struct amdgpu_ib *ibs;
|
||||
uint32_t num_ibs;
|
||||
struct mutex job_lock;
|
||||
void *owner;
|
||||
struct amdgpu_user_fence uf;
|
||||
int (*free_job)(struct amdgpu_job *job);
|
||||
};
|
||||
|
@ -2257,11 +2301,6 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
|
|||
bool amdgpu_card_posted(struct amdgpu_device *adev);
|
||||
void amdgpu_update_display_priority(struct amdgpu_device *adev);
|
||||
bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
|
||||
struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
|
||||
struct drm_file *filp,
|
||||
struct amdgpu_ctx *ctx,
|
||||
struct amdgpu_ib *ibs,
|
||||
uint32_t num_ibs);
|
||||
|
||||
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
|
||||
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
|
||||
|
@ -2318,49 +2357,6 @@ int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
|
|||
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
|
||||
unsigned long arg);
|
||||
|
||||
/*
|
||||
* vm
|
||||
*/
|
||||
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
|
||||
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
|
||||
struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm,
|
||||
struct list_head *head);
|
||||
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
|
||||
struct amdgpu_sync *sync);
|
||||
void amdgpu_vm_flush(struct amdgpu_ring *ring,
|
||||
struct amdgpu_vm *vm,
|
||||
struct fence *updates);
|
||||
void amdgpu_vm_fence(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm,
|
||||
struct amdgpu_fence *fence);
|
||||
uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
|
||||
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm);
|
||||
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm);
|
||||
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm, struct amdgpu_sync *sync);
|
||||
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
|
||||
struct amdgpu_bo_va *bo_va,
|
||||
struct ttm_mem_reg *mem);
|
||||
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
|
||||
struct amdgpu_bo *bo);
|
||||
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
|
||||
struct amdgpu_bo *bo);
|
||||
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm,
|
||||
struct amdgpu_bo *bo);
|
||||
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
|
||||
struct amdgpu_bo_va *bo_va,
|
||||
uint64_t addr, uint64_t offset,
|
||||
uint64_t size, uint32_t flags);
|
||||
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
|
||||
struct amdgpu_bo_va *bo_va,
|
||||
uint64_t addr);
|
||||
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
|
||||
struct amdgpu_bo_va *bo_va);
|
||||
int amdgpu_vm_free_job(struct amdgpu_job *job);
|
||||
/*
|
||||
* functions used by amdgpu_encoder.c
|
||||
*/
|
||||
|
|
|
@ -127,30 +127,6 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
|
|||
return 0;
|
||||
}
|
||||
|
||||
struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
|
||||
struct drm_file *filp,
|
||||
struct amdgpu_ctx *ctx,
|
||||
struct amdgpu_ib *ibs,
|
||||
uint32_t num_ibs)
|
||||
{
|
||||
struct amdgpu_cs_parser *parser;
|
||||
int i;
|
||||
|
||||
parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL);
|
||||
if (!parser)
|
||||
return NULL;
|
||||
|
||||
parser->adev = adev;
|
||||
parser->filp = filp;
|
||||
parser->ctx = ctx;
|
||||
parser->ibs = ibs;
|
||||
parser->num_ibs = num_ibs;
|
||||
for (i = 0; i < num_ibs; i++)
|
||||
ibs[i].ctx = ctx;
|
||||
|
||||
return parser;
|
||||
}
|
||||
|
||||
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
|
||||
{
|
||||
union drm_amdgpu_cs *cs = data;
|
||||
|
@ -463,8 +439,18 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
|
|||
return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
|
||||
}
|
||||
|
||||
static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
|
||||
/**
|
||||
* cs_parser_fini() - clean parser states
|
||||
* @parser: parser structure holding parsing context.
|
||||
* @error: error number
|
||||
*
|
||||
* If error is set than unvalidate buffer, otherwise just free memory
|
||||
* used by parsing context.
|
||||
**/
|
||||
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
|
||||
{
|
||||
unsigned i;
|
||||
|
||||
if (!error) {
|
||||
/* Sort the buffer list from the smallest to largest buffer,
|
||||
* which affects the order of buffers in the LRU list.
|
||||
|
@ -479,17 +465,14 @@ static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int err
|
|||
list_sort(NULL, &parser->validated, cmp_size_smaller_first);
|
||||
|
||||
ttm_eu_fence_buffer_objects(&parser->ticket,
|
||||
&parser->validated,
|
||||
&parser->ibs[parser->num_ibs-1].fence->base);
|
||||
&parser->validated,
|
||||
parser->fence);
|
||||
} else if (backoff) {
|
||||
ttm_eu_backoff_reservation(&parser->ticket,
|
||||
&parser->validated);
|
||||
}
|
||||
}
|
||||
fence_put(parser->fence);
|
||||
|
||||
static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
|
||||
{
|
||||
unsigned i;
|
||||
if (parser->ctx)
|
||||
amdgpu_ctx_put(parser->ctx);
|
||||
if (parser->bo_list)
|
||||
|
@ -499,31 +482,12 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
|
|||
for (i = 0; i < parser->nchunks; i++)
|
||||
drm_free_large(parser->chunks[i].kdata);
|
||||
kfree(parser->chunks);
|
||||
if (!amdgpu_enable_scheduler)
|
||||
{
|
||||
if (parser->ibs)
|
||||
for (i = 0; i < parser->num_ibs; i++)
|
||||
amdgpu_ib_free(parser->adev, &parser->ibs[i]);
|
||||
kfree(parser->ibs);
|
||||
if (parser->uf.bo)
|
||||
drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
|
||||
}
|
||||
|
||||
kfree(parser);
|
||||
}
|
||||
|
||||
/**
|
||||
* cs_parser_fini() - clean parser states
|
||||
* @parser: parser structure holding parsing context.
|
||||
* @error: error number
|
||||
*
|
||||
* If error is set than unvalidate buffer, otherwise just free memory
|
||||
* used by parsing context.
|
||||
**/
|
||||
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
|
||||
{
|
||||
amdgpu_cs_parser_fini_early(parser, error, backoff);
|
||||
amdgpu_cs_parser_fini_late(parser);
|
||||
if (parser->ibs)
|
||||
for (i = 0; i < parser->num_ibs; i++)
|
||||
amdgpu_ib_free(parser->adev, &parser->ibs[i]);
|
||||
kfree(parser->ibs);
|
||||
if (parser->uf.bo)
|
||||
drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
|
||||
}
|
||||
|
||||
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
|
||||
|
@ -610,15 +574,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
|
|||
}
|
||||
|
||||
r = amdgpu_bo_vm_update_pte(parser, vm);
|
||||
if (r) {
|
||||
goto out;
|
||||
}
|
||||
amdgpu_cs_sync_rings(parser);
|
||||
if (!amdgpu_enable_scheduler)
|
||||
r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
|
||||
parser->filp);
|
||||
if (!r)
|
||||
amdgpu_cs_sync_rings(parser);
|
||||
|
||||
out:
|
||||
return r;
|
||||
}
|
||||
|
||||
|
@ -828,36 +786,36 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
|
|||
union drm_amdgpu_cs *cs = data;
|
||||
struct amdgpu_fpriv *fpriv = filp->driver_priv;
|
||||
struct amdgpu_vm *vm = &fpriv->vm;
|
||||
struct amdgpu_cs_parser *parser;
|
||||
struct amdgpu_cs_parser parser = {};
|
||||
bool reserved_buffers = false;
|
||||
int i, r;
|
||||
|
||||
if (!adev->accel_working)
|
||||
return -EBUSY;
|
||||
|
||||
parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
|
||||
if (!parser)
|
||||
return -ENOMEM;
|
||||
r = amdgpu_cs_parser_init(parser, data);
|
||||
parser.adev = adev;
|
||||
parser.filp = filp;
|
||||
|
||||
r = amdgpu_cs_parser_init(&parser, data);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed to initialize parser !\n");
|
||||
amdgpu_cs_parser_fini(parser, r, false);
|
||||
amdgpu_cs_parser_fini(&parser, r, false);
|
||||
r = amdgpu_cs_handle_lockup(adev, r);
|
||||
return r;
|
||||
}
|
||||
mutex_lock(&vm->mutex);
|
||||
r = amdgpu_cs_parser_relocs(parser);
|
||||
r = amdgpu_cs_parser_relocs(&parser);
|
||||
if (r == -ENOMEM)
|
||||
DRM_ERROR("Not enough memory for command submission!\n");
|
||||
else if (r && r != -ERESTARTSYS)
|
||||
DRM_ERROR("Failed to process the buffer list %d!\n", r);
|
||||
else if (!r) {
|
||||
reserved_buffers = true;
|
||||
r = amdgpu_cs_ib_fill(adev, parser);
|
||||
r = amdgpu_cs_ib_fill(adev, &parser);
|
||||
}
|
||||
|
||||
if (!r) {
|
||||
r = amdgpu_cs_dependencies(adev, parser);
|
||||
r = amdgpu_cs_dependencies(adev, &parser);
|
||||
if (r)
|
||||
DRM_ERROR("Failed in the dependencies handling %d!\n", r);
|
||||
}
|
||||
|
@ -865,62 +823,71 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
|
|||
if (r)
|
||||
goto out;
|
||||
|
||||
for (i = 0; i < parser->num_ibs; i++)
|
||||
trace_amdgpu_cs(parser, i);
|
||||
for (i = 0; i < parser.num_ibs; i++)
|
||||
trace_amdgpu_cs(&parser, i);
|
||||
|
||||
r = amdgpu_cs_ib_vm_chunk(adev, parser);
|
||||
r = amdgpu_cs_ib_vm_chunk(adev, &parser);
|
||||
if (r)
|
||||
goto out;
|
||||
|
||||
if (amdgpu_enable_scheduler && parser->num_ibs) {
|
||||
if (amdgpu_enable_scheduler && parser.num_ibs) {
|
||||
struct amdgpu_ring * ring = parser.ibs->ring;
|
||||
struct amd_sched_fence *fence;
|
||||
struct amdgpu_job *job;
|
||||
struct amdgpu_ring * ring = parser->ibs->ring;
|
||||
|
||||
job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
|
||||
if (!job) {
|
||||
r = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
job->base.sched = &ring->sched;
|
||||
job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
|
||||
job->adev = parser->adev;
|
||||
job->ibs = parser->ibs;
|
||||
job->num_ibs = parser->num_ibs;
|
||||
job->base.owner = parser->filp;
|
||||
mutex_init(&job->job_lock);
|
||||
job->base.s_entity = &parser.ctx->rings[ring->idx].entity;
|
||||
job->adev = parser.adev;
|
||||
job->owner = parser.filp;
|
||||
job->free_job = amdgpu_cs_free_job;
|
||||
|
||||
job->ibs = parser.ibs;
|
||||
job->num_ibs = parser.num_ibs;
|
||||
parser.ibs = NULL;
|
||||
parser.num_ibs = 0;
|
||||
|
||||
if (job->ibs[job->num_ibs - 1].user) {
|
||||
memcpy(&job->uf, &parser->uf,
|
||||
sizeof(struct amdgpu_user_fence));
|
||||
job->uf = parser.uf;
|
||||
job->ibs[job->num_ibs - 1].user = &job->uf;
|
||||
parser.uf.bo = NULL;
|
||||
}
|
||||
|
||||
job->free_job = amdgpu_cs_free_job;
|
||||
mutex_lock(&job->job_lock);
|
||||
r = amd_sched_entity_push_job(&job->base);
|
||||
if (r) {
|
||||
mutex_unlock(&job->job_lock);
|
||||
fence = amd_sched_fence_create(job->base.s_entity,
|
||||
parser.filp);
|
||||
if (!fence) {
|
||||
r = -ENOMEM;
|
||||
amdgpu_cs_free_job(job);
|
||||
kfree(job);
|
||||
goto out;
|
||||
}
|
||||
cs->out.handle =
|
||||
amdgpu_ctx_add_fence(parser->ctx, ring,
|
||||
&job->base.s_fence->base);
|
||||
parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle;
|
||||
job->base.s_fence = fence;
|
||||
parser.fence = fence_get(&fence->base);
|
||||
|
||||
list_sort(NULL, &parser->validated, cmp_size_smaller_first);
|
||||
ttm_eu_fence_buffer_objects(&parser->ticket,
|
||||
&parser->validated,
|
||||
&job->base.s_fence->base);
|
||||
cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring,
|
||||
&fence->base);
|
||||
job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
|
||||
|
||||
mutex_unlock(&job->job_lock);
|
||||
amdgpu_cs_parser_fini_late(parser);
|
||||
mutex_unlock(&vm->mutex);
|
||||
return 0;
|
||||
trace_amdgpu_cs_ioctl(job);
|
||||
amd_sched_entity_push_job(&job->base);
|
||||
|
||||
} else {
|
||||
struct amdgpu_fence *fence;
|
||||
|
||||
r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs,
|
||||
parser.filp);
|
||||
fence = parser.ibs[parser.num_ibs - 1].fence;
|
||||
parser.fence = fence_get(&fence->base);
|
||||
cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence;
|
||||
}
|
||||
|
||||
cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
|
||||
out:
|
||||
amdgpu_cs_parser_fini(parser, r, reserved_buffers);
|
||||
amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
|
||||
mutex_unlock(&vm->mutex);
|
||||
r = amdgpu_cs_handle_lockup(adev, r);
|
||||
return r;
|
||||
|
|
|
@ -47,6 +47,9 @@
|
|||
* that the the relevant GPU caches have been flushed.
|
||||
*/
|
||||
|
||||
static struct kmem_cache *amdgpu_fence_slab;
|
||||
static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);
|
||||
|
||||
/**
|
||||
* amdgpu_fence_write - write a fence value
|
||||
*
|
||||
|
@ -84,24 +87,6 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
|
|||
return seq;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_fence_schedule_check - schedule lockup check
|
||||
*
|
||||
* @ring: pointer to struct amdgpu_ring
|
||||
*
|
||||
* Queues a delayed work item to check for lockups.
|
||||
*/
|
||||
static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
|
||||
{
|
||||
/*
|
||||
* Do not reset the timer here with mod_delayed_work,
|
||||
* this can livelock in an interaction with TTM delayed destroy.
|
||||
*/
|
||||
queue_delayed_work(system_power_efficient_wq,
|
||||
&ring->fence_drv.lockup_work,
|
||||
AMDGPU_FENCE_JIFFIES_TIMEOUT);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_fence_emit - emit a fence on the requested ring
|
||||
*
|
||||
|
@ -118,7 +103,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
|
|||
struct amdgpu_device *adev = ring->adev;
|
||||
|
||||
/* we are protected by the ring emission mutex */
|
||||
*fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
|
||||
*fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
|
||||
if ((*fence) == NULL) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
@ -132,10 +117,22 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
|
|||
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
|
||||
(*fence)->seq,
|
||||
AMDGPU_FENCE_FLAG_INT);
|
||||
trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_fence_schedule_fallback - schedule fallback check
|
||||
*
|
||||
* @ring: pointer to struct amdgpu_ring
|
||||
*
|
||||
* Start a timer as fallback to our interrupts.
|
||||
*/
|
||||
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
|
||||
{
|
||||
mod_timer(&ring->fence_drv.fallback_timer,
|
||||
jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_fence_activity - check for fence activity
|
||||
*
|
||||
|
@ -202,32 +199,11 @@ static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
|
|||
} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
|
||||
|
||||
if (seq < last_emitted)
|
||||
amdgpu_fence_schedule_check(ring);
|
||||
amdgpu_fence_schedule_fallback(ring);
|
||||
|
||||
return wake;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_fence_check_lockup - check for hardware lockup
|
||||
*
|
||||
* @work: delayed work item
|
||||
*
|
||||
* Checks for fence activity and if there is none probe
|
||||
* the hardware if a lockup occured.
|
||||
*/
|
||||
static void amdgpu_fence_check_lockup(struct work_struct *work)
|
||||
{
|
||||
struct amdgpu_fence_driver *fence_drv;
|
||||
struct amdgpu_ring *ring;
|
||||
|
||||
fence_drv = container_of(work, struct amdgpu_fence_driver,
|
||||
lockup_work.work);
|
||||
ring = fence_drv->ring;
|
||||
|
||||
if (amdgpu_fence_activity(ring))
|
||||
wake_up_all(&ring->fence_drv.fence_queue);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_fence_process - process a fence
|
||||
*
|
||||
|
@ -243,6 +219,20 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
|
|||
wake_up_all(&ring->fence_drv.fence_queue);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_fence_fallback - fallback for hardware interrupts
|
||||
*
|
||||
* @work: delayed work item
|
||||
*
|
||||
* Checks for fence activity.
|
||||
*/
|
||||
static void amdgpu_fence_fallback(unsigned long arg)
|
||||
{
|
||||
struct amdgpu_ring *ring = (void *)arg;
|
||||
|
||||
amdgpu_fence_process(ring);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
|
||||
*
|
||||
|
@ -290,7 +280,7 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
|
|||
if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
|
||||
return 0;
|
||||
|
||||
amdgpu_fence_schedule_check(ring);
|
||||
amdgpu_fence_schedule_fallback(ring);
|
||||
wait_event(ring->fence_drv.fence_queue, (
|
||||
(signaled = amdgpu_fence_seq_signaled(ring, seq))));
|
||||
|
||||
|
@ -491,9 +481,8 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
|
|||
atomic64_set(&ring->fence_drv.last_seq, 0);
|
||||
ring->fence_drv.initialized = false;
|
||||
|
||||
INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
|
||||
amdgpu_fence_check_lockup);
|
||||
ring->fence_drv.ring = ring;
|
||||
setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
|
||||
(unsigned long)ring);
|
||||
|
||||
init_waitqueue_head(&ring->fence_drv.fence_queue);
|
||||
|
||||
|
@ -536,6 +525,13 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
|
|||
*/
|
||||
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
|
||||
{
|
||||
if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
|
||||
amdgpu_fence_slab = kmem_cache_create(
|
||||
"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
|
||||
SLAB_HWCACHE_ALIGN, NULL);
|
||||
if (!amdgpu_fence_slab)
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (amdgpu_debugfs_fence_init(adev))
|
||||
dev_err(adev->dev, "fence debugfs file creation failed\n");
|
||||
|
||||
|
@ -554,9 +550,12 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
|
|||
{
|
||||
int i, r;
|
||||
|
||||
if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
|
||||
kmem_cache_destroy(amdgpu_fence_slab);
|
||||
mutex_lock(&adev->ring_lock);
|
||||
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
|
||||
struct amdgpu_ring *ring = adev->rings[i];
|
||||
|
||||
if (!ring || !ring->fence_drv.initialized)
|
||||
continue;
|
||||
r = amdgpu_fence_wait_empty(ring);
|
||||
|
@ -568,6 +567,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
|
|||
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
|
||||
ring->fence_drv.irq_type);
|
||||
amd_sched_fini(&ring->sched);
|
||||
del_timer_sync(&ring->fence_drv.fallback_timer);
|
||||
ring->fence_drv.initialized = false;
|
||||
}
|
||||
mutex_unlock(&adev->ring_lock);
|
||||
|
@ -751,18 +751,25 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
|
|||
fence->fence_wake.func = amdgpu_fence_check_signaled;
|
||||
__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
|
||||
fence_get(f);
|
||||
amdgpu_fence_schedule_check(ring);
|
||||
if (!timer_pending(&ring->fence_drv.fallback_timer))
|
||||
amdgpu_fence_schedule_fallback(ring);
|
||||
FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
|
||||
return true;
|
||||
}
|
||||
|
||||
static void amdgpu_fence_release(struct fence *f)
|
||||
{
|
||||
struct amdgpu_fence *fence = to_amdgpu_fence(f);
|
||||
kmem_cache_free(amdgpu_fence_slab, fence);
|
||||
}
|
||||
|
||||
const struct fence_ops amdgpu_fence_ops = {
|
||||
.get_driver_name = amdgpu_fence_get_driver_name,
|
||||
.get_timeline_name = amdgpu_fence_get_timeline_name,
|
||||
.enable_signaling = amdgpu_fence_enable_signaling,
|
||||
.signaled = amdgpu_fence_is_signaled,
|
||||
.wait = fence_default_wait,
|
||||
.release = NULL,
|
||||
.release = amdgpu_fence_release,
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -483,6 +483,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
|
|||
if (domain == AMDGPU_GEM_DOMAIN_CPU)
|
||||
goto error_unreserve;
|
||||
}
|
||||
r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
|
||||
if (r)
|
||||
goto error_unreserve;
|
||||
|
||||
r = amdgpu_vm_clear_freed(adev, bo_va->vm);
|
||||
if (r)
|
||||
|
@ -512,6 +515,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
|
|||
struct amdgpu_fpriv *fpriv = filp->driver_priv;
|
||||
struct amdgpu_bo *rbo;
|
||||
struct amdgpu_bo_va *bo_va;
|
||||
struct ttm_validate_buffer tv, tv_pd;
|
||||
struct ww_acquire_ctx ticket;
|
||||
struct list_head list, duplicates;
|
||||
uint32_t invalid_flags, va_flags = 0;
|
||||
int r = 0;
|
||||
|
||||
|
@ -549,7 +555,18 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
|
|||
return -ENOENT;
|
||||
mutex_lock(&fpriv->vm.mutex);
|
||||
rbo = gem_to_amdgpu_bo(gobj);
|
||||
r = amdgpu_bo_reserve(rbo, false);
|
||||
INIT_LIST_HEAD(&list);
|
||||
INIT_LIST_HEAD(&duplicates);
|
||||
tv.bo = &rbo->tbo;
|
||||
tv.shared = true;
|
||||
list_add(&tv.head, &list);
|
||||
|
||||
if (args->operation == AMDGPU_VA_OP_MAP) {
|
||||
tv_pd.bo = &fpriv->vm.page_directory->tbo;
|
||||
tv_pd.shared = true;
|
||||
list_add(&tv_pd.head, &list);
|
||||
}
|
||||
r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
|
||||
if (r) {
|
||||
mutex_unlock(&fpriv->vm.mutex);
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
|
@ -558,7 +575,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
|
|||
|
||||
bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
|
||||
if (!bo_va) {
|
||||
amdgpu_bo_unreserve(rbo);
|
||||
ttm_eu_backoff_reservation(&ticket, &list);
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
mutex_unlock(&fpriv->vm.mutex);
|
||||
return -ENOENT;
|
||||
}
|
||||
|
@ -581,7 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
|
|||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
ttm_eu_backoff_reservation(&ticket, &list);
|
||||
if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
|
||||
amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
|
||||
mutex_unlock(&fpriv->vm.mutex);
|
||||
|
|
|
@ -62,7 +62,7 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
|
|||
int r;
|
||||
|
||||
if (size) {
|
||||
r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo,
|
||||
r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
|
||||
&ib->sa_bo, size, 256);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
|
||||
|
@ -216,7 +216,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
|
|||
}
|
||||
|
||||
if (ib->vm)
|
||||
amdgpu_vm_fence(adev, ib->vm, ib->fence);
|
||||
amdgpu_vm_fence(adev, ib->vm, &ib->fence->base);
|
||||
|
||||
amdgpu_ring_unlock_commit(ring);
|
||||
return 0;
|
||||
|
|
|
@ -189,10 +189,9 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
|
|||
struct amdgpu_sa_manager *sa_manager);
|
||||
int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
|
||||
struct amdgpu_sa_manager *sa_manager);
|
||||
int amdgpu_sa_bo_new(struct amdgpu_device *adev,
|
||||
struct amdgpu_sa_manager *sa_manager,
|
||||
struct amdgpu_sa_bo **sa_bo,
|
||||
unsigned size, unsigned align);
|
||||
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
|
||||
struct amdgpu_sa_bo **sa_bo,
|
||||
unsigned size, unsigned align);
|
||||
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
|
||||
struct amdgpu_sa_bo **sa_bo,
|
||||
struct fence *fence);
|
||||
|
|
|
@ -311,8 +311,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
|
|||
return false;
|
||||
}
|
||||
|
||||
int amdgpu_sa_bo_new(struct amdgpu_device *adev,
|
||||
struct amdgpu_sa_manager *sa_manager,
|
||||
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
|
||||
struct amdgpu_sa_bo **sa_bo,
|
||||
unsigned size, unsigned align)
|
||||
{
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
#include <linux/sched.h>
|
||||
#include <drm/drmP.h>
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_trace.h"
|
||||
|
||||
static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
|
||||
{
|
||||
|
@ -44,11 +45,8 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
|
|||
return NULL;
|
||||
}
|
||||
job = to_amdgpu_job(sched_job);
|
||||
mutex_lock(&job->job_lock);
|
||||
r = amdgpu_ib_schedule(job->adev,
|
||||
job->num_ibs,
|
||||
job->ibs,
|
||||
job->base.owner);
|
||||
trace_amdgpu_sched_run_job(job);
|
||||
r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner);
|
||||
if (r) {
|
||||
DRM_ERROR("Error scheduling IBs (%d)\n", r);
|
||||
goto err;
|
||||
|
@ -61,8 +59,6 @@ err:
|
|||
if (job->free_job)
|
||||
job->free_job(job);
|
||||
|
||||
mutex_unlock(&job->job_lock);
|
||||
fence_put(&job->base.s_fence->base);
|
||||
kfree(job);
|
||||
return fence ? &fence->base : NULL;
|
||||
}
|
||||
|
@ -88,21 +84,19 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
|
|||
return -ENOMEM;
|
||||
job->base.sched = &ring->sched;
|
||||
job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
|
||||
job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
|
||||
if (!job->base.s_fence) {
|
||||
kfree(job);
|
||||
return -ENOMEM;
|
||||
}
|
||||
*f = fence_get(&job->base.s_fence->base);
|
||||
|
||||
job->adev = adev;
|
||||
job->ibs = ibs;
|
||||
job->num_ibs = num_ibs;
|
||||
job->base.owner = owner;
|
||||
mutex_init(&job->job_lock);
|
||||
job->owner = owner;
|
||||
job->free_job = free_job;
|
||||
mutex_lock(&job->job_lock);
|
||||
r = amd_sched_entity_push_job(&job->base);
|
||||
if (r) {
|
||||
mutex_unlock(&job->job_lock);
|
||||
kfree(job);
|
||||
return r;
|
||||
}
|
||||
*f = fence_get(&job->base.s_fence->base);
|
||||
mutex_unlock(&job->job_lock);
|
||||
amd_sched_entity_push_job(&job->base);
|
||||
} else {
|
||||
r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
|
||||
if (r)
|
||||
|
|
|
@ -40,7 +40,7 @@ int amdgpu_semaphore_create(struct amdgpu_device *adev,
|
|||
if (*semaphore == NULL) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo,
|
||||
r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
|
||||
&(*semaphore)->sa_bo, 8, 8);
|
||||
if (r) {
|
||||
kfree(*semaphore);
|
||||
|
|
|
@ -302,8 +302,14 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores ||
|
||||
(count >= AMDGPU_NUM_SYNCS)) {
|
||||
if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
|
||||
r = fence_wait(&fence->base, true);
|
||||
if (r)
|
||||
return r;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (count >= AMDGPU_NUM_SYNCS) {
|
||||
/* not enough room, wait manually */
|
||||
r = fence_wait(&fence->base, false);
|
||||
if (r)
|
||||
|
|
|
@ -48,6 +48,57 @@ TRACE_EVENT(amdgpu_cs,
|
|||
__entry->fences)
|
||||
);
|
||||
|
||||
TRACE_EVENT(amdgpu_cs_ioctl,
|
||||
TP_PROTO(struct amdgpu_job *job),
|
||||
TP_ARGS(job),
|
||||
TP_STRUCT__entry(
|
||||
__field(struct amdgpu_device *, adev)
|
||||
__field(struct amd_sched_job *, sched_job)
|
||||
__field(struct amdgpu_ib *, ib)
|
||||
__field(struct fence *, fence)
|
||||
__field(char *, ring_name)
|
||||
__field(u32, num_ibs)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->adev = job->adev;
|
||||
__entry->sched_job = &job->base;
|
||||
__entry->ib = job->ibs;
|
||||
__entry->fence = &job->base.s_fence->base;
|
||||
__entry->ring_name = job->ibs[0].ring->name;
|
||||
__entry->num_ibs = job->num_ibs;
|
||||
),
|
||||
TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
|
||||
__entry->adev, __entry->sched_job, __entry->ib,
|
||||
__entry->fence, __entry->ring_name, __entry->num_ibs)
|
||||
);
|
||||
|
||||
TRACE_EVENT(amdgpu_sched_run_job,
|
||||
TP_PROTO(struct amdgpu_job *job),
|
||||
TP_ARGS(job),
|
||||
TP_STRUCT__entry(
|
||||
__field(struct amdgpu_device *, adev)
|
||||
__field(struct amd_sched_job *, sched_job)
|
||||
__field(struct amdgpu_ib *, ib)
|
||||
__field(struct fence *, fence)
|
||||
__field(char *, ring_name)
|
||||
__field(u32, num_ibs)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->adev = job->adev;
|
||||
__entry->sched_job = &job->base;
|
||||
__entry->ib = job->ibs;
|
||||
__entry->fence = &job->base.s_fence->base;
|
||||
__entry->ring_name = job->ibs[0].ring->name;
|
||||
__entry->num_ibs = job->num_ibs;
|
||||
),
|
||||
TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
|
||||
__entry->adev, __entry->sched_job, __entry->ib,
|
||||
__entry->fence, __entry->ring_name, __entry->num_ibs)
|
||||
);
|
||||
|
||||
|
||||
TRACE_EVENT(amdgpu_vm_grab_id,
|
||||
TP_PROTO(unsigned vmid, int ring),
|
||||
TP_ARGS(vmid, ring),
|
||||
|
@ -196,49 +247,6 @@ TRACE_EVENT(amdgpu_bo_list_set,
|
|||
TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(amdgpu_fence_request,
|
||||
|
||||
TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
|
||||
|
||||
TP_ARGS(dev, ring, seqno),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u32, dev)
|
||||
__field(int, ring)
|
||||
__field(u32, seqno)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = dev->primary->index;
|
||||
__entry->ring = ring;
|
||||
__entry->seqno = seqno;
|
||||
),
|
||||
|
||||
TP_printk("dev=%u, ring=%d, seqno=%u",
|
||||
__entry->dev, __entry->ring, __entry->seqno)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_emit,
|
||||
|
||||
TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
|
||||
|
||||
TP_ARGS(dev, ring, seqno)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_begin,
|
||||
|
||||
TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
|
||||
|
||||
TP_ARGS(dev, ring, seqno)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_end,
|
||||
|
||||
TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
|
||||
|
||||
TP_ARGS(dev, ring, seqno)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(amdgpu_semaphore_request,
|
||||
|
||||
TP_PROTO(int ring, struct amdgpu_semaphore *sem),
|
||||
|
|
|
@ -1073,10 +1073,10 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
|
|||
ret = drm_mm_dump_table(m, mm);
|
||||
spin_unlock(&glob->lru_lock);
|
||||
if (ttm_pl == TTM_PL_VRAM)
|
||||
seq_printf(m, "man size:%llu pages, ram usage:%luMB, vis usage:%luMB\n",
|
||||
seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
|
||||
adev->mman.bdev.man[ttm_pl].size,
|
||||
atomic64_read(&adev->vram_usage) >> 20,
|
||||
atomic64_read(&adev->vram_vis_usage) >> 20);
|
||||
(u64)atomic64_read(&adev->vram_usage) >> 20,
|
||||
(u64)atomic64_read(&adev->vram_vis_usage) >> 20);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -143,10 +143,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
|
|||
unsigned i;
|
||||
|
||||
/* check if the id is still valid */
|
||||
if (vm_id->id && vm_id->last_id_use &&
|
||||
vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) {
|
||||
trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
|
||||
return 0;
|
||||
if (vm_id->id) {
|
||||
unsigned id = vm_id->id;
|
||||
long owner;
|
||||
|
||||
owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
|
||||
if (owner == (long)vm) {
|
||||
trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* we definately need to flush */
|
||||
|
@ -154,7 +159,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
|
|||
|
||||
/* skip over VMID 0, since it is the system VM */
|
||||
for (i = 1; i < adev->vm_manager.nvm; ++i) {
|
||||
struct fence *fence = adev->vm_manager.active[i];
|
||||
struct fence *fence = adev->vm_manager.ids[i].active;
|
||||
struct amdgpu_ring *fring;
|
||||
|
||||
if (fence == NULL) {
|
||||
|
@ -176,7 +181,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
|
|||
if (choices[i]) {
|
||||
struct fence *fence;
|
||||
|
||||
fence = adev->vm_manager.active[choices[i]];
|
||||
fence = adev->vm_manager.ids[choices[i]].active;
|
||||
vm_id->id = choices[i];
|
||||
|
||||
trace_amdgpu_vm_grab_id(choices[i], ring->idx);
|
||||
|
@ -207,24 +212,21 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
|
|||
uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
|
||||
struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
|
||||
struct fence *flushed_updates = vm_id->flushed_updates;
|
||||
bool is_earlier = false;
|
||||
bool is_later;
|
||||
|
||||
if (flushed_updates && updates) {
|
||||
BUG_ON(flushed_updates->context != updates->context);
|
||||
is_earlier = (updates->seqno - flushed_updates->seqno <=
|
||||
INT_MAX) ? true : false;
|
||||
}
|
||||
|
||||
if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates ||
|
||||
is_earlier) {
|
||||
if (!flushed_updates)
|
||||
is_later = true;
|
||||
else if (!updates)
|
||||
is_later = false;
|
||||
else
|
||||
is_later = fence_is_later(updates, flushed_updates);
|
||||
|
||||
if (pd_addr != vm_id->pd_gpu_addr || is_later) {
|
||||
trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
|
||||
if (is_earlier) {
|
||||
if (is_later) {
|
||||
vm_id->flushed_updates = fence_get(updates);
|
||||
fence_put(flushed_updates);
|
||||
}
|
||||
if (!flushed_updates)
|
||||
vm_id->flushed_updates = fence_get(updates);
|
||||
vm_id->pd_gpu_addr = pd_addr;
|
||||
amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
|
||||
}
|
||||
|
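The hunk above replaces an open-coded sequence-number check with the fence_is_later() helper. The underlying idea, shown here only as a hedged standalone illustration and not as the kernel's implementation, is that with an unsigned wrapping sequence counter, "a is later than b" can be decided from the sign of the difference:

#include <stdbool.h>
#include <stdint.h>

/* Wraparound-safe "was 'a' issued after 'b'?" for 32-bit sequence numbers. */
static bool seqno_is_later(uint32_t a, uint32_t b)
{
    return (int32_t)(a - b) > 0;
}

int main(void)
{
    /* Just past the wrap point, 0x00000002 is still later than 0xfffffffe. */
    return seqno_is_later(0x00000002u, 0xfffffffeu) ? 0 : 1;
}
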
@ -244,16 +246,14 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
|
|||
*/
|
||||
void amdgpu_vm_fence(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm,
|
||||
struct amdgpu_fence *fence)
|
||||
struct fence *fence)
|
||||
{
|
||||
unsigned ridx = fence->ring->idx;
|
||||
unsigned vm_id = vm->ids[ridx].id;
|
||||
struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
|
||||
unsigned vm_id = vm->ids[ring->idx].id;
|
||||
|
||||
fence_put(adev->vm_manager.active[vm_id]);
|
||||
adev->vm_manager.active[vm_id] = fence_get(&fence->base);
|
||||
|
||||
fence_put(vm->ids[ridx].last_id_use);
|
||||
vm->ids[ridx].last_id_use = fence_get(&fence->base);
|
||||
fence_put(adev->vm_manager.ids[vm_id].active);
|
||||
adev->vm_manager.ids[vm_id].active = fence_get(fence);
|
||||
atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -332,6 +332,8 @@ int amdgpu_vm_free_job(struct amdgpu_job *job)
*
* @adev: amdgpu_device pointer
* @bo: bo to clear
*
* need to reserve bo first before calling it.
*/
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_bo *bo)

@@ -343,24 +345,20 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
uint64_t addr;
int r;

r = amdgpu_bo_reserve(bo, false);
if (r)
return r;

r = reservation_object_reserve_shared(bo->tbo.resv);
if (r)
return r;

r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
if (r)
goto error_unreserve;
goto error;

addr = amdgpu_bo_gpu_offset(bo);
entries = amdgpu_bo_size(bo) / 8;

ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
if (!ib)
goto error_unreserve;
goto error;

r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
if (r)

@@ -378,16 +376,14 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (!r)
amdgpu_bo_fence(bo, fence, true);
fence_put(fence);
if (amdgpu_enable_scheduler) {
amdgpu_bo_unreserve(bo);
if (amdgpu_enable_scheduler)
return 0;
}

error_free:
amdgpu_ib_free(adev, ib);
kfree(ib);

error_unreserve:
amdgpu_bo_unreserve(bo);
error:
return r;
}
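The amdgpu_vm_clear_bo() hunks above drop the internal reserve/unreserve, so the old error_unreserve label collapses into a plain error label. The pattern itself is the usual one-label-per-acquired-resource unwinding; a small self-contained illustration (the function and resources below are invented for the example, not driver code):

#include <stdio.h>
#include <stdlib.h>

/* Minimal sketch of the goto-unwind idiom: each failure jumps to the
 * label that releases exactly what has been acquired so far. */
static int do_work(void)
{
	char *buf;
	FILE *f;
	int ret = -1;

	buf = malloc(64);
	if (!buf)
		goto error;

	f = fopen("/dev/null", "w");
	if (!f)
		goto error_free;

	fprintf(f, "did some work\n");
	fclose(f);
	ret = 0;

error_free:
	free(buf);
error:
	return ret;
}

int main(void)
{
	return do_work() ? EXIT_FAILURE : EXIT_SUCCESS;
}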
@@ -989,7 +985,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
* Add a mapping of the BO at the specified addr into the VM.
|
||||
* Returns 0 for success, error for failure.
|
||||
*
|
||||
* Object has to be reserved and gets unreserved by this function!
|
||||
* Object has to be reserved and unreserved outside!
|
||||
*/
|
||||
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
|
||||
struct amdgpu_bo_va *bo_va,
|
||||
|
@@ -1005,30 +1001,27 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
|
||||
/* validate the parameters */
|
||||
if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
|
||||
size == 0 || size & AMDGPU_GPU_PAGE_MASK) {
|
||||
amdgpu_bo_unreserve(bo_va->bo);
|
||||
size == 0 || size & AMDGPU_GPU_PAGE_MASK)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* make sure object fit at this offset */
|
||||
eaddr = saddr + size;
|
||||
if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) {
|
||||
amdgpu_bo_unreserve(bo_va->bo);
|
||||
if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
|
||||
if (last_pfn > adev->vm_manager.max_pfn) {
|
||||
dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
|
||||
last_pfn, adev->vm_manager.max_pfn);
|
||||
amdgpu_bo_unreserve(bo_va->bo);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
saddr /= AMDGPU_GPU_PAGE_SIZE;
|
||||
eaddr /= AMDGPU_GPU_PAGE_SIZE;
|
||||
|
||||
spin_lock(&vm->it_lock);
|
||||
it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
|
||||
spin_unlock(&vm->it_lock);
|
||||
if (it) {
|
||||
struct amdgpu_bo_va_mapping *tmp;
|
||||
tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
|
||||
|
@@ -1036,14 +1029,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
|
||||
"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
|
||||
tmp->it.start, tmp->it.last + 1);
|
||||
amdgpu_bo_unreserve(bo_va->bo);
|
||||
r = -EINVAL;
|
||||
goto error;
|
||||
}
|
||||
|
||||
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
|
||||
if (!mapping) {
|
||||
amdgpu_bo_unreserve(bo_va->bo);
|
||||
r = -ENOMEM;
|
||||
goto error;
|
||||
}
|
||||
|
@@ -1055,7 +1046,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
mapping->flags = flags;
|
||||
|
||||
list_add(&mapping->list, &bo_va->invalids);
|
||||
spin_lock(&vm->it_lock);
|
||||
interval_tree_insert(&mapping->it, &vm->va);
|
||||
spin_unlock(&vm->it_lock);
|
||||
trace_amdgpu_vm_bo_map(bo_va, mapping);
|
||||
|
||||
/* Make sure the page tables are allocated */
|
||||
|
@@ -1067,8 +1060,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
if (eaddr > vm->max_pde_used)
|
||||
vm->max_pde_used = eaddr;
|
||||
|
||||
amdgpu_bo_unreserve(bo_va->bo);
|
||||
|
||||
/* walk over the address space and allocate the page tables */
|
||||
for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
|
||||
struct reservation_object *resv = vm->page_directory->tbo.resv;
|
||||
|
@@ -1077,13 +1068,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
if (vm->page_tables[pt_idx].bo)
|
||||
continue;
|
||||
|
||||
ww_mutex_lock(&resv->lock, NULL);
|
||||
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
|
||||
AMDGPU_GPU_PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
|
||||
NULL, resv, &pt);
|
||||
ww_mutex_unlock(&resv->lock);
|
||||
if (r)
|
||||
goto error_free;
|
||||
|
||||
|
@@ -1101,7 +1090,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
|
||||
error_free:
|
||||
list_del(&mapping->list);
|
||||
spin_lock(&vm->it_lock);
|
||||
interval_tree_remove(&mapping->it, &vm->va);
|
||||
spin_unlock(&vm->it_lock);
|
||||
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
|
||||
kfree(mapping);
|
||||
|
||||
|
@@ -1119,7 +1110,7 @@ error:
* Remove a mapping of the BO at the specified addr from the VM.
|
||||
* Returns 0 for success, error for failure.
|
||||
*
|
||||
* Object has to be reserved and gets unreserved by this function!
|
||||
* Object has to be reserved and unreserved outside!
|
||||
*/
|
||||
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
|
||||
struct amdgpu_bo_va *bo_va,
|
||||
|
@@ -1144,21 +1135,20 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
break;
}

if (&mapping->list == &bo_va->invalids) {
amdgpu_bo_unreserve(bo_va->bo);
if (&mapping->list == &bo_va->invalids)
return -ENOENT;
}
}

list_del(&mapping->list);
spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);

if (valid)
list_add(&mapping->list, &vm->freed);
else
kfree(mapping);
amdgpu_bo_unreserve(bo_va->bo);

return 0;
}
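Per the updated comments, amdgpu_vm_bo_map()/amdgpu_vm_bo_unmap() now expect the BO to be reserved and unreserved by the caller, which is why the amdgpu_bo_unreserve() calls disappear from the error and return paths above. A rough userspace analogue of that caller-managed locking convention, with invented names (object_map()/object_unmap() stand in for the real entry points):

#include <pthread.h>
#include <stdio.h>

/* The object is locked ("reserved") by the caller; map/unmap never
 * unlock it on any path.  Purely illustrative, not kernel interfaces. */
struct object {
	pthread_mutex_t lock;
	int mapped;
};

static int object_map(struct object *obj)    /* assumes obj->lock is held */
{
	if (obj->mapped)
		return -1;   /* error paths simply return; no unlocking here */
	obj->mapped = 1;
	return 0;
}

static void object_unmap(struct object *obj) /* assumes obj->lock is held */
{
	obj->mapped = 0;
}

int main(void)
{
	struct object obj = { PTHREAD_MUTEX_INITIALIZER, 0 };

	pthread_mutex_lock(&obj.lock);    /* "reserve" outside */
	if (object_map(&obj) == 0)
		printf("mapped\n");
	object_unmap(&obj);
	pthread_mutex_unlock(&obj.lock);  /* "unreserve" outside */
	return 0;
}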
|
||||
|
@@ -1187,13 +1177,17 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
|
||||
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
|
||||
list_del(&mapping->list);
|
||||
spin_lock(&vm->it_lock);
|
||||
interval_tree_remove(&mapping->it, &vm->va);
|
||||
spin_unlock(&vm->it_lock);
|
||||
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
|
||||
list_add(&mapping->list, &vm->freed);
|
||||
}
|
||||
list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
|
||||
list_del(&mapping->list);
|
||||
spin_lock(&vm->it_lock);
|
||||
interval_tree_remove(&mapping->it, &vm->va);
|
||||
spin_unlock(&vm->it_lock);
|
||||
kfree(mapping);
|
||||
}
|
||||
|
||||
|
@@ -1241,7 +1235,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
|
||||
vm->ids[i].id = 0;
|
||||
vm->ids[i].flushed_updates = NULL;
|
||||
vm->ids[i].last_id_use = NULL;
|
||||
}
|
||||
mutex_init(&vm->mutex);
|
||||
vm->va = RB_ROOT;
|
||||
|
@@ -1249,7 +1242,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
INIT_LIST_HEAD(&vm->invalidated);
|
||||
INIT_LIST_HEAD(&vm->cleared);
|
||||
INIT_LIST_HEAD(&vm->freed);
|
||||
|
||||
spin_lock_init(&vm->it_lock);
|
||||
pd_size = amdgpu_vm_directory_size(adev);
|
||||
pd_entries = amdgpu_vm_num_pdes(adev);
|
||||
|
||||
|
@@ -1269,8 +1262,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
NULL, NULL, &vm->page_directory);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_bo_reserve(vm->page_directory, false);
|
||||
if (r) {
|
||||
amdgpu_bo_unref(&vm->page_directory);
|
||||
vm->page_directory = NULL;
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_vm_clear_bo(adev, vm->page_directory);
|
||||
amdgpu_bo_unreserve(vm->page_directory);
|
||||
if (r) {
|
||||
amdgpu_bo_unref(&vm->page_directory);
|
||||
vm->page_directory = NULL;
|
||||
|
@@ -1313,11 +1312,28 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)

amdgpu_bo_unref(&vm->page_directory);
fence_put(vm->page_directory_fence);

for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
unsigned id = vm->ids[i].id;

atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
(long)vm, 0);
fence_put(vm->ids[i].flushed_updates);
fence_put(vm->ids[i].last_id_use);
}

mutex_destroy(&vm->mutex);
}

/**
* amdgpu_vm_manager_fini - cleanup VM manager
*
* @adev: amdgpu_device pointer
*
* Cleanup the VM manager and free resources.
*/
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
unsigned i;

for (i = 0; i < AMDGPU_NUM_VM; ++i)
fence_put(adev->vm_manager.ids[i].active);
}
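The amdgpu_vm_fini() hunk above releases each VMID with atomic_long_cmpxchg(), i.e. the owner slot is cleared only if it still points at this VM. A compact C11 sketch of that compare-and-swap release, assuming nothing about the kernel types (plain atomics, invented names):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* "Release only if we still own it": the slot is cleared only when it
 * still holds 'me'; a slot already taken over by someone else is left
 * untouched.  Illustrative userspace code, not the kernel helpers. */
static _Atomic uintptr_t owner;

static void release_id(uintptr_t me)
{
	uintptr_t expected = me;

	atomic_compare_exchange_strong(&owner, &expected, (uintptr_t)0);
}

int main(void)
{
	uintptr_t vm_a = 0x1000, vm_b = 0x2000;

	atomic_store(&owner, vm_a);
	release_id(vm_b);   /* not the owner: slot unchanged */
	printf("owner after foreign release: %#lx\n", (unsigned long)atomic_load(&owner));

	release_id(vm_a);   /* the owner: slot cleared */
	printf("owner after own release:     %#lx\n", (unsigned long)atomic_load(&owner));
	return 0;
}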
@@ -6569,12 +6569,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
break;
case AMDGPU_IRQ_STATE_ENABLE:
cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
break;
default:

@@ -6586,12 +6586,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
break;
case AMDGPU_IRQ_STATE_ENABLE:
cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
break;
default:
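The two ci_dpm_set_interrupt_state() hunks above fix an inverted mask: disabling the thermal interrupt must set the THERM_INT*_MASK bit, and enabling it must clear the bit, not the other way round. A tiny model of that read-modify-write on a fake register (the register, bit position and helpers are stand-ins, not the SMC register map):

#include <stdint.h>
#include <stdio.h>

#define THERM_INT_MASK (1u << 24)     /* invented bit position */

static uint32_t fake_reg = 0x00000100; /* pretend initial register value */

static uint32_t reg_read(void)        { return fake_reg; }
static void     reg_write(uint32_t v) { fake_reg = v; }

static void thermal_irq_set_enabled(int enable)
{
	uint32_t v = reg_read();

	if (enable)
		v &= ~THERM_INT_MASK;  /* unmask: interrupt can fire */
	else
		v |= THERM_INT_MASK;   /* mask: interrupt suppressed */
	reg_write(v);
}

int main(void)
{
	thermal_irq_set_enabled(0);
	printf("disabled: 0x%08x\n", (unsigned int)fake_reg);
	thermal_irq_set_enabled(1);
	printf("enabled:  0x%08x\n", (unsigned int)fake_reg);
	return 0;
}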
@@ -268,7 +268,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,

@@ -296,10 +295,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
mmPCIE_INDEX, 0xffffffff, 0x0140001c,
mmPCIE_DATA, 0x000f0000, 0x00000000,
mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};

@@ -1000,7 +995,7 @@ static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.max_cu_per_sh = 16;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_backends_per_se = 4;
adev->gfx.config.max_texture_channel_caches = 8;
adev->gfx.config.max_texture_channel_caches = 16;
adev->gfx.config.max_gprs = 256;
adev->gfx.config.max_gs_threads = 32;
adev->gfx.config.max_hw_contexts = 8;
@@ -1613,6 +1608,296 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
|
||||
}
|
||||
case CHIP_FIJI:
|
||||
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
|
||||
switch (reg_offset) {
|
||||
case 0:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
|
||||
break;
|
||||
case 1:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
|
||||
break;
|
||||
case 2:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
|
||||
break;
|
||||
case 3:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
|
||||
break;
|
||||
case 4:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
|
||||
break;
|
||||
case 5:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
|
||||
break;
|
||||
case 6:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
|
||||
break;
|
||||
case 7:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
|
||||
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
|
||||
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
|
||||
break;
|
||||
case 8:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
|
||||
break;
|
||||
case 9:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
|
||||
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
|
||||
break;
|
||||
case 10:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
|
||||
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
|
||||
break;
|
||||
case 11:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
|
||||
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
|
||||
break;
|
||||
case 12:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
|
||||
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
|
||||
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
|
||||
break;
|
||||
case 13:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
|
||||
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
|
||||
break;
|
||||
case 14:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
|
||||
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
|
||||
break;
|
||||
case 15:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
|
||||
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
|
||||
break;
|
||||
case 16:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
|
||||
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
|
||||
break;
|
||||
case 17:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
|
||||
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
|
||||
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
|
||||
break;
|
||||
case 18:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
|
||||
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
|
||||
break;
|
||||
case 19:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
|
||||
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
|
||||
break;
|
||||
case 20:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
|
||||
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
|
||||
break;
|
||||
case 21:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
|
||||
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
|
||||
break;
|
||||
case 22:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
|
||||
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
|
||||
break;
|
||||
case 23:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
|
||||
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
|
||||
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
|
||||
break;
|
||||
case 24:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
|
||||
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
|
||||
break;
|
||||
case 25:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
|
||||
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
|
||||
break;
|
||||
case 26:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
|
||||
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
|
||||
break;
|
||||
case 27:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
|
||||
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
|
||||
break;
|
||||
case 28:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
|
||||
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
|
||||
break;
|
||||
case 29:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
|
||||
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
|
||||
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
|
||||
break;
|
||||
case 30:
|
||||
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
|
||||
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
|
||||
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
|
||||
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
|
||||
break;
|
||||
default:
|
||||
gb_tile_moden = 0;
|
||||
break;
|
||||
}
|
||||
adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
|
||||
WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
|
||||
}
|
||||
for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
|
||||
switch (reg_offset) {
|
||||
case 0:
|
||||
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
|
||||
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
|
||||
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
|
||||
NUM_BANKS(ADDR_SURF_8_BANK));
|
||||
break;
|
||||
case 1:
|
||||
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
|
||||
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
|
||||
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
|
||||
NUM_BANKS(ADDR_SURF_8_BANK));
|
||||
break;
|
||||
case 2:
|
||||
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
|
||||
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
|
||||
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
|
||||
NUM_BANKS(ADDR_SURF_8_BANK));
|
||||
break;
|
||||
case 3:
|
||||
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
|
||||
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
|
||||
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
|
||||
NUM_BANKS(ADDR_SURF_8_BANK));
|
||||
break;
|
||||
case 4:
|
||||
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
|
||||
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
|
||||
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
|
||||
NUM_BANKS(ADDR_SURF_8_BANK));
|
||||
break;
|
||||
case 5:
|
||||
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
|
||||
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
|
||||
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
|
||||
NUM_BANKS(ADDR_SURF_8_BANK));
|
||||
break;
|
||||
case 6:
|
||||
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
|
||||
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
|
||||
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
|
||||
NUM_BANKS(ADDR_SURF_8_BANK));
|
||||
break;
|
||||
case 8:
|
||||
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
|
||||
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
|
||||
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
|
||||
NUM_BANKS(ADDR_SURF_8_BANK));
|
||||
break;
|
||||
case 9:
|
||||
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
|
||||
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
|
||||
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
|
||||
NUM_BANKS(ADDR_SURF_8_BANK));
|
||||
break;
|
||||
case 10:
|
||||
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
|
||||
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
|
||||
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
|
||||
NUM_BANKS(ADDR_SURF_8_BANK));
|
||||
break;
|
||||
case 11:
|
||||
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
|
||||
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
|
||||
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
|
||||
NUM_BANKS(ADDR_SURF_8_BANK));
|
||||
break;
|
||||
case 12:
|
||||
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
|
||||
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
|
||||
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
|
||||
NUM_BANKS(ADDR_SURF_8_BANK));
|
||||
break;
|
||||
case 13:
|
||||
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
|
||||
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
|
||||
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
|
||||
NUM_BANKS(ADDR_SURF_8_BANK));
|
||||
break;
|
||||
case 14:
|
||||
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
|
||||
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
|
||||
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
|
||||
NUM_BANKS(ADDR_SURF_4_BANK));
|
||||
break;
|
||||
case 7:
|
||||
/* unused idx */
|
||||
continue;
|
||||
default:
|
||||
gb_tile_moden = 0;
|
||||
break;
|
||||
}
|
||||
adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
|
||||
WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
|
||||
}
|
||||
break;
|
||||
case CHIP_TONGA:
|
||||
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
|
||||
switch (reg_offset) {
|
||||
|
@@ -2971,10 +3256,13 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
switch (adev->asic_type) {
case CHIP_TONGA:
case CHIP_FIJI:
amdgpu_ring_write(ring, 0x16000012);
amdgpu_ring_write(ring, 0x0000002A);
break;
case CHIP_FIJI:
amdgpu_ring_write(ring, 0x3a00161a);
amdgpu_ring_write(ring, 0x0000002e);
break;
case CHIP_TOPAZ:
case CHIP_CARRIZO:
amdgpu_ring_write(ring, 0x00000002);
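Both the GB_TILE_MODE/GB_MACROTILE_MODE tables and the PA_SC_RASTER_CONFIG writes earlier in this diff boil down to packing several bitfields into one 32-bit register word. A generic sketch of that encoding style; the field names and shift positions below are invented for illustration and do not match the real gfx_v8_0 register layout:

#include <stdint.h>
#include <stdio.h>

/* OR shifted fields together to build a single register value. */
#define FIELD(val, shift)   ((uint32_t)(val) << (shift))
#define ARRAY_MODE_F(x)     FIELD(x, 2)    /* hypothetical shift */
#define PIPE_CONFIG_F(x)    FIELD(x, 6)    /* hypothetical shift */
#define TILE_SPLIT_F(x)     FIELD(x, 11)   /* hypothetical shift */

int main(void)
{
	uint32_t gb_tile_moden = ARRAY_MODE_F(4) |   /* e.g. 2D tiled thin */
				 PIPE_CONFIG_F(12) |
				 TILE_SPLIT_F(2);

	printf("encoded tile mode: 0x%08x\n", (unsigned int)gb_tile_moden);
	return 0;
}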
Some files were not shown because too many files changed in this diff.