ARC updates for 4.18
- Software managed DMA wreckage after rework in 4.17 [Euginey]
   + missing cache flush
   + SMP_CACHE_BYTES vs. cache_line_size
- allmodconfig build errors [Randy]
- Maintainer update for Mellanox (EZChip) NPS platform

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJbY3tYAAoJEGnX8d3iisJeCbIP/1aRwU61Sn+1g4PBh2x8XBzU
hvlUB5IIlFY+1aQEZG3h3P3SNi/DO3WtjXaAzzUlSHdX6jLFn8VWZupfnTE8Tr2p
9tx5VGrHECQzg+ew6qc5KU3zcRMT4uy61QqE5r0MPYRXzpO9V25bWArQ1wBDMrzG
T85X19dKvmNDFrJk5SkKFn6bHpeGaIrzwJzzgVJAcDMmMolQggMqJwHbLGt6reiI
8CDZ3bmwwCbnb3r6JlltZq/MeJFdcLReL1eQsedh2GbqFoi4IRia0ICQakL+DLLk
ru7sg+LOGKm9GpSHxzP1Jq1m3iPgXKW2UpggwCfe8Fima5mNSGRiwUZkVTfZ5h+W
en4Mf97E6eFnouGD8w86b4KnQ3X6c1zxBEGjrnaDbifq/6iGfef+sxEFanJoGaHa
kpLHYXe3CG9OgV05kxtnjJQRuBuRgIcK4G4LwASuq8JWRb3jIQL+VrHsYnh9oWUl
66yMd9SSMHgW5ccE0r6oaJvG0dCBaichbJaVX0VoEZCqNbbSRR+ifoRJ/yXOpeVw
TGkRlah7b5l64RL3klfa+mlONuCArn4Oflxjpje5RqKOQP8mFIhy/w4mox82DWW7
n+7QygRj6H8Euei7ttP3G+9jL9+WErZf6+EwVKcfYmyEKj9+wRn5ySFm75hjmLsM
IBUXUeVjttbCncx9yCrj
=OGbe
-----END PGP SIGNATURE-----

Merge tag 'arc-4.18-final' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC fixes from Vineet Gupta:
 "Another batch of fixes for ARC, this time mainly DMA API rework
  wreckage:

   - Fix software managed DMA wreckage after rework in 4.17 [Euginey]
       * missing cache flush
       * SMP_CACHE_BYTES vs cache_line_size

   - Fix allmodconfig build errors [Randy]

   - Maintainer update for Mellanox (EZChip) NPS platform"

* tag 'arc-4.18-final' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  arc: fix type warnings in arc/mm/cache.c
  arc: fix build errors in arc/include/asm/delay.h
  arc: [plat-eznps] fix printk warning in arc/plat-eznps/mtm.c
  arc: [plat-eznps] fix data type errors in platform headers
  ARC: [plat-eznps] Add missing struct nps_host_reg_aux_dpc
  ARC: add SMP_CACHE_BYTES value validate
  ARC: dma [non-IOC] setup SMP_CACHE_BYTES and cache_line_size
  ARC: dma [non IOC]: fix arc_dma_sync_single_for_(device|cpu)
  ARC: Add Ofer Levi as plat-eznps maintainer
Commit ed0093d976
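Before the diff, a minimal driver-side sketch of how a streaming-DMA receive path reaches the arch_sync_dma_for_device()/arch_sync_dma_for_cpu() hooks whose cache maintenance is fixed below. This is not part of the pull; the function and buffer handling are hypothetical, only the dma-mapping calls are real kernel API.

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/errno.h>

/* Hypothetical RX path of a driver using the streaming DMA API. */
static int example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/*
	 * Map for device writes: ends up in arch_sync_dma_for_device()
	 * with DMA_FROM_DEVICE, which must invalidate stale cache lines
	 * covering buf.
	 */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device to DMA into 'handle', wait for completion ... */

	/*
	 * Unmap for CPU: ends up in arch_sync_dma_for_cpu(), which must
	 * invalidate again in case the CPU speculatively prefetched lines
	 * while the device was writing.
	 */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}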
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5444,6 +5444,7 @@ F:	drivers/iommu/exynos-iommu.c
 
 EZchip NPS platform support
 M:	Vineet Gupta <vgupta@synopsys.com>
+M:	Ofer Levi <oferle@mellanox.com>
 S:	Supported
 F:	arch/arc/plat-eznps
 F:	arch/arc/boot/dts/eznps.dts
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -50,6 +50,9 @@ config ARC
 	select HAVE_KERNEL_LZMA
 	select ARCH_HAS_PTE_SPECIAL
 
+config ARCH_HAS_CACHE_LINE_SIZE
+	def_bool y
+
 config MIGHT_HAVE_PCI
 	bool
 
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -48,7 +48,9 @@
 })
 
 /* Largest line length for either L1 or L2 is 128 bytes */
-#define ARCH_DMA_MINALIGN	128
+#define SMP_CACHE_BYTES		128
+#define cache_line_size()	SMP_CACHE_BYTES
+#define ARCH_DMA_MINALIGN	SMP_CACHE_BYTES
 
 extern void arc_cache_init(void);
 extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
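As an aside (not part of the diff): ARCH_DMA_MINALIGN and SMP_CACHE_BYTES must cover the largest cache line because cache maintenance on a DMA buffer operates on whole lines, so a buffer sharing a line with CPU-owned data can be corrupted by an invalidate. A minimal sketch, with hypothetical struct and field names:

#include <linux/cache.h>	/* SMP_CACHE_BYTES, ____cacheline_aligned */
#include <linux/types.h>

/* Hypothetical descriptor mixing CPU-owned and DMA-target fields. */
struct example_desc {
	u32 cpu_state;			/* CPU-only bookkeeping */

	/*
	 * DMA target: aligned to (and sized as a multiple of)
	 * SMP_CACHE_BYTES, so invalidating the lines backing rx_buf
	 * never touches cpu_state.
	 */
	u8 rx_buf[SMP_CACHE_BYTES] ____cacheline_aligned;
};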
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -17,8 +17,11 @@
 #ifndef __ASM_ARC_UDELAY_H
 #define __ASM_ARC_UDELAY_H
 
+#include <asm-generic/types.h>
 #include <asm/param.h>		/* HZ */
 
+extern unsigned long loops_per_jiffy;
+
 static inline void __delay(unsigned long loops)
 {
 	__asm__ __volatile__(
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -1038,7 +1038,7 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
 		      unsigned long pfn)
 {
-	unsigned int paddr = pfn << PAGE_SHIFT;
+	phys_addr_t paddr = pfn << PAGE_SHIFT;
 
 	u_vaddr &= PAGE_MASK;
 
@@ -1058,8 +1058,9 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
 		     unsigned long u_vaddr)
 {
 	/* TBD: do we really need to clear the kernel mapping */
-	__flush_dcache_page(page_address(page), u_vaddr);
-	__flush_dcache_page(page_address(page), page_address(page));
+	__flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
+	__flush_dcache_page((phys_addr_t)page_address(page),
+			    (phys_addr_t)page_address(page));
 
 }
 
@@ -1246,6 +1247,16 @@ void __init arc_cache_init_master(void)
 		}
 	}
 
+	/*
+	 * Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger
+	 * or equal to any cache line length.
+	 */
+	BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES,
+			 "SMP_CACHE_BYTES must be >= any cache line length");
+	if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES))
+		panic("L2 Cache line [%d] > kernel Config [%d]\n",
+		      l2_line_sz, SMP_CACHE_BYTES);
+
 	/* Note that SLC disable not formally supported till HS 3.0 */
 	if (is_isa_arcv2() && l2_line_sz && !slc_enable)
 		arc_slc_disable();
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -129,14 +129,59 @@ int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	return ret;
 }
 
+/*
+ * Cache operations depending on function and direction argument, inspired by
+ * https://lkml.org/lkml/2018/5/18/979
+ * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
+ * dma-mapping: provide a generic dma-noncoherent implementation)"
+ *
+ *          |   map          ==  for_device     |   unmap     ==  for_cpu
+ *          |----------------------------------------------------------------
+ * TO_DEV   |   writeback        writeback      |   none          none
+ * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
+ * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
+ *
+ *     [*] needed for CPU speculative prefetches
+ *
+ * NOTE: we don't check the validity of direction argument as it is done in
+ * upper layer functions (in include/linux/dma-mapping.h)
+ */
+
 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
 		size_t size, enum dma_data_direction dir)
 {
-	dma_cache_wback(paddr, size);
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		dma_cache_wback(paddr, size);
+		break;
+
+	case DMA_FROM_DEVICE:
+		dma_cache_inv(paddr, size);
+		break;
+
+	case DMA_BIDIRECTIONAL:
+		dma_cache_wback_inv(paddr, size);
+		break;
+
+	default:
+		break;
+	}
 }
 
 void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
 		size_t size, enum dma_data_direction dir)
 {
-	dma_cache_inv(paddr, size);
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		break;
+
+	/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
+	case DMA_FROM_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		dma_cache_inv(paddr, size);
+		break;
+
+	default:
+		break;
+	}
 }
 
--- a/arch/arc/plat-eznps/include/plat/ctop.h
+++ b/arch/arc/plat-eznps/include/plat/ctop.h
@@ -21,6 +21,7 @@
 #error "Incorrect ctop.h include"
 #endif
 
+#include <linux/types.h>
 #include <soc/nps/common.h>
 
 /* core auxiliary registers */
@@ -143,6 +144,15 @@ struct nps_host_reg_gim_p_int_dst {
 };
 
 /* AUX registers definition */
+struct nps_host_reg_aux_dpc {
+	union {
+		struct {
+			u32 ien:1, men:1, hen:1, reserved:29;
+		};
+		u32 value;
+	};
+};
+
 struct nps_host_reg_aux_udmc {
 	union {
 		struct {
--- a/arch/arc/plat-eznps/mtm.c
+++ b/arch/arc/plat-eznps/mtm.c
@@ -15,6 +15,8 @@
  */
 
 #include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/log2.h>
 #include <asm/arcregs.h>
@@ -157,10 +159,10 @@ void mtm_enable_core(unsigned int cpu)
 /* Verify and set the value of the mtm hs counter */
 static int __init set_mtm_hs_ctr(char *ctr_str)
 {
-	long hs_ctr;
+	int hs_ctr;
 	int ret;
 
-	ret = kstrtol(ctr_str, 0, &hs_ctr);
+	ret = kstrtoint(ctr_str, 0, &hs_ctr);
 
 	if (ret || hs_ctr > MT_HS_CNT_MAX || hs_ctr < MT_HS_CNT_MIN) {
 		pr_err("** Invalid @nps_mtm_hs_ctr [%d] needs to be [%d:%d] (incl)\n",