Staging: Merge 'tidspbridge-2.6.37-rc1' into staging-linus

This is a big revert of a lot of -rc1 tidspbridge patches in order to
get the driver back into a working state.  It also includes an OMAP patch
that was approved by the OMAP maintainer.

Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Greg Kroah-Hartman 2010-11-11 05:12:34 -08:00
Parents: 307ae1d3d0 50ad26f4c9
Commit: 94fb7c9c5d
32 changed files with 3470 additions and 571 deletions

View file

@@ -284,12 +284,14 @@ void __init omap_dsp_reserve_sdram_memblock(void)
if (!size)
return;
paddr = __memblock_alloc_base(size, SZ_1M, MEMBLOCK_REAL_LIMIT);
paddr = memblock_alloc(size, SZ_1M);
if (!paddr) {
pr_err("%s: failed to reserve %x bytes\n",
__func__, size);
return;
}
memblock_free(paddr, size);
memblock_remove(paddr, size);
omap_dsp_phys_mempool_base = paddr;
}
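
The restored hunk relies on a three-step memblock idiom: allocate a 1 MB-aligned block early at boot, then free it and remove it from the kernel's memory map so the page allocator never hands it out and the DSP owns the range. Below is a minimal sketch of that idiom against the 2.6.37-era memblock API; the function name and the carveout_size parameter are illustrative, not part of the patch.

#include <linux/memblock.h>

static phys_addr_t carveout_base;

/* Boot-time reservation of a DSP carveout, mirroring the hunk above. */
static void __init reserve_dsp_carveout(phys_addr_t carveout_size)
{
	/* Find a free, 1 MB-aligned physical range. */
	phys_addr_t paddr = memblock_alloc(carveout_size, SZ_1M);

	if (!paddr)
		return;
	/* Return it, then take it out of the kernel's memory map. */
	memblock_free(paddr, carveout_size);
	memblock_remove(paddr, carveout_size);
	carveout_base = paddr;
}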

View file

@@ -6,7 +6,6 @@ menuconfig TIDSPBRIDGE
tristate "DSP Bridge driver"
depends on ARCH_OMAP3
select OMAP_MBOX_FWK
select OMAP_IOMMU
help
DSP/BIOS Bridge is designed for platforms that contain a GPP and
one or more attached DSPs. The GPP is considered the master or

View file

@@ -2,18 +2,19 @@ obj-$(CONFIG_TIDSPBRIDGE) += bridgedriver.o
libgen = gen/gb.o gen/gs.o gen/gh.o gen/uuidutil.o
libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \
core/tiomap3430_pwr.o core/tiomap_io.o core/dsp-mmu.o \
core/tiomap3430_pwr.o core/tiomap_io.o \
core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o
libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/dspapi.o \
pmgr/cmm.o pmgr/dbll.o
pmgr/dmm.o pmgr/cmm.o pmgr/dbll.o
librmgr = rmgr/dbdcd.o rmgr/disp.o rmgr/drv.o rmgr/mgr.o rmgr/node.o \
rmgr/proc.o rmgr/pwr.o rmgr/rmm.o rmgr/strm.o rmgr/dspdrv.o \
rmgr/nldr.o rmgr/drv_interface.o
libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \
dynload/tramp.o
libhw = hw/hw_mmu.o
bridgedriver-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \
$(libdload)
$(libdload) $(libhw)
#Machine dependent
ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \

View file

@@ -27,8 +27,9 @@
struct deh_mgr {
struct bridge_dev_context *hbridge_context; /* Bridge context. */
struct ntfy_object *ntfy_obj; /* NTFY object */
/* MMU Fault DPC */
struct tasklet_struct dpc_tasklet;
};
int mmu_fault_isr(struct iommu *mmu);
#endif /* _DEH_ */

View file

@@ -23,8 +23,8 @@
#include <plat/clockdomain.h>
#include <mach-omap2/prm-regbits-34xx.h>
#include <mach-omap2/cm-regbits-34xx.h>
#include <dspbridge/dsp-mmu.h>
#include <dspbridge/devdefs.h>
#include <hw_defs.h>
#include <dspbridge/dspioctl.h> /* for bridge_ioctl_extproc defn */
#include <dspbridge/sync.h>
#include <dspbridge/clk.h>
@@ -306,18 +306,6 @@ static const struct bpwr_clk_t bpwr_clks[] = {
#define CLEAR_BIT_INDEX(reg, index) (reg &= ~(1 << (index)))
struct shm_segs {
u32 seg0_da;
u32 seg0_pa;
u32 seg0_va;
u32 seg0_size;
u32 seg1_da;
u32 seg1_pa;
u32 seg1_va;
u32 seg1_size;
};
/* This Bridge driver's device context: */
struct bridge_dev_context {
struct dev_object *hdev_obj; /* Handle to Bridge device object. */
@@ -328,6 +316,7 @@ struct bridge_dev_context {
*/
u32 dw_dsp_ext_base_addr; /* See the comment above */
u32 dw_api_reg_base; /* API mem map'd registers */
void __iomem *dw_dsp_mmu_base; /* DSP MMU Mapped registers */
u32 dw_api_clk_base; /* CLK Registers */
u32 dw_dsp_clk_m2_base; /* DSP Clock Module m2 */
u32 dw_public_rhea; /* Pub Rhea */
@@ -339,8 +328,7 @@ struct bridge_dev_context {
u32 dw_internal_size; /* Internal memory size */
struct omap_mbox *mbox; /* Mail box handle */
struct iommu *dsp_mmu; /* iommu for iva2 handler */
struct shm_segs sh_s;
struct cfg_hostres *resources; /* Host Resources */
/*
@@ -353,6 +341,7 @@ struct bridge_dev_context {
/* TC Settings */
bool tc_word_swap_on; /* Traffic Controller Word Swap */
struct pg_table_attrs *pt_attrs;
u32 dsp_per_clks;
};

View file

@@ -1,317 +0,0 @@
/*
* dsp-mmu.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* DSP iommu.
*
* Copyright (C) 2010 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#include <dspbridge/host_os.h>
#include <plat/dmtimer.h>
#include <dspbridge/dbdefs.h>
#include <dspbridge/dev.h>
#include <dspbridge/io_sm.h>
#include <dspbridge/dspdeh.h>
#include "_tiomap.h"
#include <dspbridge/dsp-mmu.h>
#define MMU_CNTL_TWL_EN (1 << 2)
static struct tasklet_struct mmu_tasklet;
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
{
void *dummy_addr;
u32 fa, tmp;
struct iotlb_entry e;
struct iommu *mmu = dev_context->dsp_mmu;
dummy_addr = (void *)__get_free_page(GFP_ATOMIC);
/*
* Before acking the MMU fault, let's make sure MMU can only
* access entry #0. Then add a new entry so that the DSP OS
* can continue in order to dump the stack.
*/
tmp = iommu_read_reg(mmu, MMU_CNTL);
tmp &= ~MMU_CNTL_TWL_EN;
iommu_write_reg(mmu, tmp, MMU_CNTL);
fa = iommu_read_reg(mmu, MMU_FAULT_AD);
e.da = fa & PAGE_MASK;
e.pa = virt_to_phys(dummy_addr);
e.valid = 1;
e.prsvd = 1;
e.pgsz = IOVMF_PGSZ_4K & MMU_CAM_PGSZ_MASK;
e.endian = MMU_RAM_ENDIAN_LITTLE;
e.elsz = MMU_RAM_ELSZ_32;
e.mixed = 0;
load_iotlb_entry(mmu, &e);
dsp_clk_enable(DSP_CLK_GPT8);
dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
/* Clear MMU interrupt */
tmp = iommu_read_reg(mmu, MMU_IRQSTATUS);
iommu_write_reg(mmu, tmp, MMU_IRQSTATUS);
dump_dsp_stack(dev_context);
dsp_clk_disable(DSP_CLK_GPT8);
iopgtable_clear_entry(mmu, fa);
free_page((unsigned long)dummy_addr);
}
#endif
static void fault_tasklet(unsigned long data)
{
struct iommu *mmu = (struct iommu *)data;
struct bridge_dev_context *dev_ctx;
struct deh_mgr *dm;
u32 fa;
dev_get_deh_mgr(dev_get_first(), &dm);
dev_get_bridge_context(dev_get_first(), &dev_ctx);
if (!dm || !dev_ctx)
return;
fa = iommu_read_reg(mmu, MMU_FAULT_AD);
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
print_dsp_trace_buffer(dev_ctx);
dump_dl_modules(dev_ctx);
mmu_fault_print_stack(dev_ctx);
#endif
bridge_deh_notify(dm, DSP_MMUFAULT, fa);
}
/*
* ======== mmu_fault_callback ========
* Callback triggered by a DSP MMU fault interrupt.
*/
static int mmu_fault_callback(struct iommu *mmu)
{
if (!mmu)
return -EPERM;
iommu_write_reg(mmu, 0, MMU_IRQENABLE);
tasklet_schedule(&mmu_tasklet);
return 0;
}
/**
* dsp_mmu_init() - initialize the dsp_mmu module and return a handle
*
* This function initializes the dsp mmu module and returns a struct iommu
* handle to use for dsp maps.
*
*/
struct iommu *dsp_mmu_init(void)
{
struct iommu *mmu;
mmu = iommu_get("iva2");
if (!IS_ERR(mmu)) {
tasklet_init(&mmu_tasklet, fault_tasklet, (unsigned long)mmu);
mmu->isr = mmu_fault_callback;
}
return mmu;
}
/**
* dsp_mmu_exit() - destroy dsp mmu module
* @mmu: Pointer to iommu handle.
*
* This function destroys the dsp mmu module.
*
*/
void dsp_mmu_exit(struct iommu *mmu)
{
if (mmu)
iommu_put(mmu);
tasklet_kill(&mmu_tasklet);
}
/**
* user_va2_pa() - get physical address from userspace address.
* @mm: mm_struct of the process.
* @address: Virtual user space address.
*
*/
static u32 user_va2_pa(struct mm_struct *mm, u32 address)
{
pgd_t *pgd;
pmd_t *pmd;
pte_t *ptep, pte;
pgd = pgd_offset(mm, address);
if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
pmd = pmd_offset(pgd, address);
if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
ptep = pte_offset_map(pmd, address);
if (ptep) {
pte = *ptep;
if (pte_present(pte))
return pte & PAGE_MASK;
}
}
}
return 0;
}
/**
* get_io_pages() - pin and get pages of io user's buffer.
* @mm: mm_struct of the process.
* @uva: Virtual user space address.
* @pages: Number of pages to be pinned.
* @usr_pgs: struct page array where the pinned user pages will be stored
*
*/
static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages,
struct page **usr_pgs)
{
u32 pa;
int i;
struct page *pg;
for (i = 0; i < pages; i++) {
pa = user_va2_pa(mm, uva);
if (!pfn_valid(__phys_to_pfn(pa)))
break;
pg = phys_to_page(pa);
usr_pgs[i] = pg;
get_page(pg);
}
return i;
}
/**
* user_to_dsp_map() - maps user to dsp virtual address
* @mmu: Pointer to iommu handle.
* @uva: Virtual user space address.
* @da: DSP address
* @size: Buffer size to map.
* @usr_pgs: struct page array where the user pages will be stored
*
* This function maps a user space buffer into the DSP virtual address space.
*
*/
u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
struct page **usr_pgs)
{
int res, w = 0;
unsigned pages;
int i;
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
struct sg_table *sgt;
struct scatterlist *sg;
if (!size || !usr_pgs)
return -EINVAL;
pages = size / PG_SIZE4K;
down_read(&mm->mmap_sem);
vma = find_vma(mm, uva);
while (vma && (uva + size > vma->vm_end))
vma = find_vma(mm, vma->vm_end + 1);
if (!vma) {
pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
__func__, uva, size);
up_read(&mm->mmap_sem);
return -EINVAL;
}
if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
w = 1;
if (vma->vm_flags & VM_IO)
i = get_io_pages(mm, uva, pages, usr_pgs);
else
i = get_user_pages(current, mm, uva, pages, w, 1,
usr_pgs, NULL);
up_read(&mm->mmap_sem);
if (i < 0)
return i;
if (i < pages) {
res = -EFAULT;
goto err_pages;
}
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
res = -ENOMEM;
goto err_pages;
}
res = sg_alloc_table(sgt, pages, GFP_KERNEL);
if (res < 0)
goto err_sg;
for_each_sg(sgt->sgl, sg, sgt->nents, i)
sg_set_page(sg, usr_pgs[i], PAGE_SIZE, 0);
da = iommu_vmap(mmu, da, sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
if (!IS_ERR_VALUE(da))
return da;
res = (int)da;
sg_free_table(sgt);
err_sg:
kfree(sgt);
i = pages;
err_pages:
while (i--)
put_page(usr_pgs[i]);
return res;
}
/**
* user_to_dsp_unmap() - unmaps DSP virtual buffer.
* @mmu: Pointer to iommu handle.
* @da: DSP address
*
* This function unmaps a user space buffer from the DSP virtual address space.
*
*/
int user_to_dsp_unmap(struct iommu *mmu, u32 da)
{
unsigned i;
struct sg_table *sgt;
struct scatterlist *sg;
sgt = iommu_vunmap(mmu, da);
if (!sgt)
return -EFAULT;
for_each_sg(sgt->sgl, sg, sgt->nents, i)
put_page(sg_page(sg));
sg_free_table(sgt);
kfree(sgt);
return 0;
}
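
Since this file is deleted wholesale by the revert, here is a hedged sketch of how its two public helpers were meant to pair up (map_example() and its arguments are illustrative): the caller owns a page array that must stay alive until unmap, and user_to_dsp_unmap() drops the page references taken during mapping.

static int map_example(struct iommu *mmu, u32 uva, u32 dsp_da, u32 size)
{
	struct page **pages;
	u32 da;

	/* One slot per 4 KB page pinned by user_to_dsp_map(). */
	pages = kcalloc(size / PG_SIZE4K, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	da = user_to_dsp_map(mmu, uva, dsp_da, size, pages);
	if (IS_ERR_VALUE(da)) {
		kfree(pages);
		return (int)da;
	}

	/* ... DSP works on the buffer ... */

	user_to_dsp_unmap(mmu, da);	/* also puts the pinned pages */
	kfree(pages);
	return 0;
}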

View file

@@ -39,6 +39,10 @@
#include <dspbridge/ntfy.h>
#include <dspbridge/sync.h>
/* Hardware Abstraction Layer */
#include <hw_defs.h>
#include <hw_mmu.h>
/* Bridge Driver */
#include <dspbridge/dspdeh.h>
#include <dspbridge/dspio.h>
@@ -287,7 +291,6 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
struct cod_manager *cod_man;
struct chnl_mgr *hchnl_mgr;
struct msg_mgr *hmsg_mgr;
struct shm_segs *sm_sg;
u32 ul_shm_base;
u32 ul_shm_base_offset;
u32 ul_shm_limit;
@@ -310,9 +313,18 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
struct cfg_hostres *host_res;
struct bridge_dev_context *pbridge_context;
u32 map_attrs;
u32 shm0_end;
u32 ul_dyn_ext_base;
u32 ul_seg1_size = 0;
u32 pa_curr = 0;
u32 va_curr = 0;
u32 gpp_va_curr = 0;
u32 num_bytes = 0;
u32 all_bits = 0;
u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
};
status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context);
if (!pbridge_context) {
@@ -325,8 +337,6 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
status = -EFAULT;
goto func_end;
}
sm_sg = &pbridge_context->sh_s;
status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man);
if (!cod_man) {
status = -EFAULT;
@@ -461,14 +471,129 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
if (status)
goto func_end;
sm_sg->seg1_pa = ul_gpp_pa;
sm_sg->seg1_da = ul_dyn_ext_base;
sm_sg->seg1_va = ul_gpp_va;
sm_sg->seg1_size = ul_seg1_size;
sm_sg->seg0_pa = ul_gpp_pa + ul_pad_size + ul_seg1_size;
sm_sg->seg0_da = ul_dsp_va;
sm_sg->seg0_va = ul_gpp_va + ul_pad_size + ul_seg1_size;
sm_sg->seg0_size = ul_seg_size;
pa_curr = ul_gpp_pa;
va_curr = ul_dyn_ext_base * hio_mgr->word_size;
gpp_va_curr = ul_gpp_va;
num_bytes = ul_seg1_size;
/*
* Try to fit into TLB entries. If not possible, push them to page
* tables. If sections do not start on a larger page boundary, we may
* end up creating several small pages; in that case, push them onto
* page tables as well.
*/
map_attrs = DSP_MAPLITTLEENDIAN;
map_attrs |= DSP_MAPPHYSICALADDR;
map_attrs |= DSP_MAPELEMSIZE32;
map_attrs |= DSP_MAPDONOTLOCK;
while (num_bytes) {
/*
* To find the max. page size with which both PA & VA are
* aligned.
*/
all_bits = pa_curr | va_curr;
dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, "
"num_bytes %x\n", all_bits, pa_curr, va_curr,
num_bytes);
for (i = 0; i < 4; i++) {
if ((num_bytes >= page_size[i]) && ((all_bits &
(page_size[i] -
1)) == 0)) {
status =
hio_mgr->intf_fxns->
pfn_brd_mem_map(hio_mgr->hbridge_context,
pa_curr, va_curr,
page_size[i], map_attrs,
NULL);
if (status)
goto func_end;
pa_curr += page_size[i];
va_curr += page_size[i];
gpp_va_curr += page_size[i];
num_bytes -= page_size[i];
/*
* Don't try smaller sizes. Hopefully we have
* reached an address aligned to a bigger page
* size.
*/
break;
}
}
}
pa_curr += ul_pad_size;
va_curr += ul_pad_size;
gpp_va_curr += ul_pad_size;
/* Configure the TLB entries for the next cacheable segment */
num_bytes = ul_seg_size;
va_curr = ul_dsp_va * hio_mgr->word_size;
while (num_bytes) {
/*
* To find the max. page size with which both PA & VA are
* aligned.
*/
all_bits = pa_curr | va_curr;
dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, "
"va_curr %x, num_bytes %x\n", all_bits, pa_curr,
va_curr, num_bytes);
for (i = 0; i < 4; i++) {
if (!(num_bytes >= page_size[i]) ||
!((all_bits & (page_size[i] - 1)) == 0))
continue;
if (ndx < MAX_LOCK_TLB_ENTRIES) {
/*
* This is the physical address written to
* DSP MMU.
*/
ae_proc[ndx].ul_gpp_pa = pa_curr;
/*
* This is the virtual uncached ioremapped
* address!!!
*/
ae_proc[ndx].ul_gpp_va = gpp_va_curr;
ae_proc[ndx].ul_dsp_va =
va_curr / hio_mgr->word_size;
ae_proc[ndx].ul_size = page_size[i];
ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
dev_dbg(bridge, "shm MMU TLB entry PA %x"
" VA %x DSP_VA %x Size %x\n",
ae_proc[ndx].ul_gpp_pa,
ae_proc[ndx].ul_gpp_va,
ae_proc[ndx].ul_dsp_va *
hio_mgr->word_size, page_size[i]);
ndx++;
} else {
status =
hio_mgr->intf_fxns->
pfn_brd_mem_map(hio_mgr->hbridge_context,
pa_curr, va_curr,
page_size[i], map_attrs,
NULL);
dev_dbg(bridge,
"shm MMU PTE entry PA %x"
" VA %x DSP_VA %x Size %x\n",
pa_curr, gpp_va_curr,
va_curr, page_size[i]);
if (status)
goto func_end;
}
pa_curr += page_size[i];
va_curr += page_size[i];
gpp_va_curr += page_size[i];
num_bytes -= page_size[i];
/*
* Don't try smaller sizes. Hopefully we have reached
* an address aligned to a bigger page size.
*/
break;
}
}
/*
* Copy remaining entries from CDB. All entries are 1 MB and
@@ -509,12 +634,38 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
"DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa,
ae_proc[ndx].ul_dsp_va);
ndx++;
} else {
status = hio_mgr->intf_fxns->pfn_brd_mem_map
(hio_mgr->hbridge_context,
hio_mgr->ext_proc_info.ty_tlb[i].
ul_gpp_phys,
hio_mgr->ext_proc_info.ty_tlb[i].
ul_dsp_virt, 0x100000, map_attrs,
NULL);
}
}
if (status)
goto func_end;
}
map_attrs = DSP_MAPLITTLEENDIAN;
map_attrs |= DSP_MAPPHYSICALADDR;
map_attrs |= DSP_MAPELEMSIZE32;
map_attrs |= DSP_MAPDONOTLOCK;
/* Map the L4 peripherals */
i = 0;
while (l4_peripheral_table[i].phys_addr) {
status = hio_mgr->intf_fxns->pfn_brd_mem_map
(hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr,
l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
map_attrs, NULL);
if (status)
goto func_end;
i++;
}
for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
ae_proc[i].ul_dsp_va = 0;
ae_proc[i].ul_gpp_pa = 0;
@@ -537,12 +688,12 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
status = -EFAULT;
goto func_end;
} else {
if (sm_sg->seg0_da > ul_shm_base) {
if (ae_proc[0].ul_dsp_va > ul_shm_base) {
status = -EPERM;
goto func_end;
}
/* ul_shm_base may not be at ul_dsp_va address */
ul_shm_base_offset = (ul_shm_base - sm_sg->seg0_da) *
ul_shm_base_offset = (ul_shm_base - ae_proc[0].ul_dsp_va) *
hio_mgr->word_size;
/*
* bridge_dev_ctrl() will set dev context dsp-mmu info. In
@@ -566,7 +717,8 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
goto func_end;
}
/* Register SM */
status = register_shm_segs(hio_mgr, cod_man, sm_sg->seg0_pa);
status =
register_shm_segs(hio_mgr, cod_man, ae_proc[0].ul_gpp_pa);
}
hio_mgr->shared_mem = (struct shm *)ul_shm_base;
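
Both mapping loops above share one trick: OR-ing the current physical and DSP addresses merges their low-order bits, so a candidate page size fits only when neither address has a bit set below that size and the remaining byte count still covers it. A standalone sketch of the selection (pick_page_size() is illustrative, not a driver function): for pa_curr = 0x87100000 and va_curr = 0x11100000, all_bits = 0x97100000, so the 16 MB test fails on bit 20 while the 1 MB test passes.

/* Largest first: 16 MB, 1 MB, 64 KB, 4 KB, as in page_size[] above. */
static const u32 mmu_page_sizes[] = { 0x1000000, 0x100000, 0x10000, 0x1000 };

static u32 pick_page_size(u32 pa_curr, u32 va_curr, u32 num_bytes)
{
	u32 all_bits = pa_curr | va_curr;
	int i;

	for (i = 0; i < 4; i++) {
		/* Enough bytes left, and both addresses aligned? */
		if (num_bytes >= mmu_page_sizes[i] &&
		    !(all_bits & (mmu_page_sizes[i] - 1)))
			return mmu_page_sizes[i];
	}
	return 0;	/* unreachable while addresses stay 4 KB aligned */
}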

The diff for this file is not shown because it is too large.

View file

@@ -31,6 +31,10 @@
#include <dspbridge/dev.h>
#include <dspbridge/iodefs.h>
/* ------------------------------------ Hardware Abstraction Layer */
#include <hw_defs.h>
#include <hw_mmu.h>
#include <dspbridge/pwr_sh.h>
/* ----------------------------------- Bridge Driver */

View file

@@ -134,16 +134,17 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
if (!status) {
ul_tlb_base_virt =
dev_context->sh_s.seg0_da * DSPWORDSIZE;
dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
dw_ext_prog_virt_mem = dev_context->sh_s.seg0_va;
dw_ext_prog_virt_mem =
dev_context->atlb_entry[0].ul_gpp_va;
if (!trace_read) {
ul_shm_offset_virt =
ul_shm_base_virt - ul_tlb_base_virt;
ul_shm_offset_virt +=
PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base +
1, PAGE_SIZE * 16);
1, HW_PAGE_SIZE64KB);
dw_ext_prog_virt_mem -= ul_shm_offset_virt;
dw_ext_prog_virt_mem +=
(ul_ext_base - ul_dyn_ext_base);
@@ -317,9 +318,8 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
ret = -EPERM;
if (!ret) {
ul_tlb_base_virt = dev_context->sh_s.seg0_da *
DSPWORDSIZE;
ul_tlb_base_virt =
dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
if (symbols_reloaded) {
@@ -337,7 +337,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
ul_shm_base_virt - ul_tlb_base_virt;
if (trace_load) {
dw_ext_prog_virt_mem =
dev_context->sh_s.seg0_va;
dev_context->atlb_entry[0].ul_gpp_va;
} else {
dw_ext_prog_virt_mem = host_res->dw_mem_base[1];
dw_ext_prog_virt_mem +=
@@ -393,6 +393,7 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val)
omap_dspbridge_dev->dev.platform_data;
struct cfg_hostres *resources = dev_context->resources;
int status = 0;
u32 temp;
if (!dev_context->mbox)
return 0;
@@ -436,7 +437,7 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val)
omap_mbox_restore_ctx(dev_context->mbox);
/* Access MMU SYS CONFIG register to generate a short wakeup */
iommu_read_reg(dev_context->dsp_mmu, MMU_SYSCONFIG);
temp = readl(resources->dw_dmmu_base + 0x10);
dev_context->dw_brd_state = BRD_RUNNING;
} else if (dev_context->dw_brd_state == BRD_RETENTION) {

View file

@@ -31,6 +31,57 @@
#include <dspbridge/drv.h>
#include <dspbridge/wdt.h>
static u32 fault_addr;
static void mmu_fault_dpc(unsigned long data)
{
struct deh_mgr *deh = (void *)data;
if (!deh)
return;
bridge_deh_notify(deh, DSP_MMUFAULT, 0);
}
static irqreturn_t mmu_fault_isr(int irq, void *data)
{
struct deh_mgr *deh = data;
struct cfg_hostres *resources;
u32 event;
if (!deh)
return IRQ_HANDLED;
resources = deh->hbridge_context->resources;
if (!resources) {
dev_dbg(bridge, "%s: Failed to get Host Resources\n",
__func__);
return IRQ_HANDLED;
}
hw_mmu_event_status(resources->dw_dmmu_base, &event);
if (event == HW_MMU_TRANSLATION_FAULT) {
hw_mmu_fault_addr_read(resources->dw_dmmu_base, &fault_addr);
dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__,
event, fault_addr);
/*
* Schedule a DPC directly. In the future, it may be
* necessary to check if DSP MMU fault is intended for
* Bridge.
*/
tasklet_schedule(&deh->dpc_tasklet);
/* Disable the MMU events; otherwise, once we clear the fault it
* will start raising interrupts again */
hw_mmu_event_disable(resources->dw_dmmu_base,
HW_MMU_TRANSLATION_FAULT);
} else {
hw_mmu_event_disable(resources->dw_dmmu_base,
HW_MMU_ALL_INTERRUPTS);
}
return IRQ_HANDLED;
}
int bridge_deh_create(struct deh_mgr **ret_deh,
struct dev_object *hdev_obj)
{
@@ -58,9 +109,18 @@ int bridge_deh_create(struct deh_mgr **ret_deh,
}
ntfy_init(deh->ntfy_obj);
/* Create a MMUfault DPC */
tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh);
/* Fill in context structure */
deh->hbridge_context = hbridge_context;
/* Install ISR function for DSP MMU fault */
status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0,
"DspBridge\tiommu fault", deh);
if (status < 0)
goto err;
*ret_deh = deh;
return 0;
@@ -80,6 +140,11 @@ int bridge_deh_destroy(struct deh_mgr *deh)
ntfy_delete(deh->ntfy_obj);
kfree(deh->ntfy_obj);
}
/* Disable DSP MMU fault */
free_irq(INT_DSP_MMU_IRQ, deh);
/* Free DPC object */
tasklet_kill(&deh->dpc_tasklet);
/* Deallocate the DEH manager object */
kfree(deh);
@@ -101,6 +166,48 @@ int bridge_deh_register_notify(struct deh_mgr *deh, u32 event_mask,
return ntfy_unregister(deh->ntfy_obj, hnotification);
}
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
{
struct cfg_hostres *resources;
struct hw_mmu_map_attrs_t map_attrs = {
.endianism = HW_LITTLE_ENDIAN,
.element_size = HW_ELEM_SIZE16BIT,
.mixed_size = HW_MMU_CPUES,
};
void *dummy_va_addr;
resources = dev_context->resources;
dummy_va_addr = (void *)__get_free_page(GFP_ATOMIC);
/*
* Before acking the MMU fault, let's make sure MMU can only
* access entry #0. Then add a new entry so that the DSP OS
* can continue in order to dump the stack.
*/
hw_mmu_twl_disable(resources->dw_dmmu_base);
hw_mmu_tlb_flush_all(resources->dw_dmmu_base);
hw_mmu_tlb_add(resources->dw_dmmu_base,
virt_to_phys(dummy_va_addr), fault_addr,
HW_PAGE_SIZE4KB, 1,
&map_attrs, HW_SET, HW_SET);
dsp_clk_enable(DSP_CLK_GPT8);
dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
/* Clear MMU interrupt */
hw_mmu_event_ack(resources->dw_dmmu_base,
HW_MMU_TRANSLATION_FAULT);
dump_dsp_stack(dev_context);
dsp_clk_disable(DSP_CLK_GPT8);
hw_mmu_disable(resources->dw_dmmu_base);
free_page((unsigned long)dummy_va_addr);
}
#endif
static inline const char *event_to_string(int event)
{
switch (event) {
@@ -133,7 +240,13 @@ void bridge_deh_notify(struct deh_mgr *deh, int event, int info)
#endif
break;
case DSP_MMUFAULT:
dev_err(bridge, "%s: %s, addr=0x%x", __func__, str, info);
dev_err(bridge, "%s: %s, addr=0x%x", __func__,
str, fault_addr);
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
print_dsp_trace_buffer(dev_context);
dump_dl_modules(dev_context);
mmu_fault_print_stack(dev_context);
#endif
break;
default:
dev_err(bridge, "%s: %s", __func__, str);

View file

@@ -0,0 +1,41 @@
/*
* EasiGlobal.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Copyright (C) 2007 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef _EASIGLOBAL_H
#define _EASIGLOBAL_H
#include <linux/types.h>
/*
* DEFINE: READ_ONLY, WRITE_ONLY & READ_WRITE
*
* DESCRIPTION: Defines used to describe register types for EASI-checker tests.
*/
#define READ_ONLY 1
#define WRITE_ONLY 2
#define READ_WRITE 3
/*
* MACRO: _DEBUG_LEVEL1_EASI
*
* DESCRIPTION: A MACRO which can be used to indicate that a particular
* register access function was called.
*
* NOTE: We currently don't use this functionality.
*/
#define _DEBUG_LEVEL1_EASI(easi_num) ((void)0)
#endif /* _EASIGLOBAL_H */

View file

@@ -0,0 +1,76 @@
/*
* MMUAccInt.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Copyright (C) 2007 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef _MMU_ACC_INT_H
#define _MMU_ACC_INT_H
/* Mappings of level 1 EASI function numbers to function names */
#define EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32 (MMU_BASE_EASIL1 + 3)
#define EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32 (MMU_BASE_EASIL1 + 17)
#define EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32 (MMU_BASE_EASIL1 + 39)
#define EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 51)
#define EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32 (MMU_BASE_EASIL1 + 102)
#define EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 103)
#define EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32 (MMU_BASE_EASIL1 + 156)
#define EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32 (MMU_BASE_EASIL1 + 174)
#define EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 180)
#define EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 190)
#define EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32 (MMU_BASE_EASIL1 + 194)
#define EASIL1_MMUMMU_TTB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 198)
#define EASIL1_MMUMMU_LOCK_READ_REGISTER32 (MMU_BASE_EASIL1 + 203)
#define EASIL1_MMUMMU_LOCK_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 204)
#define EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32 (MMU_BASE_EASIL1 + 205)
#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32 (MMU_BASE_EASIL1 + 209)
#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32 (MMU_BASE_EASIL1 + 211)
#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32 (MMU_BASE_EASIL1 + 212)
#define EASIL1_MMUMMU_LD_TLB_READ_REGISTER32 (MMU_BASE_EASIL1 + 213)
#define EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 214)
#define EASIL1_MMUMMU_CAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 226)
#define EASIL1_MMUMMU_RAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 268)
#define EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 322)
/* Register offset address definitions */
#define MMU_MMU_SYSCONFIG_OFFSET 0x10
#define MMU_MMU_IRQSTATUS_OFFSET 0x18
#define MMU_MMU_IRQENABLE_OFFSET 0x1c
#define MMU_MMU_WALKING_ST_OFFSET 0x40
#define MMU_MMU_CNTL_OFFSET 0x44
#define MMU_MMU_FAULT_AD_OFFSET 0x48
#define MMU_MMU_TTB_OFFSET 0x4c
#define MMU_MMU_LOCK_OFFSET 0x50
#define MMU_MMU_LD_TLB_OFFSET 0x54
#define MMU_MMU_CAM_OFFSET 0x58
#define MMU_MMU_RAM_OFFSET 0x5c
#define MMU_MMU_GFLUSH_OFFSET 0x60
#define MMU_MMU_FLUSH_ENTRY_OFFSET 0x64
/* Bitfield mask and offset declarations */
#define MMU_MMU_SYSCONFIG_IDLE_MODE_MASK 0x18
#define MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET 3
#define MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK 0x1
#define MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET 0
#define MMU_MMU_WALKING_ST_TWL_RUNNING_MASK 0x1
#define MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET 0
#define MMU_MMU_CNTL_TWL_ENABLE_MASK 0x4
#define MMU_MMU_CNTL_TWL_ENABLE_OFFSET 2
#define MMU_MMU_CNTL_MMU_ENABLE_MASK 0x2
#define MMU_MMU_CNTL_MMU_ENABLE_OFFSET 1
#define MMU_MMU_LOCK_BASE_VALUE_MASK 0xfc00
#define MMU_MMU_LOCK_BASE_VALUE_OFFSET 10
#define MMU_MMU_LOCK_CURRENT_VICTIM_MASK 0x3f0
#define MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET 4
#endif /* _MMU_ACC_INT_H */

View file

@@ -0,0 +1,225 @@
/*
* MMURegAcM.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Copyright (C) 2007 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef _MMU_REG_ACM_H
#define _MMU_REG_ACM_H
#include <linux/io.h>
#include <EasiGlobal.h>
#include "MMUAccInt.h"
#if defined(USE_LEVEL_1_MACROS)
#define MMUMMU_SYSCONFIG_READ_REGISTER32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32),\
__raw_readl((base_address)+MMU_MMU_SYSCONFIG_OFFSET))
#define MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32(base_address, value)\
{\
const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
register u32 data = __raw_readl((base_address)+offset);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32);\
data &= ~(MMU_MMU_SYSCONFIG_IDLE_MODE_MASK);\
new_value <<= MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET;\
new_value &= MMU_MMU_SYSCONFIG_IDLE_MODE_MASK;\
new_value |= data;\
__raw_writel(new_value, base_address+offset);\
}
#define MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32(base_address, value)\
{\
const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
register u32 data = __raw_readl((base_address)+offset);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32);\
data &= ~(MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK);\
new_value <<= MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET;\
new_value &= MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK;\
new_value |= data;\
__raw_writel(new_value, base_address+offset);\
}
#define MMUMMU_IRQSTATUS_READ_REGISTER32(base_address)\
(_DEBUG_LEVEL1_EASI(easil1_mmummu_irqstatus_read_register32),\
__raw_readl((base_address)+MMU_MMU_IRQSTATUS_OFFSET))
#define MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_IRQSTATUS_OFFSET;\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32);\
__raw_writel(new_value, (base_address)+offset);\
}
#define MMUMMU_IRQENABLE_READ_REGISTER32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32),\
__raw_readl((base_address)+MMU_MMU_IRQENABLE_OFFSET))
#define MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32);\
__raw_writel(new_value, (base_address)+offset);\
}
#define MMUMMU_WALKING_STTWL_RUNNING_READ32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32),\
(((__raw_readl(((base_address)+(MMU_MMU_WALKING_ST_OFFSET))))\
& MMU_MMU_WALKING_ST_TWL_RUNNING_MASK) >>\
MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET))
#define MMUMMU_CNTLTWL_ENABLE_READ32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32),\
(((__raw_readl(((base_address)+(MMU_MMU_CNTL_OFFSET)))) &\
MMU_MMU_CNTL_TWL_ENABLE_MASK) >>\
MMU_MMU_CNTL_TWL_ENABLE_OFFSET))
#define MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, value)\
{\
const u32 offset = MMU_MMU_CNTL_OFFSET;\
register u32 data = __raw_readl((base_address)+offset);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32);\
data &= ~(MMU_MMU_CNTL_TWL_ENABLE_MASK);\
new_value <<= MMU_MMU_CNTL_TWL_ENABLE_OFFSET;\
new_value &= MMU_MMU_CNTL_TWL_ENABLE_MASK;\
new_value |= data;\
__raw_writel(new_value, base_address+offset);\
}
#define MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, value)\
{\
const u32 offset = MMU_MMU_CNTL_OFFSET;\
register u32 data = __raw_readl((base_address)+offset);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32);\
data &= ~(MMU_MMU_CNTL_MMU_ENABLE_MASK);\
new_value <<= MMU_MMU_CNTL_MMU_ENABLE_OFFSET;\
new_value &= MMU_MMU_CNTL_MMU_ENABLE_MASK;\
new_value |= data;\
__raw_writel(new_value, base_address+offset);\
}
#define MMUMMU_FAULT_AD_READ_REGISTER32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32),\
__raw_readl((base_address)+MMU_MMU_FAULT_AD_OFFSET))
#define MMUMMU_TTB_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_TTB_OFFSET;\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_TTB_WRITE_REGISTER32);\
__raw_writel(new_value, (base_address)+offset);\
}
#define MMUMMU_LOCK_READ_REGISTER32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_READ_REGISTER32),\
__raw_readl((base_address)+MMU_MMU_LOCK_OFFSET))
#define MMUMMU_LOCK_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_LOCK_OFFSET;\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_WRITE_REGISTER32);\
__raw_writel(new_value, (base_address)+offset);\
}
#define MMUMMU_LOCK_BASE_VALUE_READ32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32),\
(((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
MMU_MMU_LOCK_BASE_VALUE_MASK) >>\
MMU_MMU_LOCK_BASE_VALUE_OFFSET))
#define MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, value)\
{\
const u32 offset = MMU_MMU_LOCK_OFFSET;\
register u32 data = __raw_readl((base_address)+offset);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(easil1_mmummu_lock_base_value_write32);\
data &= ~(MMU_MMU_LOCK_BASE_VALUE_MASK);\
new_value <<= MMU_MMU_LOCK_BASE_VALUE_OFFSET;\
new_value &= MMU_MMU_LOCK_BASE_VALUE_MASK;\
new_value |= data;\
__raw_writel(new_value, base_address+offset);\
}
#define MMUMMU_LOCK_CURRENT_VICTIM_READ32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32),\
(((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
MMU_MMU_LOCK_CURRENT_VICTIM_MASK) >>\
MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET))
#define MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, value)\
{\
const u32 offset = MMU_MMU_LOCK_OFFSET;\
register u32 data = __raw_readl((base_address)+offset);\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32);\
data &= ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK);\
new_value <<= MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET;\
new_value &= MMU_MMU_LOCK_CURRENT_VICTIM_MASK;\
new_value |= data;\
__raw_writel(new_value, base_address+offset);\
}
#define MMUMMU_LOCK_CURRENT_VICTIM_SET32(var, value)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32),\
(((var) & ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK)) |\
(((value) << MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET) &\
MMU_MMU_LOCK_CURRENT_VICTIM_MASK)))
#define MMUMMU_LD_TLB_READ_REGISTER32(base_address)\
(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_READ_REGISTER32),\
__raw_readl((base_address)+MMU_MMU_LD_TLB_OFFSET))
#define MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_LD_TLB_OFFSET;\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32);\
__raw_writel(new_value, (base_address)+offset);\
}
#define MMUMMU_CAM_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_CAM_OFFSET;\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CAM_WRITE_REGISTER32);\
__raw_writel(new_value, (base_address)+offset);\
}
#define MMUMMU_RAM_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_RAM_OFFSET;\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_RAM_WRITE_REGISTER32);\
__raw_writel(new_value, (base_address)+offset);\
}
#define MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, value)\
{\
const u32 offset = MMU_MMU_FLUSH_ENTRY_OFFSET;\
register u32 new_value = (value);\
_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32);\
__raw_writel(new_value, (base_address)+offset);\
}
#endif /* USE_LEVEL_1_MACROS */
#endif /* _MMU_REG_ACM_H */
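
Every bitfield WRITE32 macro above performs the same masked read-modify-write. The equivalent as a plain C helper, for readability only (rmw_field() is illustrative and not part of the header):

static inline void rmw_field(void __iomem *base, u32 offset,
			     u32 mask, u32 shift, u32 value)
{
	u32 reg = __raw_readl(base + offset);

	reg &= ~mask;			/* clear the field */
	reg |= (value << shift) & mask;	/* shift and mask in the new value */
	__raw_writel(reg, base + offset);
}

/*
 * For example, MMUMMU_CNTLTWL_ENABLE_WRITE32(base, 1) behaves like:
 * rmw_field(base, MMU_MMU_CNTL_OFFSET, MMU_MMU_CNTL_TWL_ENABLE_MASK,
 *	     MMU_MMU_CNTL_TWL_ENABLE_OFFSET, 1);
 */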

View file

@@ -0,0 +1,58 @@
/*
* hw_defs.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Global HW definitions
*
* Copyright (C) 2007 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef _HW_DEFS_H
#define _HW_DEFS_H
/* Page size */
#define HW_PAGE_SIZE4KB 0x1000
#define HW_PAGE_SIZE64KB 0x10000
#define HW_PAGE_SIZE1MB 0x100000
#define HW_PAGE_SIZE16MB 0x1000000
/* hw_status: return type for HW API */
typedef long hw_status;
/* Macro used to set and clear any bit */
#define HW_CLEAR 0
#define HW_SET 1
/* hw_endianism_t: Enumerated Type used to specify the endianism
* Do NOT change these values. They are used as bit fields. */
enum hw_endianism_t {
HW_LITTLE_ENDIAN,
HW_BIG_ENDIAN
};
/* hw_element_size_t: Enumerated Type used to specify the element size
* Do NOT change these values. They are used as bit fields. */
enum hw_element_size_t {
HW_ELEM_SIZE8BIT,
HW_ELEM_SIZE16BIT,
HW_ELEM_SIZE32BIT,
HW_ELEM_SIZE64BIT
};
/* hw_idle_mode_t: Enumerated Type used to specify Idle modes */
enum hw_idle_mode_t {
HW_FORCE_IDLE,
HW_NO_IDLE,
HW_SMART_IDLE
};
#endif /* _HW_DEFS_H */

View file

@@ -0,0 +1,562 @@
/*
* hw_mmu.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* API definitions to setup MMU TLB and PTE
*
* Copyright (C) 2007 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#include <linux/io.h>
#include "MMURegAcM.h"
#include <hw_defs.h>
#include <hw_mmu.h>
#include <linux/types.h>
#include <linux/err.h>
#define MMU_BASE_VAL_MASK 0xFC00
#define MMU_PAGE_MAX 3
#define MMU_ELEMENTSIZE_MAX 3
#define MMU_ADDR_MASK 0xFFFFF000
#define MMU_TTB_MASK 0xFFFFC000
#define MMU_SECTION_ADDR_MASK 0xFFF00000
#define MMU_SSECTION_ADDR_MASK 0xFF000000
#define MMU_PAGE_TABLE_MASK 0xFFFFFC00
#define MMU_LARGE_PAGE_MASK 0xFFFF0000
#define MMU_SMALL_PAGE_MASK 0xFFFFF000
#define MMU_LOAD_TLB 0x00000001
#define MMU_GFLUSH 0x60
/*
* hw_mmu_page_size_t: Enumerated type used to specify the MMU page size (SLSS)
*/
enum hw_mmu_page_size_t {
HW_MMU_SECTION,
HW_MMU_LARGE_PAGE,
HW_MMU_SMALL_PAGE,
HW_MMU_SUPERSECTION
};
/*
* FUNCTION : mmu_flush_entry
*
* INPUTS:
*
* Identifier : base_address
* Type : const u32
* Description : Base Address of instance of MMU module
*
* RETURNS:
*
* Type : hw_status
* Description : 0 -- No errors occurred
* RET_BAD_NULL_PARAM -- A pointer
* parameter was set to NULL
*
* PURPOSE: : Flush the TLB entry pointed by the
* lock counter register
* even if this entry is set protected
*
* METHOD: : Check the Input parameter and Flush a
* single entry in the TLB.
*/
static hw_status mmu_flush_entry(const void __iomem *base_address);
/*
* FUNCTION : mmu_set_cam_entry
*
* INPUTS:
*
* Identifier : base_address
* Type : const u32
* Description : Base Address of instance of MMU module
*
* Identifier : page_sz
* Type : const u32
* Description : It indicates the page size
*
* Identifier : preserved_bit
* Type : const u32
* Description : It indicates the TLB entry is preserved entry
* or not
*
* Identifier : valid_bit
* Type : const u32
* Description : It indicates the TLB entry is valid entry or not
*
*
* Identifier : virtual_addr_tag
* Type : const u32
* Description : virtual Address
*
* RETURNS:
*
* Type : hw_status
* Description : 0 -- No errors occurred
* RET_BAD_NULL_PARAM -- A pointer parameter
* was set to NULL
* RET_PARAM_OUT_OF_RANGE -- Input Parameter out
* of Range
*
* PURPOSE: : Set MMU_CAM reg
*
* METHOD: : Check the Input parameters and set the CAM entry.
*/
static hw_status mmu_set_cam_entry(const void __iomem *base_address,
const u32 page_sz,
const u32 preserved_bit,
const u32 valid_bit,
const u32 virtual_addr_tag);
/*
* FUNCTION : mmu_set_ram_entry
*
* INPUTS:
*
* Identifier : base_address
* Type : const u32
* Description : Base Address of instance of MMU module
*
* Identifier : physical_addr
* Type : const u32
* Description : Physical address to which the corresponding
* virtual address should point
*
* Identifier : endianism
* Type : hw_endianism_t
* Description : endianism for the given page
*
* Identifier : element_size
* Type : hw_element_size_t
* Description : The element size ( 8,16, 32 or 64 bit)
*
* Identifier : mixed_size
* Type : hw_mmu_mixed_size_t
* Description : Element Size to follow CPU or TLB
*
* RETURNS:
*
* Type : hw_status
* Description : 0 -- No errors occurred
* RET_BAD_NULL_PARAM -- A pointer parameter
* was set to NULL
* RET_PARAM_OUT_OF_RANGE -- Input Parameter
* out of Range
*
* PURPOSE: : Set MMU_RAM reg
*
* METHOD: : Check the Input parameters and set the RAM entry.
*/
static hw_status mmu_set_ram_entry(const void __iomem *base_address,
const u32 physical_addr,
enum hw_endianism_t endianism,
enum hw_element_size_t element_size,
enum hw_mmu_mixed_size_t mixed_size);
/* HW FUNCTIONS */
hw_status hw_mmu_enable(const void __iomem *base_address)
{
hw_status status = 0;
MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_SET);
return status;
}
hw_status hw_mmu_disable(const void __iomem *base_address)
{
hw_status status = 0;
MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_CLEAR);
return status;
}
hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
u32 num_locked_entries)
{
hw_status status = 0;
MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, num_locked_entries);
return status;
}
hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
u32 victim_entry_num)
{
hw_status status = 0;
MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victim_entry_num);
return status;
}
hw_status hw_mmu_event_ack(const void __iomem *base_address, u32 irq_mask)
{
hw_status status = 0;
MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, irq_mask);
return status;
}
hw_status hw_mmu_event_disable(const void __iomem *base_address, u32 irq_mask)
{
hw_status status = 0;
u32 irq_reg;
irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg & ~irq_mask);
return status;
}
hw_status hw_mmu_event_enable(const void __iomem *base_address, u32 irq_mask)
{
hw_status status = 0;
u32 irq_reg;
irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg | irq_mask);
return status;
}
hw_status hw_mmu_event_status(const void __iomem *base_address, u32 *irq_mask)
{
hw_status status = 0;
*irq_mask = MMUMMU_IRQSTATUS_READ_REGISTER32(base_address);
return status;
}
hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, u32 *addr)
{
hw_status status = 0;
/* read values from register */
*addr = MMUMMU_FAULT_AD_READ_REGISTER32(base_address);
return status;
}
hw_status hw_mmu_ttb_set(const void __iomem *base_address, u32 ttb_phys_addr)
{
hw_status status = 0;
u32 load_ttb;
load_ttb = ttb_phys_addr & ~0x7FUL;
/* write values to register */
MMUMMU_TTB_WRITE_REGISTER32(base_address, load_ttb);
return status;
}
hw_status hw_mmu_twl_enable(const void __iomem *base_address)
{
hw_status status = 0;
MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_SET);
return status;
}
hw_status hw_mmu_twl_disable(const void __iomem *base_address)
{
hw_status status = 0;
MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_CLEAR);
return status;
}
hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtual_addr,
u32 page_sz)
{
hw_status status = 0;
u32 virtual_addr_tag;
enum hw_mmu_page_size_t pg_size_bits;
switch (page_sz) {
case HW_PAGE_SIZE4KB:
pg_size_bits = HW_MMU_SMALL_PAGE;
break;
case HW_PAGE_SIZE64KB:
pg_size_bits = HW_MMU_LARGE_PAGE;
break;
case HW_PAGE_SIZE1MB:
pg_size_bits = HW_MMU_SECTION;
break;
case HW_PAGE_SIZE16MB:
pg_size_bits = HW_MMU_SUPERSECTION;
break;
default:
return -EINVAL;
}
/* Generate the 20-bit tag from virtual address */
virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
mmu_set_cam_entry(base_address, pg_size_bits, 0, 0, virtual_addr_tag);
mmu_flush_entry(base_address);
return status;
}
hw_status hw_mmu_tlb_add(const void __iomem *base_address,
u32 physical_addr,
u32 virtual_addr,
u32 page_sz,
u32 entry_num,
struct hw_mmu_map_attrs_t *map_attrs,
s8 preserved_bit, s8 valid_bit)
{
hw_status status = 0;
u32 lock_reg;
u32 virtual_addr_tag;
enum hw_mmu_page_size_t mmu_pg_size;
/* Check the input parameters */
switch (page_sz) {
case HW_PAGE_SIZE4KB:
mmu_pg_size = HW_MMU_SMALL_PAGE;
break;
case HW_PAGE_SIZE64KB:
mmu_pg_size = HW_MMU_LARGE_PAGE;
break;
case HW_PAGE_SIZE1MB:
mmu_pg_size = HW_MMU_SECTION;
break;
case HW_PAGE_SIZE16MB:
mmu_pg_size = HW_MMU_SUPERSECTION;
break;
default:
return -EINVAL;
}
lock_reg = MMUMMU_LOCK_READ_REGISTER32(base_address);
/* Generate the 20-bit tag from virtual address */
virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
/* Write the fields in the CAM Entry Register */
mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, valid_bit,
virtual_addr_tag);
/* Write the different fields of the RAM Entry Register */
/* endianism of the page, element size of the page (8, 16, 32, 64 bit) */
mmu_set_ram_entry(base_address, physical_addr, map_attrs->endianism,
map_attrs->element_size, map_attrs->mixed_size);
/* Update the MMU Lock Register */
/* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */
MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, entry_num);
/* Enable loading of an entry in TLB by writing 1 into LD_TLB_REG */
MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, MMU_LOAD_TLB);
MMUMMU_LOCK_WRITE_REGISTER32(base_address, lock_reg);
return status;
}
hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
u32 physical_addr,
u32 virtual_addr,
u32 page_sz, struct hw_mmu_map_attrs_t *map_attrs)
{
hw_status status = 0;
u32 pte_addr, pte_val;
s32 num_entries = 1;
switch (page_sz) {
case HW_PAGE_SIZE4KB:
pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
virtual_addr &
MMU_SMALL_PAGE_MASK);
pte_val =
((physical_addr & MMU_SMALL_PAGE_MASK) |
(map_attrs->endianism << 9) | (map_attrs->
element_size << 4) |
(map_attrs->mixed_size << 11) | 2);
break;
case HW_PAGE_SIZE64KB:
num_entries = 16;
pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
virtual_addr &
MMU_LARGE_PAGE_MASK);
pte_val =
((physical_addr & MMU_LARGE_PAGE_MASK) |
(map_attrs->endianism << 9) | (map_attrs->
element_size << 4) |
(map_attrs->mixed_size << 11) | 1);
break;
case HW_PAGE_SIZE1MB:
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
virtual_addr &
MMU_SECTION_ADDR_MASK);
pte_val =
((((physical_addr & MMU_SECTION_ADDR_MASK) |
(map_attrs->endianism << 15) | (map_attrs->
element_size << 10) |
(map_attrs->mixed_size << 17)) & ~0x40000) | 0x2);
break;
case HW_PAGE_SIZE16MB:
num_entries = 16;
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
virtual_addr &
MMU_SSECTION_ADDR_MASK);
pte_val =
(((physical_addr & MMU_SSECTION_ADDR_MASK) |
(map_attrs->endianism << 15) | (map_attrs->
element_size << 10) |
(map_attrs->mixed_size << 17)
) | 0x40000 | 0x2);
break;
case HW_MMU_COARSE_PAGE_SIZE:
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
virtual_addr &
MMU_SECTION_ADDR_MASK);
pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1;
break;
default:
return -EINVAL;
}
while (--num_entries >= 0)
((u32 *) pte_addr)[num_entries] = pte_val;
return status;
}
hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtual_addr, u32 page_size)
{
hw_status status = 0;
u32 pte_addr;
s32 num_entries = 1;
switch (page_size) {
case HW_PAGE_SIZE4KB:
pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
virtual_addr &
MMU_SMALL_PAGE_MASK);
break;
case HW_PAGE_SIZE64KB:
num_entries = 16;
pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
virtual_addr &
MMU_LARGE_PAGE_MASK);
break;
case HW_PAGE_SIZE1MB:
case HW_MMU_COARSE_PAGE_SIZE:
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
virtual_addr &
MMU_SECTION_ADDR_MASK);
break;
case HW_PAGE_SIZE16MB:
num_entries = 16;
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
virtual_addr &
MMU_SSECTION_ADDR_MASK);
break;
default:
return -EINVAL;
}
while (--num_entries >= 0)
((u32 *) pte_addr)[num_entries] = 0;
return status;
}
/* mmu_flush_entry */
static hw_status mmu_flush_entry(const void __iomem *base_address)
{
hw_status status = 0;
u32 flush_entry_data = 0x1;
/* write values to register */
MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, flush_entry_data);
return status;
}
/* mmu_set_cam_entry */
static hw_status mmu_set_cam_entry(const void __iomem *base_address,
const u32 page_sz,
const u32 preserved_bit,
const u32 valid_bit,
const u32 virtual_addr_tag)
{
hw_status status = 0;
u32 mmu_cam_reg;
mmu_cam_reg = (virtual_addr_tag << 12);
mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (valid_bit << 2) |
(preserved_bit << 3);
/* write values to register */
MMUMMU_CAM_WRITE_REGISTER32(base_address, mmu_cam_reg);
return status;
}
/* mmu_set_ram_entry */
static hw_status mmu_set_ram_entry(const void __iomem *base_address,
const u32 physical_addr,
enum hw_endianism_t endianism,
enum hw_element_size_t element_size,
enum hw_mmu_mixed_size_t mixed_size)
{
hw_status status = 0;
u32 mmu_ram_reg;
mmu_ram_reg = (physical_addr & MMU_ADDR_MASK);
mmu_ram_reg = (mmu_ram_reg) | ((endianism << 9) | (element_size << 7) |
(mixed_size << 6));
/* write values to register */
MMUMMU_RAM_WRITE_REGISTER32(base_address, mmu_ram_reg);
return status;
}
void hw_mmu_tlb_flush_all(const void __iomem *base)
{
__raw_writeb(1, base + MMU_GFLUSH);
}
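
hw_mmu_tlb_add() bundles the whole locked-entry programming sequence: snapshot the lock register, write the CAM and RAM fields, point the current victim at the target entry, pulse LD_TLB, and restore the lock. A hedged sketch of loading one preserved 4 KB entry with this API, close to what the fault-recovery path in ue_deh.c does (load_one_entry() and its arguments are illustrative):

static void load_one_entry(void __iomem *dmmu_base, u32 pa, u32 da)
{
	struct hw_mmu_map_attrs_t attrs = {
		.endianism = HW_LITTLE_ENDIAN,
		.element_size = HW_ELEM_SIZE16BIT,
		.mixed_size = HW_MMU_CPUES,
	};

	hw_mmu_tlb_flush_all(dmmu_base);
	/* Entry 0, marked preserved and valid, mapping one 4 KB page. */
	hw_mmu_tlb_add(dmmu_base, pa, da, HW_PAGE_SIZE4KB, 0,
		       &attrs, HW_SET, HW_SET);
}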

View file

@@ -0,0 +1,163 @@
/*
* hw_mmu.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* MMU types and API declarations
*
* Copyright (C) 2007 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef _HW_MMU_H
#define _HW_MMU_H
#include <linux/types.h>
/* Bitmasks for interrupt sources */
#define HW_MMU_TRANSLATION_FAULT 0x2
#define HW_MMU_ALL_INTERRUPTS 0x1F
#define HW_MMU_COARSE_PAGE_SIZE 0x400
/* hw_mmu_mixed_size_t: Enumerated Type used to specify whether to follow
CPU/TLB Element size */
enum hw_mmu_mixed_size_t {
HW_MMU_TLBES,
HW_MMU_CPUES
};
/* hw_mmu_map_attrs_t: Struct containing MMU mapping attributes */
struct hw_mmu_map_attrs_t {
enum hw_endianism_t endianism;
enum hw_element_size_t element_size;
enum hw_mmu_mixed_size_t mixed_size;
bool donotlockmpupage;
};
extern hw_status hw_mmu_enable(const void __iomem *base_address);
extern hw_status hw_mmu_disable(const void __iomem *base_address);
extern hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
u32 num_locked_entries);
extern hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
u32 victim_entry_num);
/* For MMU faults */
extern hw_status hw_mmu_event_ack(const void __iomem *base_address,
u32 irq_mask);
extern hw_status hw_mmu_event_disable(const void __iomem *base_address,
u32 irq_mask);
extern hw_status hw_mmu_event_enable(const void __iomem *base_address,
u32 irq_mask);
extern hw_status hw_mmu_event_status(const void __iomem *base_address,
u32 *irq_mask);
extern hw_status hw_mmu_fault_addr_read(const void __iomem *base_address,
u32 *addr);
/* Set the TT base address */
extern hw_status hw_mmu_ttb_set(const void __iomem *base_address,
u32 ttb_phys_addr);
extern hw_status hw_mmu_twl_enable(const void __iomem *base_address);
extern hw_status hw_mmu_twl_disable(const void __iomem *base_address);
extern hw_status hw_mmu_tlb_flush(const void __iomem *base_address,
u32 virtual_addr, u32 page_sz);
extern hw_status hw_mmu_tlb_add(const void __iomem *base_address,
u32 physical_addr,
u32 virtual_addr,
u32 page_sz,
u32 entry_num,
struct hw_mmu_map_attrs_t *map_attrs,
s8 preserved_bit, s8 valid_bit);
/* For PTEs */
extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
u32 physical_addr,
u32 virtual_addr,
u32 page_sz,
struct hw_mmu_map_attrs_t *map_attrs);
extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
u32 virtual_addr, u32 page_size);
void hw_mmu_tlb_flush_all(const void __iomem *base);
static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va)
{
u32 pte_addr;
u32 va31_to20;
va31_to20 = va >> (20 - 2); /* (va >> 20) << 2 done in one shift */
va31_to20 &= 0xFFFFFFFCUL;
pte_addr = l1_base + va31_to20;
return pte_addr;
}
static inline u32 hw_mmu_pte_addr_l2(u32 l2_base, u32 va)
{
u32 pte_addr;
pte_addr = (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC);
return pte_addr;
}
static inline u32 hw_mmu_pte_coarse_l1(u32 pte_val)
{
u32 pte_coarse;
pte_coarse = pte_val & 0xFFFFFC00;
return pte_coarse;
}
static inline u32 hw_mmu_pte_size_l1(u32 pte_val)
{
u32 pte_size = 0;
if ((pte_val & 0x3) == 0x1) {
/* Points to L2 PT */
pte_size = HW_MMU_COARSE_PAGE_SIZE;
}
if ((pte_val & 0x3) == 0x2) {
if (pte_val & (1 << 18))
pte_size = HW_PAGE_SIZE16MB;
else
pte_size = HW_PAGE_SIZE1MB;
}
return pte_size;
}
static inline u32 hw_mmu_pte_size_l2(u32 pte_val)
{
u32 pte_size = 0;
if (pte_val & 0x2)
pte_size = HW_PAGE_SIZE4KB;
else if (pte_val & 0x1)
pte_size = HW_PAGE_SIZE64KB;
return pte_size;
}
#endif /* _HW_MMU_H */
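
A worked example of the two PTE-address helpers, with an illustrative va = 0x8732A000: va >> 18 yields 0x21CC, already word-aligned, so the L1 descriptor sits at l1_base + 0x21CC; if that descriptor points at a coarse table, (va >> 10) & 0x3FC yields 0xA8, so the L2 descriptor sits at (l2_base & 0xFFFFFC00) + 0xA8.

static void pte_addr_example(u32 l1_base, u32 l2_base)
{
	/* For va = 0x8732A000 (illustrative): */
	u32 l1_pte = hw_mmu_pte_addr_l1(l1_base, 0x8732A000);
	/* l1_pte == l1_base + 0x21CC */
	u32 l2_pte = hw_mmu_pte_addr_l2(l2_base, 0x8732A000);
	/* l2_pte == (l2_base & 0xFFFFFC00) + 0xA8 */

	(void)l1_pte;
	(void)l2_pte;
}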

View file

@@ -68,6 +68,7 @@ struct cfg_hostres {
void __iomem *dw_per_base;
u32 dw_per_pm_base;
u32 dw_core_pm_base;
void __iomem *dw_dmmu_base;
void __iomem *dw_sys_ctrl_base;
};

View file

@@ -27,6 +27,7 @@
#include <dspbridge/nodedefs.h>
#include <dspbridge/dispdefs.h>
#include <dspbridge/dspdefs.h>
#include <dspbridge/dmm.h>
#include <dspbridge/host_os.h>
/* ----------------------------------- This */
@@ -232,6 +233,29 @@ extern int dev_get_chnl_mgr(struct dev_object *hdev_obj,
extern int dev_get_cmm_mgr(struct dev_object *hdev_obj,
struct cmm_object **mgr);
/*
* ======== dev_get_dmm_mgr ========
* Purpose:
* Retrieve the handle to the dynamic memory manager created for this
* device.
* Parameters:
* hdev_obj: Handle to device object created with
* dev_create_device().
* *mgr: Ptr to location to store handle.
* Returns:
* 0: Success.
* -EFAULT: Invalid hdev_obj.
* Requires:
* mgr != NULL.
* DEV Initialized.
* Ensures:
* 0: *mgr contains a handle to a dynamic memory manager object,
* or NULL.
* else: *mgr is NULL.
*/
extern int dev_get_dmm_mgr(struct dev_object *hdev_obj,
struct dmm_object **mgr);
/*
* ======== dev_get_cod_mgr ========
* Purpose:

View file

@@ -0,0 +1,75 @@
/*
* dmm.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* The Dynamic Memory Mapping (DMM) module manages the DSP virtual address
* space that can be directly mapped to any MPU buffer or memory region.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef DMM_
#define DMM_
#include <dspbridge/dbdefs.h>
struct dmm_object;
/* DMM attributes used in dmm_create() */
struct dmm_mgrattrs {
u32 reserved;
};
#define DMMPOOLSIZE 0x4000000
/*
* ======== dmm_get_handle ========
* Purpose:
* Return the dynamic memory manager object for this device.
* This is typically called from the client process.
*/
extern int dmm_get_handle(void *hprocessor,
struct dmm_object **dmm_manager);
extern int dmm_reserve_memory(struct dmm_object *dmm_mgr,
u32 size, u32 *prsv_addr);
extern int dmm_un_reserve_memory(struct dmm_object *dmm_mgr,
u32 rsv_addr);
extern int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr,
u32 size);
extern int dmm_un_map_memory(struct dmm_object *dmm_mgr,
u32 addr, u32 *psize);
extern int dmm_destroy(struct dmm_object *dmm_mgr);
extern int dmm_delete_tables(struct dmm_object *dmm_mgr);
extern int dmm_create(struct dmm_object **dmm_manager,
struct dev_object *hdev_obj,
const struct dmm_mgrattrs *mgr_attrts);
extern bool dmm_init(void);
extern void dmm_exit(void);
extern int dmm_create_tables(struct dmm_object *dmm_mgr,
u32 addr, u32 size);
#ifdef DSP_DMM_DEBUG
u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr);
#endif
#endif /* DMM_ */
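
The API above implies a two-level flow: a stretch of DSP virtual space is reserved first, then sub-ranges inside it are marked mapped as buffers arrive. A hedged sketch of that pairing, with an illustrative wrapper and sizes:

static int dmm_example(struct dmm_object *dmm_mgr)
{
	u32 rsv_addr;
	int status;

	/* Reserve 1 MB of DSP virtual address space. */
	status = dmm_reserve_memory(dmm_mgr, 0x100000, &rsv_addr);
	if (status)
		return status;

	/* Mark the first 64 KB of it as mapped. */
	status = dmm_map_memory(dmm_mgr, rsv_addr, 0x10000);
	if (status)
		dmm_un_reserve_memory(dmm_mgr, rsv_addr);
	return status;
}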

View file

@@ -108,6 +108,12 @@ struct dmm_map_object {
struct bridge_dma_map_info dma_info;
};
/* Used for DMM reserved memory accounting */
struct dmm_rsv_object {
struct list_head link;
u32 dsp_reserved_addr;
};
/* New structure (member of process context) abstracts DMM resource info */
struct dspheap_res_object {
s32 heap_allocated; /* DMM status */
@@ -159,6 +165,10 @@ struct process_context {
struct list_head dmm_map_list;
spinlock_t dmm_map_lock;
/* DMM reserved memory resources */
struct list_head dmm_rsv_list;
spinlock_t dmm_rsv_lock;
/* DSP Heap resources */
struct dspheap_res_object *pdspheap_list;

View file

@@ -1,67 +0,0 @@
/*
* dsp-mmu.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* DSP iommu.
*
* Copyright (C) 2005-2010 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef _DSP_MMU_
#define _DSP_MMU_
#include <plat/iommu.h>
#include <plat/iovmm.h>
/**
* dsp_mmu_init() - initialize dsp_mmu module and returns a handle
*
* This function initializes the dsp mmu module and returns a struct iommu
* handle to use it for dsp maps.
*
*/
struct iommu *dsp_mmu_init(void);
/**
* dsp_mmu_exit() - destroy dsp mmu module
* @mmu: Pointer to iommu handle.
*
* This function destroys dsp mmu module.
*
*/
void dsp_mmu_exit(struct iommu *mmu);
/**
* user_to_dsp_map() - maps user to dsp virtual address
* @mmu: Pointer to iommu handle.
* @uva: Virtual user space address.
* @da: DSP address
* @size: Buffer size to map.
* @usr_pgs: struct page array pointer where the user pages will be stored
*
* This function maps a user space buffer into the DSP virtual address space.
*
*/
u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
struct page **usr_pgs);
/**
* user_to_dsp_unmap() - unmaps DSP virtual buffer.
* @mmu: Pointer to iommu handle.
* @da: DSP address
*
* This function unmaps a user space buffer from the DSP virtual address space.
*
*/
int user_to_dsp_unmap(struct iommu *mmu, u32 da);
#endif


@ -161,6 +161,48 @@ typedef int(*fxn_brd_memwrite) (struct bridge_dev_context
u32 dsp_addr, u32 ul_num_bytes,
u32 mem_type);
/*
* ======== bridge_brd_mem_map ========
* Purpose:
* Map an MPU memory region to DSP/IVA memory space
* Parameters:
* dev_ctxt: Handle to Bridge driver defined device info.
* ul_mpu_addr: MPU memory region start address.
* virt_addr: DSP/IVA memory region u8 address.
* ul_num_bytes: Number of bytes to map.
* map_attr: Mapping attributes (e.g. endianness).
* Returns:
* 0: Success.
* -EPERM: Other, unspecified error.
* Requires:
* dev_ctxt != NULL;
* Ensures:
*/
typedef int(*fxn_brd_memmap) (struct bridge_dev_context
* dev_ctxt, u32 ul_mpu_addr,
u32 virt_addr, u32 ul_num_bytes,
u32 map_attr,
struct page **mapped_pages);
/*
* ======== bridge_brd_mem_un_map ========
* Purpose:
* Unmap an MPU memory region from DSP/IVA memory space
* Parameters:
* dev_ctxt: Handle to Bridge driver defined device info.
* virt_addr: DSP/IVA memory region u8 address.
* ul_num_bytes: Number of bytes to unmap.
* Returns:
* 0: Success.
* -EPERM: Other, unspecified error.
* Requires:
* dev_ctxt != NULL;
* Ensures:
*/
typedef int(*fxn_brd_memunmap) (struct bridge_dev_context
* dev_ctxt,
u32 virt_addr, u32 ul_num_bytes);
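
For illustration, a stub matching the fxn_brd_memmap signature above (the
name is hypothetical; a real implementation would program the DSP MMU):

	/* Hypothetical skeleton of a Bridge driver's mem-map entry point. */
	static int example_brd_mem_map(struct bridge_dev_context *dev_ctxt,
				       u32 ul_mpu_addr, u32 virt_addr,
				       u32 ul_num_bytes, u32 map_attr,
				       struct page **mapped_pages)
	{
		if (!dev_ctxt)
			return -EFAULT;
		/* ... map ul_num_bytes at ul_mpu_addr to virt_addr ... */
		return 0;
	}
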
/*
* ======== bridge_brd_stop ========
* Purpose:
@ -951,6 +993,8 @@ struct bridge_drv_interface {
fxn_brd_setstate pfn_brd_set_state; /* Sets the Board State */
fxn_brd_memcopy pfn_brd_mem_copy; /* Copies DSP Memory */
fxn_brd_memwrite pfn_brd_mem_write; /* Write DSP Memory w/o halt */
fxn_brd_memmap pfn_brd_mem_map; /* Maps MPU mem to DSP mem */
fxn_brd_memunmap pfn_brd_mem_un_map; /* Unmaps MPU mem from DSP mem */
fxn_chnl_create pfn_chnl_create; /* Create channel manager. */
fxn_chnl_destroy pfn_chnl_destroy; /* Destroy channel manager. */
fxn_chnl_open pfn_chnl_open; /* Create a new channel. */


@ -19,6 +19,10 @@
#ifndef DSPIOCTL_
#define DSPIOCTL_
/* ------------------------------------ Hardware Abstraction Layer */
#include <hw_defs.h>
#include <hw_mmu.h>
/*
* Any IOCTLs at or above this value are reserved for standard Bridge driver
* interfaces.
@ -61,6 +65,9 @@ struct bridge_ioctl_extproc {
/* GPP virtual address. __va does not work for ioremapped addresses */
u32 ul_gpp_va;
u32 ul_size; /* Size of the mapped memory in bytes */
enum hw_endianism_t endianism;
enum hw_mmu_mixed_size_t mixed_mode;
enum hw_element_size_t elem_size;
};
#endif /* DSPIOCTL_ */


@ -550,6 +550,29 @@ extern int proc_map(void *hprocessor,
void **pp_map_addr, u32 ul_map_attr,
struct process_context *pr_ctxt);
/*
* ======== proc_reserve_memory ========
* Purpose:
* Reserve a virtually contiguous region of DSP address space.
* Parameters:
* hprocessor : The processor handle.
* ul_size : Size of the address space to reserve.
* pp_rsv_addr : Ptr to DSP side reserved u8 address.
* Returns:
* 0 : Success.
* -EFAULT : Invalid processor handle.
* -EPERM : General failure.
* -ENOMEM : Cannot reserve chunk of this size.
* Requires:
* pp_rsv_addr is not NULL
* PROC Initialized.
* Ensures:
* Details:
*/
extern int proc_reserve_memory(void *hprocessor,
u32 ul_size, void **pp_rsv_addr,
struct process_context *pr_ctxt);
/*
* ======== proc_un_map ========
* Purpose:
@ -572,4 +595,27 @@ extern int proc_map(void *hprocessor,
extern int proc_un_map(void *hprocessor, void *map_addr,
struct process_context *pr_ctxt);
/*
* ======== proc_un_reserve_memory ========
* Purpose:
* Frees a previously reserved region of DSP address space.
* Parameters:
* hprocessor : The processor handle.
* prsv_addr : Ptr to DSP side reserved u8 address.
* Returns:
* 0 : Success.
* -EFAULT : Invalid processor handle.
* -EPERM : General failure.
* -ENOENT : Cannot find a reserved region starting with this
* : address.
* Requires:
* prsv_addr is not NULL
* PROC Initialized.
* Ensures:
* Details:
*/
extern int proc_un_reserve_memory(void *hprocessor,
void *prsv_addr,
struct process_context *pr_ctxt);
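
The two calls are meant to be paired, as node.c does below. A sketch,
assuming hproc and pr_ctxt are a valid processor handle and process
context:

	/* Sketch: reserve a DSP VA region, release it on teardown. */
	void *rsv_addr;
	int status;

	status = proc_reserve_memory(hproc, 0x2000, &rsv_addr, pr_ctxt);
	if (!status) {
		/* ... proc_map()/proc_un_map() within the region ... */
		status = proc_un_reserve_memory(hproc, rsv_addr, pr_ctxt);
	}
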
#endif /* PROC_ */


@ -34,6 +34,7 @@
#include <dspbridge/cod.h>
#include <dspbridge/drv.h>
#include <dspbridge/proc.h>
#include <dspbridge/dmm.h>
/* ----------------------------------- Resource Manager */
#include <dspbridge/mgr.h>
@ -74,6 +75,7 @@ struct dev_object {
struct msg_mgr *hmsg_mgr; /* Message manager. */
struct io_mgr *hio_mgr; /* IO manager (CHNL, msg_ctrl) */
struct cmm_object *hcmm_mgr; /* SM memory manager. */
struct dmm_object *dmm_mgr; /* Dynamic memory manager. */
struct ldr_module *module_obj; /* Bridge Module handle. */
u32 word_size; /* DSP word size: quick access. */
struct drv_object *hdrv_obj; /* Driver Object */
@ -248,6 +250,9 @@ int dev_create_device(struct dev_object **device_obj,
/* Instantiate the DEH module */
status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj);
}
/* Create DMM mgr. */
status = dmm_create(&dev_obj->dmm_mgr,
(struct dev_object *)dev_obj, NULL);
}
/* Add the new DEV_Object to the global list: */
if (!status) {
@ -273,6 +278,8 @@ leave:
kfree(dev_obj->proc_list);
if (dev_obj->cod_mgr)
cod_delete(dev_obj->cod_mgr);
if (dev_obj->dmm_mgr)
dmm_destroy(dev_obj->dmm_mgr);
kfree(dev_obj);
}
@ -382,6 +389,11 @@ int dev_destroy_device(struct dev_object *hdev_obj)
dev_obj->hcmm_mgr = NULL;
}
if (dev_obj->dmm_mgr) {
dmm_destroy(dev_obj->dmm_mgr);
dev_obj->dmm_mgr = NULL;
}
/* Call the driver's bridge_dev_destroy() function: */
/* Require of DevDestroy */
if (dev_obj->hbridge_context) {
@ -461,6 +473,32 @@ int dev_get_cmm_mgr(struct dev_object *hdev_obj,
return status;
}
/*
* ======== dev_get_dmm_mgr ========
* Purpose:
* Retrieve the handle to the dynamic memory manager created for this
* device.
*/
int dev_get_dmm_mgr(struct dev_object *hdev_obj,
struct dmm_object **mgr)
{
int status = 0;
struct dev_object *dev_obj = hdev_obj;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(mgr != NULL);
if (hdev_obj) {
*mgr = dev_obj->dmm_mgr;
} else {
*mgr = NULL;
status = -EFAULT;
}
DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
return status;
}
/*
* ======== dev_get_cod_mgr ========
* Purpose:
@ -713,8 +751,10 @@ void dev_exit(void)
refs--;
if (refs == 0)
if (refs == 0) {
cmm_exit();
dmm_exit();
}
DBC_ENSURE(refs >= 0);
}
@ -726,12 +766,25 @@ void dev_exit(void)
*/
bool dev_init(void)
{
bool ret = true;
bool cmm_ret, dmm_ret, ret = true;
DBC_REQUIRE(refs >= 0);
if (refs == 0)
ret = cmm_init();
if (refs == 0) {
cmm_ret = cmm_init();
dmm_ret = dmm_init();
ret = cmm_ret && dmm_ret;
if (!ret) {
if (cmm_ret)
cmm_exit();
if (dmm_ret)
dmm_exit();
}
}
if (ret)
refs++;
@ -1065,6 +1118,8 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
STORE_FXN(fxn_brd_setstate, pfn_brd_set_state);
STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy);
STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write);
STORE_FXN(fxn_brd_memmap, pfn_brd_mem_map);
STORE_FXN(fxn_brd_memunmap, pfn_brd_mem_un_map);
STORE_FXN(fxn_chnl_create, pfn_chnl_create);
STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy);
STORE_FXN(fxn_chnl_open, pfn_chnl_open);


@ -0,0 +1,533 @@
/*
* dmm.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* The Dynamic Memory Manager (DMM) module manages the DSP Virtual address
* space that can be directly mapped to any MPU buffer or memory region
*
* Notes:
* Region: Generic memory entity having a start address and a size
* Chunk: Reserved region
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#include <linux/types.h>
/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/proc.h>
/* ----------------------------------- This */
#include <dspbridge/dmm.h>
/* ----------------------------------- Defines, Data Structures, Typedefs */
#define DMM_ADDR_VIRTUAL(a) \
(((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\
dyn_mem_map_beg)
#define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K)
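
The two macros are inverses over the mapping table. A worked example,
assuming an illustrative dyn_mem_map_beg of 0x20000000 and PG_SIZE4K of
0x1000:

	/* DMM_ADDR_TO_INDEX(0x20003000) == 3 */
	/* DMM_ADDR_VIRTUAL(virtual_mapping_table + 3) == 0x20003000 */
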
/* DMM Mgr */
struct dmm_object {
/* The DMM lock serializes access to the memory manager
 * across threads. */
spinlock_t dmm_lock; /* Lock to access dmm mgr */
};
/* ----------------------------------- Globals */
static u32 refs; /* module reference count */
struct map_page {
u32 region_size:15;
u32 mapped_size:15;
u32 reserved:1;
u32 mapped:1;
};
/* Create the free list */
static struct map_page *virtual_mapping_table;
static u32 free_region; /* The index of free region */
static u32 free_size;
static u32 dyn_mem_map_beg; /* The Beginning of dynamic memory mapping */
static u32 table_size; /* The size of virt and phys page tables */
/* ----------------------------------- Function Prototypes */
static struct map_page *get_region(u32 addr);
static struct map_page *get_free_region(u32 len);
static struct map_page *get_mapped_region(u32 addrs);
/* ======== dmm_create_tables ========
* Purpose:
* Create the table that holds the physical addresses of the buffer
* pages passed in by the user, and the table that holds information
* on the virtual memory reserved for the DSP.
*/
int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size)
{
struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
int status = 0;
status = dmm_delete_tables(dmm_obj);
if (!status) {
dyn_mem_map_beg = addr;
table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K;
/* Create the free list */
virtual_mapping_table = __vmalloc(table_size *
sizeof(struct map_page), GFP_KERNEL |
__GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
if (virtual_mapping_table == NULL)
status = -ENOMEM;
else {
/* On successful allocation,
* all entries are zero ('free') */
free_region = 0;
free_size = table_size * PG_SIZE4K;
virtual_mapping_table[0].region_size = table_size;
}
}
if (status)
pr_err("%s: failure, status 0x%x\n", __func__, status);
return status;
}
/*
* ======== dmm_create ========
* Purpose:
* Create a dynamic memory manager object.
*/
int dmm_create(struct dmm_object **dmm_manager,
struct dev_object *hdev_obj,
const struct dmm_mgrattrs *mgr_attrts)
{
struct dmm_object *dmm_obj = NULL;
int status = 0;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(dmm_manager != NULL);
*dmm_manager = NULL;
/* create, zero, and tag a dmm mgr object */
dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL);
if (dmm_obj != NULL) {
spin_lock_init(&dmm_obj->dmm_lock);
*dmm_manager = dmm_obj;
} else {
status = -ENOMEM;
}
return status;
}
/*
* ======== dmm_destroy ========
* Purpose:
* Release the dynamic memory manager resources.
*/
int dmm_destroy(struct dmm_object *dmm_mgr)
{
struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
int status = 0;
DBC_REQUIRE(refs > 0);
if (dmm_mgr) {
status = dmm_delete_tables(dmm_obj);
if (!status)
kfree(dmm_obj);
} else
status = -EFAULT;
return status;
}
/*
* ======== dmm_delete_tables ========
* Purpose:
* Delete DMM Tables.
*/
int dmm_delete_tables(struct dmm_object *dmm_mgr)
{
int status = 0;
DBC_REQUIRE(refs > 0);
/* Delete all DMM tables */
if (dmm_mgr)
vfree(virtual_mapping_table);
else
status = -EFAULT;
return status;
}
/*
* ======== dmm_exit ========
* Purpose:
* Discontinue usage of module; free resources when reference count
* reaches 0.
*/
void dmm_exit(void)
{
DBC_REQUIRE(refs > 0);
refs--;
}
/*
* ======== dmm_get_handle ========
* Purpose:
* Return the dynamic memory manager object for this device.
* This is typically called from the client process.
*/
int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
{
int status = 0;
struct dev_object *hdev_obj;
DBC_REQUIRE(refs > 0);
DBC_REQUIRE(dmm_manager != NULL);
if (hprocessor != NULL)
status = proc_get_dev_object(hprocessor, &hdev_obj);
else
hdev_obj = dev_get_first(); /* default */
if (!status)
status = dev_get_dmm_mgr(hdev_obj, dmm_manager);
return status;
}
/*
* ======== dmm_init ========
* Purpose:
* Initializes private state of DMM module.
*/
bool dmm_init(void)
{
bool ret = true;
DBC_REQUIRE(refs >= 0);
if (ret)
refs++;
DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
virtual_mapping_table = NULL;
table_size = 0;
return ret;
}
/*
* ======== dmm_map_memory ========
* Purpose:
* Add a mapping block to the reserved chunk. DMM assumes that this block
* will be mapped in the DSP/IVA's address space. DMM returns an error if a
* mapping overlaps another one. This function stores the info that will be
* required later while unmapping the block.
*/
int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size)
{
struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
struct map_page *chunk;
int status = 0;
spin_lock(&dmm_obj->dmm_lock);
/* Find the Reserved memory chunk containing the DSP block to
* be mapped */
chunk = (struct map_page *)get_region(addr);
if (chunk != NULL) {
/* Mark the region 'mapped', leave the 'reserved' info as-is */
chunk->mapped = true;
chunk->mapped_size = (size / PG_SIZE4K);
} else
status = -ENOENT;
spin_unlock(&dmm_obj->dmm_lock);
dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, "
"chunk %p", __func__, dmm_mgr, addr, size, status, chunk);
return status;
}
/*
* ======== dmm_reserve_memory ========
* Purpose:
* Reserve a chunk of virtually contiguous DSP/IVA address space.
*/
int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size,
u32 *prsv_addr)
{
int status = 0;
struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
struct map_page *node;
u32 rsv_addr = 0;
u32 rsv_size = 0;
spin_lock(&dmm_obj->dmm_lock);
/* Try to get a DSP chunk from the free list */
node = get_free_region(size);
if (node != NULL) {
/* DSP chunk of given size is available. */
rsv_addr = DMM_ADDR_VIRTUAL(node);
/* Calculate the number of entries to use */
rsv_size = size / PG_SIZE4K;
if (rsv_size < node->region_size) {
/* Mark remainder of free region */
node[rsv_size].mapped = false;
node[rsv_size].reserved = false;
node[rsv_size].region_size =
node->region_size - rsv_size;
node[rsv_size].mapped_size = 0;
}
/* get_free_region() returns a first-fit chunk, but we only
 * use what is requested. */
node->mapped = false;
node->reserved = true;
node->region_size = rsv_size;
node->mapped_size = 0;
/* Return the chunk's starting address */
*prsv_addr = rsv_addr;
} else
/* DSP chunk of given size is not available */
status = -ENOMEM;
spin_unlock(&dmm_obj->dmm_lock);
dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, "
"rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size,
prsv_addr, status, rsv_addr, rsv_size);
return status;
}
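
To make the split concrete: reserving two pages out of an eight-page
free region leaves the table as sketched below (indices are into
virtual_mapping_table; values illustrative):

	/* before: [0] reserved = 0, region_size = 8
	 * after:  [0] reserved = 1, region_size = 2
	 *         [2] reserved = 0, region_size = 6
	 */
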
/*
* ======== dmm_un_map_memory ========
* Purpose:
* Remove the mapped block from the reserved chunk.
*/
int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize)
{
struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
struct map_page *chunk;
int status = 0;
spin_lock(&dmm_obj->dmm_lock);
chunk = get_mapped_region(addr);
if (chunk == NULL)
status = -ENOENT;
if (!status) {
/* Unmap the region */
*psize = chunk->mapped_size * PG_SIZE4K;
chunk->mapped = false;
chunk->mapped_size = 0;
}
spin_unlock(&dmm_obj->dmm_lock);
dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, "
"chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk);
return status;
}
/*
* ======== dmm_un_reserve_memory ========
* Purpose:
* Free a chunk of reserved DSP/IVA address space.
*/
int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr)
{
struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
struct map_page *chunk;
u32 i;
int status = 0;
u32 chunk_size;
spin_lock(&dmm_obj->dmm_lock);
/* Find the chunk containing the reserved address */
chunk = get_mapped_region(rsv_addr);
if (chunk == NULL)
status = -ENOENT;
if (!status) {
/* Free all the mapped pages for this reserved region */
i = 0;
while (i < chunk->region_size) {
if (chunk[i].mapped) {
/* Remove mapping from the page tables. */
chunk_size = chunk[i].mapped_size;
/* Clear the mapping flags */
chunk[i].mapped = false;
chunk[i].mapped_size = 0;
i += chunk_size;
} else
i++;
}
/* Clear the flags (mark the region 'free') */
chunk->reserved = false;
/* NOTE: We do NOT coalesce free regions here.
 * Free regions are coalesced in get_free_region(), as it
 * traverses the whole mapping table
 */
}
spin_unlock(&dmm_obj->dmm_lock);
dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x chunk %p",
__func__, dmm_mgr, rsv_addr, status, chunk);
return status;
}
/*
* ======== get_region ========
* Purpose:
* Returns the region containing the specified memory address
*/
static struct map_page *get_region(u32 addr)
{
struct map_page *curr_region = NULL;
u32 i = 0;
if (virtual_mapping_table != NULL) {
/* find page mapped by this address */
i = DMM_ADDR_TO_INDEX(addr);
if (i < table_size)
curr_region = virtual_mapping_table + i;
}
dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n",
__func__, curr_region, free_region, free_size);
return curr_region;
}
/*
* ======== get_free_region ========
* Purpose:
* Returns the requested free region
*/
static struct map_page *get_free_region(u32 len)
{
struct map_page *curr_region = NULL;
u32 i = 0;
u32 region_size = 0;
u32 next_i = 0;
if (virtual_mapping_table == NULL)
return curr_region;
if (len > free_size) {
/* Find the largest free region
* (coalesce during the traversal) */
while (i < table_size) {
region_size = virtual_mapping_table[i].region_size;
next_i = i + region_size;
if (virtual_mapping_table[i].reserved == false) {
/* Coalesce, if possible */
if (next_i < table_size &&
virtual_mapping_table[next_i].reserved
== false) {
virtual_mapping_table[i].region_size +=
virtual_mapping_table
[next_i].region_size;
continue;
}
region_size *= PG_SIZE4K;
if (region_size > free_size) {
free_region = i;
free_size = region_size;
}
}
i = next_i;
}
}
if (len <= free_size) {
curr_region = virtual_mapping_table + free_region;
free_region += (len / PG_SIZE4K);
free_size -= len;
}
return curr_region;
}
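
The traversal above also merges adjacent free regions before the
largest-block bookkeeping. An illustrative trace:

	/* entries [4] free, region_size = 2 and [6] free, region_size = 3
	 * coalesce into [4] free, region_size = 5 on the next pass of the
	 * loop, and only then compete for free_region/free_size. */
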
/*
* ======== get_mapped_region ========
* Purpose:
* Returns the requested mapped region
*/
static struct map_page *get_mapped_region(u32 addrs)
{
u32 i = 0;
struct map_page *curr_region = NULL;
if (virtual_mapping_table == NULL)
return curr_region;
i = DMM_ADDR_TO_INDEX(addrs);
if (i < table_size && (virtual_mapping_table[i].mapped ||
virtual_mapping_table[i].reserved))
curr_region = virtual_mapping_table + i;
return curr_region;
}
#ifdef DSP_DMM_DEBUG
u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr)
{
struct map_page *curr_node = NULL;
u32 i;
u32 freemem = 0;
u32 bigsize = 0;
spin_lock(&dmm_mgr->dmm_lock);
if (virtual_mapping_table != NULL) {
for (i = 0; i < table_size; i +=
virtual_mapping_table[i].region_size) {
curr_node = virtual_mapping_table + i;
if (curr_node->reserved) {
/*printk("RESERVED size = 0x%x, "
"Map size = 0x%x\n",
(curr_node->region_size * PG_SIZE4K),
(curr_node->mapped == false) ? 0 :
(curr_node->mapped_size * PG_SIZE4K));
*/
} else {
/* printk("UNRESERVED size = 0x%x\n",
(curr_node->region_size * PG_SIZE4K));
*/
freemem += (curr_node->region_size * PG_SIZE4K);
if (curr_node->region_size > bigsize)
bigsize = curr_node->region_size;
}
}
}
spin_unlock(&dmm_mgr->dmm_lock);
printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
freemem / (1024 * 1024));
printk(KERN_INFO "Total DSP VA USED memory= %d Mbytes \n",
(((table_size * PG_SIZE4K) - freemem)) / (1024 * 1024));
printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes \n\n",
(bigsize * PG_SIZE4K / (1024 * 1024)));
return 0;
}
#endif


@ -993,10 +993,27 @@ u32 procwrap_register_notify(union trapped_args *args, void *pr_ctxt)
/*
* ======== procwrap_reserve_memory ========
*/
u32 __deprecated procwrap_reserve_memory(union trapped_args *args,
void *pr_ctxt)
u32 procwrap_reserve_memory(union trapped_args *args, void *pr_ctxt)
{
return 0;
int status;
void *prsv_addr;
void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
if ((args->args_proc_rsvmem.ul_size <= 0) ||
(args->args_proc_rsvmem.ul_size & (PG_SIZE4K - 1)) != 0)
return -EINVAL;
status = proc_reserve_memory(hprocessor,
args->args_proc_rsvmem.ul_size, &prsv_addr,
pr_ctxt);
if (!status) {
if (put_user(prsv_addr, args->args_proc_rsvmem.pp_rsv_addr)) {
status = -EINVAL;
proc_un_reserve_memory(args->args_proc_rsvmem.
hprocessor, prsv_addr, pr_ctxt);
}
}
return status;
}
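
The size check above accepts only nonzero multiples of PG_SIZE4K; for
example:

	/* ul_size = 0x5000: passes (0x5000 & 0xfff == 0)
	 * ul_size = 0x4800: fails, returns -EINVAL (0x4800 & 0xfff == 0x800)
	 */
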
/*
@ -1025,10 +1042,15 @@ u32 procwrap_un_map(union trapped_args *args, void *pr_ctxt)
/*
* ======== procwrap_un_reserve_memory ========
*/
u32 __deprecated procwrap_un_reserve_memory(union trapped_args *args,
void *pr_ctxt)
u32 procwrap_un_reserve_memory(union trapped_args *args, void *pr_ctxt)
{
return 0;
int status;
void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
status = proc_un_reserve_memory(hprocessor,
args->args_proc_unrsvmem.prsv_addr,
pr_ctxt);
return status;
}
/*


@ -146,6 +146,7 @@ int drv_remove_all_dmm_res_elements(void *process_ctxt)
struct process_context *ctxt = (struct process_context *)process_ctxt;
int status = 0;
struct dmm_map_object *temp_map, *map_obj;
struct dmm_rsv_object *temp_rsv, *rsv_obj;
/* Free DMM mapped memory resources */
list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
@ -155,6 +156,16 @@ int drv_remove_all_dmm_res_elements(void *process_ctxt)
pr_err("%s: proc_un_map failed!"
" status = 0x%xn", __func__, status);
}
/* Free DMM reserved memory resources */
list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) {
status = proc_un_reserve_memory(ctxt->hprocessor, (void *)
rsv_obj->dsp_reserved_addr,
ctxt);
if (status)
pr_err("%s: proc_un_reserve_memory failed!"
" status = 0x%xn", __func__, status);
}
return status;
}
@ -732,6 +743,7 @@ static int request_bridge_resources(struct cfg_hostres *res)
host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE);
dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]);
dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]);
dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
/* for 24xx the base port does not map the memory for DSP
 * internal memory. TODO: do an ioremap here */
@ -785,6 +797,8 @@ int drv_request_bridge_res_dsp(void **phost_resources)
OMAP_PER_PRM_SIZE);
host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
OMAP_CORE_PRM_SIZE);
host_res->dw_dmmu_base = ioremap(OMAP_DMMU_BASE,
OMAP_DMMU_SIZE);
dev_dbg(bridge, "dw_mem_base[0] 0x%x\n",
host_res->dw_mem_base[0]);
@ -796,6 +810,7 @@ int drv_request_bridge_res_dsp(void **phost_resources)
host_res->dw_mem_base[3]);
dev_dbg(bridge, "dw_mem_base[4] 0x%x\n",
host_res->dw_mem_base[4]);
dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
shm_size = drv_datap->shm_size;
if (shm_size >= 0x10000) {


@ -509,6 +509,8 @@ static int bridge_open(struct inode *ip, struct file *filp)
pr_ctxt->res_state = PROC_RES_ALLOCATED;
spin_lock_init(&pr_ctxt->dmm_map_lock);
INIT_LIST_HEAD(&pr_ctxt->dmm_map_list);
spin_lock_init(&pr_ctxt->dmm_rsv_lock);
INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list);
pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
if (pr_ctxt->node_id) {


@ -56,6 +56,7 @@
/* ----------------------------------- This */
#include <dspbridge/nodepriv.h>
#include <dspbridge/node.h>
#include <dspbridge/dmm.h>
/* Static/Dynamic Loader includes */
#include <dspbridge/dbll.h>
@ -316,6 +317,10 @@ int node_allocate(struct proc_object *hprocessor,
u32 mapped_addr = 0;
u32 map_attrs = 0x0;
struct dsp_processorstate proc_state;
#ifdef DSP_DMM_DEBUG
struct dmm_object *dmm_mgr;
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
#endif
void *node_res;
@ -425,12 +430,34 @@ int node_allocate(struct proc_object *hprocessor,
if (status)
goto func_cont;
status = proc_reserve_memory(hprocessor,
pnode->create_args.asa.task_arg_obj.
heap_size + PAGE_SIZE,
(void **)&(pnode->create_args.asa.
task_arg_obj.udsp_heap_res_addr),
pr_ctxt);
if (status) {
pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
__func__, status);
goto func_cont;
}
#ifdef DSP_DMM_DEBUG
status = dmm_get_handle(p_proc_object, &dmm_mgr);
if (!dmm_mgr) {
status = DSP_EHANDLE;
goto func_cont;
}
dmm_mem_map_dump(dmm_mgr);
#endif
map_attrs |= DSP_MAPLITTLEENDIAN;
map_attrs |= DSP_MAPELEMSIZE32;
map_attrs |= DSP_MAPVIRTUALADDR;
status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
pnode->create_args.asa.task_arg_obj.heap_size,
NULL, (void **)&mapped_addr, map_attrs,
(void *)pnode->create_args.asa.task_arg_obj.
udsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
pr_ctxt);
if (status)
pr_err("%s: Failed to map memory for Heap: 0x%x\n",
@ -2484,7 +2511,11 @@ static void delete_node(struct node_object *hnode,
struct stream_chnl stream;
struct node_msgargs node_msg_args;
struct node_taskargs task_arg_obj;
#ifdef DSP_DMM_DEBUG
struct dmm_object *dmm_mgr;
struct proc_object *p_proc_object =
(struct proc_object *)hnode->hprocessor;
#endif
int status;
if (!hnode)
goto func_end;
@ -2545,6 +2576,19 @@ static void delete_node(struct node_object *hnode,
status = proc_un_map(hnode->hprocessor, (void *)
task_arg_obj.udsp_heap_addr,
pr_ctxt);
status = proc_un_reserve_memory(hnode->hprocessor,
(void *)
task_arg_obj.
udsp_heap_res_addr,
pr_ctxt);
#ifdef DSP_DMM_DEBUG
status = dmm_get_handle(p_proc_object, &dmm_mgr);
if (dmm_mgr)
dmm_mem_map_dump(dmm_mgr);
else
status = DSP_EHANDLE;
#endif
}
}
if (node_type != NODE_MESSAGE) {


@ -39,6 +39,7 @@
#include <dspbridge/cod.h>
#include <dspbridge/dev.h>
#include <dspbridge/procpriv.h>
#include <dspbridge/dmm.h>
/* ----------------------------------- Resource Manager */
#include <dspbridge/mgr.h>
@ -51,7 +52,6 @@
#include <dspbridge/msg.h>
#include <dspbridge/dspioctl.h>
#include <dspbridge/drv.h>
#include <_tiomap.h>
/* ----------------------------------- This */
#include <dspbridge/proc.h>
@ -151,21 +151,34 @@ static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt,
return map_obj;
}
static int match_exact_map_obj(struct dmm_map_object *map_obj,
u32 dsp_addr, u32 size)
{
if (map_obj->dsp_addr == dsp_addr && map_obj->size != size)
pr_err("%s: addr match (0x%x), size don't (0x%x != 0x%x)\n",
__func__, dsp_addr, map_obj->size, size);
return map_obj->dsp_addr == dsp_addr &&
map_obj->size == size;
}
static void remove_mapping_information(struct process_context *pr_ctxt,
u32 dsp_addr)
u32 dsp_addr, u32 size)
{
struct dmm_map_object *map_obj;
pr_debug("%s: looking for virt 0x%x\n", __func__, dsp_addr);
pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__,
dsp_addr, size);
spin_lock(&pr_ctxt->dmm_map_lock);
list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x\n",
pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
__func__,
map_obj->mpu_addr,
map_obj->dsp_addr);
map_obj->dsp_addr,
map_obj->size);
if (map_obj->dsp_addr == dsp_addr) {
if (match_exact_map_obj(map_obj, dsp_addr, size)) {
pr_debug("%s: match, deleting map info\n", __func__);
list_del(&map_obj->link);
kfree(map_obj->dma_info.sg);
@ -1077,6 +1090,7 @@ int proc_load(void *hprocessor, const s32 argc_index,
s32 cnew_envp; /* " " in new_envp[] */
s32 nproc_id = 0; /* Anticipate MP version. */
struct dcd_manager *hdcd_handle;
struct dmm_object *dmm_mgr;
u32 dw_ext_end;
u32 proc_id;
int brd_state;
@ -1267,6 +1281,25 @@ int proc_load(void *hprocessor, const s32 argc_index,
if (!status)
status = cod_get_sym_value(cod_mgr, EXTEND,
&dw_ext_end);
/* Reset DMM structs and add an initial free chunk */
if (!status) {
status =
dev_get_dmm_mgr(p_proc_object->hdev_obj,
&dmm_mgr);
if (dmm_mgr) {
/* Set dw_ext_end to DMM START u8
* address */
dw_ext_end =
(dw_ext_end + 1) * DSPWORDSIZE;
/* DMM memory is from EXT_END */
status = dmm_create_tables(dmm_mgr,
dw_ext_end,
DMMPOOLSIZE);
} else {
status = -EFAULT;
}
}
}
}
/* Restore the original argv[0] */
@ -1319,10 +1352,12 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
{
u32 va_align;
u32 pa_align;
struct dmm_object *dmm_mgr;
u32 size_align;
int status = 0;
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
struct dmm_map_object *map_obj;
u32 tmp_addr = 0;
#ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK
if ((ul_map_attr & BUFMODE_MASK) != RBUF) {
@ -1347,30 +1382,33 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
}
/* Critical section */
mutex_lock(&proc_lock);
dmm_get_handle(p_proc_object, &dmm_mgr);
if (dmm_mgr)
status = dmm_map_memory(dmm_mgr, va_align, size_align);
else
status = -EFAULT;
/* Add mapping to the page tables. */
if (!status) {
/* Mapped address = MSB of VA | LSB of PA */
tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1)));
/* mapped memory resource tracking */
map_obj = add_mapping_info(pr_ctxt, pa_align, va_align,
map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr,
size_align);
if (!map_obj) {
if (!map_obj)
status = -ENOMEM;
} else {
va_align = user_to_dsp_map(
p_proc_object->hbridge_context->dsp_mmu,
pa_align, va_align, size_align,
map_obj->pages);
if (IS_ERR_VALUE(va_align))
status = (int)va_align;
}
else
status = (*p_proc_object->intf_fxns->pfn_brd_mem_map)
(p_proc_object->hbridge_context, pa_align, va_align,
size_align, ul_map_attr, map_obj->pages);
}
if (!status) {
/* Mapped address = MSB of VA | LSB of PA */
map_obj->dsp_addr = (va_align |
((u32)pmpu_addr & (PG_SIZE4K - 1)));
*pp_map_addr = (void *)map_obj->dsp_addr;
*pp_map_addr = (void *) tmp_addr;
} else {
remove_mapping_information(pr_ctxt, va_align);
remove_mapping_information(pr_ctxt, tmp_addr, size_align);
dmm_un_map_memory(dmm_mgr, va_align, &size_align);
}
mutex_unlock(&proc_lock);
@ -1462,6 +1500,55 @@ func_end:
return status;
}
/*
* ======== proc_reserve_memory ========
* Purpose:
* Reserve a virtually contiguous region of DSP address space.
*/
int proc_reserve_memory(void *hprocessor, u32 ul_size,
void **pp_rsv_addr,
struct process_context *pr_ctxt)
{
struct dmm_object *dmm_mgr;
int status = 0;
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
struct dmm_rsv_object *rsv_obj;
if (!p_proc_object) {
status = -EFAULT;
goto func_end;
}
status = dmm_get_handle(p_proc_object, &dmm_mgr);
if (!dmm_mgr) {
status = -EFAULT;
goto func_end;
}
status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr);
if (status != 0)
goto func_end;
/*
* A successful reserve should be followed by insertion of rsv_obj
* into dmm_rsv_list, so that reserved memory resource tracking
* remains up to date
*/
rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL);
if (rsv_obj) {
rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr;
spin_lock(&pr_ctxt->dmm_rsv_lock);
list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list);
spin_unlock(&pr_ctxt->dmm_rsv_lock);
}
func_end:
dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p "
"status 0x%x\n", __func__, hprocessor,
ul_size, pp_rsv_addr, status);
return status;
}
/*
* ======== proc_start ========
* Purpose:
@ -1610,7 +1697,9 @@ int proc_un_map(void *hprocessor, void *map_addr,
{
int status = 0;
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
struct dmm_object *dmm_mgr;
u32 va_align;
u32 size_align;
va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K);
if (!p_proc_object) {
@ -1618,11 +1707,24 @@ int proc_un_map(void *hprocessor, void *map_addr,
goto func_end;
}
status = dmm_get_handle(hprocessor, &dmm_mgr);
if (!dmm_mgr) {
status = -EFAULT;
goto func_end;
}
/* Critical section */
mutex_lock(&proc_lock);
/*
* Update DMM structures. Get the size to unmap.
* This function returns error if the VA is not mapped
*/
status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align);
/* Remove mapping from the page tables. */
status = user_to_dsp_unmap(p_proc_object->hbridge_context->dsp_mmu,
va_align);
if (!status) {
status = (*p_proc_object->intf_fxns->pfn_brd_mem_un_map)
(p_proc_object->hbridge_context, va_align, size_align);
}
mutex_unlock(&proc_lock);
if (status)
@ -1633,7 +1735,7 @@ int proc_un_map(void *hprocessor, void *map_addr,
* from dmm_map_list, so that mapped memory resource tracking
* remains up to date
*/
remove_mapping_information(pr_ctxt, (u32) map_addr);
remove_mapping_information(pr_ctxt, (u32) map_addr, size_align);
func_end:
dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
@ -1641,6 +1743,55 @@ func_end:
return status;
}
/*
* ======== proc_un_reserve_memory ========
* Purpose:
* Frees a previously reserved region of DSP address space.
*/
int proc_un_reserve_memory(void *hprocessor, void *prsv_addr,
struct process_context *pr_ctxt)
{
struct dmm_object *dmm_mgr;
int status = 0;
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
struct dmm_rsv_object *rsv_obj;
if (!p_proc_object) {
status = -EFAULT;
goto func_end;
}
status = dmm_get_handle(p_proc_object, &dmm_mgr);
if (!dmm_mgr) {
status = -EFAULT;
goto func_end;
}
status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr);
if (status != 0)
goto func_end;
/*
* A successful unreserve should be followed by removal of rsv_obj
* from dmm_rsv_list, so that reserved memory resource tracking
* remains up to date
*/
spin_lock(&pr_ctxt->dmm_rsv_lock);
list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) {
if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) {
list_del(&rsv_obj->link);
kfree(rsv_obj);
break;
}
}
spin_unlock(&pr_ctxt->dmm_rsv_lock);
func_end:
dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n",
__func__, hprocessor, prsv_addr, status);
return status;
}
/*
* ======== proc_monitor ========
* Purpose: