Clean up mostly unused IOSPACE macros
Most architectures defined three macros, MK_IOSPACE_PFN(), GET_IOSPACE() and GET_PFN(), in pgtable.h. However, the only callers of any of these macros are in sparc-specific code, either in arch/sparc, arch/sparc64 or drivers/sbus. This patch removes the redundant macros from all architectures except sparc and sparc64.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 53f049fa5f
Commit: 0bb5e19d63
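For context on why sparc and sparc64 keep these macros while every other architecture can drop them: on the sparc ports the pfn handed to io_remap_pfn_range() also carries an I/O bus space number packed into its upper bits, and MK_IOSPACE_PFN()/GET_IOSPACE()/GET_PFN() pack and unpack that encoding. The sketch below only illustrates that pattern; EXAMPLE_IOSPACE_SHIFT and the exact bit layout are assumptions for illustration, not the real sparc definitions.

/* Illustrative sketch only -- not the actual sparc definitions.        */
/* Assumed layout: I/O space number lives in the bits above the pfn.   */
#define EXAMPLE_IOSPACE_SHIFT	(BITS_PER_LONG - PAGE_SHIFT - 4)

#define MK_IOSPACE_PFN(space, pfn) \
	((pfn) | ((unsigned long)(space) << EXAMPLE_IOSPACE_SHIFT))
#define GET_IOSPACE(pfn)	((pfn) >> EXAMPLE_IOSPACE_SHIFT)
#define GET_PFN(pfn)		((pfn) & ((1UL << EXAMPLE_IOSPACE_SHIFT) - 1))

On every other architecture the same three names were defined as identity/no-op macros (exactly the lines removed in the hunks below), so no caller outside arch/sparc, arch/sparc64 and drivers/sbus ever depended on them.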
@@ -345,10 +345,6 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 #define io_remap_pfn_range(vma, start, pfn, size, prot) \
 	remap_pfn_range(vma, start, pfn, size, prot)
 
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
 #define pte_ERROR(e) \
 	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \
@@ -83,10 +83,6 @@ extern int is_in_rom(unsigned long);
 #define io_remap_page_range remap_page_range
 #define io_remap_pfn_range remap_pfn_range
 
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
 
 /*
  * All 32bit addresses are effectively valid for vmalloc...
@@ -395,10 +395,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 #define io_remap_pfn_range(vma,from,pfn,size,prot) \
 	remap_pfn_range(vma, from, pfn, size, prot)
 
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
 #define pgtable_cache_init() do { } while (0)
 
 #endif /* !__ASSEMBLY__ */
@@ -297,10 +297,6 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 #define io_remap_pfn_range(vma,from,pfn,size,prot) \
 	remap_pfn_range(vma, from, pfn, size, prot)
 
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASMARM_PGTABLE_H */
@@ -394,10 +394,6 @@ typedef pte_t *pte_addr_t;
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
 /* No page table caches to initialize (?) */
 #define pgtable_cache_init() do { } while(0)
 
@@ -509,10 +509,6 @@ static inline int pte_file(pte_t pte)
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
@@ -55,10 +55,6 @@ extern int is_in_rom(unsigned long);
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
 /*
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
@@ -542,10 +542,6 @@ static inline void paravirt_pagetable_setup_done(pgd_t *base)
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
 #include <asm-generic/pgtable.h>
 
 #endif /* _I386_PGTABLE_H */
@@ -485,10 +485,6 @@ extern void paging_init (void);
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
@@ -381,10 +381,6 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
@@ -143,10 +143,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
 /* MMU-specific headers */
 
 #ifdef CONFIG_SUN3
@@ -59,10 +59,6 @@ extern int is_in_rom(unsigned long);
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
 /*
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
@@ -387,10 +387,6 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 #endif
 
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
 #include <asm-generic/pgtable.h>
 
 /*
@@ -528,10 +528,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 
 #define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)
 
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
 /* We provide our own get_unmapped_area to provide cache coherency */
 
 #define HAVE_ARCH_UNMAPPED_AREA
@@ -827,10 +827,6 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 #endif
 
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
 /*
  * No page table caches to initialise
  */
@@ -568,10 +568,6 @@ typedef pte_t *pte_addr_t;
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
 struct mm_struct;
 
 /*
@@ -485,10 +485,6 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
 #endif /* !__ASSEMBLY__ */
 
 /*
@@ -417,10 +417,6 @@ extern int kern_addr_valid(unsigned long addr);
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
 #define HAVE_ARCH_UNMAPPED_AREA
 
 #define pgtable_cache_init() do { } while (0)