Merge branch 'pci/resource-mmap'
- use generic pci_mmap_resource_range() instead of powerpc and xtensa
  arch-specific versions (David Woodhouse)

* pci/resource-mmap:
  xtensa/PCI: Use generic pci_mmap_resource_range()
  powerpc/pci: Use generic pci_mmap_resource_range()
This commit is contained in:
Commit
1e1b3201ab
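A note on the pattern (not part of the diff itself): the asm/pci.h hunks below boil down to a small opt-in recipe. An architecture keeps HAVE_PCI_MMAP, defines ARCH_GENERIC_PCI_MMAP_RESOURCE to get the generic pci_mmap_resource_range() implementation, and only has to supply pci_iobar_pfn() so the core can turn an I/O BAR into a page frame number. A minimal sketch of that recipe for a hypothetical architecture ("example" names are placeholders, not code from this commit):

/* Hypothetical arch/example/include/asm/pci.h fragment (sketch only). */
#define HAVE_PCI_MMAP                   1   /* mmap of PCI resources is supported        */
#define ARCH_GENERIC_PCI_MMAP_RESOURCE  1   /* use the generic pci_mmap_resource_range() */
#define arch_can_pci_mmap_io()          1   /* I/O port BARs may be mmap()ed as well     */

/* The one per-arch hook the generic I/O path needs: rewrite vma->vm_pgoff
 * to the physical pfn of this I/O BAR within the controller's window. */
int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);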
@@ -76,10 +76,11 @@ extern int pci_proc_domain(struct pci_bus *bus);
 
 struct vm_area_struct;
 
-/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() and it does WC */
+/* Tell PCI code what kind of PCI resource mappings we support */
 #define HAVE_PCI_MMAP 1
-#define arch_can_pci_mmap_io() 1
-#define arch_can_pci_mmap_wc() 1
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
+#define arch_can_pci_mmap_io() 1
+#define arch_can_pci_mmap_wc() 1
 
 extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
                            size_t count);
@@ -410,72 +410,22 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
 }
 
 /*
- * Platform support for /proc/bus/pci/X/Y mmap()s,
- * modelled on the sparc64 implementation by Dave Miller.
+ * Platform support for /proc/bus/pci/X/Y mmap()s.
  *  -- paulus.
  */
-
-/*
- * Adjust vm_pgoff of VMA such that it is the physical page offset
- * corresponding to the 32-bit pci bus offset for DEV requested by the user.
- *
- * Basically, the user finds the base address for his device which he wishes
- * to mmap.  They read the 32-bit value from the config space base register,
- * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
- * offset parameter of mmap on /proc/bus/pci/XXX for that device.
- *
- * Returns negative error code on failure, zero on success.
- */
-static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
-                                               resource_size_t *offset,
-                                               enum pci_mmap_state mmap_state)
+int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
 {
-        struct pci_controller *hose = pci_bus_to_host(dev->bus);
-        unsigned long io_offset = 0;
-        int i, res_bit;
+        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+        resource_size_t ioaddr = pci_resource_start(pdev, bar);
 
-        if (hose == NULL)
-                return NULL;            /* should never happen */
+        if (!hose)
+                return -EINVAL;
 
-        /* If memory, add on the PCI bridge address offset */
-        if (mmap_state == pci_mmap_mem) {
-#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
-                *offset += hose->pci_mem_offset;
-#endif
-                res_bit = IORESOURCE_MEM;
-        } else {
-                io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
-                *offset += io_offset;
-                res_bit = IORESOURCE_IO;
-        }
+        /* Convert to an offset within this PCI controller */
+        ioaddr -= (unsigned long)hose->io_base_virt - _IO_BASE;
 
-        /*
-         * Check that the offset requested corresponds to one of the
-         * resources of the device.
-         */
-        for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
-                struct resource *rp = &dev->resource[i];
-                int flags = rp->flags;
-
-                /* treat ROM as memory (should be already) */
-                if (i == PCI_ROM_RESOURCE)
-                        flags |= IORESOURCE_MEM;
-
-                /* Active and same type? */
-                if ((flags & res_bit) == 0)
-                        continue;
-
-                /* In the range of this resource? */
-                if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
-                        continue;
-
-                /* found it! construct the final physical address */
-                if (mmap_state == pci_mmap_io)
-                        *offset += hose->io_base_phys - io_offset;
-                return rp;
-        }
-
-        return NULL;
+        vma->vm_pgoff += (ioaddr + hose->io_base_phys) >> PAGE_SHIFT;
+        return 0;
 }
 
 /*
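The comment deleted above documented the userspace side of this interface: read the BAR base, choose a page-aligned offset, and pass it to mmap() on the per-device file. A minimal userspace sketch of that usage via the sysfs resource file (the device address and mapping size are placeholders; this program is not part of the commit):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        /* Placeholder device; resource0 corresponds to BAR 0. */
        const char *path = "/sys/bus/pci/devices/0000:00:00.0/resource0";
        size_t len = 4096;                      /* assume a one-page BAR */

        int fd = open(path, O_RDWR | O_SYNC);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* The offset is page-aligned and relative to the start of the BAR;
         * the kernel translates it to a physical address. */
        void *regs = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (regs == MAP_FAILED) {
                perror("mmap");
                close(fd);
                return 1;
        }

        /* ... access device registers through 'regs' ... */

        munmap(regs, len);
        close(fd);
        return 0;
}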
@@ -527,42 +477,6 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
         return prot;
 }
 
-
-/*
- * Perform the actual remap of the pages for a PCI device mapping, as
- * appropriate for this architecture.  The region in the process to map
- * is described by vm_start and vm_end members of VMA, the base physical
- * address is found in vm_pgoff.
- * The pci device structure is provided so that architectures may make mapping
- * decisions on a per-device or per-bus basis.
- *
- * Returns a negative error code on failure, zero on success.
- */
-int pci_mmap_page_range(struct pci_dev *dev, int bar,
-                        struct vm_area_struct *vma,
-                        enum pci_mmap_state mmap_state, int write_combine)
-{
-        resource_size_t offset =
-                ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
-        struct resource *rp;
-        int ret;
-
-        rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
-        if (rp == NULL)
-                return -EINVAL;
-
-        vma->vm_pgoff = offset >> PAGE_SHIFT;
-        if (write_combine)
-                vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
-        else
-                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-        ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-                              vma->vm_end - vma->vm_start, vma->vm_page_prot);
-
-        return ret;
-}
-
 /* This provides legacy IO read access on a bus */
 int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
 {
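For orientation, the generic helper that replaces the deleted pci_mmap_page_range() performs the same three steps in the PCI core: bounds-check the request against the BAR, rewrite vm_pgoff to a physical pfn (via pci_iobar_pfn() for I/O BARs), then set the page protection and remap. The following is a simplified paraphrase of that flow under the assumptions visible in this diff, not the upstream source:

/* Sketch of the generic flow; simplified, kernel context assumed. */
static int generic_mmap_resource_sketch(struct pci_dev *pdev, int bar,
                                        struct vm_area_struct *vma,
                                        int write_combine)
{
        unsigned long size = vma->vm_end - vma->vm_start;

        /* 1. The requested window must fit inside the BAR. */
        if (vma->vm_pgoff + (size >> PAGE_SHIFT) >
            ((pci_resource_len(pdev, bar) + PAGE_SIZE - 1) >> PAGE_SHIFT))
                return -EINVAL;

        /* 2. Turn vm_pgoff into a physical pfn: the arch hook handles I/O
         *    BARs, memory BARs just add the resource start. */
        if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
                int ret = pci_iobar_pfn(pdev, bar, vma);
                if (ret)
                        return ret;
        } else {
                vma->vm_pgoff += pci_resource_start(pdev, bar) >> PAGE_SHIFT;
        }

        /* 3. Pick caching attributes and remap, as the old arch code did. */
        vma->vm_page_prot = write_combine ?
                pgprot_writecombine(vma->vm_page_prot) :
                pgprot_noncached(vma->vm_page_prot);

        return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                  size, vma->vm_page_prot);
}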
@@ -44,9 +44,10 @@ extern struct pci_controller* pcibios_alloc_controller(void);
 
 #define PCI_DMA_BUS_IS_PHYS (1)
 
-/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
+/* Tell PCI code what kind of PCI resource mappings we support */
 #define HAVE_PCI_MMAP 1
-#define arch_can_pci_mmap_io() 1
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
+#define arch_can_pci_mmap_io() 1
 
 #endif /* __KERNEL__ */
 
@@ -39,7 +39,6 @@
  * pcibios_align_resource
  * pcibios_fixup_bus
  * pci_bus_add_device
- * pci_mmap_page_range
  */
 
 struct pci_controller* pci_ctrl_head;
@@ -258,98 +257,21 @@ pci_controller_num(struct pci_dev *dev)
 #endif /* CONFIG_PROC_FS */
 
 /*
- * Platform support for /proc/bus/pci/X/Y mmap()s,
- * modelled on the sparc64 implementation by Dave Miller.
+ * Platform support for /proc/bus/pci/X/Y mmap()s.
  *  -- paulus.
  */
-
-/*
- * Adjust vm_pgoff of VMA such that it is the physical page offset
- * corresponding to the 32-bit pci bus offset for DEV requested by the user.
- *
- * Basically, the user finds the base address for his device which he wishes
- * to mmap.  They read the 32-bit value from the config space base register,
- * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
- * offset parameter of mmap on /proc/bus/pci/XXX for that device.
- *
- * Returns negative error code on failure, zero on success.
- */
-static __inline__ int
-__pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
-                       enum pci_mmap_state mmap_state)
+int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
 {
-        struct pci_controller *pci_ctrl = (struct pci_controller*) dev->sysdata;
-        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
-        unsigned long io_offset = 0;
-        int i, res_bit;
+        struct pci_controller *pci_ctrl = (struct pci_controller*) pdev->sysdata;
+        resource_size_t ioaddr = pci_resource_start(pdev, bar);
 
         if (pci_ctrl == 0)
                 return -EINVAL;         /* should never happen */
 
-        /* If memory, add on the PCI bridge address offset */
-        if (mmap_state == pci_mmap_mem) {
-                res_bit = IORESOURCE_MEM;
-        } else {
-                io_offset = (unsigned long)pci_ctrl->io_space.base;
-                offset += io_offset;
-                res_bit = IORESOURCE_IO;
-        }
+        /* Convert to an offset within this PCI controller */
+        ioaddr -= (unsigned long)pci_ctrl->io_space.base;
 
-        /*
-         * Check that the offset requested corresponds to one of the
-         * resources of the device.
-         */
-        for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
-                struct resource *rp = &dev->resource[i];
-                int flags = rp->flags;
-
-                /* treat ROM as memory (should be already) */
-                if (i == PCI_ROM_RESOURCE)
-                        flags |= IORESOURCE_MEM;
-
-                /* Active and same type? */
-                if ((flags & res_bit) == 0)
-                        continue;
-
-                /* In the range of this resource? */
-                if (offset < (rp->start & PAGE_MASK) || offset > rp->end)
-                        continue;
-
-                /* found it! construct the final physical address */
-                if (mmap_state == pci_mmap_io)
-                        offset += pci_ctrl->io_space.start - io_offset;
-                vma->vm_pgoff = offset >> PAGE_SHIFT;
-                return 0;
-        }
-
-        return -EINVAL;
-}
-
-/*
- * Perform the actual remap of the pages for a PCI device mapping, as
- * appropriate for this architecture.  The region in the process to map
- * is described by vm_start and vm_end members of VMA, the base physical
- * address is found in vm_pgoff.
- * The pci device structure is provided so that architectures may make mapping
- * decisions on a per-device or per-bus basis.
- *
- * Returns a negative error code on failure, zero on success.
- */
-int pci_mmap_page_range(struct pci_dev *dev, int bar,
-                        struct vm_area_struct *vma,
-                        enum pci_mmap_state mmap_state,
-                        int write_combine)
-{
-        int ret;
-
-        ret = __pci_mmap_make_offset(dev, vma, mmap_state);
-        if (ret < 0)
-                return ret;
-
-        vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
-
-        ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-                                 vma->vm_end - vma->vm_start, vma->vm_page_prot);
-
-        return ret;
+        vma->vm_pgoff += (ioaddr + pci_ctrl->io_space.start) >> PAGE_SHIFT;
+        return 0;
 }
 
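Both new pci_iobar_pfn() implementations (powerpc above, xtensa here) do the same arithmetic with different field names: the BAR start on these platforms is a virtual I/O address, so it is rebased from the controller's virtual I/O window onto the physical window and then converted to a page frame number. A worked example with made-up addresses (powerpc's _IO_BASE treated as 0 and 4 KiB pages assumed; purely illustrative, not from this commit):

/* Illustrative numbers only -- not from real hardware. */
static unsigned long example_iobar_pfn(void)
{
        resource_size_t ioaddr       = 0xd0001000; /* pci_resource_start(pdev, bar)    */
        unsigned long   io_base_virt = 0xd0000000; /* controller's virtual I/O window  */
        resource_size_t io_base_phys = 0xf2000000; /* controller's physical I/O window */

        ioaddr -= io_base_virt;                    /* offset inside the window: 0x1000 */

        /* Physical address 0xf2001000 -> pfn 0xf2001 with 4 KiB pages; this
         * value is added to vma->vm_pgoff so io_remap_pfn_range() maps the
         * right page. */
        return (ioaddr + io_base_phys) >> PAGE_SHIFT;
}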