IOMMU Fixes for Linux v4.4-rc5

* Two similar fixes for the Intel and AMD IOMMU drivers to add
  proper access checks before calling handle_mm_fault.
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2.0.22 (GNU/Linux)
 
 iQIcBAABAgAGBQJWdCp7AAoJECvwRC2XARrjjAIP/0ihW2zF4R622RgY1C1Cm62j
 0eb/R4UqjI3PG0KsURgDHcIm9JP5Z//dgKTOtNX9KOkHlXLcO9MMSD5chVBd4HKG
 +Mgx7RM+Mr7f6ElRUa6s1GY1tcJlGf43fW5cMQ44BJIqVXlE47go4U09D86DVgXy
 KgyBxQldeOrkXZvAG82WLjGgkdGALQjbDlI8ktmfYWXAvIRWNGJqWY16BwAYOWfb
 9d3+1JPekSSBWHC6H+qbkDb8ueO69/Ux0HL5z2Q0zchqGjBb1gnfwLcz865KZpOB
 qUwsKFSXTl+jPCrAaLYJnVqAnH4qqKaF6WKAJSIHObTSVqXKHpFHrQrlGVzOvYNn
 s3216KIMsxG2nnvSgXCOFGqM/810MH2MSo8YcF5A3celrka3j2Gj08mxInrZXN7D
 3p51HSwq8ePo4i5jppT5ldOBSjNV9N3wKWcjDb4OL+OfkJc/u2VbSHNQtpvTclsV
 V6VSfWLDC8BCmUveMH2TrawQWkKOz0LqgqfQPX+VvSCIM7tgkrgVsTJrijPtGOs1
 zid/A/cfqMdBezSVALrZfB4OVBaM2UL2LJmmLJgApYV+N55Oxmx+nxnMr0aT5KlY
 crjcnVaypkq3rG1Wjpt+nTTwtllB0yXNEywQcu2edeswmaQCqsEgQRsDqi6S2/+S
 c8l9JKoTrB4+vToYjXyW
 =qrAB
 -----END PGP SIGNATURE-----

Merge tag 'iommu-fixes-v4.4-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU fixes from Joerg Roedel:
 "Two similar fixes for the Intel and AMD IOMMU drivers to add proper
  access checks before calling handle_mm_fault"

* tag 'iommu-fixes-v4.4-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/vt-d: Do access checks before calling handle_mm_fault()
  iommu/amd: Do proper access checking before calling handle_mm_fault()
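
Both fixes add the same kind of helper: the requested access type is translated into a VM_* permission mask, and the fault is rejected before handle_mm_fault() ever runs if the VMA does not grant every requested permission (the AMD driver previously only checked that the VMA allowed some access at all, not the specific access the device asked for). The standalone sketch below models that check in plain C; the VM_* values, struct names, and fields are stand-ins picked so the example compiles on its own, not the kernel's definitions.

/*
 * Minimal model of the access check added by both commits.
 * All names below are illustrative stand-ins, not kernel types.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define VM_READ  0x1UL
#define VM_WRITE 0x2UL
#define VM_EXEC  0x4UL

struct fake_vma {			/* stand-in for struct vm_area_struct */
	unsigned long vm_flags;		/* permissions granted by the mapping */
};

struct fake_fault {			/* stand-in for the drivers' request info */
	bool read, write, exec;		/* access types the device asked for */
};

/* Reject the request if it asks for any permission the VMA does not grant. */
static bool access_error(const struct fake_vma *vma, const struct fake_fault *f)
{
	unsigned long requested = 0;

	if (f->exec)
		requested |= VM_EXEC;
	if (f->read)
		requested |= VM_READ;
	if (f->write)
		requested |= VM_WRITE;

	return (requested & ~vma->vm_flags) != 0;
}

int main(void)
{
	struct fake_vma ro = { .vm_flags = VM_READ };	/* read-only mapping */
	struct fake_fault rd = { .read = true };
	struct fake_fault wr = { .write = true };

	assert(!access_error(&ro, &rd));	/* read of a readable VMA: allowed */
	assert(access_error(&ro, &wr));		/* write to a read-only VMA: denied */
	printf("read allowed, write denied\n");
	return 0;
}

In the kernel the failure case does not simply return: the AMD driver reports the fault back through handle_fault_error() and the VT-d driver answers the page request as invalid, as the hunks below show.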
Linus Torvalds committed 2015-12-18 12:38:35 -08:00
Parents: 3273cba195 7f8312a3b3
Commit: ccdd96be43
2 changed files with 38 additions and 2 deletions


@@ -494,6 +494,22 @@ static void handle_fault_error(struct fault *fault)
 	}
 }
 
+static bool access_error(struct vm_area_struct *vma, struct fault *fault)
+{
+	unsigned long requested = 0;
+
+	if (fault->flags & PPR_FAULT_EXEC)
+		requested |= VM_EXEC;
+
+	if (fault->flags & PPR_FAULT_READ)
+		requested |= VM_READ;
+
+	if (fault->flags & PPR_FAULT_WRITE)
+		requested |= VM_WRITE;
+
+	return (requested & ~vma->vm_flags) != 0;
+}
+
 static void do_fault(struct work_struct *work)
 {
 	struct fault *fault = container_of(work, struct fault, work);
@@ -516,8 +532,8 @@ static void do_fault(struct work_struct *work)
 		goto out;
 	}
 
-	if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) {
-		/* handle_mm_fault would BUG_ON() */
+	/* Check if we have the right permissions on the vma */
+	if (access_error(vma, fault)) {
 		up_read(&mm->mmap_sem);
 		handle_fault_error(fault);
 		goto out;


@@ -484,6 +484,23 @@ struct page_req_dsc {
 };
 
 #define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10)
+
+static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
+{
+	unsigned long requested = 0;
+
+	if (req->exe_req)
+		requested |= VM_EXEC;
+
+	if (req->rd_req)
+		requested |= VM_READ;
+
+	if (req->wr_req)
+		requested |= VM_WRITE;
+
+	return (requested & ~vma->vm_flags) != 0;
+}
+
 static irqreturn_t prq_event_thread(int irq, void *d)
 {
 	struct intel_iommu *iommu = d;
@@ -539,6 +556,9 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 		if (!vma || address < vma->vm_start)
 			goto invalid;
 
+		if (access_error(vma, req))
+			goto invalid;
+
 		ret = handle_mm_fault(svm->mm, vma, address,
 				      req->wr_req ? FAULT_FLAG_WRITE : 0);
 		if (ret & VM_FAULT_ERROR)
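
On the VT-d side the same check also determines when handle_mm_fault() runs at all: a request that fails the check is answered via the invalid path without touching the mm, and only a request that passes is handed to handle_mm_fault(), with FAULT_FLAG_WRITE chosen from the wr_req bit. A short standalone model of that ordering follows; service_request() and the types are hypothetical stand-ins for the driver's prq_event_thread() logic, not the real code.

/*
 * Illustrative model of the check-then-fault ordering on the page-request
 * path. All names and constants are stand-ins for the example only.
 */
#include <stdbool.h>
#include <stdio.h>

#define VM_READ		 0x1UL
#define VM_WRITE	 0x2UL
#define VM_EXEC		 0x4UL
#define FAULT_FLAG_WRITE 0x1U

struct fake_vma { unsigned long vm_flags; };
struct fake_req { bool rd_req, wr_req, exe_req; };	/* page-request bits */

static bool access_error(const struct fake_vma *vma, const struct fake_req *req)
{
	unsigned long requested = 0;

	if (req->exe_req)
		requested |= VM_EXEC;
	if (req->rd_req)
		requested |= VM_READ;
	if (req->wr_req)
		requested |= VM_WRITE;

	return (requested & ~vma->vm_flags) != 0;
}

/* Returns true if the request would reach the fault handler. */
static bool service_request(const struct fake_vma *vma, const struct fake_req *req)
{
	if (access_error(vma, req))
		return false;		/* answered as an invalid page request */

	/* Only now would handle_mm_fault() run; writes get FAULT_FLAG_WRITE. */
	unsigned int flags = req->wr_req ? FAULT_FLAG_WRITE : 0;
	printf("fault with flags=%#x\n", flags);
	return true;
}

int main(void)
{
	struct fake_vma ro = { .vm_flags = VM_READ };
	struct fake_req wr = { .wr_req = true };

	if (!service_request(&ro, &wr))
		printf("write to a read-only VMA rejected before handle_mm_fault\n");
	return 0;
}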