mm/migration: add trace events for base page and HugeTLB migrations

This adds two trace events for base page and HugeTLB page migrations.
These events closely follow implementation details such as the setting
and removal of PTE migration entries, which are essential operations
for migration.  The new CREATE_TRACE_POINTS in <mm/rmap.c> covers both
the <events/migrate.h> and <events/tlb.h> based trace events.  Hence
drop the now-redundant CREATE_TRACE_POINTS from other places, which
would otherwise have conflicted during build.
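
Both events share one record layout.  As a rough illustration (this
struct is made up for exposition; the real record is generated by the
tracing macros from TP_STRUCT__entry() below), each event logs:

	/* Illustrative only -- not a struct that exists in the tree. */
	struct migration_pte_record {
		unsigned long addr;	/* virtual address being remapped */
		unsigned long pte;	/* raw migration (or restored) PTE value */
		int order;		/* 0 for base pages, >0 for huge pages */
	};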

Link: https://lkml.kernel.org/r/1643368182-9588-3-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Reported-by: kernel test robot <lkp@intel.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Anshuman Khandual
Date:   2022-03-24 18:10:01 -07:00
Committed by: Linus Torvalds
Parent: 283fd6fe05
Commit: 4cc79b3303
4 changed files, 40 insertions(+), 2 deletions(-)

diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c

@@ -31,7 +31,6 @@
  * We need to define the tracepoints somewhere, and tlb.c
  * is only compiled when SMP=y.
  */
-#define CREATE_TRACE_POINTS
 #include <trace/events/tlb.h>
 
 #include "mm_internal.h"

diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h

@@ -105,6 +105,37 @@ TRACE_EVENT(mm_migrate_pages_start,
 		__print_symbolic(__entry->reason, MIGRATE_REASON))
 );
+
+DECLARE_EVENT_CLASS(migration_pte,
+
+	TP_PROTO(unsigned long addr, unsigned long pte, int order),
+
+	TP_ARGS(addr, pte, order),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, addr)
+		__field(unsigned long, pte)
+		__field(int, order)
+	),
+
+	TP_fast_assign(
+		__entry->addr = addr;
+		__entry->pte = pte;
+		__entry->order = order;
+	),
+
+	TP_printk("addr=%lx, pte=%lx order=%d", __entry->addr, __entry->pte, __entry->order)
+);
+
+DEFINE_EVENT(migration_pte, set_migration_pte,
+	TP_PROTO(unsigned long addr, unsigned long pte, int order),
+	TP_ARGS(addr, pte, order)
+);
+
+DEFINE_EVENT(migration_pte, remove_migration_pte,
+	TP_PROTO(unsigned long addr, unsigned long pte, int order),
+	TP_ARGS(addr, pte, order)
+);
 #endif /* _TRACE_MIGRATE_H */
 
 /* This part must be outside protection */
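
The DECLARE_EVENT_CLASS()/DEFINE_EVENT() split above means any further
event with the same record layout costs only a short DEFINE_EVENT()
stanza.  As a hedged sketch (the event name here is hypothetical and
not part of this patch), a third migration event reusing the class
would look like:

	/* Hypothetical event, shown only to illustrate the class reuse. */
	DEFINE_EVENT(migration_pte, fail_migration_pte,
		TP_PROTO(unsigned long addr, unsigned long pte, int order),
		TP_ARGS(addr, pte, order)
	);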

diff --git a/mm/migrate.c b/mm/migrate.c

@@ -53,7 +53,6 @@
 #include <asm/tlbflush.h>
 
-#define CREATE_TRACE_POINTS
 #include <trace/events/migrate.h>
 
 #include "internal.h"
@@ -249,6 +248,9 @@ static bool remove_migration_pte(struct folio *folio,
 		if (vma->vm_flags & VM_LOCKED)
 			mlock_page_drain(smp_processor_id());
 
+		trace_remove_migration_pte(pvmw.address, pte_val(pte),
+					   compound_order(new));
+
 		/* No need to invalidate - it was non-present before */
 		update_mmu_cache(vma, pvmw.address, pvmw.pte);
 	}
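
The order argument is what lets a single event format cover both cases
in the subject line: compound_order() returns 0 for a base page and the
compound page order for HugeTLB (e.g. 9 for a 2 MiB page on x86_64).
A minimal sketch of how a consumer might turn that back into a mapping
span (the helper name is illustrative, not from this patch):

	/* Illustrative helper, not part of the patch. */
	static inline unsigned long migration_entry_span(int order)
	{
		return PAGE_SIZE << order;	/* 4 KiB at order 0, 2 MiB at order 9 */
	}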

diff --git a/mm/rmap.c b/mm/rmap.c

@@ -76,7 +76,9 @@
 #include <asm/tlbflush.h>
 
+#define CREATE_TRACE_POINTS
 #include <trace/events/tlb.h>
+#include <trace/events/migrate.h>
 
 #include "internal.h"
@@ -1849,6 +1851,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			if (pte_swp_uffd_wp(pteval))
 				swp_pte = pte_swp_mkuffd_wp(swp_pte);
 			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
+			trace_set_migration_pte(pvmw.address, pte_val(swp_pte),
+						compound_order(&folio->page));
 			/*
 			 * No need to invalidate here it will synchronize on
 			 * against the special swap migration pte.
@@ -1917,6 +1921,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			if (pte_uffd_wp(pteval))
 				swp_pte = pte_swp_mkuffd_wp(swp_pte);
 			set_pte_at(mm, address, pvmw.pte, swp_pte);
+			trace_set_migration_pte(address, pte_val(swp_pte),
+						compound_order(&folio->page));
 			/*
 			 * No need to invalidate here it will synchronize on
 			 * against the special swap migration pte.
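
Since both events are ordinary tracepoints, they can be consumed from
tracefs (events/migrate/set_migration_pte and
events/migrate/remove_migration_pte) or from kernel code.  A hedged
sketch of attaching a probe, assuming the tracepoint is reachable from
the caller (in-tree code, or exported via EXPORT_TRACEPOINT_SYMBOL,
which this patch does not do):

	#include <linux/module.h>
	#include <trace/events/migrate.h>

	/* Probe signature is TP_PROTO() plus a leading private-data pointer. */
	static void probe_set_migration_pte(void *data, unsigned long addr,
					    unsigned long pte, int order)
	{
		pr_debug("migration entry installed at %lx, order %d\n",
			 addr, order);
	}

	static int __init migration_probe_init(void)
	{
		return register_trace_set_migration_pte(probe_set_migration_pte,
							NULL);
	}

	static void __exit migration_probe_exit(void)
	{
		unregister_trace_set_migration_pte(probe_set_migration_pte,
						   NULL);
		tracepoint_synchronize_unregister();
	}

	module_init(migration_probe_init);
	module_exit(migration_probe_exit);
	MODULE_LICENSE("GPL");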