Tighten rules for ACCESS_ONCE
This series tightens the rules for ACCESS_ONCE to only work on scalar types. It also contains the necessary fixups as indicated by build bots of linux-next. Now everything is in place to prevent new non-scalar users of ACCESS_ONCE and we can continue to convert code to READ_ONCE/WRITE_ONCE. -----BEGIN PGP SIGNATURE----- Version: GnuPG v2.0.14 (GNU/Linux) iQIcBAABAgAGBQJU2H5MAAoJEBF7vIC1phx8Jm4QALPqKOMDSUBCrqJFWJeujtv2 ILxJKsnjrAlt3dxnlVI3q6e5wi896hSce75PcvZ/vs/K3GdgMxOjrakBJGTJ2Qjg 5njW9aGJDDr/SYFX33MLWfqy222TLtpxgSz379UgXjEzB0ymMWbJJ3FnGjVqQJdp RXDutpncRySc/rGHh9UPREIRR5GvimONsWE2zxgXjUzB8vIr2fCGvHTXfIb6RKbQ yaFoihzn0m+eisc5Gy4tQ1qhhnaYyWEGrINjHTjMFTQOWTlH80BZAyQeLdbyj2K5 qloBPS/VhBTr/5TxV5onM+nVhu0LiblVNrdMHVeb7jyST4LeFOCaWK98lB3axSB5 v/2D1YKNb3g1U1x3In/oNGQvs36zGiO1uEdMF1l8ZFXgCvHmATSFSTWBtqUhb5Ew JA3YyqMTG6dpRTMSnmu3/frr4wDqnxlB/ktQC1pf3tDp87mr1ZYEy/dQld+tltjh 9Z5GSdrw0nf91wNI3DJf+26ZDdz5B+EpDnPnOKG8anI1lc/mQneI21/K/xUteFXw UZ1XGPLV2vbv9/a13u44SdjenHvQs1egsGeebMxVPoj6WmDLVmcIqinyS6NawYzn IlDGy/b3bSnXWMBP0ZVBX94KWLxqDDc4a/ayxsmxsP1tPZ+jDXjVDa7E3zskcHxG Uj5ULCPyU087t8Sl76mv =Dj70 -----END PGP SIGNATURE----- Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/borntraeger/linux Pull ACCESS_ONCE() rule tightening from Christian Borntraeger: "Tighten rules for ACCESS_ONCE This series tightens the rules for ACCESS_ONCE to only work on scalar types. It also contains the necessary fixups as indicated by build bots of linux-next. 
Now everything is in place to prevent new non-scalar users of ACCESS_ONCE and we can continue to convert code to READ_ONCE/WRITE_ONCE" * tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/borntraeger/linux: kernel: Fix sparse warning for ACCESS_ONCE next: sh: Fix compile error kernel: tighten rules for ACCESS ONCE mm/gup: Replace ACCESS_ONCE with READ_ONCE x86/spinlock: Leftover conversion ACCESS_ONCE->READ_ONCE x86/xen/p2m: Replace ACCESS_ONCE with READ_ONCE ppc/hugetlbfs: Replace ACCESS_ONCE with READ_ONCE ppc/kvm: Replace ACCESS_ONCE with READ_ONCE
This commit is contained in:
Commit
c833e17e27
|
@ -152,7 +152,7 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
|
||||||
* in virtual mode.
|
* in virtual mode.
|
||||||
*/
|
*/
|
||||||
do {
|
do {
|
||||||
old_state = new_state = ACCESS_ONCE(icp->state);
|
old_state = new_state = READ_ONCE(icp->state);
|
||||||
|
|
||||||
/* Down_CPPR */
|
/* Down_CPPR */
|
||||||
new_state.cppr = new_cppr;
|
new_state.cppr = new_cppr;
|
||||||
|
@ -211,7 +211,7 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
|
||||||
* pending priority
|
* pending priority
|
||||||
*/
|
*/
|
||||||
do {
|
do {
|
||||||
old_state = new_state = ACCESS_ONCE(icp->state);
|
old_state = new_state = READ_ONCE(icp->state);
|
||||||
|
|
||||||
xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
|
xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
|
||||||
if (!old_state.xisr)
|
if (!old_state.xisr)
|
||||||
|
@ -277,7 +277,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
|
||||||
* whenever the MFRR is made less favored.
|
* whenever the MFRR is made less favored.
|
||||||
*/
|
*/
|
||||||
do {
|
do {
|
||||||
old_state = new_state = ACCESS_ONCE(icp->state);
|
old_state = new_state = READ_ONCE(icp->state);
|
||||||
|
|
||||||
/* Set_MFRR */
|
/* Set_MFRR */
|
||||||
new_state.mfrr = mfrr;
|
new_state.mfrr = mfrr;
|
||||||
|
@ -352,7 +352,7 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
|
||||||
icp_rm_clr_vcpu_irq(icp->vcpu);
|
icp_rm_clr_vcpu_irq(icp->vcpu);
|
||||||
|
|
||||||
do {
|
do {
|
||||||
old_state = new_state = ACCESS_ONCE(icp->state);
|
old_state = new_state = READ_ONCE(icp->state);
|
||||||
|
|
||||||
reject = 0;
|
reject = 0;
|
||||||
new_state.cppr = cppr;
|
new_state.cppr = cppr;
|
||||||
|
|
|
@ -327,7 +327,7 @@ static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
|
||||||
icp->server_num);
|
icp->server_num);
|
||||||
|
|
||||||
do {
|
do {
|
||||||
old_state = new_state = ACCESS_ONCE(icp->state);
|
old_state = new_state = READ_ONCE(icp->state);
|
||||||
|
|
||||||
*reject = 0;
|
*reject = 0;
|
||||||
|
|
||||||
|
@ -512,7 +512,7 @@ static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
|
||||||
* in virtual mode.
|
* in virtual mode.
|
||||||
*/
|
*/
|
||||||
do {
|
do {
|
||||||
old_state = new_state = ACCESS_ONCE(icp->state);
|
old_state = new_state = READ_ONCE(icp->state);
|
||||||
|
|
||||||
/* Down_CPPR */
|
/* Down_CPPR */
|
||||||
new_state.cppr = new_cppr;
|
new_state.cppr = new_cppr;
|
||||||
|
@ -567,7 +567,7 @@ static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
|
||||||
* pending priority
|
* pending priority
|
||||||
*/
|
*/
|
||||||
do {
|
do {
|
||||||
old_state = new_state = ACCESS_ONCE(icp->state);
|
old_state = new_state = READ_ONCE(icp->state);
|
||||||
|
|
||||||
xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
|
xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
|
||||||
if (!old_state.xisr)
|
if (!old_state.xisr)
|
||||||
|
@ -634,7 +634,7 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
|
||||||
* whenever the MFRR is made less favored.
|
* whenever the MFRR is made less favored.
|
||||||
*/
|
*/
|
||||||
do {
|
do {
|
||||||
old_state = new_state = ACCESS_ONCE(icp->state);
|
old_state = new_state = READ_ONCE(icp->state);
|
||||||
|
|
||||||
/* Set_MFRR */
|
/* Set_MFRR */
|
||||||
new_state.mfrr = mfrr;
|
new_state.mfrr = mfrr;
|
||||||
|
@ -679,7 +679,7 @@ static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
|
||||||
if (!icp)
|
if (!icp)
|
||||||
return H_PARAMETER;
|
return H_PARAMETER;
|
||||||
}
|
}
|
||||||
state = ACCESS_ONCE(icp->state);
|
state = READ_ONCE(icp->state);
|
||||||
kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
|
kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
|
||||||
kvmppc_set_gpr(vcpu, 5, state.mfrr);
|
kvmppc_set_gpr(vcpu, 5, state.mfrr);
|
||||||
return H_SUCCESS;
|
return H_SUCCESS;
|
||||||
|
@ -721,7 +721,7 @@ static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
|
||||||
BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
|
BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
|
||||||
|
|
||||||
do {
|
do {
|
||||||
old_state = new_state = ACCESS_ONCE(icp->state);
|
old_state = new_state = READ_ONCE(icp->state);
|
||||||
|
|
||||||
reject = 0;
|
reject = 0;
|
||||||
new_state.cppr = cppr;
|
new_state.cppr = cppr;
|
||||||
|
@ -885,7 +885,7 @@ static int xics_debug_show(struct seq_file *m, void *private)
|
||||||
if (!icp)
|
if (!icp)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
state.raw = ACCESS_ONCE(icp->state.raw);
|
state.raw = READ_ONCE(icp->state.raw);
|
||||||
seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
|
seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
|
||||||
icp->server_num, state.xisr,
|
icp->server_num, state.xisr,
|
||||||
state.pending_pri, state.cppr, state.mfrr,
|
state.pending_pri, state.cppr, state.mfrr,
|
||||||
|
@ -1082,7 +1082,7 @@ int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
|
||||||
* the ICS states before the ICP states.
|
* the ICS states before the ICP states.
|
||||||
*/
|
*/
|
||||||
do {
|
do {
|
||||||
old_state = ACCESS_ONCE(icp->state);
|
old_state = READ_ONCE(icp->state);
|
||||||
|
|
||||||
if (new_state.mfrr <= old_state.mfrr) {
|
if (new_state.mfrr <= old_state.mfrr) {
|
||||||
resend = false;
|
resend = false;
|
||||||
|
|
|
@ -986,7 +986,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
|
||||||
*/
|
*/
|
||||||
pdshift = PUD_SHIFT;
|
pdshift = PUD_SHIFT;
|
||||||
pudp = pud_offset(&pgd, ea);
|
pudp = pud_offset(&pgd, ea);
|
||||||
pud = ACCESS_ONCE(*pudp);
|
pud = READ_ONCE(*pudp);
|
||||||
|
|
||||||
if (pud_none(pud))
|
if (pud_none(pud))
|
||||||
return NULL;
|
return NULL;
|
||||||
|
@ -998,7 +998,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
|
||||||
else {
|
else {
|
||||||
pdshift = PMD_SHIFT;
|
pdshift = PMD_SHIFT;
|
||||||
pmdp = pmd_offset(&pud, ea);
|
pmdp = pmd_offset(&pud, ea);
|
||||||
pmd = ACCESS_ONCE(*pmdp);
|
pmd = READ_ONCE(*pmdp);
|
||||||
/*
|
/*
|
||||||
* A hugepage collapse is captured by pmd_none, because
|
* A hugepage collapse is captured by pmd_none, because
|
||||||
* it mark the pmd none and do a hpte invalidate.
|
* it mark the pmd none and do a hpte invalidate.
|
||||||
|
|
|
@ -17,7 +17,7 @@
|
||||||
static inline pte_t gup_get_pte(pte_t *ptep)
|
static inline pte_t gup_get_pte(pte_t *ptep)
|
||||||
{
|
{
|
||||||
#ifndef CONFIG_X2TLB
|
#ifndef CONFIG_X2TLB
|
||||||
return ACCESS_ONCE(*ptep);
|
return READ_ONCE(*ptep);
|
||||||
#else
|
#else
|
||||||
/*
|
/*
|
||||||
* With get_user_pages_fast, we walk down the pagetables without
|
* With get_user_pages_fast, we walk down the pagetables without
|
||||||
|
|
|
@ -183,10 +183,10 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
|
||||||
|
|
||||||
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
|
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
|
||||||
{
|
{
|
||||||
__ticket_t head = ACCESS_ONCE(lock->tickets.head);
|
__ticket_t head = READ_ONCE(lock->tickets.head);
|
||||||
|
|
||||||
for (;;) {
|
for (;;) {
|
||||||
struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
|
struct __raw_tickets tmp = READ_ONCE(lock->tickets);
|
||||||
/*
|
/*
|
||||||
* We need to check "unlocked" in a loop, tmp.head == head
|
* We need to check "unlocked" in a loop, tmp.head == head
|
||||||
* can be false positive because of overflow.
|
* can be false positive because of overflow.
|
||||||
|
|
|
@ -550,7 +550,7 @@ static bool alloc_p2m(unsigned long pfn)
|
||||||
mid_mfn = NULL;
|
mid_mfn = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
p2m_pfn = pte_pfn(ACCESS_ONCE(*ptep));
|
p2m_pfn = pte_pfn(READ_ONCE(*ptep));
|
||||||
if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) ||
|
if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) ||
|
||||||
p2m_pfn == PFN_DOWN(__pa(p2m_missing))) {
|
p2m_pfn == PFN_DOWN(__pa(p2m_missing))) {
|
||||||
/* p2m leaf page is missing */
|
/* p2m leaf page is missing */
|
||||||
|
|
|
@ -451,12 +451,23 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
|
||||||
* to make the compiler aware of ordering is to put the two invocations of
|
* to make the compiler aware of ordering is to put the two invocations of
|
||||||
* ACCESS_ONCE() in different C statements.
|
* ACCESS_ONCE() in different C statements.
|
||||||
*
|
*
|
||||||
* This macro does absolutely -nothing- to prevent the CPU from reordering,
|
* ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
|
||||||
* merging, or refetching absolutely anything at any time. Its main intended
|
* on a union member will work as long as the size of the member matches the
|
||||||
* use is to mediate communication between process-level code and irq/NMI
|
* size of the union and the size is smaller than word size.
|
||||||
* handlers, all running on the same CPU.
|
*
|
||||||
|
* The major use cases of ACCESS_ONCE used to be (1) Mediating communication
|
||||||
|
* between process-level code and irq/NMI handlers, all running on the same CPU,
|
||||||
|
* and (2) Ensuring that the compiler does not fold, spindle, or otherwise
|
||||||
|
* mutilate accesses that either do not require ordering or that interact
|
||||||
|
* with an explicit memory barrier or atomic instruction that provides the
|
||||||
|
* required ordering.
|
||||||
|
*
|
||||||
|
* If possible use READ_ONCE/ASSIGN_ONCE instead.
|
||||||
*/
|
*/
|
||||||
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
|
#define __ACCESS_ONCE(x) ({ \
|
||||||
|
__maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
|
||||||
|
(volatile typeof(x) *)&(x); })
|
||||||
|
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
|
||||||
|
|
||||||
/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
|
/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
|
||||||
#ifdef CONFIG_KPROBES
|
#ifdef CONFIG_KPROBES
|
||||||
|
|
2
mm/gup.c
2
mm/gup.c
|
@ -1092,7 +1092,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
|
||||||
|
|
||||||
pmdp = pmd_offset(&pud, addr);
|
pmdp = pmd_offset(&pud, addr);
|
||||||
do {
|
do {
|
||||||
pmd_t pmd = ACCESS_ONCE(*pmdp);
|
pmd_t pmd = READ_ONCE(*pmdp);
|
||||||
|
|
||||||
next = pmd_addr_end(addr, end);
|
next = pmd_addr_end(addr, end);
|
||||||
if (pmd_none(pmd) || pmd_trans_splitting(pmd))
|
if (pmd_none(pmd) || pmd_trans_splitting(pmd))
|
||||||
|
|
Loading…
Reference in new issue