clocksource/drivers/arch_arm_timer: Move workaround synchronisation around
We currently handle synchronisation when workarounds are enabled by having an ISB in the __arch_counter_get_cnt?ct_stable() helpers.

While this works, it prevents us from relaxing this synchronisation. Instead, move it closer to the point where the synchronisation is actually needed. Further patches will subsequently relax this.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20211017124225.3018098-14-maz@kernel.org
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Parent: c1153d52c4
Commit: db26f8f2da
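Before the diff itself, here is a minimal, illustrative C sketch of where the ISB ends up after this change: either inside the raw system-register readers or immediately in front of the per-CPU erratum workaround's read hook, rather than in the __arch_counter_get_cnt?ct_stable() callers. This is not the kernel's exact code; the *_sketch names are invented, and the preempt_disable_notrace()/preempt_enable_notrace() pair that the real arch_timer_reg_read_stable() macro wraps around the per-CPU lookup is omitted for brevity.

/*
 * Illustrative sketch only, assuming an arm64 kernel context where isb(),
 * read_sysreg(), __this_cpu_read(), timer_unstable_counter_workaround and
 * arch_counter_enforce_ordering() are available; see the real definitions
 * in arch/arm64/include/asm/arch_timer.h.
 */
#include <asm/arch_timer.h>	/* struct arch_timer_erratum_workaround */
#include <asm/barrier.h>	/* isb() */
#include <asm/sysreg.h>		/* read_sysreg() */

static inline u64 arch_timer_read_cntvct_el0_sketch(void)
{
	isb();				/* after this patch, the ISB lives here ... */
	return read_sysreg(cntvct_el0);
}

static inline u64 arch_counter_get_cntvct_stable_sketch(void)
{
	const struct arch_timer_erratum_workaround *wa;
	u64 cnt;

	wa = __this_cpu_read(timer_unstable_counter_workaround);
	if (wa && wa->read_cntvct_el0) {
		isb();			/* ... or here, just before the workaround read */
		cnt = wa->read_cntvct_el0();
	} else {
		cnt = arch_timer_read_cntvct_el0_sketch();
	}

	arch_counter_enforce_ordering(cnt);
	return cnt;			/* no ISB at this level any more */
}

Either way, the counter read is still ordered against preceding instructions; the point of the move is that later patches can relax the barrier in the places that can tolerate it.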
arch/arm64/include/asm/arch_timer.h

@@ -32,7 +32,7 @@
 	({								\
 		const struct arch_timer_erratum_workaround *__wa;	\
 		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
-		(__wa && __wa->h) ? __wa->h : arch_timer_##h;		\
+		(__wa && __wa->h) ? ({ isb(); __wa->h;}) : arch_timer_##h; \
 	})
 
 #else
@@ -64,11 +64,13 @@ DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
 
 static inline notrace u64 arch_timer_read_cntpct_el0(void)
 {
+	isb();
 	return read_sysreg(cntpct_el0);
 }
 
 static inline notrace u64 arch_timer_read_cntvct_el0(void)
 {
+	isb();
 	return read_sysreg(cntvct_el0);
 }
 
@@ -163,7 +165,6 @@ static __always_inline u64 __arch_counter_get_cntpct_stable(void)
 {
 	u64 cnt;
 
-	isb();
 	cnt = arch_timer_reg_read_stable(cntpct_el0);
 	arch_counter_enforce_ordering(cnt);
 	return cnt;
@@ -183,7 +184,6 @@ static __always_inline u64 __arch_counter_get_cntvct_stable(void)
 {
 	u64 cnt;
 
-	isb();
 	cnt = arch_timer_reg_read_stable(cntvct_el0);
 	arch_counter_enforce_ordering(cnt);
 	return cnt;