bpf: Make 32->64 bounds propagation slightly more robust
Make the bounds propagation in __reg_assign_32_into_64() slightly more robust and readable by aligning it with what was done earlier in the __reg_combine_64_into_32() counterpart: only propagate or pessimize the bounds as an smin/smax pair.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: John Fastabend <john.fastabend@gmail.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
Parent
3cf2b61eb0
Commit
e572ff80f0
|
@ -1366,22 +1366,28 @@ static void __reg_bound_offset(struct bpf_reg_state *reg)
|
|||
reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
|
||||
}
|
||||
|
||||
/* Return true when the 32-bit signed value @a can be carried over into a
 * 64-bit signed bound unchanged, i.e. it lies within [0, S32_MAX].
 */
static bool __reg32_bound_s64(s32 a)
{
	if (a < 0)
		return false;
	return a <= S32_MAX;
}
|
||||
|
||||
static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
|
||||
{
|
||||
reg->umin_value = reg->u32_min_value;
|
||||
reg->umax_value = reg->u32_max_value;
|
||||
/* Attempt to pull 32-bit signed bounds into 64-bit bounds
|
||||
* but must be positive otherwise set to worse case bounds
|
||||
* and refine later from tnum.
|
||||
|
||||
/* Attempt to pull 32-bit signed bounds into 64-bit bounds but must
|
||||
* be positive otherwise set to worse case bounds and refine later
|
||||
* from tnum.
|
||||
*/
|
||||
if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0)
|
||||
reg->smax_value = reg->s32_max_value;
|
||||
else
|
||||
reg->smax_value = U32_MAX;
|
||||
if (reg->s32_min_value >= 0)
|
||||
if (__reg32_bound_s64(reg->s32_min_value) &&
|
||||
__reg32_bound_s64(reg->s32_max_value)) {
|
||||
reg->smin_value = reg->s32_min_value;
|
||||
else
|
||||
reg->smax_value = reg->s32_max_value;
|
||||
} else {
|
||||
reg->smin_value = 0;
|
||||
reg->smax_value = U32_MAX;
|
||||
}
|
||||
}
|
||||
|
||||
static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
|
||||
|
|
Loading…
Reference in new issue