KVM: arm64/sve: Make register ioctl access errors more consistent

Currently, the way error codes are generated when processing the
SVE register access ioctls is a bit haphazard.

This patch refactors the code so that the behaviour is more
consistent: now, -EINVAL should be returned only for unrecognised
register IDs or when some other runtime error occurs.  -ENOENT is
returned for register IDs that are recognised, but whose
corresponding register (or slice) does not exist for the vcpu.

To this end, in {get,set}_sve_reg() we now delegate the
vcpu_has_sve() check down into {get,set}_sve_vls() and
sve_reg_to_region().  The KVM_REG_ARM64_SVE_VLS special case is
picked off first, then sve_reg_to_region() plays the role of
exhaustively validating or rejecting the register ID and (where
accepted) computing the applicable register region as before.

sve_reg_to_region() is rearranged so that -ENOENT or -EPERM is not
returned prematurely, before checking whether reg->id is in a
recognised range.

-EPERM is now only returned when an attempt is made to access an
actually existing register slice on an unfinalized vcpu.
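
For illustration only (not part of this patch), a VMM probing an SVE
Z-register slice might observe the resulting error codes as sketched
below.  The vcpu fd, the register index and the hypothetical helper are
assumptions made for the sake of the example; KVM_REG_ARM64_SVE_ZREG()
and KVM_GET_ONE_REG come from the KVM UAPI headers:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Hypothetical helper: read slice 0 of Z<n> from an existing vcpu fd. */
	static int read_sve_zreg(int vcpu_fd, unsigned int n)
	{
		__u8 buf[256] = { 0 };	/* 2048 bits, the size encoded in the reg ID */
		struct kvm_one_reg reg = {
			.id   = KVM_REG_ARM64_SVE_ZREG(n, 0),
			.addr = (__u64)(unsigned long)buf,
		};

		if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) == 0)
			return 0;

		switch (errno) {
		case EINVAL:	/* ID not recognised as an SVE register at all */
			break;
		case ENOENT:	/* recognised ID, but no such register/slice on this vcpu */
			break;
		case EPERM:	/* register exists, but the vcpu is not finalized yet */
			break;
		}

		fprintf(stderr, "Z%u: %s\n", n, strerror(errno));
		return -1;
	}

With this patch, calling such a helper on an SVE-enabled but not yet
finalized vcpu fails with EPERM rather than ENOENT, since the register
slice does exist.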

Fixes: e1c9c98345 ("KVM: arm64/sve: Add SVE support to register access ioctl interface")
Fixes: 9033bba4b5 ("KVM: arm64/sve: Add pseudo-register for the guest's vector lengths")
Suggested-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Dave Martin 2019-04-11 16:13:39 +01:00, committed by Marc Zyngier
Parent f8d4635aff
Commit 52110aa959
1 changed file with 31 additions and 21 deletions


@@ -221,6 +221,9 @@ static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	unsigned int max_vq, vq;
 	u64 vqs[DIV_ROUND_UP(SVE_VQ_MAX - SVE_VQ_MIN + 1, 64)];
 
+	if (!vcpu_has_sve(vcpu))
+		return -ENOENT;
+
 	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
 		return -EINVAL;
 
@@ -242,6 +245,9 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	unsigned int max_vq, vq;
 	u64 vqs[DIV_ROUND_UP(SVE_VQ_MAX - SVE_VQ_MIN + 1, 64)];
 
+	if (!vcpu_has_sve(vcpu))
+		return -ENOENT;
+
 	if (kvm_arm_vcpu_sve_finalized(vcpu))
 		return -EPERM;		/* too late! */
 
@@ -304,7 +310,10 @@ struct sve_state_reg_region {
 	unsigned int upad;	/* extra trailing padding in user memory */
 };
 
-/* Get sanitised bounds for user/kernel SVE register copy */
+/*
+ * Validate SVE register ID and get sanitised bounds for user/kernel SVE
+ * register copy
+ */
 static int sve_reg_to_region(struct sve_state_reg_region *region,
 			     struct kvm_vcpu *vcpu,
 			     const struct kvm_one_reg *reg)
@ -335,25 +344,30 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
/* Verify that we match the UAPI header: */
BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);
if ((reg->id & SVE_REG_SLICE_MASK) > 0)
return -ENOENT;
vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;
if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
return -ENOENT;
vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
SVE_SIG_REGS_OFFSET;
reqlen = KVM_SVE_ZREG_SIZE;
maxlen = SVE_SIG_ZREG_SIZE(vq);
} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
return -ENOENT;
vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
SVE_SIG_REGS_OFFSET;
reqlen = KVM_SVE_PREG_SIZE;
maxlen = SVE_SIG_PREG_SIZE(vq);
} else {
return -ENOENT;
return -EINVAL;
}
sve_state_size = vcpu_sve_state_size(vcpu);
@@ -369,24 +383,22 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
 
 static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
+	int ret;
 	struct sve_state_reg_region region;
 	char __user *uptr = (char __user *)reg->addr;
 
-	if (!vcpu_has_sve(vcpu))
-		return -ENOENT;
-
 	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
 	if (reg->id == KVM_REG_ARM64_SVE_VLS)
 		return get_sve_vls(vcpu, reg);
 
-	/* Otherwise, reg is an architectural SVE register... */
+	/* Try to interpret reg ID as an architectural SVE register... */
+	ret = sve_reg_to_region(&region, vcpu, reg);
+	if (ret)
+		return ret;
 
 	if (!kvm_arm_vcpu_sve_finalized(vcpu))
 		return -EPERM;
 
-	if (sve_reg_to_region(&region, vcpu, reg))
-		return -ENOENT;
-
 	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
 			 region.klen) ||
 	    clear_user(uptr + region.klen, region.upad))
@@ -397,24 +409,22 @@ static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 
 static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
+	int ret;
 	struct sve_state_reg_region region;
 	const char __user *uptr = (const char __user *)reg->addr;
 
-	if (!vcpu_has_sve(vcpu))
-		return -ENOENT;
-
 	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
 	if (reg->id == KVM_REG_ARM64_SVE_VLS)
 		return set_sve_vls(vcpu, reg);
 
-	/* Otherwise, reg is an architectural SVE register... */
+	/* Try to interpret reg ID as an architectural SVE register... */
+	ret = sve_reg_to_region(&region, vcpu, reg);
+	if (ret)
+		return ret;
 
 	if (!kvm_arm_vcpu_sve_finalized(vcpu))
 		return -EPERM;
 
-	if (sve_reg_to_region(&region, vcpu, reg))
-		return -ENOENT;
-
 	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
 			   region.klen))
 		return -EFAULT;