KVM: s390: Support keyless subset guest mode
If the KSS facility is available on the machine, we also make it available for our KVM guests. The KSS facility bypasses storage-key management as long as the guest does not issue a storage-key-related instruction. When such an instruction is executed, control is returned to the host, which has to turn off KSS for the guest vcpu before retrying the instruction.

Signed-off-by: Corey S. McQuay <csmcquay@linux.vnet.ibm.com>
Signed-off-by: Farhan Ali <alifm@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Parent: 71cb1bf66e
Commit: 730cd632c4
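For orientation before the diff, here is a condensed sketch of the state transitions the patch implements. It is a standalone toy model in plain userspace C, not kernel code: the flag values and the toy_* names are simplified stand-ins for CPUSTAT_KSS, the ICTL_* trap bits and the real KVM functions, introduced only for illustration. With the KSS facility the vcpu starts keyless; the first storage-key instruction exits to the host, which drops KSS (or, without KSS, the ICTL traps) before the instruction is retried.

/* Toy model only -- not kernel code. Flag values and names are
 * simplified stand-ins for CPUSTAT_KSS and the ICTL_* trap bits. */
#include <stdio.h>
#include <stdbool.h>

#define TOY_CPUSTAT_KSS 0x00000200u
#define TOY_ICTL_SKEYS  0x00000007u  /* stands in for ICTL_ISKE|ICTL_SSKE|ICTL_RRBE */

struct toy_vcpu {
	unsigned int cpuflags;
	unsigned int ictl;
};

/* vcpu setup: with KSS, run keyless until the guest touches storage
 * keys; without KSS, intercept the key instructions via ICTL bits. */
static void toy_vcpu_setup(struct toy_vcpu *v, bool has_kss)
{
	if (has_kss)
		v->cpuflags |= TOY_CPUSTAT_KSS;
	else
		v->ictl |= TOY_ICTL_SKEYS;
}

/* Host reaction to a key-related exit (ICPT_KSS or a trapped
 * instruction): enable real key handling, then drop whichever trap
 * mechanism was armed so the retried instruction runs natively. */
static void toy_skey_check_enable(struct toy_vcpu *v)
{
	if (v->cpuflags & TOY_CPUSTAT_KSS)
		v->cpuflags &= ~TOY_CPUSTAT_KSS;
	else
		v->ictl &= ~TOY_ICTL_SKEYS;
}

int main(void)
{
	struct toy_vcpu v = { 0, 0 };

	toy_vcpu_setup(&v, true);
	printf("after setup:   kss=%d\n", !!(v.cpuflags & TOY_CPUSTAT_KSS));
	toy_skey_check_enable(&v);	/* guest issued ISKE/SSKE/RRBE */
	printf("after key use: kss=%d\n", !!(v.cpuflags & TOY_CPUSTAT_KSS));
	return 0;
}

In the patch itself the corresponding decisions live in kvm_arch_vcpu_setup() and kvm_s390_skey_check_enable(); see the kvm-s390.c and priv.c hunks below.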
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -122,6 +122,7 @@ struct esca_block {
 #define CPUSTAT_SLSR 0x00002000
 #define CPUSTAT_ZARCH 0x00000800
 #define CPUSTAT_MCDS 0x00000100
+#define CPUSTAT_KSS 0x00000200
 #define CPUSTAT_SM 0x00000080
 #define CPUSTAT_IBS 0x00000040
 #define CPUSTAT_GED2 0x00000010
@@ -185,6 +186,7 @@ struct kvm_s390_sie_block {
 #define ICPT_OPEREXC 0x2C
 #define ICPT_PARTEXEC 0x38
 #define ICPT_IOINST 0x40
+#define ICPT_KSS 0x5c
 	__u8 icptcode; /* 0x0050 */
 	__u8 icptstatus; /* 0x0051 */
 	__u16 ihcpu; /* 0x0052 */
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -119,6 +119,7 @@ struct kvm_s390_vm_cpu_machine {
 #define KVM_S390_VM_CPU_FEAT_CMMA 10
 #define KVM_S390_VM_CPU_FEAT_PFMFI 11
 #define KVM_S390_VM_CPU_FEAT_SIGPIF 12
+#define KVM_S390_VM_CPU_FEAT_KSS 13
 struct kvm_s390_vm_cpu_feat {
 	__u64 feat[16];
 };
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -426,6 +426,9 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
 	case ICPT_PARTEXEC:
 		rc = handle_partial_execution(vcpu);
 		break;
+	case ICPT_KSS:
+		rc = kvm_s390_skey_check_enable(vcpu);
+		break;
 	default:
 		return -EOPNOTSUPP;
 	}
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -300,6 +300,8 @@ static void kvm_s390_cpu_feat_init(void)
 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
 	if (sclp.has_ibs)
 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
+	if (sclp.has_kss)
+		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
 	/*
 	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
 	 * all skey handling functions read/set the skey from the PGSTE
@@ -2034,7 +2036,11 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
 				      | SDNXC;
 	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
-	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
+
+	if (sclp.has_kss)
+		atomic_or(CPUSTAT_KSS, &vcpu->arch.sie_block->cpuflags);
+	else
+		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
 
 	if (vcpu->kvm->arch.use_cmma) {
 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -254,6 +254,7 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
+int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu);
 
 /* implemented in vsie.c */
 int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu);
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -198,18 +198,25 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static int __skey_check_enable(struct kvm_vcpu *vcpu)
+int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
 {
 	int rc = 0;
+	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
 
 	trace_kvm_s390_skey_related_inst(vcpu);
-	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
+	if (!(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) &&
+	    !(atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS))
 		return rc;
 
 	rc = s390_enable_skey();
 	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
-	if (!rc)
-		vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
+	if (!rc) {
+		if (atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS)
+			atomic_andnot(CPUSTAT_KSS, &sie_block->cpuflags);
+		else
+			sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE |
+					     ICTL_RRBE);
+	}
 	return rc;
 }
 
@@ -218,7 +225,7 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
 	int rc;
 
 	vcpu->stat.instruction_storage_key++;
-	rc = __skey_check_enable(vcpu);
+	rc = kvm_s390_skey_check_enable(vcpu);
 	if (rc)
 		return rc;
 	if (sclp.has_skey) {
@@ -916,7 +923,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
 	}
 
 	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
-		int rc = __skey_check_enable(vcpu);
+		int rc = kvm_s390_skey_check_enable(vcpu);
 
 		if (rc)
 			return rc;
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -117,6 +117,8 @@ static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	newflags |= cpuflags & CPUSTAT_SM;
 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
 		newflags |= cpuflags & CPUSTAT_IBS;
+	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
+		newflags |= cpuflags & CPUSTAT_KSS;
 
 	atomic_set(&scb_s->cpuflags, newflags);
 	return 0;
@@ -289,7 +291,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	 * bits. Therefore we cannot provide interpretation and would later
 	 * have to provide own emulation handlers.
 	 */
-	scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
+	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_KSS))
+		scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
+
 	scb_s->icpua = scb_o->icpua;
 
 	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))