2017-11-24 17:00:33 +03:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2008-03-25 20:47:29 +03:00
|
|
|
/*
|
2012-07-20 13:15:04 +04:00
|
|
|
* handling privileged instructions
|
2008-03-25 20:47:29 +03:00
|
|
|
*
|
2019-05-31 19:12:38 +03:00
|
|
|
* Copyright IBM Corp. 2008, 2020
|
2008-03-25 20:47:29 +03:00
|
|
|
*
|
|
|
|
* Author(s): Carsten Otte <cotte@de.ibm.com>
|
|
|
|
* Christian Borntraeger <borntraeger@de.ibm.com>
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/kvm.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 11:04:11 +03:00
|
|
|
#include <linux/gfp.h>
|
2008-03-25 20:47:29 +03:00
|
|
|
#include <linux/errno.h>
|
2013-03-25 20:22:55 +04:00
|
|
|
#include <linux/compat.h>
|
2017-02-04 02:16:44 +03:00
|
|
|
#include <linux/mm_types.h>
|
2020-06-09 07:32:42 +03:00
|
|
|
#include <linux/pgtable.h>
|
2017-02-04 02:16:44 +03:00
|
|
|
|
2013-03-05 16:14:46 +04:00
|
|
|
#include <asm/asm-offsets.h>
|
2013-07-26 17:04:01 +04:00
|
|
|
#include <asm/facility.h>
|
2008-03-25 20:47:29 +03:00
|
|
|
#include <asm/current.h>
|
|
|
|
#include <asm/debug.h>
|
|
|
|
#include <asm/ebcdic.h>
|
|
|
|
#include <asm/sysinfo.h>
|
2016-08-04 18:54:42 +03:00
|
|
|
#include <asm/page-states.h>
|
2016-03-08 13:49:57 +03:00
|
|
|
#include <asm/gmap.h>
|
2013-06-12 15:54:53 +04:00
|
|
|
#include <asm/io.h>
|
2012-12-20 18:32:09 +04:00
|
|
|
#include <asm/ptrace.h>
|
2016-05-10 10:50:21 +03:00
|
|
|
#include <asm/sclp.h>
|
2019-05-21 18:34:34 +03:00
|
|
|
#include <asm/ap.h>
|
2008-03-25 20:47:29 +03:00
|
|
|
#include "gaccess.h"
|
|
|
|
#include "kvm-s390.h"
|
2012-07-23 19:20:29 +04:00
|
|
|
#include "trace.h"
|
2008-03-25 20:47:29 +03:00
|
|
|
|
2016-08-15 05:53:22 +03:00
|
|
|
static int handle_ri(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
2018-01-23 15:28:40 +03:00
|
|
|
vcpu->stat.instruction_ri++;
|
|
|
|
|
2016-08-15 05:53:22 +03:00
|
|
|
if (test_kvm_facility(vcpu->kvm, 64)) {
|
2017-02-09 19:15:41 +03:00
|
|
|
VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
|
2017-03-13 13:48:28 +03:00
|
|
|
vcpu->arch.sie_block->ecb3 |= ECB3_RI;
|
2016-08-15 05:53:22 +03:00
|
|
|
kvm_s390_retry_instr(vcpu);
|
|
|
|
return 0;
|
|
|
|
} else
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
|
|
|
|
}
|
|
|
|
|
|
|
|
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
|
|
|
|
return handle_ri(vcpu);
|
|
|
|
else
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
|
2016-11-29 09:17:55 +03:00
|
|
|
static int handle_gs(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
2018-01-23 15:28:40 +03:00
|
|
|
vcpu->stat.instruction_gs++;
|
|
|
|
|
2016-11-29 09:17:55 +03:00
|
|
|
if (test_kvm_facility(vcpu->kvm, 133)) {
|
|
|
|
VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
|
|
|
|
preempt_disable();
|
|
|
|
__ctl_set_bit(2, 4);
|
|
|
|
current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
|
|
|
|
restore_gs_cb(current->thread.gs_cb);
|
|
|
|
preempt_enable();
|
|
|
|
vcpu->arch.sie_block->ecb |= ECB_GS;
|
|
|
|
vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
|
|
|
|
vcpu->arch.gs_enabled = 1;
|
|
|
|
kvm_s390_retry_instr(vcpu);
|
|
|
|
return 0;
|
|
|
|
} else
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
|
|
|
|
}
|
|
|
|
|
|
|
|
int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
int code = vcpu->arch.sie_block->ipb & 0xff;
|
|
|
|
|
|
|
|
if (code == 0x49 || code == 0x4d)
|
|
|
|
return handle_gs(vcpu);
|
|
|
|
else
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
2013-09-12 12:33:49 +04:00
|
|
|
/* Handle SCK (SET CLOCK) interception */
|
|
|
|
static int handle_set_clock(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
2018-02-07 14:46:43 +03:00
|
|
|
struct kvm_s390_vm_tod_clock gtod = { 0 };
|
2015-05-12 10:49:14 +03:00
|
|
|
int rc;
|
2016-12-09 14:44:40 +03:00
|
|
|
u8 ar;
|
2018-02-07 14:46:43 +03:00
|
|
|
u64 op2;
|
2013-09-12 12:33:49 +04:00
|
|
|
|
2018-01-23 15:28:40 +03:00
|
|
|
vcpu->stat.instruction_sck++;
|
|
|
|
|
2013-09-12 12:33:49 +04:00
|
|
|
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
|
|
|
|
2015-01-19 13:24:51 +03:00
|
|
|
op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
|
2013-09-12 12:33:49 +04:00
|
|
|
if (op2 & 7) /* Operand must be on a doubleword boundary */
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
|
2018-02-07 14:46:43 +03:00
|
|
|
rc = read_guest(vcpu, op2, ar, >od.tod, sizeof(gtod.tod));
|
2014-01-01 19:50:11 +04:00
|
|
|
if (rc)
|
|
|
|
return kvm_s390_inject_prog_cond(vcpu, rc);
|
2013-09-12 12:33:49 +04:00
|
|
|
|
2018-02-07 14:46:43 +03:00
|
|
|
VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
|
|
|
|
kvm_s390_set_tod_clock(vcpu->kvm, >od);
|
2013-09-12 12:33:49 +04:00
|
|
|
|
|
|
|
kvm_s390_set_psw_cc(vcpu, 0);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-03-25 20:47:29 +03:00
|
|
|
static int handle_set_prefix(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
u64 operand2;
|
2014-01-01 19:47:12 +04:00
|
|
|
u32 address;
|
|
|
|
int rc;
|
2016-12-09 14:44:40 +03:00
|
|
|
u8 ar;
|
2008-03-25 20:47:29 +03:00
|
|
|
|
|
|
|
vcpu->stat.instruction_spx++;
|
|
|
|
|
2013-06-20 19:22:01 +04:00
|
|
|
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
|
|
|
|
2015-01-19 13:24:51 +03:00
|
|
|
operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
|
2008-03-25 20:47:29 +03:00
|
|
|
|
|
|
|
/* must be word boundary */
|
2013-03-25 20:22:53 +04:00
|
|
|
if (operand2 & 3)
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
|
2008-03-25 20:47:29 +03:00
|
|
|
|
|
|
|
/* get the value */
|
2015-01-19 13:24:51 +03:00
|
|
|
rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
|
2014-01-01 19:47:12 +04:00
|
|
|
if (rc)
|
|
|
|
return kvm_s390_inject_prog_cond(vcpu, rc);
|
|
|
|
|
|
|
|
address &= 0x7fffe000u;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Make sure the new value is valid memory. We only need to check the
|
|
|
|
* first page, since address is 8k aligned and memory pieces are always
|
|
|
|
* at least 1MB aligned and have at least a size of 1MB.
|
|
|
|
*/
|
|
|
|
if (kvm_is_error_gpa(vcpu->kvm, address))
|
2013-03-25 20:22:53 +04:00
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
|
2008-03-25 20:47:29 +03:00
|
|
|
|
2012-01-11 14:19:32 +04:00
|
|
|
kvm_s390_set_prefix(vcpu, address);
|
2012-07-23 19:20:29 +04:00
|
|
|
trace_kvm_s390_handle_prefix(vcpu, 1, address);
|
2008-03-25 20:47:29 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int handle_store_prefix(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
u64 operand2;
|
|
|
|
u32 address;
|
2014-01-01 19:52:47 +04:00
|
|
|
int rc;
|
2016-12-09 14:44:40 +03:00
|
|
|
u8 ar;
|
2008-03-25 20:47:29 +03:00
|
|
|
|
|
|
|
vcpu->stat.instruction_stpx++;
|
2012-12-20 18:32:07 +04:00
|
|
|
|
2013-06-20 19:22:01 +04:00
|
|
|
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
|
|
|
|
2015-01-19 13:24:51 +03:00
|
|
|
operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
|
2008-03-25 20:47:29 +03:00
|
|
|
|
|
|
|
/* must be word boundary */
|
2013-03-25 20:22:53 +04:00
|
|
|
if (operand2 & 3)
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
|
2008-03-25 20:47:29 +03:00
|
|
|
|
2014-05-13 18:58:30 +04:00
|
|
|
address = kvm_s390_get_prefix(vcpu);
|
2008-03-25 20:47:29 +03:00
|
|
|
|
|
|
|
/* get the value */
|
2015-01-19 13:24:51 +03:00
|
|
|
rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
|
2014-01-01 19:52:47 +04:00
|
|
|
if (rc)
|
|
|
|
return kvm_s390_inject_prog_cond(vcpu, rc);
|
2008-03-25 20:47:29 +03:00
|
|
|
|
2015-07-21 13:44:57 +03:00
|
|
|
VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
|
2012-07-23 19:20:29 +04:00
|
|
|
trace_kvm_s390_handle_prefix(vcpu, 0, address);
|
2008-03-25 20:47:29 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
2014-01-01 19:53:27 +04:00
|
|
|
u16 vcpu_id = vcpu->vcpu_id;
|
|
|
|
u64 ga;
|
|
|
|
int rc;
|
2016-12-09 14:44:40 +03:00
|
|
|
u8 ar;
|
2008-03-25 20:47:29 +03:00
|
|
|
|
|
|
|
vcpu->stat.instruction_stap++;
|
2012-12-20 18:32:07 +04:00
|
|
|
|
2013-06-20 19:22:01 +04:00
|
|
|
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
|
|
|
|
2015-01-19 13:24:51 +03:00
|
|
|
ga = kvm_s390_get_base_disp_s(vcpu, &ar);
|
2008-03-25 20:47:29 +03:00
|
|
|
|
2014-01-01 19:53:27 +04:00
|
|
|
if (ga & 1)
|
2013-03-25 20:22:53 +04:00
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
|
2008-03-25 20:47:29 +03:00
|
|
|
|
2015-01-19 13:24:51 +03:00
|
|
|
rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
|
2014-01-01 19:53:27 +04:00
|
|
|
if (rc)
|
|
|
|
return kvm_s390_inject_prog_cond(vcpu, rc);
|
2008-03-25 20:47:29 +03:00
|
|
|
|
2015-07-21 13:44:57 +03:00
|
|
|
VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
|
2014-01-01 19:53:27 +04:00
|
|
|
trace_kvm_s390_handle_stap(vcpu, ga);
|
2008-03-25 20:47:29 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-02-25 00:12:56 +03:00
|
|
|
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
|
2014-01-14 21:11:14 +04:00
|
|
|
{
|
2018-02-15 18:33:47 +03:00
|
|
|
int rc;
|
2016-05-10 10:40:09 +03:00
|
|
|
|
|
|
|
trace_kvm_s390_skey_related_inst(vcpu);
|
2018-02-15 18:33:47 +03:00
|
|
|
/* Already enabled? */
|
2018-07-20 15:51:21 +03:00
|
|
|
if (vcpu->arch.skey_enabled)
|
2018-02-15 18:33:47 +03:00
|
|
|
return 0;
|
2014-01-14 21:11:14 +04:00
|
|
|
|
2014-10-23 14:09:17 +04:00
|
|
|
rc = s390_enable_skey();
|
2016-05-10 10:40:09 +03:00
|
|
|
VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
|
2018-02-15 18:33:47 +03:00
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
|
|
|
|
if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
|
|
|
|
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS);
|
|
|
|
if (!vcpu->kvm->arch.use_skf)
|
2018-07-20 15:51:21 +03:00
|
|
|
vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
|
2018-02-15 18:33:47 +03:00
|
|
|
else
|
2018-07-20 15:51:21 +03:00
|
|
|
vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
|
|
|
|
vcpu->arch.skey_enabled = true;
|
2018-02-15 18:33:47 +03:00
|
|
|
return 0;
|
2014-01-14 21:11:14 +04:00
|
|
|
}
|
|
|
|
|
2016-05-10 10:50:21 +03:00
|
|
|
static int try_handle_skey(struct kvm_vcpu *vcpu)
|
2008-03-25 20:47:29 +03:00
|
|
|
{
|
2016-05-10 10:40:09 +03:00
|
|
|
int rc;
|
2014-01-14 21:11:14 +04:00
|
|
|
|
2017-02-25 00:12:56 +03:00
|
|
|
rc = kvm_s390_skey_check_enable(vcpu);
|
2014-10-23 14:09:17 +04:00
|
|
|
if (rc)
|
|
|
|
return rc;
|
2018-02-15 18:33:47 +03:00
|
|
|
if (vcpu->kvm->arch.use_skf) {
|
2016-05-10 10:50:21 +03:00
|
|
|
/* with storage-key facility, SIE interprets it for us */
|
|
|
|
kvm_s390_retry_instr(vcpu);
|
|
|
|
VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
|
|
|
|
return -EAGAIN;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
2013-06-20 19:22:01 +04:00
|
|
|
|
2016-05-10 10:50:21 +03:00
|
|
|
static int handle_iske(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
2018-07-18 15:40:22 +03:00
|
|
|
unsigned long gaddr, vmaddr;
|
2016-05-10 10:50:21 +03:00
|
|
|
unsigned char key;
|
|
|
|
int reg1, reg2;
|
2018-07-18 15:40:22 +03:00
|
|
|
bool unlocked;
|
2016-05-10 10:50:21 +03:00
|
|
|
int rc;
|
|
|
|
|
2018-01-23 15:28:40 +03:00
|
|
|
vcpu->stat.instruction_iske++;
|
|
|
|
|
2017-12-04 14:19:11 +03:00
|
|
|
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
|
|
|
|
2016-05-10 10:50:21 +03:00
|
|
|
rc = try_handle_skey(vcpu);
|
|
|
|
if (rc)
|
|
|
|
return rc != -EAGAIN ? rc : 0;
|
|
|
|
|
|
|
|
kvm_s390_get_regs_rre(vcpu, ®1, ®2);
|
|
|
|
|
2018-07-18 15:40:22 +03:00
|
|
|
gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
|
|
|
|
gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
|
|
|
|
gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
|
|
|
|
vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
|
|
|
|
if (kvm_is_error_hva(vmaddr))
|
2016-05-10 10:50:21 +03:00
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
|
2018-07-18 15:40:22 +03:00
|
|
|
retry:
|
|
|
|
unlocked = false;
|
2020-06-09 07:33:25 +03:00
|
|
|
mmap_read_lock(current->mm);
|
2018-07-18 15:40:22 +03:00
|
|
|
rc = get_guest_storage_key(current->mm, vmaddr, &key);
|
|
|
|
|
|
|
|
if (rc) {
|
2020-08-12 04:39:01 +03:00
|
|
|
rc = fixup_user_fault(current->mm, vmaddr,
|
2018-07-18 15:40:22 +03:00
|
|
|
FAULT_FLAG_WRITE, &unlocked);
|
|
|
|
if (!rc) {
|
2020-06-09 07:33:25 +03:00
|
|
|
mmap_read_unlock(current->mm);
|
2018-07-18 15:40:22 +03:00
|
|
|
goto retry;
|
|
|
|
}
|
|
|
|
}
|
2020-06-09 07:33:25 +03:00
|
|
|
mmap_read_unlock(current->mm);
|
2018-08-30 11:13:55 +03:00
|
|
|
if (rc == -EFAULT)
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
|
|
|
|
if (rc < 0)
|
|
|
|
return rc;
|
2016-05-10 10:50:21 +03:00
|
|
|
vcpu->run->s.regs.gprs[reg1] &= ~0xff;
|
|
|
|
vcpu->run->s.regs.gprs[reg1] |= key;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int handle_rrbe(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
2018-07-18 15:40:22 +03:00
|
|
|
unsigned long vmaddr, gaddr;
|
2016-05-10 10:50:21 +03:00
|
|
|
int reg1, reg2;
|
2018-07-18 15:40:22 +03:00
|
|
|
bool unlocked;
|
2016-05-10 10:50:21 +03:00
|
|
|
int rc;
|
|
|
|
|
2018-01-23 15:28:40 +03:00
|
|
|
vcpu->stat.instruction_rrbe++;
|
|
|
|
|
2017-12-04 14:19:11 +03:00
|
|
|
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
|
|
|
|
2016-05-10 10:50:21 +03:00
|
|
|
rc = try_handle_skey(vcpu);
|
|
|
|
if (rc)
|
|
|
|
return rc != -EAGAIN ? rc : 0;
|
|
|
|
|
|
|
|
kvm_s390_get_regs_rre(vcpu, ®1, ®2);
|
|
|
|
|
2018-07-18 15:40:22 +03:00
|
|
|
gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
|
|
|
|
gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
|
|
|
|
gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
|
|
|
|
vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
|
|
|
|
if (kvm_is_error_hva(vmaddr))
|
2016-05-10 10:50:21 +03:00
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
|
2018-07-18 15:40:22 +03:00
|
|
|
retry:
|
|
|
|
unlocked = false;
|
2020-06-09 07:33:25 +03:00
|
|
|
mmap_read_lock(current->mm);
|
2018-07-18 15:40:22 +03:00
|
|
|
rc = reset_guest_reference_bit(current->mm, vmaddr);
|
|
|
|
if (rc < 0) {
|
2020-08-12 04:39:01 +03:00
|
|
|
rc = fixup_user_fault(current->mm, vmaddr,
|
2018-07-18 15:40:22 +03:00
|
|
|
FAULT_FLAG_WRITE, &unlocked);
|
|
|
|
if (!rc) {
|
2020-06-09 07:33:25 +03:00
|
|
|
mmap_read_unlock(current->mm);
|
2018-07-18 15:40:22 +03:00
|
|
|
goto retry;
|
|
|
|
}
|
|
|
|
}
|
2020-06-09 07:33:25 +03:00
|
|
|
mmap_read_unlock(current->mm);
|
2018-08-30 11:13:55 +03:00
|
|
|
if (rc == -EFAULT)
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
|
|
|
|
if (rc < 0)
|
|
|
|
return rc;
|
2016-05-10 10:50:21 +03:00
|
|
|
kvm_s390_set_psw_cc(vcpu, rc);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#define SSKE_NQ 0x8
|
|
|
|
#define SSKE_MR 0x4
|
|
|
|
#define SSKE_MC 0x2
|
|
|
|
#define SSKE_MB 0x1
|
|
|
|
static int handle_sske(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
|
|
|
|
unsigned long start, end;
|
|
|
|
unsigned char key, oldkey;
|
|
|
|
int reg1, reg2;
|
2018-07-18 15:40:22 +03:00
|
|
|
bool unlocked;
|
2016-05-10 10:50:21 +03:00
|
|
|
int rc;
|
|
|
|
|
2018-01-23 15:28:40 +03:00
|
|
|
vcpu->stat.instruction_sske++;
|
|
|
|
|
2017-12-04 14:19:11 +03:00
|
|
|
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
|
|
|
|
2016-05-10 10:50:21 +03:00
|
|
|
rc = try_handle_skey(vcpu);
|
|
|
|
if (rc)
|
|
|
|
return rc != -EAGAIN ? rc : 0;
|
|
|
|
|
|
|
|
if (!test_kvm_facility(vcpu->kvm, 8))
|
|
|
|
m3 &= ~SSKE_MB;
|
|
|
|
if (!test_kvm_facility(vcpu->kvm, 10))
|
|
|
|
m3 &= ~(SSKE_MC | SSKE_MR);
|
|
|
|
if (!test_kvm_facility(vcpu->kvm, 14))
|
|
|
|
m3 &= ~SSKE_NQ;
|
|
|
|
|
|
|
|
kvm_s390_get_regs_rre(vcpu, ®1, ®2);
|
|
|
|
|
|
|
|
key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
|
|
|
|
start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
|
|
|
|
start = kvm_s390_logical_to_effective(vcpu, start);
|
|
|
|
if (m3 & SSKE_MB) {
|
|
|
|
/* start already designates an absolute address */
|
2017-07-05 08:37:14 +03:00
|
|
|
end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
|
2016-05-10 10:50:21 +03:00
|
|
|
} else {
|
|
|
|
start = kvm_s390_real_to_abs(vcpu, start);
|
|
|
|
end = start + PAGE_SIZE;
|
|
|
|
}
|
|
|
|
|
|
|
|
while (start != end) {
|
2018-07-18 15:40:22 +03:00
|
|
|
unsigned long vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
|
|
|
|
unlocked = false;
|
2016-05-10 10:50:21 +03:00
|
|
|
|
2018-07-18 15:40:22 +03:00
|
|
|
if (kvm_is_error_hva(vmaddr))
|
2016-05-10 10:50:21 +03:00
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
|
|
|
|
|
2020-06-09 07:33:25 +03:00
|
|
|
mmap_read_lock(current->mm);
|
2018-07-18 15:40:22 +03:00
|
|
|
rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey,
|
2016-05-10 10:50:21 +03:00
|
|
|
m3 & SSKE_NQ, m3 & SSKE_MR,
|
|
|
|
m3 & SSKE_MC);
|
2018-07-18 15:40:22 +03:00
|
|
|
|
|
|
|
if (rc < 0) {
|
2020-08-12 04:39:01 +03:00
|
|
|
rc = fixup_user_fault(current->mm, vmaddr,
|
2018-07-18 15:40:22 +03:00
|
|
|
FAULT_FLAG_WRITE, &unlocked);
|
|
|
|
rc = !rc ? -EAGAIN : rc;
|
|
|
|
}
|
2020-06-09 07:33:25 +03:00
|
|
|
mmap_read_unlock(current->mm);
|
2018-07-18 15:40:22 +03:00
|
|
|
if (rc == -EFAULT)
|
2016-05-10 10:50:21 +03:00
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
|
2021-10-22 18:26:48 +03:00
|
|
|
if (rc == -EAGAIN)
|
|
|
|
continue;
|
2018-08-30 11:13:55 +03:00
|
|
|
if (rc < 0)
|
|
|
|
return rc;
|
|
|
|
start += PAGE_SIZE;
|
2017-01-02 10:51:02 +03:00
|
|
|
}
|
2016-05-10 10:50:21 +03:00
|
|
|
|
|
|
|
if (m3 & (SSKE_MC | SSKE_MR)) {
|
|
|
|
if (m3 & SSKE_MB) {
|
|
|
|
/* skey in reg1 is unpredictable */
|
|
|
|
kvm_s390_set_psw_cc(vcpu, 3);
|
|
|
|
} else {
|
|
|
|
kvm_s390_set_psw_cc(vcpu, rc);
|
|
|
|
vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
|
|
|
|
vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (m3 & SSKE_MB) {
|
2017-06-03 11:19:55 +03:00
|
|
|
if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT)
|
2016-05-10 10:50:21 +03:00
|
|
|
vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
|
|
|
|
else
|
|
|
|
vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
|
|
|
|
end = kvm_s390_logical_to_effective(vcpu, end);
|
|
|
|
vcpu->run->s.regs.gprs[reg2] |= end;
|
|
|
|
}
|
2008-03-25 20:47:29 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-01-10 17:33:28 +04:00
|
|
|
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
vcpu->stat.instruction_ipte_interlock++;
|
2017-06-03 11:56:07 +03:00
|
|
|
if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
|
2014-01-10 17:33:28 +04:00
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
|
|
|
wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
|
2015-11-04 15:47:58 +03:00
|
|
|
kvm_s390_retry_instr(vcpu);
|
2014-01-10 17:33:28 +04:00
|
|
|
VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-09-12 12:33:48 +04:00
|
|
|
static int handle_test_block(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
gpa_t addr;
|
|
|
|
int reg2;
|
|
|
|
|
2018-01-23 15:28:40 +03:00
|
|
|
vcpu->stat.instruction_tb++;
|
|
|
|
|
2013-09-12 12:33:48 +04:00
|
|
|
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
|
|
|
|
|
|
|
kvm_s390_get_regs_rre(vcpu, NULL, ®2);
|
|
|
|
addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
|
2014-03-07 15:14:23 +04:00
|
|
|
addr = kvm_s390_logical_to_effective(vcpu, addr);
|
2015-03-03 14:26:14 +03:00
|
|
|
if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
|
2014-03-07 15:14:23 +04:00
|
|
|
return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
|
2013-09-12 12:33:48 +04:00
|
|
|
addr = kvm_s390_real_to_abs(vcpu, addr);
|
|
|
|
|
2014-01-01 19:53:49 +04:00
|
|
|
if (kvm_is_error_gpa(vcpu->kvm, addr))
|
2013-09-12 12:33:48 +04:00
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
|
|
|
|
/*
|
|
|
|
* We don't expect errors on modern systems, and do not care
|
|
|
|
* about storage keys (yet), so let's just clear the page.
|
|
|
|
*/
|
2014-01-01 19:53:49 +04:00
|
|
|
if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
|
2013-09-12 12:33:48 +04:00
|
|
|
return -EFAULT;
|
|
|
|
kvm_s390_set_psw_cc(vcpu, 0);
|
|
|
|
vcpu->run->s.regs.gprs[0] = 0;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-12-20 18:32:12 +04:00
|
|
|
static int handle_tpi(struct kvm_vcpu *vcpu)
|
2008-03-25 20:47:29 +03:00
|
|
|
{
|
2012-12-20 18:32:12 +04:00
|
|
|
struct kvm_s390_interrupt_info *inti;
|
2014-01-01 19:55:48 +04:00
|
|
|
unsigned long len;
|
|
|
|
u32 tpi_data[3];
|
2015-02-04 17:53:42 +03:00
|
|
|
int rc;
|
2013-03-05 16:14:46 +04:00
|
|
|
u64 addr;
|
2016-12-09 14:44:40 +03:00
|
|
|
u8 ar;
|
2012-12-20 18:32:12 +04:00
|
|
|
|
2018-01-23 15:28:40 +03:00
|
|
|
vcpu->stat.instruction_tpi++;
|
|
|
|
|
2015-01-19 13:24:51 +03:00
|
|
|
addr = kvm_s390_get_base_disp_s(vcpu, &ar);
|
2013-03-25 20:22:53 +04:00
|
|
|
if (addr & 3)
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
|
2015-02-04 17:53:42 +03:00
|
|
|
|
2013-10-09 16:15:54 +04:00
|
|
|
inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
|
2015-02-04 17:53:42 +03:00
|
|
|
if (!inti) {
|
|
|
|
kvm_s390_set_psw_cc(vcpu, 0);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-01-01 19:55:48 +04:00
|
|
|
tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
|
|
|
|
tpi_data[1] = inti->io.io_int_parm;
|
|
|
|
tpi_data[2] = inti->io.io_int_word;
|
2013-03-05 16:14:46 +04:00
|
|
|
if (addr) {
|
|
|
|
/*
|
|
|
|
* Store the two-word I/O interruption code into the
|
|
|
|
* provided area.
|
|
|
|
*/
|
2014-01-01 19:55:48 +04:00
|
|
|
len = sizeof(tpi_data) - 4;
|
2015-01-19 13:24:51 +03:00
|
|
|
rc = write_guest(vcpu, addr, ar, &tpi_data, len);
|
2015-02-04 17:53:42 +03:00
|
|
|
if (rc) {
|
|
|
|
rc = kvm_s390_inject_prog_cond(vcpu, rc);
|
|
|
|
goto reinject_interrupt;
|
|
|
|
}
|
2013-03-05 16:14:46 +04:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Store the three-word I/O interruption code into
|
|
|
|
* the appropriate lowcore area.
|
|
|
|
*/
|
2014-01-01 19:55:48 +04:00
|
|
|
len = sizeof(tpi_data);
|
2015-02-04 17:53:42 +03:00
|
|
|
if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
|
|
|
|
/* failed writes to the low core are not recoverable */
|
2014-01-01 19:55:48 +04:00
|
|
|
rc = -EFAULT;
|
2015-02-04 17:53:42 +03:00
|
|
|
goto reinject_interrupt;
|
|
|
|
}
|
2013-03-05 16:14:46 +04:00
|
|
|
}
|
2015-02-04 17:53:42 +03:00
|
|
|
|
|
|
|
/* irq was successfully handed to the guest */
|
|
|
|
kfree(inti);
|
|
|
|
kvm_s390_set_psw_cc(vcpu, 1);
|
|
|
|
return 0;
|
|
|
|
reinject_interrupt:
|
2014-01-08 21:07:54 +04:00
|
|
|
/*
|
|
|
|
* If we encounter a problem storing the interruption code, the
|
|
|
|
* instruction is suppressed from the guest's view: reinject the
|
|
|
|
* interrupt.
|
|
|
|
*/
|
2015-02-04 17:59:11 +03:00
|
|
|
if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
|
|
|
|
kfree(inti);
|
|
|
|
rc = -EFAULT;
|
|
|
|
}
|
2015-02-04 17:53:42 +03:00
|
|
|
/* don't set the cc, a pgm irq was injected or we drop to user space */
|
2014-01-01 19:55:48 +04:00
|
|
|
return rc ? -EFAULT : 0;
|
2008-03-25 20:47:29 +03:00
|
|
|
}
|
|
|
|
|
2012-12-20 18:32:12 +04:00
|
|
|
static int handle_tsch(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
2013-07-03 17:18:35 +04:00
|
|
|
struct kvm_s390_interrupt_info *inti = NULL;
|
|
|
|
const u64 isc_mask = 0xffUL << 24; /* all iscs set */
|
2012-12-20 18:32:12 +04:00
|
|
|
|
2018-01-23 15:28:40 +03:00
|
|
|
vcpu->stat.instruction_tsch++;
|
|
|
|
|
2013-07-03 17:18:35 +04:00
|
|
|
/* a valid schid has at least one bit set */
|
|
|
|
if (vcpu->run->s.regs.gprs[1])
|
|
|
|
inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
|
|
|
|
vcpu->run->s.regs.gprs[1]);
|
2012-12-20 18:32:12 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Prepare exit to userspace.
|
|
|
|
* We indicate whether we dequeued a pending I/O interrupt
|
|
|
|
* so that userspace can re-inject it if the instruction gets
|
|
|
|
* a program check. While this may re-order the pending I/O
|
|
|
|
* interrupts, this is no problem since the priority is kept
|
|
|
|
* intact.
|
|
|
|
*/
|
|
|
|
vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
|
|
|
|
vcpu->run->s390_tsch.dequeued = !!inti;
|
|
|
|
if (inti) {
|
|
|
|
vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
|
|
|
|
vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
|
|
|
|
vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
|
|
|
|
vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
|
|
|
|
}
|
|
|
|
vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
|
|
|
|
kfree(inti);
|
|
|
|
return -EREMOTE;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int handle_io_inst(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");
|
|
|
|
|
2013-06-20 19:22:01 +04:00
|
|
|
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
|
|
|
|
2012-12-20 18:32:12 +04:00
|
|
|
if (vcpu->kvm->arch.css_support) {
|
|
|
|
/*
|
|
|
|
* Most I/O instructions will be handled by userspace.
|
|
|
|
* Exceptions are tpi and the interrupt portion of tsch.
|
|
|
|
*/
|
|
|
|
if (vcpu->arch.sie_block->ipa == 0xb236)
|
|
|
|
return handle_tpi(vcpu);
|
|
|
|
if (vcpu->arch.sie_block->ipa == 0xb235)
|
|
|
|
return handle_tsch(vcpu);
|
|
|
|
/* Handle in userspace. */
|
2018-01-23 15:28:40 +03:00
|
|
|
vcpu->stat.instruction_io_other++;
|
2012-12-20 18:32:12 +04:00
|
|
|
return -EOPNOTSUPP;
|
|
|
|
} else {
|
|
|
|
/*
|
2013-12-13 15:53:42 +04:00
|
|
|
* Set condition code 3 to stop the guest from issuing channel
|
2012-12-20 18:32:12 +04:00
|
|
|
* I/O instructions.
|
|
|
|
*/
|
2013-07-26 17:04:06 +04:00
|
|
|
kvm_s390_set_psw_cc(vcpu, 3);
|
2012-12-20 18:32:12 +04:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-21 18:34:34 +03:00
|
|
|
/**
 * handle_pqap: Handling pqap interception
 * @vcpu: the vcpu having issue the pqap instruction
 *
 * We now support PQAP/AQIC instructions and we need to correctly
 * answer the guest even if no dedicated driver's hook is available.
 *
 * The intercepting code calls a dedicated callback for this instruction
 * if a driver did register one in the CRYPTO satellite of the
 * SIE block.
 *
 * If no callback is available, the queues are not available, return this
 * response code to the caller and set CC to 3.
 * Else return the response code returned by the callback.
 */
static int handle_pqap(struct kvm_vcpu *vcpu)
{
	struct ap_queue_status status = {};
	crypto_hook pqap_hook;
	unsigned long reg0;
	int ret;
	uint8_t fc;

	/* Verify that the AP instruction are available */
	if (!ap_instructions_available())
		return -EOPNOTSUPP;
	/* Verify that the guest is allowed to use AP instructions */
	if (!(vcpu->arch.sie_block->eca & ECA_APIE))
		return -EOPNOTSUPP;
	/*
	 * The only possibly intercepted functions when AP instructions are
	 * available for the guest are AQIC and TAPQ with the t bit set
	 * since we do not set IC.3 (FIII) we currently will only intercept
	 * the AQIC function code.
	 * Note: running nested under z/VM can result in intercepts for other
	 * function codes, e.g. PQAP(QCI). We do not support this and bail out.
	 */
	reg0 = vcpu->run->s.regs.gprs[0];
	fc = (reg0 >> 24) & 0xff;
	/* Only the AQIC function code (0x03) is handled here. */
	if (fc != 0x03)
		return -EOPNOTSUPP;

	/* PQAP instruction is allowed for guest kernel only */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/* Common PQAP instruction specification exceptions */
	/* bits 41-47 must all be zeros */
	if (reg0 & 0x007f0000UL)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	/* APFT not install and T bit set */
	if (!test_kvm_facility(vcpu->kvm, 15) && (reg0 & 0x00800000UL))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	/* APXA not installed and APID greater 64 or APQI greater 16 */
	if (!(vcpu->kvm->arch.crypto.crycbd & 0x02) && (reg0 & 0x0000c0f0UL))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* AQIC function code specific exception */
	/* facility 65 not present for AQIC function code */
	if (!test_kvm_facility(vcpu->kvm, 65))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/*
	 * If the hook callback is registered, there will be a pointer to the
	 * hook function pointer in the kvm_s390_crypto structure. Lock the
	 * owner, retrieve the hook function pointer and call the hook.
	 */
	down_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
	if (vcpu->kvm->arch.crypto.pqap_hook) {
		/* Copy the hook while holding the rwsem, then call it. */
		pqap_hook = *vcpu->kvm->arch.crypto.pqap_hook;
		ret = pqap_hook(vcpu);
		/*
		 * On success, a non-zero response code in gpr1 (bits 40-47)
		 * still means the queue is not usable: reflect that as CC 3.
		 */
		if (!ret && vcpu->run->s.regs.gprs[1] & 0x00ff0000)
			kvm_s390_set_psw_cc(vcpu, 3);
		up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
		return ret;
	}
	up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
	/*
	 * A vfio_driver must register a hook.
	 * No hook means no driver to enable the SIE CRYCB and no queues.
	 * We send this response to the guest.
	 */
	status.response_code = 0x01;
	memcpy(&vcpu->run->s.regs.gprs[1], &status, sizeof(status));
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}
|
|
|
|
|
2008-03-25 20:47:29 +03:00
|
|
|
static int handle_stfl(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
int rc;
|
2015-02-02 17:42:51 +03:00
|
|
|
unsigned int fac;
|
2008-03-25 20:47:29 +03:00
|
|
|
|
|
|
|
vcpu->stat.instruction_stfl++;
|
2013-06-20 19:22:01 +04:00
|
|
|
|
|
|
|
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
|
|
|
|
2015-02-02 17:42:51 +03:00
|
|
|
/*
|
|
|
|
* We need to shift the lower 32 facility bits (bit 0-31) from a u64
|
|
|
|
* into a u32 memory representation. They will remain bits 0-31.
|
|
|
|
*/
|
2015-12-02 10:53:52 +03:00
|
|
|
fac = *vcpu->kvm->arch.model.fac_list >> 32;
|
2015-12-31 12:29:00 +03:00
|
|
|
rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
|
2015-02-02 17:42:51 +03:00
|
|
|
&fac, sizeof(fac));
|
2013-03-05 16:14:43 +04:00
|
|
|
if (rc)
|
2014-01-01 19:56:41 +04:00
|
|
|
return rc;
|
2015-07-21 13:44:57 +03:00
|
|
|
VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
|
2015-02-02 17:42:51 +03:00
|
|
|
trace_kvm_s390_handle_stfl(vcpu, fac);
|
2008-03-25 20:47:29 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-12-20 18:32:09 +04:00
|
|
|
/* EA and BA together select the addressing mode (24/31/64 bit). */
#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
/* Mask bits that must never be set in a valid PSW (see is_valid_psw()). */
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
/* Valid instruction-address bits in 24-bit addressing mode. */
#define PSW_ADDR_24 0x0000000000ffffffUL
/* Valid instruction-address bits in 31-bit addressing mode. */
#define PSW_ADDR_31 0x000000007fffffffUL
|
|
|
|
|
2014-04-17 11:10:40 +04:00
|
|
|
/*
 * Validate a guest PSW: no unassigned mask bits may be set, the
 * instruction address must fit the selected addressing mode, and the
 * address must be even.
 *
 * Returns 1 if the PSW is valid, 0 otherwise.
 */
int is_valid_psw(psw_t *psw)
{
	unsigned long amode = psw->mask & PSW_MASK_ADDR_MODE;

	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	/* 31-bit mode: address must fit in 31 bits */
	if (amode == PSW_MASK_BA && (psw->addr & ~PSW_ADDR_31))
		return 0;
	/* 24-bit mode: address must fit in 24 bits */
	if (!amode && (psw->addr & ~PSW_ADDR_24))
		return 0;
	/* EA set without BA is not a valid mode combination */
	if (amode == PSW_MASK_EA)
		return 0;
	/* the instruction address must be even */
	if (psw->addr & 1)
		return 0;
	return 1;
}
|
|
|
|
|
2012-12-20 18:32:09 +04:00
|
|
|
/*
 * Handle the LPSW (LOAD PSW) instruction: read an 8-byte short-format
 * PSW from the doubleword-aligned second operand and expand it into the
 * 16-byte guest PSW, validating the result.
 */
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	vcpu->stat.instruction_lpsw++;

	/* LPSW is privileged; reject it in problem state. */
	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	/* The operand must be doubleword aligned. */
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	/* The base bit of a short-format PSW must be one. */
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	/* Expand the 32-bit mask/addr pair into the 64-bit PSW halves. */
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Handle the LPSWE (LOAD PSW EXTENDED) instruction: read a full 16-byte
 * PSW from the doubleword-aligned second operand, make it the current
 * guest PSW and validate it.
 */
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	vcpu->stat.instruction_lpswe++;

	/* LPSWE is privileged; reject it in problem state. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	/* The operand must be doubleword aligned. */
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}
|
|
|
|
|
2008-03-25 20:47:29 +03:00
|
|
|
static int handle_stidp(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
2016-04-04 15:27:51 +03:00
|
|
|
u64 stidp_data = vcpu->kvm->arch.model.cpuid;
|
2008-03-25 20:47:29 +03:00
|
|
|
u64 operand2;
|
2014-01-01 19:58:16 +04:00
|
|
|
int rc;
|
2016-12-09 14:44:40 +03:00
|
|
|
u8 ar;
|
2008-03-25 20:47:29 +03:00
|
|
|
|
|
|
|
vcpu->stat.instruction_stidp++;
|
2012-12-20 18:32:07 +04:00
|
|
|
|
2013-06-20 19:22:01 +04:00
|
|
|
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
|
|
|
|
2015-01-19 13:24:51 +03:00
|
|
|
operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
|
2008-03-25 20:47:29 +03:00
|
|
|
|
2013-03-25 20:22:53 +04:00
|
|
|
if (operand2 & 7)
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
|
2008-03-25 20:47:29 +03:00
|
|
|
|
2015-01-19 13:24:51 +03:00
|
|
|
rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
|
2014-01-01 19:58:16 +04:00
|
|
|
if (rc)
|
|
|
|
return kvm_s390_inject_prog_cond(vcpu, rc);
|
2008-03-25 20:47:29 +03:00
|
|
|
|
2015-07-21 13:44:57 +03:00
|
|
|
VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
|
2008-03-25 20:47:29 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
|
|
|
|
{
|
|
|
|
int cpus = 0;
|
|
|
|
int n;
|
|
|
|
|
2014-02-24 13:11:41 +04:00
|
|
|
cpus = atomic_read(&vcpu->kvm->online_vcpus);
|
2008-03-25 20:47:29 +03:00
|
|
|
|
|
|
|
/* deal with other level 3 hypervisors */
|
2012-09-06 16:42:13 +04:00
|
|
|
if (stsi(mem, 3, 2, 2))
|
2008-03-25 20:47:29 +03:00
|
|
|
mem->count = 0;
|
|
|
|
if (mem->count < 8)
|
|
|
|
mem->count++;
|
|
|
|
for (n = mem->count - 1; n > 0 ; n--)
|
|
|
|
memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
|
|
|
|
|
2015-03-03 11:54:41 +03:00
|
|
|
memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
|
2008-03-25 20:47:29 +03:00
|
|
|
mem->vm[0].cpus_total = cpus;
|
|
|
|
mem->vm[0].cpus_configured = cpus;
|
|
|
|
mem->vm[0].cpus_standby = 0;
|
|
|
|
mem->vm[0].cpus_reserved = 0;
|
|
|
|
mem->vm[0].caf = 1000;
|
|
|
|
memcpy(mem->vm[0].name, "KVMguest", 8);
|
|
|
|
ASCEBC(mem->vm[0].name, 8);
|
|
|
|
memcpy(mem->vm[0].cpi, "KVM/Linux ", 16);
|
|
|
|
ASCEBC(mem->vm[0].cpi, 16);
|
|
|
|
}
|
|
|
|
|
2016-12-09 14:44:40 +03:00
|
|
|
/*
 * Record the STSI operands in the kvm_run structure and mark the exit as
 * KVM_EXIT_S390_STSI so userspace can inspect/post-process the request.
 */
static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}
|
|
|
|
|
2008-03-25 20:47:29 +03:00
|
|
|
/*
 * Handle the STSI (STORE SYSTEM INFORMATION) instruction: emulate the
 * supported function codes (0-3), store the requested SYSIB into guest
 * memory (or the SIDA for protected guests), and optionally hand the
 * request to userspace when user_stsi is enabled.
 */
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	u8 ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

	/* STSI is privileged; reject it in problem state. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/* Unsupported function codes just set CC 3. */
	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	/* Reserved bits in gpr0/gpr1 must be zero. */
	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* fc 0: report the current configuration level (3) in gpr0. */
	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* For non-protected guests the operand must be page aligned. */
	if (!kvm_s390_pv_cpu_is_protected(vcpu) && (operand2 & 0xfff))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL_ACCOUNT);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		/* Only SYSIB 3.2.2 is emulated. */
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL_ACCOUNT);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		/* Protected guests receive the result via the SIDA. */
		memcpy((void *)sida_origin(vcpu->arch.sie_block), (void *)mem,
		       PAGE_SIZE);
		rc = 0;
	} else {
		rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	}
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		/* -EREMOTE causes an exit to userspace for post-processing. */
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}
|
|
|
|
|
2009-01-22 12:28:29 +03:00
|
|
|
/*
 * Dispatch intercepted 0xb2xx instructions based on the low byte of the
 * instruction-parse area (ipa). Returns -EOPNOTSUPP for function codes
 * that are not emulated here (to be handled elsewhere, e.g. userspace).
 */
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x02:
		return handle_stidp(vcpu);
	case 0x04:
		return handle_set_clock(vcpu);
	case 0x10:
		return handle_set_prefix(vcpu);
	case 0x11:
		return handle_store_prefix(vcpu);
	case 0x12:
		return handle_store_cpu_address(vcpu);
	case 0x14:
		return kvm_s390_handle_vsie(vcpu);
	case 0x21:
	case 0x50:
		return handle_ipte_interlock(vcpu);
	case 0x29:
		return handle_iske(vcpu);
	case 0x2a:
		return handle_rrbe(vcpu);
	case 0x2b:
		return handle_sske(vcpu);
	case 0x2c:
		return handle_test_block(vcpu);
	/* 0x30-0x3c, 0x5f, 0x74, 0x76: handled as I/O instructions */
	case 0x30:
	case 0x31:
	case 0x32:
	case 0x33:
	case 0x34:
	case 0x35:
	case 0x36:
	case 0x37:
	case 0x38:
	case 0x39:
	case 0x3a:
	case 0x3b:
	case 0x3c:
	case 0x5f:
	case 0x74:
	case 0x76:
		return handle_io_inst(vcpu);
	case 0x56:
		return handle_sthyi(vcpu);
	case 0x7d:
		return handle_stsi(vcpu);
	case 0xaf:
		return handle_pqap(vcpu);
	case 0xb1:
		return handle_stfl(vcpu);
	case 0xb2:
		return handle_lpswe(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}
|
2011-07-24 12:48:17 +04:00
|
|
|
|
2012-12-20 18:32:09 +04:00
|
|
|
static int handle_epsw(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
int reg1, reg2;
|
|
|
|
|
2018-01-23 15:28:40 +03:00
|
|
|
vcpu->stat.instruction_epsw++;
|
|
|
|
|
2013-06-12 15:54:57 +04:00
|
|
|
kvm_s390_get_regs_rre(vcpu, ®1, ®2);
|
2012-12-20 18:32:09 +04:00
|
|
|
|
|
|
|
/* This basically extracts the mask half of the psw. */
|
2013-07-26 17:04:05 +04:00
|
|
|
vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
|
2012-12-20 18:32:09 +04:00
|
|
|
vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
|
|
|
|
if (reg2) {
|
2013-07-26 17:04:05 +04:00
|
|
|
vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
|
2012-12-20 18:32:09 +04:00
|
|
|
vcpu->run->s.regs.gprs[reg2] |=
|
2013-07-26 17:04:05 +04:00
|
|
|
vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
|
2012-12-20 18:32:09 +04:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-06-12 15:54:53 +04:00
|
|
|
/* Bit masks for the PFMF first-operand register (r1). */
#define PFMF_RESERVED 0xfffc0101UL	/* bits that must be zero */
#define PFMF_SK 0x00020000UL	/* set storage key */
#define PFMF_CF 0x00010000UL	/* clear frame */
#define PFMF_UI 0x00008000UL	/* not acted on by handle_pfmf() */
#define PFMF_FSC 0x00007000UL	/* frame size code */
#define PFMF_NQ 0x00000800UL	/* non-quiescing key setting */
#define PFMF_MR 0x00000400UL	/* reference-bit handling for cond. SSKE */
#define PFMF_MC 0x00000200UL	/* change-bit handling for cond. SSKE */
#define PFMF_KEY 0x000000feUL	/* storage key value to set */
|
|
|
|
|
|
|
|
/*
 * Handle the PFMF (PERFORM FRAME MANAGEMENT FUNCTION) instruction:
 * optionally clear frames and/or set their storage keys over the range
 * selected by the frame size code, processing one 4k page at a time and
 * updating r2 with the next address when a frame size was specified.
 */
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	bool mr = false, mc = false, nq;
	int reg1, reg2;
	unsigned long start, end;
	unsigned char key;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* PFMF requires the EDAT1 facility (bit 8). */
	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	/* PFMF is privileged; reject it in problem state. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide conditional-SSKE support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
	    test_kvm_facility(vcpu->kvm, 10)) {
		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
	}

	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	/* Clearing a frame is subject to low-address protection. */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	/* Determine the end of the range from the frame size code. */
	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* only 4k frames specify a real address */
		start = kvm_s390_real_to_abs(vcpu, start);
		end = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
		break;
	case 0x00001000:
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
		break;
	case 0x00002000:
		/* only support 2G frame size if EDAT2 is available and we are
		   not in 24-bit addressing mode */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + _REGION3_SIZE) & ~(_REGION3_SIZE - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	while (start != end) {
		unsigned long vmaddr;
		bool unlocked = false;

		/* Translate guest address to host address */
		vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		if (kvm_is_error_hva(vmaddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (kvm_clear_guest(vcpu->kvm, start, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = kvm_s390_skey_check_enable(vcpu);

			if (rc)
				return rc;
			mmap_read_lock(current->mm);
			rc = cond_set_guest_storage_key(current->mm, vmaddr,
							key, NULL, nq, mr, mc);
			if (rc < 0) {
				/* Fault the page in, then retry this page. */
				rc = fixup_user_fault(current->mm, vmaddr,
						      FAULT_FLAG_WRITE, &unlocked);
				rc = !rc ? -EAGAIN : rc;
			}
			mmap_read_unlock(current->mm);
			if (rc == -EFAULT)
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			/* -EAGAIN: re-run the loop for the same page. */
			if (rc == -EAGAIN)
				continue;
			if (rc < 0)
				return rc;
		}
		start += PAGE_SIZE;
	}
	/* When a frame size was specified, r2 receives the next address. */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
			vcpu->run->s.regs.gprs[reg2] = end;
		} else {
			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
			end = kvm_s390_logical_to_effective(vcpu, end);
			vcpu->run->s.regs.gprs[reg2] |= end;
		}
	}
	return 0;
}
|
|
|
|
|
2018-04-30 19:33:25 +03:00
|
|
|
/*
 * Emulate ESSA for a single guest page: let the host update the page's
 * PGSTE state for the requested operation and report the resulting page
 * state in the guest's r1 register.
 *
 * @vcpu: the vcpu that issued ESSA (r2 holds the guest address)
 * @orc:  the operation request code taken from the instruction
 *
 * Must be called with relevant read locks held (kvm->mm->mmap_lock, kvm->srcu)
 *
 * Returns the number of CBRL entries appended for this page (>= 0), or
 * the result of injecting an addressing exception if the guest address
 * does not map to a host address.
 */
static inline int __do_essa(struct kvm_vcpu *vcpu, const int orc)
{
	int r1, r2, nappended, entries;
	unsigned long gfn, hva, res, pgstev, ptev;
	unsigned long *cbrlo;

	/*
	 * We don't need to set SD.FPF.SK to 1 here, because if we have a
	 * machine check here we either handle it or crash
	 */

	kvm_s390_get_regs_rre(vcpu, &r1, &r2);
	gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT;
	hva = gfn_to_hva(vcpu->kvm, gfn);
	entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;

	if (kvm_is_error_hva(hva))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev);
	if (nappended < 0) {
		res = orc ? 0x10 : 0;
		vcpu->run->s.regs.gprs[r1] = res; /* Exception Indication */
		return 0;
	}
	res = (pgstev & _PGSTE_GPS_USAGE_MASK) >> 22;
	/*
	 * Set the block-content state part of the result. 0 means resident, so
	 * nothing to do if the page is valid. 2 is for preserved pages
	 * (non-present and non-zero), and 3 for zero pages (non-present and
	 * zero).
	 */
	if (ptev & _PAGE_INVALID) {
		res |= 2;
		if (pgstev & _PGSTE_GPS_ZERO)
			res |= 1;
	}
	if (pgstev & _PGSTE_GPS_NODAT)
		res |= 0x20;
	vcpu->run->s.regs.gprs[r1] = res;
	/*
	 * It is possible that all the normal 511 slots were full, in which case
	 * we will now write in the 512th slot, which is reserved for host use.
	 * In both cases we let the normal essa handling code process all the
	 * slots, including the reserved one, if needed.
	 */
	if (nappended > 0) {
		cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK);
		cbrlo[entries] = gfn << PAGE_SHIFT;
	}

	if (orc) {
		struct kvm_memory_slot *ms = gfn_to_memslot(vcpu->kvm, gfn);

		/* Increment only if we are really flipping the bit */
		if (ms && !test_and_set_bit(gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_inc(&vcpu->kvm->arch.cmma_dirty_pages);
	}

	return nappended;
}
|
|
|
|
|
2013-04-17 19:36:29 +04:00
|
|
|
/*
 * Handle an ESSA (EXTRACT AND SET STORAGE ATTRIBUTES) interception:
 * either lazily enable CMMA for this vcpu and retry the instruction, or
 * (while in migration mode) emulate the instruction via __do_essa() and
 * unmap the pages listed in the CBRL.
 */
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo;
	struct gmap *gmap;
	int i, orc;

	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	/* ESSA is privileged; reject it in problem state. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	/* Check for invalid operation request code */
	orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	/* ORCs 0-6 are always valid */
	if (orc > (test_kvm_facility(vcpu->kvm, 147) ? ESSA_SET_STABLE_NODAT
						: ESSA_SET_STABLE_IF_RESIDENT))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (!vcpu->kvm->arch.migration_mode) {
		/*
		 * CMMA is enabled in the KVM settings, but is disabled in
		 * the SIE block and in the mm_context, and we are not doing
		 * a migration. Enable CMMA in the mm_context.
		 * Since we need to take a write lock to write to the context
		 * to avoid races with storage keys handling, we check if the
		 * value really needs to be written to; if the value is
		 * already correct, we do nothing and avoid the lock.
		 */
		if (vcpu->kvm->mm->context.uses_cmm == 0) {
			mmap_write_lock(vcpu->kvm->mm);
			vcpu->kvm->mm->context.uses_cmm = 1;
			mmap_write_unlock(vcpu->kvm->mm);
		}
		/*
		 * If we are here, we are supposed to have CMMA enabled in
		 * the SIE block. Enabling CMMA works on a per-CPU basis,
		 * while the context use_cmma flag is per process.
		 * It's possible that the context flag is enabled and the
		 * SIE flag is not, so we set the flag always; if it was
		 * already set, nothing changes, otherwise we enable it
		 * on this CPU too.
		 */
		vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		/* Retry the ESSA instruction */
		kvm_s390_retry_instr(vcpu);
	} else {
		int srcu_idx;

		mmap_read_lock(vcpu->kvm->mm);
		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		i = __do_essa(vcpu, orc);
		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
		mmap_read_unlock(vcpu->kvm->mm);
		if (i < 0)
			return i;
		/* Account for the possible extra cbrl entry */
		entries += i;
	}
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	mmap_read_lock(gmap->mm);
	/* Zap all pages the guest declared as no longer needed. */
	for (i = 0; i < entries; ++i)
		__gmap_zap(gmap, cbrlo[i]);
	mmap_read_unlock(gmap->mm);
	return 0;
}
|
|
|
|
|
2012-12-20 18:32:09 +04:00
|
|
|
/*
 * Dispatch intercepted 0xb9xx instructions based on the low byte of the
 * instruction-parse area (ipa). Returns -EOPNOTSUPP for function codes
 * that are not emulated here.
 */
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x8a:
	case 0x8e:
	case 0x8f:
		return handle_ipte_interlock(vcpu);
	case 0x8d:
		return handle_epsw(vcpu);
	case 0xab:
		return handle_essa(vcpu);
	case 0xaf:
		return handle_pfmf(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}
|
|
|
|
|
2013-06-20 19:22:04 +04:00
|
|
|
/*
 * Handle the LCTL (LOAD CONTROL) instruction: load the low 32 bits of
 * control registers reg1 through reg3 (wrapping from 15 to 0) from the
 * word-aligned second operand, then flush the TLB.
 */
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctl++;

	/* LCTL is privileged; reject it in problem state. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	/* The operand must be word aligned. */
	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	/* Read all requested words from guest memory in one go. */
	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	/* Replace only the low 32 bits of each control register. */
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	/* Control register changes require a TLB flush. */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}
|
|
|
|
|
2014-01-23 13:47:13 +04:00
|
|
|
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
|
|
|
|
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
|
2014-10-29 12:07:16 +03:00
|
|
|
int reg, rc, nr_regs;
|
|
|
|
u32 ctl_array[16];
|
2014-01-23 13:47:13 +04:00
|
|
|
u64 ga;
|
2016-12-09 14:44:40 +03:00
|
|
|
u8 ar;
|
2014-01-23 13:47:13 +04:00
|
|
|
|
|
|
|
vcpu->stat.instruction_stctl++;
|
|
|
|
|
|
|
|
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
|
|
|
|
2015-01-19 13:24:51 +03:00
|
|
|
ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
|
2014-01-23 13:47:13 +04:00
|
|
|
|
|
|
|
if (ga & 3)
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
|
|
|
|
|
2015-07-21 13:44:57 +03:00
|
|
|
VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
|
2014-01-23 13:47:13 +04:00
|
|
|
trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);
|
|
|
|
|
|
|
|
reg = reg1;
|
2014-10-29 12:07:16 +03:00
|
|
|
nr_regs = 0;
|
2014-01-23 13:47:13 +04:00
|
|
|
do {
|
2014-10-29 12:07:16 +03:00
|
|
|
ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
|
2014-01-23 13:47:13 +04:00
|
|
|
if (reg == reg3)
|
|
|
|
break;
|
|
|
|
reg = (reg + 1) % 16;
|
|
|
|
} while (1);
|
2015-01-19 13:24:51 +03:00
|
|
|
rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
|
2014-10-29 12:07:16 +03:00
|
|
|
return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
|
2014-01-23 13:47:13 +04:00
|
|
|
}
|
|
|
|
|
2013-06-20 19:22:04 +04:00
|
|
|
static int handle_lctlg(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
|
|
|
|
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
|
2014-10-29 12:07:16 +03:00
|
|
|
int reg, rc, nr_regs;
|
|
|
|
u64 ctl_array[16];
|
|
|
|
u64 ga;
|
2016-12-09 14:44:40 +03:00
|
|
|
u8 ar;
|
2013-06-20 19:22:04 +04:00
|
|
|
|
|
|
|
vcpu->stat.instruction_lctlg++;
|
|
|
|
|
|
|
|
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
|
|
|
|
2015-01-19 13:24:51 +03:00
|
|
|
ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
|
2013-06-20 19:22:04 +04:00
|
|
|
|
2014-01-01 19:59:21 +04:00
|
|
|
if (ga & 7)
|
2013-06-20 19:22:04 +04:00
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
|
|
|
|
|
2015-07-21 13:44:57 +03:00
|
|
|
VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
|
2014-01-01 19:59:21 +04:00
|
|
|
trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
|
2013-06-20 19:22:04 +04:00
|
|
|
|
2014-10-29 12:07:16 +03:00
|
|
|
nr_regs = ((reg3 - reg1) & 0xf) + 1;
|
2015-01-19 13:24:51 +03:00
|
|
|
rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
|
2014-10-29 12:07:16 +03:00
|
|
|
if (rc)
|
|
|
|
return kvm_s390_inject_prog_cond(vcpu, rc);
|
|
|
|
reg = reg1;
|
|
|
|
nr_regs = 0;
|
2013-06-20 19:22:04 +04:00
|
|
|
do {
|
2014-10-29 12:07:16 +03:00
|
|
|
vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
|
2013-06-20 19:22:04 +04:00
|
|
|
if (reg == reg3)
|
|
|
|
break;
|
|
|
|
reg = (reg + 1) % 16;
|
|
|
|
} while (1);
|
2014-10-31 11:24:20 +03:00
|
|
|
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
|
2013-06-20 19:22:04 +04:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-01-23 13:47:13 +04:00
|
|
|
static int handle_stctg(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
|
|
|
|
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
|
2014-10-29 12:07:16 +03:00
|
|
|
int reg, rc, nr_regs;
|
|
|
|
u64 ctl_array[16];
|
|
|
|
u64 ga;
|
2016-12-09 14:44:40 +03:00
|
|
|
u8 ar;
|
2014-01-23 13:47:13 +04:00
|
|
|
|
|
|
|
vcpu->stat.instruction_stctg++;
|
|
|
|
|
|
|
|
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
|
|
|
|
2015-01-19 13:24:51 +03:00
|
|
|
ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
|
2014-01-23 13:47:13 +04:00
|
|
|
|
|
|
|
if (ga & 7)
|
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
|
|
|
|
|
2015-07-21 13:44:57 +03:00
|
|
|
VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
|
2014-01-23 13:47:13 +04:00
|
|
|
trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);
|
|
|
|
|
2014-10-29 12:07:16 +03:00
|
|
|
reg = reg1;
|
|
|
|
nr_regs = 0;
|
2014-01-23 13:47:13 +04:00
|
|
|
do {
|
2014-10-29 12:07:16 +03:00
|
|
|
ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
|
2014-01-23 13:47:13 +04:00
|
|
|
if (reg == reg3)
|
|
|
|
break;
|
|
|
|
reg = (reg + 1) % 16;
|
|
|
|
} while (1);
|
2015-01-19 13:24:51 +03:00
|
|
|
rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
|
2014-10-29 12:07:16 +03:00
|
|
|
return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
|
2014-01-23 13:47:13 +04:00
|
|
|
}
|
|
|
|
|
2013-06-20 19:22:04 +04:00
|
|
|
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
|
2012-12-20 18:32:10 +04:00
|
|
|
{
|
2016-04-08 18:52:39 +03:00
|
|
|
switch (vcpu->arch.sie_block->ipb & 0x000000ff) {
|
|
|
|
case 0x25:
|
|
|
|
return handle_stctg(vcpu);
|
|
|
|
case 0x2f:
|
|
|
|
return handle_lctlg(vcpu);
|
|
|
|
case 0x60:
|
|
|
|
case 0x61:
|
|
|
|
case 0x62:
|
|
|
|
return handle_ri(vcpu);
|
|
|
|
default:
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
2012-12-20 18:32:10 +04:00
|
|
|
}
|
|
|
|
|
2011-07-24 12:48:17 +04:00
|
|
|
/*
 * Handle the TPROT (TEST PROTECTION) instruction: probe whether the
 * first-operand address is fetchable/storable under the access key in
 * the second operand, and report the result in the condition code.
 * Returns 0 on success (cc set), or the result of injecting a program
 * interruption / a negative error from the translation path.
 */
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address, operand2;
	unsigned long gpa;
	u8 access_key;
	bool writable;
	int ret, cc;
	u8 ar;

	vcpu->stat.instruction_tprot++;

	/* TPROT is a privileged instruction */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/* The access key to test with is in bits 56-59 of the second operand */
	kvm_s390_get_base_disp_sse(vcpu, &address, &operand2, &ar, NULL);
	access_key = (operand2 & 0xf0) >> 4;

	/* Keep guest page tables stable while we probe (DAT on only) */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);

	/* First try the stricter store access ... */
	ret = guest_translate_address_with_key(vcpu, address, ar, &gpa,
					       GACC_STORE, access_key);
	if (ret == 0) {
		/* store ok; check host-side write protection as well */
		gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	} else if (ret == PGM_PROTECTION) {
		writable = false;
		/* Write protected? Try again with read-only... */
		ret = guest_translate_address_with_key(vcpu, address, ar, &gpa,
						       GACC_FETCH, access_key);
	}
	/* ret < 0 is a host error and is passed through below */
	if (ret >= 0) {
		cc = -1;

		/* Fetching permitted; storing permitted */
		if (ret == 0 && writable)
			cc = 0;
		/* Fetching permitted; storing not permitted */
		else if (ret == 0 && !writable)
			cc = 1;
		/* Fetching not permitted; storing not permitted */
		else if (ret == PGM_PROTECTION)
			cc = 2;
		/* Translation not available */
		else if (ret != PGM_ADDRESSING && ret != PGM_TRANSLATION_SPEC)
			cc = 3;

		if (cc != -1) {
			/* Condition expressible as a cc: no interruption */
			kvm_s390_set_psw_cc(vcpu, cc);
			ret = 0;
		} else {
			/* Addressing/translation-spec exceptions are injected */
			ret = kvm_s390_inject_program_int(vcpu, ret);
		}
	}

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}
|
|
|
|
|
|
|
|
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
2016-04-08 18:52:39 +03:00
|
|
|
switch (vcpu->arch.sie_block->ipa & 0x00ff) {
|
|
|
|
case 0x01:
|
2011-07-24 12:48:17 +04:00
|
|
|
return handle_tprot(vcpu);
|
2016-04-08 18:52:39 +03:00
|
|
|
default:
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
2011-07-24 12:48:17 +04:00
|
|
|
}
|
|
|
|
|
2012-04-24 11:24:44 +04:00
|
|
|
static int handle_sckpf(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
u32 value;
|
|
|
|
|
2018-01-23 15:28:40 +03:00
|
|
|
vcpu->stat.instruction_sckpf++;
|
|
|
|
|
2012-04-24 11:24:44 +04:00
|
|
|
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
2013-06-20 19:21:59 +04:00
|
|
|
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
2012-04-24 11:24:44 +04:00
|
|
|
|
|
|
|
if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
|
|
|
|
return kvm_s390_inject_program_int(vcpu,
|
|
|
|
PGM_SPECIFICATION);
|
|
|
|
|
|
|
|
value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
|
|
|
|
vcpu->arch.sie_block->todpr = value;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-07-18 10:18:13 +03:00
|
|
|
static int handle_ptff(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
2018-01-23 15:28:40 +03:00
|
|
|
vcpu->stat.instruction_ptff++;
|
|
|
|
|
2016-07-18 10:18:13 +03:00
|
|
|
/* we don't emulate any control instructions yet */
|
|
|
|
kvm_s390_set_psw_cc(vcpu, 3);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-04-24 11:24:44 +04:00
|
|
|
int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
2016-04-08 18:52:39 +03:00
|
|
|
switch (vcpu->arch.sie_block->ipa & 0x00ff) {
|
|
|
|
case 0x04:
|
|
|
|
return handle_ptff(vcpu);
|
|
|
|
case 0x07:
|
|
|
|
return handle_sckpf(vcpu);
|
|
|
|
default:
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
2012-04-24 11:24:44 +04:00
|
|
|
}
|