Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6: (85 commits)
  [S390] provide documentation for hvc_iucv kernel parameter.
  [S390] convert ctcm printks to dev_xxx and pr_xxx macros.
  [S390] convert zfcp printks to pr_xxx macros.
  [S390] convert vmlogrdr printks to pr_xxx macros.
  [S390] convert zfcp dumper printks to pr_xxx macros.
  [S390] convert cpu related printks to pr_xxx macros.
  [S390] convert qeth printks to dev_xxx and pr_xxx macros.
  [S390] convert sclp printks to pr_xxx macros.
  [S390] convert iucv printks to dev_xxx and pr_xxx macros.
  [S390] convert ap_bus printks to pr_xxx macros.
  [S390] convert dcssblk and extmem printks messages to pr_xxx macros.
  [S390] convert monwriter printks to pr_xxx macros.
  [S390] convert s390 debug feature printks to pr_xxx macros.
  [S390] convert monreader printks to pr_xxx macros.
  [S390] convert appldata printks to pr_xxx macros.
  [S390] convert setup printks to pr_xxx macros.
  [S390] convert hypfs printks to pr_xxx macros.
  [S390] convert time printks to pr_xxx macros.
  [S390] convert cpacf printks to pr_xxx macros.
  [S390] convert cio printks to pr_xxx macros.
  ...
Commit 1db2a5c11e
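
Most of the patches in this pull replace hand-rolled printk(KERN_xxx "prefix: ...") calls with the pr_xxx()/dev_xxx() helpers plus a per-driver message prefix, as visible in the appldata, hypfs, s390dbf, cpcmd and aes_s390 hunks below. A minimal sketch of that pattern follows; the "example" component name, the init function and the error value are made up for illustration and are not part of any patch in this series:

	/* pr_fmt() must be defined before the first include that pulls in printk definitions */
	#define KMSG_COMPONENT "example"
	#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <linux/module.h>

	static int __init example_init(void)
	{
		int rc = -ENODEV;	/* pretend that starting a data collection failed */

		if (rc != 0) {
			/* was: printk(KERN_ERR "example: start failed, rc: %d\n", rc); */
			pr_err("Starting the data collection failed with rc=%d\n", rc);
			return rc;
		}
		pr_info("data collection started\n");
		return 0;
	}
	module_init(example_init);

	MODULE_LICENSE("GPL");

Because the pr_xxx() macros expand pr_fmt() around the format string, every message is automatically tagged with "example: ", so the converted drivers no longer have to repeat the prefix in each call.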
@@ -823,6 +823,9 @@ and is between 256 and 4096 characters. It is defined in the file
 
 	hlt		[BUGS=ARM,SH]
 
+	hvc_iucv=	[S390] Number of z/VM IUCV Hypervisor console (HVC)
+			back-ends. Valid parameters: 0..8
+
 	i8042.debug	[HW] Toggle i8042 debug mode
 	i8042.direct	[HW] Put keyboard port into non-translated mode
 	i8042.dumbkbd	[HW] Pretend that controller can only read data from

@@ -2292,6 +2295,14 @@ and is between 256 and 4096 characters. It is defined in the file
 			See comment before function dc390_setup() in
 			drivers/scsi/tmscsim.c.
 
+	topology=	[S390]
+			Format: {off | on}
+			Specify if the kernel should make use of the cpu
+			topology informations if the hardware supports these.
+			The scheduler will make use of these informations and
+			e.g. base its process migration decisions on it.
+			Default is off.
+
 	tp720=		[HW,PS2]
 
 	trix=		[HW,OSS] MediaTrix AudioTrix Pro
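Both of the options documented above are ordinary boot parameters. A hypothetical command line that enables four IUCV HVC back-ends and cpu-topology-aware scheduling could look like this (the root device and the chosen values are illustrative only, within the documented ranges):

	root=/dev/dasda1 hvc_iucv=4 topology=on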
@@ -267,7 +267,7 @@ extern int ucache_bsize;
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
 struct linux_binprm;
 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
-				       int executable_stack);
+				       int uses_interp);
 #define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b);
 
 #endif /* __KERNEL__ */
@@ -184,8 +184,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma)
  * This is called from binfmt_elf, we create the special vma for the
  * vDSO and insert it into the mm struct tree
  */
-int arch_setup_additional_pages(struct linux_binprm *bprm,
-				int executable_stack)
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
 	struct mm_struct *mm = current->mm;
 	struct page **vdso_pagelist;
@@ -43,6 +43,9 @@ config GENERIC_HWEIGHT
 config GENERIC_TIME
 	def_bool y
 
+config GENERIC_TIME_VSYSCALL
+	def_bool y
+
 config GENERIC_CLOCKEVENTS
 	def_bool y
 

@@ -66,10 +69,15 @@ config PGSTE
 	bool
 	default y if KVM
 
+config VIRT_CPU_ACCOUNTING
+	def_bool y
+
 mainmenu "Linux Kernel Configuration"
 
 config S390
 	def_bool y
+	select USE_GENERIC_SMP_HELPERS if SMP
+	select HAVE_FUNCTION_TRACER
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES

@@ -225,6 +233,14 @@ config MARCH_Z9_109
 	  Class (z9 BC). The kernel will be slightly faster but will not
 	  work on older machines such as the z990, z890, z900, and z800.
 
+config MARCH_Z10
+	bool "IBM System z10"
+	help
+	  Select this to enable optimizations for IBM System z10. The
+	  kernel will be slightly faster but will not work on older
+	  machines such as the z990, z890, z900, z800, z9-109, z9-ec
+	  and z9-bc.
+
 endchoice
 
 config PACK_STACK

@@ -343,16 +359,6 @@ config QDIO
 
 	  If unsure, say Y.
 
-config QDIO_DEBUG
-	bool "Extended debugging information"
-	depends on QDIO
-	help
-	  Say Y here to get extended debugging output in
-	  /sys/kernel/debug/s390dbf/qdio...
-	  Warning: this option reduces the performance of the QDIO module.
-
-	  If unsure, say N.
-
 config CHSC_SCH
 	tristate "Support for CHSC subchannels"
 	help

@@ -466,22 +472,9 @@ config PAGE_STATES
 	  hypervisor. The ESSA instruction is used to do the states
 	  changes between a page that has content and the unused state.
 
-config VIRT_TIMER
-	bool "Virtual CPU timer support"
-	help
-	  This provides a kernel interface for virtual CPU timers.
-	  Default is disabled.
-
-config VIRT_CPU_ACCOUNTING
-	bool "Base user process accounting on virtual cpu timer"
-	depends on VIRT_TIMER
-	help
-	  Select this option to use CPU timer deltas to do user
-	  process accounting.
-
 config APPLDATA_BASE
 	bool "Linux - VM Monitor Stream, base infrastructure"
-	depends on PROC_FS && VIRT_TIMER=y
+	depends on PROC_FS
 	help
 	  This provides a kernel interface for creating and updating z/VM APPLDATA
 	  monitor records. The monitor records are updated at certain time
@@ -34,6 +34,7 @@ cflags-$(CONFIG_MARCH_G5) += $(call cc-option,-march=g5)
 cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900)
 cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990)
 cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109)
+cflags-$(CONFIG_MARCH_Z10) += $(call cc-option,-march=z10)
 
 #KBUILD_IMAGE is necessary for make rpm
 KBUILD_IMAGE :=arch/s390/boot/image
@@ -26,10 +26,6 @@
 #define CTL_APPLDATA_NET_SUM	2125
 #define CTL_APPLDATA_PROC	2126
 
-#define P_INFO(x...)	printk(KERN_INFO MY_PRINT_NAME " info: " x)
-#define P_ERROR(x...)	printk(KERN_ERR MY_PRINT_NAME " error: " x)
-#define P_WARNING(x...)	printk(KERN_WARNING MY_PRINT_NAME " status: " x)
-
 struct appldata_ops {
 	struct list_head list;
 	struct ctl_table_header *sysctl_header;
@@ -10,6 +10,9 @@
  * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
  */
 
+#define KMSG_COMPONENT "appldata"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>

@@ -32,7 +35,6 @@
 #include "appldata.h"
 
 
-#define MY_PRINT_NAME	"appldata"	/* for debug messages, etc. */
 #define APPLDATA_CPU_INTERVAL	10000	/* default (CPU) time for
 					   sampling interval in
 					   milliseconds */

@@ -390,8 +392,8 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
 				   (unsigned long) ops->data, ops->size,
 				   ops->mod_lvl);
 		if (rc != 0) {
-			P_ERROR("START DIAG 0xDC for %s failed, "
-				"return code: %d\n", ops->name, rc);
+			pr_err("Starting the data collection for %s "
+			       "failed with rc=%d\n", ops->name, rc);
 			module_put(ops->owner);
 		} else
 			ops->active = 1;

@@ -401,8 +403,8 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
 				   (unsigned long) ops->data, ops->size,
 				   ops->mod_lvl);
 		if (rc != 0)
-			P_ERROR("STOP DIAG 0xDC for %s failed, "
-				"return code: %d\n", ops->name, rc);
+			pr_err("Stopping the data collection for %s "
+			       "failed with rc=%d\n", ops->name, rc);
 		module_put(ops->owner);
 	}
 	spin_unlock(&appldata_ops_lock);
@@ -9,6 +9,9 @@
  * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
  */
 
+#define KMSG_COMPONENT "appldata"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>

@@ -22,7 +25,6 @@
 #include "appldata.h"
 
 
-#define MY_PRINT_NAME	"appldata_os"	/* for debug messages, etc. */
 #define LOAD_INT(x) ((x) >> FSHIFT)
 #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
 

@@ -143,21 +145,16 @@ static void appldata_get_os_data(void *data)
 					   (unsigned long) ops.data, new_size,
 					   ops.mod_lvl);
 			if (rc != 0)
-				P_ERROR("os: START NEW DIAG 0xDC failed, "
-					"return code: %d, new size = %i\n", rc,
-					new_size);
+				pr_err("Starting a new OS data collection "
+				       "failed with rc=%d\n", rc);
 
 			rc = appldata_diag(APPLDATA_RECORD_OS_ID,
 					   APPLDATA_STOP_REC,
 					   (unsigned long) ops.data, ops.size,
 					   ops.mod_lvl);
 			if (rc != 0)
-				P_ERROR("os: STOP OLD DIAG 0xDC failed, "
-					"return code: %d, old size = %i\n", rc,
-					ops.size);
-			else
-				P_INFO("os: old record size = %i stopped\n",
-					ops.size);
+				pr_err("Stopping a faulty OS data "
+				       "collection failed with rc=%d\n", rc);
 		}
 		ops.size = new_size;
 	}

@@ -178,8 +175,8 @@ static int __init appldata_os_init(void)
 	max_size = sizeof(struct appldata_os_data) +
 		   (NR_CPUS * sizeof(struct appldata_os_per_cpu));
 	if (max_size > APPLDATA_MAX_REC_SIZE) {
-		P_ERROR("Max. size of OS record = %i, bigger than maximum "
-			"record size (%i)\n", max_size, APPLDATA_MAX_REC_SIZE);
+		pr_err("Maximum OS record size %i exceeds the maximum "
+		       "record size %i\n", max_size, APPLDATA_MAX_REC_SIZE);
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -17,6 +17,9 @@
  *
  */
 
+#define KMSG_COMPONENT "aes_s390"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <crypto/aes.h>
 #include <crypto/algapi.h>
 #include <linux/err.h>

@@ -169,7 +172,8 @@ static int fallback_init_cip(struct crypto_tfm *tfm)
 			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
 
 	if (IS_ERR(sctx->fallback.cip)) {
-		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+		pr_err("Allocating AES fallback algorithm %s failed\n",
+		       name);
 		return PTR_ERR(sctx->fallback.blk);
 	}
 

@@ -349,7 +353,8 @@ static int fallback_init_blk(struct crypto_tfm *tfm)
 			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
 
 	if (IS_ERR(sctx->fallback.blk)) {
-		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+		pr_err("Allocating AES fallback algorithm %s failed\n",
+		       name);
 		return PTR_ERR(sctx->fallback.blk);
 	}
 

@@ -515,9 +520,8 @@ static int __init aes_s390_init(void)
 
 	/* z9 109 and z9 BC/EC only support 128 bit key length */
 	if (keylen_flag == AES_KEYLEN_128)
-		printk(KERN_INFO
-		       "aes_s390: hardware acceleration only available for "
-		       "128 bit keys\n");
+		pr_info("AES hardware acceleration is only available for"
+			" 128-bit keys\n");
 
 	ret = crypto_register_alg(&aes_alg);
 	if (ret)
@@ -3,10 +3,13 @@
  * Hypervisor filesystem for Linux on s390. Diag 204 and 224
  * implementation.
  *
- * Copyright (C) IBM Corp. 2006
+ * Copyright IBM Corp. 2006, 2008
  * Author(s): Michael Holzheu <holzheu@de.ibm.com>
  */
 
+#define KMSG_COMPONENT "hypfs"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/string.h>

@@ -527,13 +530,14 @@ __init int hypfs_diag_init(void)
 	int rc;
 
 	if (diag204_probe()) {
-		printk(KERN_ERR "hypfs: diag 204 not working.");
+		pr_err("The hardware system does not support hypfs\n");
 		return -ENODATA;
 	}
 	rc = diag224_get_name_table();
 	if (rc) {
 		diag204_free_buffer();
-		printk(KERN_ERR "hypfs: could not get name table.\n");
+		pr_err("The hardware system does not provide all "
+		       "functions required by hypfs\n");
 	}
 	return rc;
 }
@@ -2,10 +2,13 @@
  *  arch/s390/hypfs/inode.c
  *    Hypervisor filesystem for Linux on s390.
  *
- *    Copyright (C) IBM Corp. 2006
+ *    Copyright IBM Corp. 2006, 2008
  *    Author(s): Michael Holzheu <holzheu@de.ibm.com>
  */
 
+#define KMSG_COMPONENT "hypfs"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/fs.h>

@@ -200,7 +203,7 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	else
 		rc = hypfs_diag_create_files(sb, sb->s_root);
 	if (rc) {
-		printk(KERN_ERR "hypfs: Update failed\n");
+		pr_err("Updating the hypfs tree failed\n");
 		hypfs_delete_tree(sb->s_root);
 		goto out;
 	}

@@ -252,8 +255,7 @@ static int hypfs_parse_options(char *options, struct super_block *sb)
 			break;
 		case opt_err:
 		default:
-			printk(KERN_ERR "hypfs: Unrecognized mount option "
-			       "\"%s\" or missing value\n", str);
+			pr_err("%s is not a valid mount option\n", str);
 			return -EINVAL;
 		}
 	}

@@ -317,7 +319,7 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
 	}
 	hypfs_update_update(sb);
 	sb->s_root = root_dentry;
-	printk(KERN_INFO "hypfs: Hypervisor filesystem mounted\n");
+	pr_info("Hypervisor filesystem mounted\n");
 	return 0;
 
 err_tree:

@@ -513,7 +515,7 @@ fail_sysfs:
 	if (!MACHINE_IS_VM)
 		hypfs_diag_exit();
 fail_diag:
-	printk(KERN_ERR "hypfs: Initialization failed with rc = %i.\n", rc);
+	pr_err("Initialization of hypfs failed with rc=%i\n", rc);
 	return rc;
 }
@@ -1,4 +1,6 @@
 #ifndef __ASMS390_AUXVEC_H
 #define __ASMS390_AUXVEC_H
 
+#define AT_SYSINFO_EHDR		33
+
 #endif
@@ -47,7 +47,10 @@
 
 #endif /* CONFIG_DEBUG_BUGVERBOSE */
 
-#define BUG() __EMIT_BUG(0)
+#define BUG() do {					\
+	__EMIT_BUG(0);					\
+	for (;;);					\
+} while (0)
 
 #define WARN_ON(x) ({					\
 	int __ret_warn_on = !!(x);			\
@@ -11,32 +11,39 @@
 
 #include <asm/types.h>
 
-#ifdef __GNUC__
+#define __BIG_ENDIAN
+
+#ifndef __s390x__
+# define __SWAB_64_THRU_32__
+#endif
 
 #ifdef __s390x__
-static inline __u64 ___arch__swab64p(const __u64 *x)
+static inline __u64 __arch_swab64p(const __u64 *x)
 {
 	__u64 result;
 
 	asm volatile("lrvg %0,%1" : "=d" (result) : "m" (*x));
 	return result;
 }
+#define __arch_swab64p __arch_swab64p
 
-static inline __u64 ___arch__swab64(__u64 x)
+static inline __u64 __arch_swab64(__u64 x)
 {
 	__u64 result;
 
 	asm volatile("lrvgr %0,%1" : "=d" (result) : "d" (x));
 	return result;
 }
+#define __arch_swab64 __arch_swab64
 
-static inline void ___arch__swab64s(__u64 *x)
+static inline void __arch_swab64s(__u64 *x)
 {
-	*x = ___arch__swab64p(x);
+	*x = __arch_swab64p(x);
 }
+#define __arch_swab64s __arch_swab64s
 #endif /* __s390x__ */
 
-static inline __u32 ___arch__swab32p(const __u32 *x)
+static inline __u32 __arch_swab32p(const __u32 *x)
 {
 	__u32 result;
 

@@ -53,25 +60,20 @@ static inline __u32 ___arch__swab32p(const __u32 *x)
 #endif /* __s390x__ */
 	return result;
 }
+#define __arch_swab32p __arch_swab32p
 
-static inline __u32 ___arch__swab32(__u32 x)
+#ifdef __s390x__
+static inline __u32 __arch_swab32(__u32 x)
 {
-#ifndef __s390x__
-	return ___arch__swab32p(&x);
-#else /* __s390x__ */
 	__u32 result;
 
 	asm volatile("lrvr %0,%1" : "=d" (result) : "d" (x));
 	return result;
+}
+#define __arch_swab32 __arch_swab32
 #endif /* __s390x__ */
-}
 
-static __inline__ void ___arch__swab32s(__u32 *x)
-{
-	*x = ___arch__swab32p(x);
-}
-
-static __inline__ __u16 ___arch__swab16p(const __u16 *x)
+static inline __u16 __arch_swab16p(const __u16 *x)
 {
 	__u16 result;
 

@@ -86,40 +88,8 @@ static __inline__ __u16 ___arch__swab16p(const __u16 *x)
 #endif /* __s390x__ */
 	return result;
 }
+#define __arch_swab16p __arch_swab16p
 
-static __inline__ __u16 ___arch__swab16(__u16 x)
-{
-	return ___arch__swab16p(&x);
-}
-
-static __inline__ void ___arch__swab16s(__u16 *x)
-{
-	*x = ___arch__swab16p(x);
-}
-
-#ifdef __s390x__
-#define __arch__swab64(x) ___arch__swab64(x)
-#define __arch__swab64p(x) ___arch__swab64p(x)
-#define __arch__swab64s(x) ___arch__swab64s(x)
-#endif /* __s390x__ */
-#define __arch__swab32(x) ___arch__swab32(x)
-#define __arch__swab16(x) ___arch__swab16(x)
-#define __arch__swab32p(x) ___arch__swab32p(x)
-#define __arch__swab16p(x) ___arch__swab16p(x)
-#define __arch__swab32s(x) ___arch__swab32s(x)
-#define __arch__swab16s(x) ___arch__swab16s(x)
-
-#ifndef __s390x__
-#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
-#  define __BYTEORDER_HAS_U64__
-#  define __SWAB_64_THRU_32__
-#endif
-#else /* __s390x__ */
-#define __BYTEORDER_HAS_U64__
-#endif /* __s390x__ */
-
-#endif /* __GNUC__ */
-
-#include <linux/byteorder/big_endian.h>
+#include <linux/byteorder.h>
 
 #endif /* _S390_BYTEORDER_H */
@@ -120,6 +120,10 @@ typedef s390_compat_regs compat_elf_gregset_t;
 #include <asm/system.h>		/* for save_access_regs */
 #include <asm/mmu_context.h>
 
+#include <asm/vdso.h>
+
+extern unsigned int vdso_enabled;
+
 /*
  * This is used to ensure we don't load something for the wrong architecture.
  */

@@ -191,4 +195,16 @@ do {							\
 	current->mm->context.noexec == 0;		\
 })
 
+#define ARCH_DLINFO							\
+do {									\
+	if (vdso_enabled)						\
+		NEW_AUX_ENT(AT_SYSINFO_EHDR,				\
+			    (unsigned long)current->mm->context.vdso_base); \
+} while (0)
+
+struct linux_binprm;
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+int arch_setup_additional_pages(struct linux_binprm *, int);
+
 #endif
@@ -248,8 +248,8 @@ struct dcw {
 #define TCCB_MAX_SIZE		(sizeof(struct tccb_tcah) + \
 				 TCCB_MAX_DCW * sizeof(struct dcw) + \
 				 sizeof(struct tccb_tcat))
-#define TCCB_SAC_DEFAULT	0xf901
-#define TCCB_SAC_INTRG		0xf902
+#define TCCB_SAC_DEFAULT	0x1ffe
+#define TCCB_SAC_INTRG		0x1fff
 
 /**
  * struct tccb_tcah - Transport-Command-Area Header (TCAH)
@@ -0,0 +1,8 @@
+#ifndef _ASM_S390_FTRACE_H
+#define _ASM_S390_FTRACE_H
+
+#ifndef __ASSEMBLY__
+extern void _mcount(void);
+#endif
+
+#endif /* _ASM_S390_FTRACE_H */
@@ -17,6 +17,7 @@
 #define CHSC_SCH_ISC 7			/* CHSC subchannels */
 /* Adapter interrupts. */
 #define QDIO_AIRQ_ISC IO_SCH_ISC	/* I/O subchannel in qdio mode */
+#define AP_ISC 6			/* adjunct processor (crypto) devices */
 
 /* Functions for registration of I/O interruption subclasses */
 void isc_register(unsigned int isc);
@@ -6,6 +6,7 @@ typedef struct {
 	struct list_head pgtable_list;
 	unsigned long asce_bits;
 	unsigned long asce_limit;
+	unsigned long vdso_base;
 	int noexec;
 	int has_pgste;	 /* The mmu context has extended page tables */
 	int alloc_pgste; /* cloned contexts will have extended page tables */
@@ -152,4 +152,6 @@ void arch_alloc_page(struct page *page, int order);
 #include <asm-generic/memory_model.h>
 #include <asm-generic/page.h>
 
+#define __HAVE_ARCH_GATE_AREA 1
+
 #endif /* _S390_PAGE_H */
@@ -28,6 +28,8 @@ void disable_noexec(struct mm_struct *, struct task_struct *);
 
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
+	typedef struct { char _[n]; } addrtype;
+
 	*s = val;
 	n = (n / 256) - 1;
 	asm volatile(

@@ -39,7 +41,8 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 		"0: mvc 256(256,%0),0(%0)\n"
 		"   la %0,256(%0)\n"
 		"   brct %1,0b\n"
-		: "+a" (s), "+d" (n));
+		: "+a" (s), "+d" (n), "=m" (*(addrtype *) s)
+		: "m" (*(addrtype *) s));
 }
 
 static inline void crst_table_init(unsigned long *crst, unsigned long entry)
@@ -13,6 +13,7 @@
 #ifndef __ASM_S390_PROCESSOR_H
 #define __ASM_S390_PROCESSOR_H
 
+#include <linux/linkage.h>
 #include <asm/ptrace.h>
 
 #ifdef __KERNEL__

@@ -258,7 +259,7 @@ static inline void enabled_wait(void)
  * Function to drop a processor into disabled wait state
  */
 
-static inline void disabled_wait(unsigned long code)
+static inline void ATTRIB_NORET disabled_wait(unsigned long code)
 {
 	unsigned long ctl_buf;
 	psw_t dw_psw;

@@ -322,6 +323,7 @@ static inline void disabled_wait(unsigned long code)
 		  : "=m" (ctl_buf)
 		  : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0");
 #endif /* __s390x__ */
+	while (1);
 }
 
 /*
@@ -373,16 +373,16 @@ struct qdio_initialize {
 #define QDIO_FLAG_SYNC_OUTPUT		0x02
 #define QDIO_FLAG_PCI_OUT		0x10
 
-extern int qdio_initialize(struct qdio_initialize *init_data);
-extern int qdio_allocate(struct qdio_initialize *init_data);
-extern int qdio_establish(struct qdio_initialize *init_data);
+extern int qdio_initialize(struct qdio_initialize *);
+extern int qdio_allocate(struct qdio_initialize *);
+extern int qdio_establish(struct qdio_initialize *);
 extern int qdio_activate(struct ccw_device *);
 
-extern int do_QDIO(struct ccw_device*, unsigned int flags,
-		   int q_nr, int qidx, int count);
-extern int qdio_cleanup(struct ccw_device*, int how);
-extern int qdio_shutdown(struct ccw_device*, int how);
+extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
+		   int q_nr, int bufnr, int count);
+extern int qdio_cleanup(struct ccw_device*, int);
+extern int qdio_shutdown(struct ccw_device*, int);
 extern int qdio_free(struct ccw_device *);
-extern struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev);
+extern int qdio_get_ssqd_desc(struct ccw_device *dev, struct qdio_ssqd_desc*);
 
 #endif /* __QDIO_H__ */
@@ -61,6 +61,7 @@ typedef enum
 {
 	ec_schedule=0,
 	ec_call_function,
+	ec_call_function_single,
 	ec_bit_last
 } ec_bit_sig;
 
@@ -91,8 +91,9 @@ extern int __cpu_up (unsigned int cpu);
 extern struct mutex smp_cpu_state_mutex;
 extern int smp_cpu_polarization[];
 
-extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
-				  void *info, int wait);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
 #endif
 
 #ifndef CONFIG_SMP
@@ -118,4 +118,15 @@ static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
 	return r0;
 }
 
+/*
+ * Service level reporting interface.
+ */
+struct service_level {
+	struct list_head list;
+	void (*seq_print)(struct seq_file *, struct service_level *);
+};
+
+int register_service_level(struct service_level *);
+int unregister_service_level(struct service_level *);
+
 #endif /* __ASM_S390_SYSINFO_H */
@@ -12,6 +12,7 @@
 #define __ASM_SYSTEM_H
 
 #include <linux/kernel.h>
+#include <linux/errno.h>
 #include <asm/types.h>
 #include <asm/ptrace.h>
 #include <asm/setup.h>

@@ -98,13 +99,9 @@ static inline void restore_access_regs(unsigned int *acrs)
 	prev = __switch_to(prev,next);					\
 } while (0)
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 extern void account_vtime(struct task_struct *);
 extern void account_tick_vtime(struct task_struct *);
 extern void account_system_vtime(struct task_struct *);
-#else
-#define account_vtime(x) do { /* empty */ } while (0)
-#endif
 
 #ifdef CONFIG_PFAULT
 extern void pfault_irq_init(void);

@@ -413,8 +410,6 @@ __set_psw_mask(unsigned long mask)
 #define local_mcck_enable()  __set_psw_mask(psw_kernel_bits)
 #define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
 
-int stfle(unsigned long long *list, int doublewords);
-
 #ifdef CONFIG_SMP
 
 extern void smp_ctl_set_bit(int cr, int bit);

@@ -438,6 +433,23 @@ static inline unsigned int stfl(void)
 	return S390_lowcore.stfl_fac_list;
 }
 
+static inline int __stfle(unsigned long long *list, int doublewords)
+{
+	typedef struct { unsigned long long _[doublewords]; } addrtype;
+	register unsigned long __nr asm("0") = doublewords - 1;
+
+	asm volatile(".insn s,0xb2b00000,%0" /* stfle */
+		     : "=m" (*(addrtype *) list), "+d" (__nr) : : "cc");
+	return __nr + 1;
+}
+
+static inline int stfle(unsigned long long *list, int doublewords)
+{
+	if (!(stfl() & (1UL << 24)))
+		return -EOPNOTSUPP;
+	return __stfle(list, doublewords);
+}
+
 static inline unsigned short stap(void)
 {
 	unsigned short cpu_address;
@@ -48,18 +48,9 @@ extern int del_virt_timer(struct vtimer_list *timer);
 extern void init_cpu_vtimer(void);
 extern void vtime_init(void);
 
-#ifdef CONFIG_VIRT_TIMER
-
 extern void vtime_start_cpu_timer(void);
 extern void vtime_stop_cpu_timer(void);
 
-#else
-
-static inline void vtime_start_cpu_timer(void) { }
-static inline void vtime_stop_cpu_timer(void) { }
-
-#endif /* CONFIG_VIRT_TIMER */
-
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_S390_TIMER_H */
@@ -0,0 +1,39 @@
+#ifndef __S390_VDSO_H__
+#define __S390_VDSO_H__
+
+#ifdef __KERNEL__
+
+/* Default link addresses for the vDSOs */
+#define VDSO32_LBASE	0
+#define VDSO64_LBASE	0
+
+#define VDSO_VERSION_STRING	LINUX_2.6.26
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Note about this structure:
+ *
+ * NEVER USE THIS IN USERSPACE CODE DIRECTLY. The layout of this
+ * structure is supposed to be known only to the function in the vdso
+ * itself and may change without notice.
+ */
+
+struct vdso_data {
+	__u64 tb_update_count;		/* Timebase atomicity ctr	0x00 */
+	__u64 xtime_tod_stamp;		/* TOD clock for xtime		0x08 */
+	__u64 xtime_clock_sec;		/* Kernel time			0x10 */
+	__u64 xtime_clock_nsec;		/*				0x18 */
+	__u64 wtom_clock_sec;		/* Wall to monotonic clock	0x20 */
+	__u64 wtom_clock_nsec;		/*				0x28 */
+	__u32 tz_minuteswest;		/* Minutes west of Greenwich	0x30 */
+	__u32 tz_dsttime;		/* Type of dst correction	0x34 */
+};
+
+extern struct vdso_data *vdso_data;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* __S390_VDSO_H__ */
@@ -2,6 +2,11 @@
 # Makefile for the linux kernel.
 #
 
+ifdef CONFIG_FUNCTION_TRACER
+# Do not trace early boot code
+CFLAGS_REMOVE_early.o = -pg
+endif
+
 #
 # Passing null pointers is ok for smp code, since we access the lowcore here.
 #

@@ -12,9 +17,10 @@ CFLAGS_smp.o	:= -Wno-nonnull
 #
 CFLAGS_ptrace.o		+= -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
-obj-y	:=  bitmap.o traps.o time.o process.o base.o early.o \
-            setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
-            s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o
+obj-y	:=  bitmap.o traps.o time.o process.o base.o early.o setup.o \
+	    processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
+	    s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \
+	    vdso.o vtime.o
 
 obj-y	+= $(if $(CONFIG_64BIT),entry64.o,entry.o)
 obj-y	+= $(if $(CONFIG_64BIT),reipl64.o,reipl.o)

@@ -30,12 +36,16 @@ obj-$(CONFIG_COMPAT)		+= compat_linux.o compat_signal.o \
 					compat_wrapper.o compat_exec_domain.o \
 					$(compat-obj-y)
 
-obj-$(CONFIG_VIRT_TIMER)	+= vtime.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
+obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o
 
 # Kexec part
 S390_KEXEC_OBJS := machine_kexec.o crash.o
 S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
 obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS)
+
+# vdso
+obj-$(CONFIG_64BIT)		+= vdso64/
+obj-$(CONFIG_32BIT)		+= vdso32/
+obj-$(CONFIG_COMPAT)		+= vdso32/
@@ -6,6 +6,7 @@
 
 #include <linux/sched.h>
 #include <linux/kbuild.h>
+#include <asm/vdso.h>
 
 int main(void)
 {

@@ -38,5 +39,19 @@ int main(void)
 	DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain));
 	DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs));
 	DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1));
+	BLANK();
+	/* timeval/timezone offsets for use by vdso */
+	DEFINE(__VDSO_UPD_COUNT, offsetof(struct vdso_data, tb_update_count));
+	DEFINE(__VDSO_XTIME_STAMP, offsetof(struct vdso_data, xtime_tod_stamp));
+	DEFINE(__VDSO_XTIME_SEC, offsetof(struct vdso_data, xtime_clock_sec));
+	DEFINE(__VDSO_XTIME_NSEC, offsetof(struct vdso_data, xtime_clock_nsec));
+	DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec));
+	DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
+	DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
+	/* constants used by the vdso */
+	DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
+	DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
+	DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
+
 	return 0;
 }
@@ -7,6 +7,9 @@
  *		 Christian Borntraeger (cborntra@de.ibm.com),
  */
 
+#define KMSG_COMPONENT "cpcmd"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>

@@ -104,8 +107,8 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
 	    (((unsigned long)response + rlen) >> 31)) {
 		lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA);
 		if (!lowbuf) {
-			printk(KERN_WARNING
-				"cpcmd: could not allocate response buffer\n");
+			pr_warning("The cpcmd kernel function failed to "
+				   "allocate a response buffer\n");
 			return -ENOMEM;
 		}
 		spin_lock_irqsave(&cpcmd_lock, flags);
@@ -10,6 +10,9 @@
  *    Bugreports to: <Linux390@de.ibm.com>
  */
 
+#define KMSG_COMPONENT "s390dbf"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/stddef.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>

@@ -388,7 +391,7 @@ debug_info_copy(debug_info_t* in, int mode)
 		debug_info_free(rc);
 	} while (1);
 
-	if(!rc || (mode == NO_AREAS))
+	if (mode == NO_AREAS)
 		goto out;
 
 	for(i = 0; i < in->nr_areas; i++){

@@ -693,8 +696,8 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area,
 	/* Since debugfs currently does not support uid/gid other than root, */
 	/* we do not allow gid/uid != 0 until we get support for that. */
 	if ((uid != 0) || (gid != 0))
-		printk(KERN_WARNING "debug: Warning - Currently only uid/gid "
-		       "= 0 are supported. Using root as owner now!");
+		pr_warning("Root becomes the owner of all s390dbf files "
+			   "in sysfs\n");
 	if (!initialized)
 		BUG();
 	mutex_lock(&debug_mutex);

@@ -709,7 +712,7 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area,
 	debug_register_view(rc, &debug_pages_view);
 out:
 	if (!rc){
-		printk(KERN_ERR "debug: debug_register failed for %s\n",name);
+		pr_err("Registering debug feature %s failed\n", name);
 	}
 	mutex_unlock(&debug_mutex);
 	return rc;

@@ -763,8 +766,8 @@ debug_set_size(debug_info_t* id, int nr_areas, int pages_per_area)
 	if(pages_per_area > 0){
 		new_areas = debug_areas_alloc(pages_per_area, nr_areas);
 		if(!new_areas) {
-			printk(KERN_WARNING "debug: could not allocate memory "\
-					 "for pagenumber: %i\n",pages_per_area);
+			pr_info("Allocating memory for %i pages failed\n",
+				pages_per_area);
 			rc = -ENOMEM;
 			goto out;
 		}

@@ -780,8 +783,7 @@ debug_set_size(debug_info_t* id, int nr_areas, int pages_per_area)
 	memset(id->active_entries,0,sizeof(int)*id->nr_areas);
 	memset(id->active_pages, 0, sizeof(int)*id->nr_areas);
 	spin_unlock_irqrestore(&id->lock,flags);
-	printk(KERN_INFO "debug: %s: set new size (%i pages)\n"\
-		 ,id->name, pages_per_area);
+	pr_info("%s: set new size (%i pages)\n" ,id->name, pages_per_area);
 out:
 	return rc;
 }

@@ -800,10 +802,9 @@ debug_set_level(debug_info_t* id, int new_level)
 	spin_lock_irqsave(&id->lock,flags);
 	if(new_level == DEBUG_OFF_LEVEL){
 		id->level = DEBUG_OFF_LEVEL;
-		printk(KERN_INFO "debug: %s: switched off\n",id->name);
+		pr_info("%s: switched off\n",id->name);
 	} else if ((new_level > DEBUG_MAX_LEVEL) || (new_level < 0)) {
-		printk(KERN_INFO
-			"debug: %s: level %i is out of range (%i - %i)\n",
+		pr_info("%s: level %i is out of range (%i - %i)\n",
 			id->name, new_level, 0, DEBUG_MAX_LEVEL);
 	} else {
 		id->level = new_level;

@@ -1108,8 +1109,8 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
 	pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry,
 				id , &debug_file_ops);
 	if (!pde){
-		printk(KERN_WARNING "debug: debugfs_create_file() failed!"\
-			" Cannot register view %s/%s\n", id->name,view->name);
+		pr_err("Registering view %s/%s failed due to out of "
+		       "memory\n", id->name,view->name);
 		rc = -1;
 		goto out;
 	}

@@ -1119,10 +1120,8 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
 			break;
 	}
 	if (i == DEBUG_MAX_VIEWS) {
-		printk(KERN_WARNING "debug: cannot register view %s/%s\n",
-			id->name,view->name);
-		printk(KERN_WARNING
-			"debug: maximum number of views reached (%i)!\n", i);
+		pr_err("Registering view %s/%s would exceed the maximum "
+		       "number of views %i\n", id->name, view->name, i);
 		debugfs_remove(pde);
 		rc = -1;
 	} else {

@@ -1303,7 +1302,8 @@ debug_input_level_fn(debug_info_t * id, struct debug_view *view,
 		new_level = debug_get_uint(str);
 	}
 	if(new_level < 0) {
-		printk(KERN_INFO "debug: level `%s` is not valid\n", str);
+		pr_warning("%s is not a valid level for a debug "
+			   "feature\n", str);
 		rc = -EINVAL;
 	} else {
 		debug_set_level(id, new_level);

@@ -1380,7 +1380,8 @@ debug_input_flush_fn(debug_info_t * id, struct debug_view *view,
 		goto out;
 	}
 
-	printk(KERN_INFO "debug: area `%c` is not valid\n", input_buf[0]);
+	pr_info("Flushing debug data failed because %c is not a valid "
+		"area\n", input_buf[0]);
 
 out:
 	*offset += user_len;
@@ -109,13 +109,6 @@ STACK_SIZE  = 1 << STACK_SHIFT
  *	 R15 - kernel stack pointer
  */
 
-	.macro	STORE_TIMER lc_offset
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-	stpt	\lc_offset
-#endif
-	.endm
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	.macro	UPDATE_VTIME lc_from,lc_to,lc_sum
 	lm	%r10,%r11,\lc_from
 	sl	%r10,\lc_to

@@ -128,7 +121,6 @@ STACK_SIZE  = 1 << STACK_SHIFT
 	al	%r10,BASED(.Lc_1)
 1:	stm	%r10,%r11,\lc_sum
 	.endm
-#endif
 
 	.macro	SAVE_ALL_BASE savearea
 	stm	%r12,%r15,\savearea

@@ -198,7 +190,7 @@ STACK_SIZE  = 1 << STACK_SHIFT
 	ni	\psworg+1,0xfd		# clear wait state bit
 	.endif
 	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15 of user
-	STORE_TIMER __LC_EXIT_TIMER
+	stpt	__LC_EXIT_TIMER
 	lpsw	\psworg			# back to caller
 	.endm
 

@@ -247,20 +239,18 @@ __critical_start:
 
 	.globl	system_call
 system_call:
-	STORE_TIMER __LC_SYNC_ENTER_TIMER
+	stpt	__LC_SYNC_ENTER_TIMER
 sysc_saveall:
 	SAVE_ALL_BASE __LC_SAVE_AREA
 	SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	lh	%r7,0x8a	  # get svc number from lowcore
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 sysc_vtime:
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
sysc_stime:
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
sysc_update:
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-#endif
 sysc_do_svc:
 	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	ltr	%r7,%r7			# test for svc 0

@@ -436,7 +426,7 @@ ret_from_fork:
 	basr	%r14,%r1
 	TRACE_IRQS_ON
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
-	b	BASED(sysc_return)
+	b	BASED(sysc_tracenogo)
 
 #
 # kernel_execve function needs to deal with pt_regs that is not

@@ -490,20 +480,18 @@ pgm_check_handler:
  * we just ignore the PER event (FIXME: is there anything we have to do
  * for LPSW?).
  */
-	STORE_TIMER __LC_SYNC_ENTER_TIMER
+	stpt	__LC_SYNC_ENTER_TIMER
 	SAVE_ALL_BASE __LC_SAVE_AREA
 	tm	__LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
 	bnz	BASED(pgm_per)		# got per exception -> special case
 	SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
 	bz	BASED(pgm_no_vtime)
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 pgm_no_vtime:
-#endif
 	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	TRACE_IRQS_OFF
 	l	%r3,__LC_PGM_ILC	# load program interruption code

@@ -536,14 +524,12 @@ pgm_per:
 pgm_per_std:
 	SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
 	bz	BASED(pgm_no_vtime2)
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 pgm_no_vtime2:
-#endif
 	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	TRACE_IRQS_OFF
 	l	%r1,__TI_task(%r9)

@@ -565,11 +551,9 @@ pgm_no_vtime2:
 pgm_svcper:
 	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-#endif
 	lh	%r7,0x8a		# get svc number from lowcore
 	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	TRACE_IRQS_OFF

@@ -599,19 +583,17 @@ kernel_per:
 
 	.globl io_int_handler
 io_int_handler:
-	STORE_TIMER __LC_ASYNC_ENTER_TIMER
+	stpt	__LC_ASYNC_ENTER_TIMER
 	stck	__LC_INT_CLOCK
 	SAVE_ALL_BASE __LC_SAVE_AREA+16
 	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
 	CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
 	bz	BASED(io_no_vtime)
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
 io_no_vtime:
-#endif
 	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	TRACE_IRQS_OFF
 	l	%r1,BASED(.Ldo_IRQ)	# load address of do_IRQ

@@ -741,19 +723,17 @@ io_notify_resume:
 
 	.globl	ext_int_handler
 ext_int_handler:
-	STORE_TIMER __LC_ASYNC_ENTER_TIMER
+	stpt	__LC_ASYNC_ENTER_TIMER
 	stck	__LC_INT_CLOCK
 	SAVE_ALL_BASE __LC_SAVE_AREA+16
 	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
 	CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
 	bz	BASED(ext_no_vtime)
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
 ext_no_vtime:
-#endif
|
||||||
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
|
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
|
||||||
TRACE_IRQS_OFF
|
TRACE_IRQS_OFF
|
||||||
la %r2,SP_PTREGS(%r15) # address of register-save area
|
la %r2,SP_PTREGS(%r15) # address of register-save area
|
||||||
|
@ -776,7 +756,6 @@ mcck_int_handler:
|
||||||
la %r12,__LC_MCK_OLD_PSW
|
la %r12,__LC_MCK_OLD_PSW
|
||||||
tm __LC_MCCK_CODE,0x80 # system damage?
|
tm __LC_MCCK_CODE,0x80 # system damage?
|
||||||
bo BASED(mcck_int_main) # yes -> rest of mcck code invalid
|
bo BASED(mcck_int_main) # yes -> rest of mcck code invalid
|
||||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
|
||||||
mvc __LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER
|
mvc __LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER
|
||||||
mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
|
mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
|
||||||
tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
|
tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
|
||||||
|
@ -793,9 +772,7 @@ mcck_int_handler:
|
||||||
la %r14,__LC_LAST_UPDATE_TIMER
|
la %r14,__LC_LAST_UPDATE_TIMER
|
||||||
0: spt 0(%r14)
|
0: spt 0(%r14)
|
||||||
mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14)
|
mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14)
|
||||||
1:
|
1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
|
||||||
#endif
|
|
||||||
tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
|
|
||||||
bno BASED(mcck_int_main) # no -> skip cleanup critical
|
bno BASED(mcck_int_main) # no -> skip cleanup critical
|
||||||
tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
|
tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
|
||||||
bnz BASED(mcck_int_main) # from user -> load async stack
|
bnz BASED(mcck_int_main) # from user -> load async stack
|
||||||
|
@ -812,7 +789,6 @@ mcck_int_main:
|
||||||
be BASED(0f)
|
be BASED(0f)
|
||||||
l %r15,__LC_PANIC_STACK # load panic stack
|
l %r15,__LC_PANIC_STACK # load panic stack
|
||||||
0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32
|
0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32
|
||||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
|
||||||
tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
|
tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
|
||||||
bno BASED(mcck_no_vtime) # no -> skip cleanup critical
|
bno BASED(mcck_no_vtime) # no -> skip cleanup critical
|
||||||
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
|
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
|
||||||
|
@ -821,7 +797,6 @@ mcck_int_main:
|
||||||
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
|
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
|
||||||
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
|
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
|
||||||
mcck_no_vtime:
|
mcck_no_vtime:
|
||||||
#endif
|
|
||||||
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
|
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
|
||||||
la %r2,SP_PTREGS(%r15) # load pt_regs
|
la %r2,SP_PTREGS(%r15) # load pt_regs
|
||||||
l %r1,BASED(.Ls390_mcck)
|
l %r1,BASED(.Ls390_mcck)
|
||||||
|
@ -843,16 +818,13 @@ mcck_no_vtime:
|
||||||
mcck_return:
|
mcck_return:
|
||||||
mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
|
mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
|
||||||
ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
|
ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
|
||||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
|
||||||
mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52
|
mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52
|
||||||
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
|
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
|
||||||
bno BASED(0f)
|
bno BASED(0f)
|
||||||
lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
|
lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
|
||||||
stpt __LC_EXIT_TIMER
|
stpt __LC_EXIT_TIMER
|
||||||
lpsw __LC_RETURN_MCCK_PSW # back to caller
|
lpsw __LC_RETURN_MCCK_PSW # back to caller
|
||||||
0:
|
0: lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
|
||||||
#endif
|
|
||||||
lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
|
|
||||||
lpsw __LC_RETURN_MCCK_PSW # back to caller
|
lpsw __LC_RETURN_MCCK_PSW # back to caller
|
||||||
|
|
||||||
RESTORE_ALL __LC_RETURN_MCCK_PSW,0
|
RESTORE_ALL __LC_RETURN_MCCK_PSW,0
|
||||||
|
@ -976,13 +948,11 @@ cleanup_system_call:
|
||||||
b BASED(1f)
|
b BASED(1f)
|
||||||
0: la %r12,__LC_SAVE_AREA+32
|
0: la %r12,__LC_SAVE_AREA+32
|
||||||
1:
|
1:
|
||||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
|
||||||
clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
|
clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
|
||||||
bh BASED(0f)
|
bh BASED(0f)
|
||||||
mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
|
mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
|
||||||
0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
|
0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
|
||||||
bhe BASED(cleanup_vtime)
|
bhe BASED(cleanup_vtime)
|
||||||
#endif
|
|
||||||
clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
|
clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
|
||||||
bh BASED(0f)
|
bh BASED(0f)
|
||||||
mvc __LC_SAVE_AREA(16),0(%r12)
|
mvc __LC_SAVE_AREA(16),0(%r12)
|
||||||
|
@ -993,7 +963,6 @@ cleanup_system_call:
|
||||||
l %r12,__LC_SAVE_AREA+48 # argh
|
l %r12,__LC_SAVE_AREA+48 # argh
|
||||||
st %r15,12(%r12)
|
st %r15,12(%r12)
|
||||||
lh %r7,0x8a
|
lh %r7,0x8a
|
||||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
|
||||||
cleanup_vtime:
|
cleanup_vtime:
|
||||||
clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
|
clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
|
||||||
bhe BASED(cleanup_stime)
|
bhe BASED(cleanup_stime)
|
||||||
|
@ -1004,18 +973,15 @@ cleanup_stime:
|
||||||
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
|
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
|
||||||
cleanup_update:
|
cleanup_update:
|
||||||
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
|
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
|
||||||
#endif
|
|
||||||
mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
|
mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
|
||||||
la %r12,__LC_RETURN_PSW
|
la %r12,__LC_RETURN_PSW
|
||||||
br %r14
|
br %r14
|
||||||
cleanup_system_call_insn:
|
cleanup_system_call_insn:
|
||||||
.long sysc_saveall + 0x80000000
|
.long sysc_saveall + 0x80000000
|
||||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
|
||||||
.long system_call + 0x80000000
|
.long system_call + 0x80000000
|
||||||
.long sysc_vtime + 0x80000000
|
.long sysc_vtime + 0x80000000
|
||||||
.long sysc_stime + 0x80000000
|
.long sysc_stime + 0x80000000
|
||||||
.long sysc_update + 0x80000000
|
.long sysc_update + 0x80000000
|
||||||
#endif
|
|
||||||
|
|
||||||
cleanup_sysc_return:
|
cleanup_sysc_return:
|
||||||
mvc __LC_RETURN_PSW(4),0(%r12)
|
mvc __LC_RETURN_PSW(4),0(%r12)
|
||||||
|
@ -1026,11 +992,9 @@ cleanup_sysc_return:
|
||||||
cleanup_sysc_leave:
|
cleanup_sysc_leave:
|
||||||
clc 4(4,%r12),BASED(cleanup_sysc_leave_insn)
|
clc 4(4,%r12),BASED(cleanup_sysc_leave_insn)
|
||||||
be BASED(2f)
|
be BASED(2f)
|
||||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
|
||||||
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
|
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
|
||||||
clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4)
|
clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4)
|
||||||
be BASED(2f)
|
be BASED(2f)
|
||||||
#endif
|
|
||||||
mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
|
mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
|
||||||
c %r12,BASED(.Lmck_old_psw)
|
c %r12,BASED(.Lmck_old_psw)
|
||||||
bne BASED(0f)
|
bne BASED(0f)
|
||||||
|
@ -1043,9 +1007,7 @@ cleanup_sysc_leave:
|
||||||
br %r14
|
br %r14
|
||||||
cleanup_sysc_leave_insn:
|
cleanup_sysc_leave_insn:
|
||||||
.long sysc_done - 4 + 0x80000000
|
.long sysc_done - 4 + 0x80000000
|
||||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
|
||||||
.long sysc_done - 8 + 0x80000000
|
.long sysc_done - 8 + 0x80000000
|
||||||
#endif
|
|
||||||
|
|
||||||
cleanup_io_return:
|
cleanup_io_return:
|
||||||
mvc __LC_RETURN_PSW(4),0(%r12)
|
mvc __LC_RETURN_PSW(4),0(%r12)
|
||||||
|
@ -1056,11 +1018,9 @@ cleanup_io_return:
|
||||||
cleanup_io_leave:
|
cleanup_io_leave:
|
||||||
clc 4(4,%r12),BASED(cleanup_io_leave_insn)
|
clc 4(4,%r12),BASED(cleanup_io_leave_insn)
|
||||||
be BASED(2f)
|
be BASED(2f)
|
||||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
|
||||||
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
|
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
|
||||||
clc 4(4,%r12),BASED(cleanup_io_leave_insn+4)
|
clc 4(4,%r12),BASED(cleanup_io_leave_insn+4)
|
||||||
be BASED(2f)
|
be BASED(2f)
|
||||||
#endif
|
|
||||||
mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
|
mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
|
||||||
c %r12,BASED(.Lmck_old_psw)
|
c %r12,BASED(.Lmck_old_psw)
|
||||||
bne BASED(0f)
|
bne BASED(0f)
|
||||||
|
@ -1073,9 +1033,7 @@ cleanup_io_leave:
|
||||||
br %r14
|
br %r14
|
||||||
cleanup_io_leave_insn:
|
cleanup_io_leave_insn:
|
||||||
.long io_done - 4 + 0x80000000
|
.long io_done - 4 + 0x80000000
|
||||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
|
||||||
.long io_done - 8 + 0x80000000
|
.long io_done - 8 + 0x80000000
|
||||||
#endif
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Integer constants
|
* Integer constants
|
||||||
|
|
|
@@ -96,20 +96,12 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 #define LOCKDEP_SYS_EXIT
 #endif

-	.macro	STORE_TIMER lc_offset
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-	stpt	\lc_offset
-#endif
-	.endm
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	.macro	UPDATE_VTIME lc_from,lc_to,lc_sum
 	lg	%r10,\lc_from
 	slg	%r10,\lc_to
 	alg	%r10,\lc_sum
 	stg	%r10,\lc_sum
 	.endm
-#endif

 /*
  * Register usage in interrupt handlers:
@@ -186,7 +178,7 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 	ni	\psworg+1,0xfd		# clear wait state bit
 	.endif
 	lmg	%r0,%r15,SP_R0(%r15)	# load gprs 0-15 of user
-	STORE_TIMER __LC_EXIT_TIMER
+	stpt	__LC_EXIT_TIMER
 	lpswe	\psworg			# back to caller
 	.endm
@@ -233,20 +225,18 @@ __critical_start:
 	.globl system_call
 system_call:
-	STORE_TIMER __LC_SYNC_ENTER_TIMER
+	stpt	__LC_SYNC_ENTER_TIMER
 sysc_saveall:
 	SAVE_ALL_BASE __LC_SAVE_AREA
 	SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	llgh	%r7,__LC_SVC_INT_CODE	# get svc number from lowcore
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 sysc_vtime:
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
 sysc_stime:
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 sysc_update:
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-#endif
 sysc_do_svc:
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	ltgr	%r7,%r7			# test for svc 0
@@ -417,7 +407,7 @@ ret_from_fork:
 0:	brasl	%r14,schedule_tail
 	TRACE_IRQS_ON
 	stosm	24(%r15),0x03		# reenable interrupts
-	j	sysc_return
+	j	sysc_tracenogo

 #
 # kernel_execve function needs to deal with pt_regs that is not
@@ -469,20 +459,18 @@ pgm_check_handler:
  * for LPSW?).
  */
-	STORE_TIMER __LC_SYNC_ENTER_TIMER
+	stpt	__LC_SYNC_ENTER_TIMER
 	SAVE_ALL_BASE __LC_SAVE_AREA
 	tm	__LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
 	jnz	pgm_per			# got per exception -> special case
 	SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
 	jz	pgm_no_vtime
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 pgm_no_vtime:
-#endif
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	mvc	SP_ARGS(8,%r15),__LC_LAST_BREAK
 	TRACE_IRQS_OFF
@@ -516,14 +504,12 @@ pgm_per:
 pgm_per_std:
 	SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
 	jz	pgm_no_vtime2
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 pgm_no_vtime2:
-#endif
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	TRACE_IRQS_OFF
 	lg	%r1,__TI_task(%r9)
@@ -545,11 +531,9 @@ pgm_no_vtime2:
 pgm_svcper:
 	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-#endif
 	llgh	%r7,__LC_SVC_INT_CODE	# get svc number from lowcore
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	lg	%r1,__TI_task(%r9)
@@ -575,19 +559,17 @@ kernel_per:
  */
 	.globl io_int_handler
 io_int_handler:
-	STORE_TIMER __LC_ASYNC_ENTER_TIMER
+	stpt	__LC_ASYNC_ENTER_TIMER
 	stck	__LC_INT_CLOCK
 	SAVE_ALL_BASE __LC_SAVE_AREA+32
 	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
 	CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
 	jz	io_no_vtime
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
 io_no_vtime:
-#endif
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	TRACE_IRQS_OFF
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
@@ -739,19 +721,17 @@ io_notify_resume:
  */
 	.globl ext_int_handler
 ext_int_handler:
-	STORE_TIMER __LC_ASYNC_ENTER_TIMER
+	stpt	__LC_ASYNC_ENTER_TIMER
 	stck	__LC_INT_CLOCK
 	SAVE_ALL_BASE __LC_SAVE_AREA+32
 	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
 	CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
 	jz	ext_no_vtime
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
 ext_no_vtime:
-#endif
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	TRACE_IRQS_OFF
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
@@ -773,7 +753,6 @@ mcck_int_handler:
 	la	%r12,__LC_MCK_OLD_PSW
 	tm	__LC_MCCK_CODE,0x80	# system damage?
 	jo	mcck_int_main		# yes -> rest of mcck code invalid
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	la	%r14,4095
 	mvc	__LC_SAVE_AREA+104(8),__LC_ASYNC_ENTER_TIMER
 	mvc	__LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
@@ -791,9 +770,7 @@ mcck_int_handler:
 	la	%r14,__LC_LAST_UPDATE_TIMER
 0:	spt	0(%r14)
 	mvc	__LC_ASYNC_ENTER_TIMER(8),0(%r14)
-1:
-#endif
-	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
+1:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
 	jno	mcck_int_main		# no -> skip cleanup critical
 	tm	__LC_MCK_OLD_PSW+1,0x01	# test problem state bit
 	jnz	mcck_int_main		# from user -> load kernel stack
@@ -809,7 +786,6 @@ mcck_int_main:
 	jz	0f
 	lg	%r15,__LC_PANIC_STACK	# load panic stack
 0:	CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm	__LC_MCCK_CODE+2,0x08	# mwp of old psw valid?
 	jno	mcck_no_vtime		# no -> no timer update
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
@@ -818,7 +794,6 @@ mcck_int_main:
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
 mcck_no_vtime:
-#endif
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	brasl	%r14,s390_do_machine_check
@@ -839,14 +814,11 @@ mcck_return:
 	mvc	__LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW
 	ni	__LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
 	lmg	%r0,%r15,SP_R0(%r15)	# load gprs 0-15
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	mvc	__LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+104
 	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
 	jno	0f
 	stpt	__LC_EXIT_TIMER
-0:
-#endif
-	lpswe	__LC_RETURN_MCCK_PSW	# back to caller
+0:	lpswe	__LC_RETURN_MCCK_PSW	# back to caller

 /*
  * Restart interruption handler, kick starter for additional CPUs
@@ -964,13 +936,11 @@ cleanup_system_call:
 	j	1f
 0:	la	%r12,__LC_SAVE_AREA+64
 1:
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
 	jh	0f
 	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
 0:	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16)
 	jhe	cleanup_vtime
-#endif
 	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
 	jh	0f
 	mvc	__LC_SAVE_AREA(32),0(%r12)
@@ -981,7 +951,6 @@ cleanup_system_call:
 	lg	%r12,__LC_SAVE_AREA+96	# argh
 	stg	%r15,24(%r12)
 	llgh	%r7,__LC_SVC_INT_CODE
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 cleanup_vtime:
 	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
 	jhe	cleanup_stime
@@ -992,18 +961,15 @@ cleanup_stime:
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 cleanup_update:
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-#endif
 	mvc	__LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
 	la	%r12,__LC_RETURN_PSW
 	br	%r14
 cleanup_system_call_insn:
 	.quad	sysc_saveall
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	.quad	system_call
 	.quad	sysc_vtime
 	.quad	sysc_stime
 	.quad	sysc_update
-#endif

 cleanup_sysc_return:
 	mvc	__LC_RETURN_PSW(8),0(%r12)
@@ -1014,11 +980,9 @@ cleanup_sysc_return:
 cleanup_sysc_leave:
 	clc	8(8,%r12),BASED(cleanup_sysc_leave_insn)
 	je	2f
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
 	clc	8(8,%r12),BASED(cleanup_sysc_leave_insn+8)
 	je	2f
-#endif
 	mvc	__LC_RETURN_PSW(16),SP_PSW(%r15)
 	cghi	%r12,__LC_MCK_OLD_PSW
 	jne	0f
@@ -1031,9 +995,7 @@ cleanup_sysc_leave:
 	br	%r14
 cleanup_sysc_leave_insn:
 	.quad	sysc_done - 4
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	.quad	sysc_done - 8
-#endif

 cleanup_io_return:
 	mvc	__LC_RETURN_PSW(8),0(%r12)
@@ -1044,11 +1006,9 @@ cleanup_io_return:
 cleanup_io_leave:
 	clc	8(8,%r12),BASED(cleanup_io_leave_insn)
 	je	2f
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
 	clc	8(8,%r12),BASED(cleanup_io_leave_insn+8)
 	je	2f
-#endif
 	mvc	__LC_RETURN_PSW(16),SP_PSW(%r15)
 	cghi	%r12,__LC_MCK_OLD_PSW
 	jne	0f
@@ -1061,9 +1021,7 @@ cleanup_io_leave:
 	br	%r14
 cleanup_io_leave_insn:
 	.quad	io_done - 4
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	.quad	io_done - 8
-#endif

 /*
  * Integer constants
@@ -461,6 +461,55 @@ start:
 	.byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7
 	.byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff

+#
+# startup-code at 0x10000, running in absolute addressing mode
+# this is called either by the ipl loader or directly by PSW restart
+# or linload or SALIPL
+#
+	.org	0x10000
+startup:basr	%r13,0			# get base
+.LPG0:
+
+#ifndef CONFIG_MARCH_G5
+	# check processor version against MARCH_{G5,Z900,Z990,Z9_109,Z10}
+	stidp	__LC_CPUID		# store cpuid
+	lhi	%r0,(3f-2f) / 2
+	la	%r1,2f-.LPG0(%r13)
+0:	clc	__LC_CPUID+4(2),0(%r1)
+	jne	3f
+	lpsw	1f-.LPG0(13)		# machine type not good enough, crash
+	.align 16
+1:	.long	0x000a0000,0x00000000
+2:
+#if defined(CONFIG_MARCH_Z10)
+	.short 0x9672, 0x2064, 0x2066, 0x2084, 0x2086, 0x2094, 0x2096
+#elif defined(CONFIG_MARCH_Z9_109)
+	.short 0x9672, 0x2064, 0x2066, 0x2084, 0x2086
+#elif defined(CONFIG_MARCH_Z990)
+	.short 0x9672, 0x2064, 0x2066
+#elif defined(CONFIG_MARCH_Z900)
+	.short 0x9672
+#endif
+3:	la	%r1,2(%r1)
+	brct	%r0,0b
+#endif
+
+	l	%r13,0f-.LPG0(%r13)
+	b	0(%r13)
+0:	.long	startup_continue
+
+#
+# params at 10400 (setup.h)
+#
+	.org	PARMAREA
+	.long	0,0			# IPL_DEVICE
+	.long	0,0			# INITRD_START
+	.long	0,0			# INITRD_SIZE
+
+	.org	COMMAND_LINE
+	.byte	"root=/dev/ram0 ro"
+	.byte	0
+
 #ifdef CONFIG_64BIT
 #include "head64.S"
 #else
@@ -10,34 +10,13 @@
  *
  */

-#
-# startup-code at 0x10000, running in absolute addressing mode
-# this is called either by the ipl loader or directly by PSW restart
-# or linload or SALIPL
-#
-	.org	0x10000
-startup:basr	%r13,0			# get base
-.LPG0:	l	%r13,0f-.LPG0(%r13)
-	b	0(%r13)
-0:	.long	startup_continue
-
-#
-# params at 10400 (setup.h)
-#
-	.org	PARMAREA
-	.long	0,0			# IPL_DEVICE
-	.long	0,0			# INITRD_START
-	.long	0,0			# INITRD_SIZE
-
-	.org	COMMAND_LINE
-	.byte	"root=/dev/ram0 ro"
-	.byte	0
-
 	.org	0x11000

 startup_continue:
 	basr	%r13,0			# get base
-.LPG1:	mvi	__LC_AR_MODE_ID,0	# set ESA flag (mode 0)
+.LPG1:
+	mvi	__LC_AR_MODE_ID,0	# set ESA flag (mode 0)
 	lctl	%c0,%c15,.Lctl-.LPG1(%r13) # load control registers
 	l	%r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
 					# move IPL device to lowcore
@@ -50,7 +29,6 @@ startup_continue:
 	ahi	%r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
 	st	%r15,__LC_KERNEL_STACK	# set end of kernel stack
 	ahi	%r15,-96
-	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
 #
 # Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
 # and create a kernel NSS if the SAVESYS= parm is defined
@@ -10,29 +10,6 @@
  *
  */

-#
-# startup-code at 0x10000, running in absolute addressing mode
-# this is called either by the ipl loader or directly by PSW restart
-# or linload or SALIPL
-#
-	.org	0x10000
-startup:basr	%r13,0			# get base
-.LPG0:	l	%r13,0f-.LPG0(%r13)
-	b	0(%r13)
-0:	.long	startup_continue
-
-#
-# params at 10400 (setup.h)
-#
-	.org	PARMAREA
-	.quad	0			# IPL_DEVICE
-	.quad	0			# INITRD_START
-	.quad	0			# INITRD_SIZE
-
-	.org	COMMAND_LINE
-	.byte	"root=/dev/ram0 ro"
-	.byte	0
-
 	.org	0x11000

 startup_continue:
@@ -119,7 +96,6 @@ startup_continue:
 	aghi	%r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
 	stg	%r15,__LC_KERNEL_STACK	# set end of kernel stack
 	aghi	%r15,-160
-	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
 #
 # Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
 # and create a kernel NSS if the SAVESYS= parm is defined
@@ -0,0 +1,56 @@
+/*
+ * Copyright IBM Corp. 2008
+ *
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ *
+ */
+
+#ifndef CONFIG_64BIT
+	.globl _mcount
+_mcount:
+	stm	%r0,%r5,8(%r15)
+	st	%r14,56(%r15)
+	lr	%r1,%r15
+	ahi	%r15,-96
+	l	%r3,100(%r15)
+	la	%r2,0(%r14)
+	st	%r1,0(%r15)
+	la	%r3,0(%r3)
+	bras	%r14,0f
+	.long	ftrace_trace_function
+0:	l	%r14,0(%r14)
+	l	%r14,0(%r14)
+	basr	%r14,%r14
+	ahi	%r15,96
+	lm	%r0,%r5,8(%r15)
+	l	%r14,56(%r15)
+	br	%r14
+
+	.globl ftrace_stub
+ftrace_stub:
+	br	%r14
+
+#else /* CONFIG_64BIT */
+
+	.globl _mcount
+_mcount:
+	stmg	%r0,%r5,16(%r15)
+	stg	%r14,112(%r15)
+	lgr	%r1,%r15
+	aghi	%r15,-160
+	stg	%r1,0(%r15)
+	lgr	%r2,%r14
+	lg	%r3,168(%r15)
+	larl	%r14,ftrace_trace_function
+	lg	%r14,0(%r14)
+	basr	%r14,%r14
+	aghi	%r15,160
+	lmg	%r0,%r5,16(%r15)
+	lg	%r14,112(%r15)
+	br	%r14
+
+	.globl ftrace_stub
+ftrace_stub:
+	br	%r14
+
+#endif /* CONFIG_64BIT */
@@ -0,0 +1,98 @@
+/*
+ * arch/s390/kernel/processor.c
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "cpu"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+
+#include <asm/elf.h>
+#include <asm/lowcore.h>
+#include <asm/param.h>
+
+void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo)
+{
+	pr_info("Processor %d started, address %d, identification %06X\n",
+		cpuinfo->cpu_nr, cpuinfo->cpu_addr, cpuinfo->cpu_id.ident);
+}
+
+/*
+ * show_cpuinfo - Get information on one CPU for use by procfs.
+ */
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+	static const char *hwcap_str[8] = {
+		"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
+		"edat"
+	};
+	struct cpuinfo_S390 *cpuinfo;
+	unsigned long n = (unsigned long) v - 1;
+	int i;
+
+	s390_adjust_jiffies();
+	preempt_disable();
+	if (!n) {
+		seq_printf(m, "vendor_id : IBM/S390\n"
+			   "# processors : %i\n"
+			   "bogomips per cpu: %lu.%02lu\n",
+			   num_online_cpus(), loops_per_jiffy/(500000/HZ),
+			   (loops_per_jiffy/(5000/HZ))%100);
+		seq_puts(m, "features\t: ");
+		for (i = 0; i < 8; i++)
+			if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
+				seq_printf(m, "%s ", hwcap_str[i]);
+		seq_puts(m, "\n");
+	}
+
+	if (cpu_online(n)) {
+#ifdef CONFIG_SMP
+		if (smp_processor_id() == n)
+			cpuinfo = &S390_lowcore.cpu_data;
+		else
+			cpuinfo = &lowcore_ptr[n]->cpu_data;
+#else
+		cpuinfo = &S390_lowcore.cpu_data;
+#endif
+		seq_printf(m, "processor %li: "
+			   "version = %02X, "
+			   "identification = %06X, "
+			   "machine = %04X\n",
+			   n, cpuinfo->cpu_id.version,
+			   cpuinfo->cpu_id.ident,
+			   cpuinfo->cpu_id.machine);
+	}
+	preempt_enable();
+	return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+	return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	++*pos;
+	return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+	.start	= c_start,
+	.next	= c_next,
+	.stop	= c_stop,
+	.show	= show_cpuinfo,
+};
@@ -204,7 +204,6 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
 static int
 peek_user(struct task_struct *child, addr_t addr, addr_t data)
 {
-	struct user *dummy = NULL;
 	addr_t tmp, mask;

 	/*
@@ -213,8 +212,8 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
  */
 	mask = __ADDR_MASK;
 #ifdef CONFIG_64BIT
-	if (addr >= (addr_t) &dummy->regs.acrs &&
-	    addr < (addr_t) &dummy->regs.orig_gpr2)
+	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
+	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
 		mask = 3;
 #endif
 	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
@@ -312,7 +311,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
 static int
 poke_user(struct task_struct *child, addr_t addr, addr_t data)
 {
-	struct user *dummy = NULL;
 	addr_t mask;

 	/*
@@ -321,8 +319,8 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
  */
 	mask = __ADDR_MASK;
 #ifdef CONFIG_64BIT
-	if (addr >= (addr_t) &dummy->regs.acrs &&
-	    addr < (addr_t) &dummy->regs.orig_gpr2)
+	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
+	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
 		mask = 3;
 #endif
 	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
@@ -14,6 +14,7 @@
 #include <asm/delay.h>
 #include <asm/pgalloc.h>
 #include <asm/setup.h>
+#include <asm/ftrace.h>
 #ifdef CONFIG_IP_MULTICAST
 #include <net/arp.h>
 #endif
@@ -43,3 +44,7 @@ EXPORT_SYMBOL(csum_fold);
 EXPORT_SYMBOL(console_mode);
 EXPORT_SYMBOL(console_devno);
 EXPORT_SYMBOL(console_irq);
+
+#ifdef CONFIG_FUNCTION_TRACER
+EXPORT_SYMBOL(_mcount);
+#endif
@@ -14,6 +14,9 @@
  * This file handles the architecture-dependent parts of initialization
  */

+#define KMSG_COMPONENT "setup"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/sched.h>
@@ -32,7 +35,6 @@
 #include <linux/bootmem.h>
 #include <linux/root_dev.h>
 #include <linux/console.h>
-#include <linux/seq_file.h>
 #include <linux/kernel_stat.h>
 #include <linux/device.h>
 #include <linux/notifier.h>
@@ -291,8 +293,8 @@ unsigned int switch_amode = 0;
 #endif
 EXPORT_SYMBOL_GPL(switch_amode);

-static void set_amode_and_uaccess(unsigned long user_amode,
-				  unsigned long user32_amode)
+static int set_amode_and_uaccess(unsigned long user_amode,
+				 unsigned long user32_amode)
 {
 	psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode |
 		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
@@ -309,11 +311,11 @@ static void set_amode_and_uaccess(unsigned long user_amode,
 		PSW_MASK_MCHECK | PSW_DEFAULT_KEY;

 	if (MACHINE_HAS_MVCOS) {
-		printk("mvcos available.\n");
 		memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
+		return 1;
 	} else {
-		printk("mvcos not available.\n");
 		memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
+		return 0;
 	}
 }
@@ -328,9 +330,10 @@ static int __init early_parse_switch_amode(char *p)
 early_param("switch_amode", early_parse_switch_amode);

 #else /* CONFIG_S390_SWITCH_AMODE */
-static inline void set_amode_and_uaccess(unsigned long user_amode,
-					 unsigned long user32_amode)
+static inline int set_amode_and_uaccess(unsigned long user_amode,
+					unsigned long user32_amode)
 {
+	return 0;
 }
 #endif /* CONFIG_S390_SWITCH_AMODE */
@@ -355,11 +358,20 @@ early_param("noexec", early_parse_noexec);
 static void setup_addressing_mode(void)
 {
 	if (s390_noexec) {
-		printk("S390 execute protection active, ");
-		set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY);
+		if (set_amode_and_uaccess(PSW_ASC_SECONDARY,
+					  PSW32_ASC_SECONDARY))
+			pr_info("Execute protection active, "
+				"mvcos available\n");
+		else
+			pr_info("Execute protection active, "
+				"mvcos not available\n");
 	} else if (switch_amode) {
-		printk("S390 address spaces switched, ");
-		set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY);
+		if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY))
+			pr_info("Address spaces switched, "
+				"mvcos available\n");
+		else
+			pr_info("Address spaces switched, "
+				"mvcos not available\n");
 	}
 #ifdef CONFIG_TRACE_IRQFLAGS
 	sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
@@ -572,15 +584,15 @@ setup_memory(void)
 		start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;

 		if (start + INITRD_SIZE > memory_end) {
-			printk("initrd extends beyond end of memory "
-			       "(0x%08lx > 0x%08lx)\n"
-			       "disabling initrd\n",
-			       start + INITRD_SIZE, memory_end);
+			pr_err("initrd extends beyond end of "
+			       "memory (0x%08lx > 0x%08lx) "
+			       "disabling initrd\n",
+			       start + INITRD_SIZE, memory_end);
 			INITRD_START = INITRD_SIZE = 0;
 		} else {
-			printk("Moving initrd (0x%08lx -> 0x%08lx, "
-			       "size: %ld)\n",
-			       INITRD_START, start, INITRD_SIZE);
+			pr_info("Moving initrd (0x%08lx -> "
+				"0x%08lx, size: %ld)\n",
+				INITRD_START, start, INITRD_SIZE);
 			memmove((void *) start, (void *) INITRD_START,
 				INITRD_SIZE);
 			INITRD_START = start;
@@ -642,8 +654,9 @@ setup_memory(void)
 			initrd_start = INITRD_START;
 			initrd_end = initrd_start + INITRD_SIZE;
 		} else {
-			printk("initrd extends beyond end of memory "
-			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+			pr_err("initrd extends beyond end of "
+			       "memory (0x%08lx > 0x%08lx) "
+			       "disabling initrd\n",
 			       initrd_start + INITRD_SIZE, memory_end);
 			initrd_start = initrd_end = 0;
 		}
@@ -651,23 +664,6 @@ setup_memory(void)
 #endif
 }

-static int __init __stfle(unsigned long long *list, int doublewords)
-{
-	typedef struct { unsigned long long _[doublewords]; } addrtype;
-	register unsigned long __nr asm("0") = doublewords - 1;
-
-	asm volatile(".insn s,0xb2b00000,%0" /* stfle */
-		     : "=m" (*(addrtype *) list), "+d" (__nr) : : "cc");
-	return __nr + 1;
-}
-
-int __init stfle(unsigned long long *list, int doublewords)
-{
-	if (!(stfl() & (1UL << 24)))
-		return -EOPNOTSUPP;
-	return __stfle(list, doublewords);
-}
-
 /*
  * Setup hardware capabilities.
  */
@@ -739,8 +735,13 @@ static void __init setup_hwcaps(void)
 		strcpy(elf_platform, "z990");
 		break;
 	case 0x2094:
+	case 0x2096:
 		strcpy(elf_platform, "z9-109");
 		break;
+	case 0x2097:
+	case 0x2098:
+		strcpy(elf_platform, "z10");
+		break;
 	}
 }
@@ -752,25 +753,34 @@ static void __init setup_hwcaps(void)
 void __init
 setup_arch(char **cmdline_p)
 {
+	/* set up preferred console */
+	add_preferred_console("ttyS", 0, NULL);
+
 	/*
 	 * print what head.S has found out about the machine
 	 */
 #ifndef CONFIG_64BIT
-	printk((MACHINE_IS_VM) ?
-	       "We are running under VM (31 bit mode)\n" :
-	       "We are running native (31 bit mode)\n");
-	printk((MACHINE_HAS_IEEE) ?
-	       "This machine has an IEEE fpu\n" :
-	       "This machine has no IEEE fpu\n");
+	if (MACHINE_IS_VM)
+		pr_info("Linux is running as a z/VM "
+			"guest operating system in 31-bit mode\n");
+	else
+		pr_info("Linux is running natively in 31-bit mode\n");
+	if (MACHINE_HAS_IEEE)
+		pr_info("The hardware system has IEEE compatible "
+			"floating point units\n");
+	else
+		pr_info("The hardware system has no IEEE compatible "
+			"floating point units\n");
 #else /* CONFIG_64BIT */
 	if (MACHINE_IS_VM)
-		printk("We are running under VM (64 bit mode)\n");
+		pr_info("Linux is running as a z/VM "
+			"guest operating system in 64-bit mode\n");
 	else if (MACHINE_IS_KVM) {
-		printk("We are running under KVM (64 bit mode)\n");
+		pr_info("Linux is running under KVM in 64-bit mode\n");
 		add_preferred_console("hvc", 0, NULL);
 		s390_virtio_console_init();
 	} else
-		printk("We are running native (64 bit mode)\n");
+		pr_info("Linux is running natively in 64-bit mode\n");
 #endif /* CONFIG_64BIT */

 	/* Have one command line that is parsed and saved in /proc/cmdline */
@@ -818,90 +828,3 @@ setup_arch(char **cmdline_p)
 	/* Setup zfcpdump support */
 	setup_zfcpdump(console_devno);
 }
-
-void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo)
-{
-	printk(KERN_INFO "cpu %d "
-#ifdef CONFIG_SMP
-	       "phys_idx=%d "
-#endif
-	       "vers=%02X ident=%06X machine=%04X unused=%04X\n",
-	       cpuinfo->cpu_nr,
-#ifdef CONFIG_SMP
-	       cpuinfo->cpu_addr,
-#endif
-	       cpuinfo->cpu_id.version,
-	       cpuinfo->cpu_id.ident,
-	       cpuinfo->cpu_id.machine,
-	       cpuinfo->cpu_id.unused);
-}
[the remaining removed lines of this hunk -- show_cpuinfo(), c_start(), c_next(), c_stop() and the cpuinfo_op seq_operations definition -- are verbatim identical to the definitions added in the new cpu info source file earlier in this commit]
@ -20,6 +20,9 @@
|
||||||
* cpu_number_map in other architectures.
|
* cpu_number_map in other architectures.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
#define KMSG_COMPONENT "cpu"
|
||||||
|
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
|
||||||
|
|
||||||
#include <linux/module.h>
|
#include <linux/module.h>
|
||||||
#include <linux/init.h>
|
#include <linux/init.h>
|
||||||
#include <linux/mm.h>
|
#include <linux/mm.h>
|
||||||
|
@ -77,159 +80,6 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
|
||||||
|
|
||||||
static void smp_ext_bitcall(int, ec_bit_sig);
|
static void smp_ext_bitcall(int, ec_bit_sig);
|
||||||
|
|
||||||
/*
|
|
||||||
* Structure and data for __smp_call_function_map(). This is designed to
|
|
||||||
* minimise static memory requirements. It also looks cleaner.
|
|
||||||
*/
|
|
||||||
static DEFINE_SPINLOCK(call_lock);
|
|
||||||
|
|
||||||
struct call_data_struct {
|
|
||||||
void (*func) (void *info);
|
|
||||||
void *info;
|
|
||||||
cpumask_t started;
|
|
||||||
cpumask_t finished;
|
|
||||||
int wait;
|
|
||||||
};
|
|
||||||
|
|
||||||
static struct call_data_struct *call_data;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* 'Call function' interrupt callback
|
|
||||||
*/
|
|
||||||
static void do_call_function(void)
|
|
||||||
{
|
|
||||||
void (*func) (void *info) = call_data->func;
|
|
||||||
void *info = call_data->info;
|
|
||||||
int wait = call_data->wait;
|
|
||||||
|
|
||||||
cpu_set(smp_processor_id(), call_data->started);
|
|
||||||
(*func)(info);
|
|
||||||
if (wait)
|
|
||||||
cpu_set(smp_processor_id(), call_data->finished);;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void __smp_call_function_map(void (*func) (void *info), void *info,
|
|
||||||
int wait, cpumask_t map)
|
|
||||||
{
|
|
||||||
struct call_data_struct data;
|
|
||||||
int cpu, local = 0;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Can deadlock when interrupts are disabled or if in wrong context.
|
|
||||||
*/
|
|
||||||
WARN_ON(irqs_disabled() || in_irq());
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Check for local function call. We have to have the same call order
|
|
||||||
* as in on_each_cpu() because of machine_restart_smp().
|
|
||||||
*/
|
|
||||||
if (cpu_isset(smp_processor_id(), map)) {
|
|
||||||
local = 1;
|
|
||||||
cpu_clear(smp_processor_id(), map);
|
|
||||||
}
|
|
||||||
|
|
||||||
cpus_and(map, map, cpu_online_map);
|
|
||||||
if (cpus_empty(map))
|
|
||||||
goto out;
|
|
||||||
|
|
||||||
data.func = func;
|
|
||||||
data.info = info;
|
|
||||||
data.started = CPU_MASK_NONE;
|
|
||||||
data.wait = wait;
|
|
||||||
if (wait)
|
|
||||||
data.finished = CPU_MASK_NONE;
|
|
||||||
|
|
||||||
call_data = &data;
|
|
||||||
|
|
||||||
for_each_cpu_mask(cpu, map)
|
|
||||||
smp_ext_bitcall(cpu, ec_call_function);
|
|
||||||
|
|
||||||
/* Wait for response */
|
|
||||||
while (!cpus_equal(map, data.started))
|
|
||||||
cpu_relax();
|
|
||||||
if (wait)
|
|
||||||
while (!cpus_equal(map, data.finished))
|
|
||||||
cpu_relax();
|
|
||||||
out:
|
|
||||||
if (local) {
|
|
||||||
local_irq_disable();
|
|
||||||
func(info);
|
|
||||||
local_irq_enable();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* smp_call_function:
|
|
||||||
* @func: the function to run; this must be fast and non-blocking
|
|
||||||
* @info: an arbitrary pointer to pass to the function
|
|
||||||
* @wait: if true, wait (atomically) until function has completed on other CPUs
|
|
||||||
*
|
|
||||||
* Run a function on all other CPUs.
|
|
||||||
*
|
|
||||||
* You must not call this function with disabled interrupts, from a
|
|
||||||
* hardware interrupt handler or from a bottom half.
|
|
||||||
*/
|
|
||||||
int smp_call_function(void (*func) (void *info), void *info, int wait)
|
|
||||||
{
|
|
||||||
cpumask_t map;
|
|
||||||
|
|
||||||
spin_lock(&call_lock);
|
|
||||||
map = cpu_online_map;
|
|
||||||
cpu_clear(smp_processor_id(), map);
|
|
||||||
__smp_call_function_map(func, info, wait, map);
|
|
||||||
spin_unlock(&call_lock);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL(smp_call_function);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* smp_call_function_single:
|
|
||||||
* @cpu: the CPU where func should run
|
|
||||||
* @func: the function to run; this must be fast and non-blocking
|
|
||||||
* @info: an arbitrary pointer to pass to the function
|
|
||||||
* @wait: if true, wait (atomically) until function has completed on other CPUs
|
|
||||||
*
|
|
||||||
* Run a function on one processor.
|
|
||||||
*
|
|
||||||
* You must not call this function with disabled interrupts, from a
|
|
||||||
* hardware interrupt handler or from a bottom half.
|
|
||||||
*/
|
|
||||||
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
|
|
||||||
int wait)
|
|
||||||
{
|
|
||||||
spin_lock(&call_lock);
|
|
||||||
__smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu));
|
|
||||||
spin_unlock(&call_lock);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL(smp_call_function_single);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* smp_call_function_mask(): Run a function on a set of other CPUs.
|
|
||||||
* @mask: The set of cpus to run on. Must not include the current cpu.
|
|
||||||
* @func: The function to run. This must be fast and non-blocking.
|
|
||||||
* @info: An arbitrary pointer to pass to the function.
|
|
||||||
* @wait: If true, wait (atomically) until function has completed on other CPUs.
|
|
||||||
*
|
|
||||||
* Returns 0 on success, else a negative status code.
|
|
||||||
*
|
|
||||||
* If @wait is true, then returns once @func has returned; otherwise
|
|
||||||
* it returns just before the target cpu calls @func.
|
|
||||||
*
|
|
||||||
* You must not call this function with disabled interrupts or from a
|
|
||||||
* hardware interrupt handler or from a bottom half handler.
|
|
||||||
*/
|
|
||||||
int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
|
|
||||||
int wait)
|
|
||||||
{
|
|
||||||
spin_lock(&call_lock);
|
|
||||||
cpu_clear(smp_processor_id(), mask);
|
|
||||||
__smp_call_function_map(func, info, wait, mask);
|
|
||||||
spin_unlock(&call_lock);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL(smp_call_function_mask);
|
|
||||||
|
|
||||||
void smp_send_stop(void)
|
void smp_send_stop(void)
|
||||||
{
|
{
|
||||||
int cpu, rc;
|
int cpu, rc;
|
||||||
|
@ -271,7 +121,10 @@ static void do_ext_call_interrupt(__u16 code)
|
||||||
bits = xchg(&S390_lowcore.ext_call_fast, 0);
|
bits = xchg(&S390_lowcore.ext_call_fast, 0);
|
||||||
|
|
||||||
if (test_bit(ec_call_function, &bits))
|
if (test_bit(ec_call_function, &bits))
|
||||||
do_call_function();
|
generic_smp_call_function_interrupt();
|
||||||
|
|
||||||
|
if (test_bit(ec_call_function_single, &bits))
|
||||||
|
generic_smp_call_function_single_interrupt();
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -288,6 +141,19 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
|
||||||
udelay(10);
|
udelay(10);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void arch_send_call_function_ipi(cpumask_t mask)
|
||||||
|
{
|
||||||
|
int cpu;
|
||||||
|
|
||||||
|
for_each_cpu_mask(cpu, mask)
|
||||||
|
smp_ext_bitcall(cpu, ec_call_function);
|
||||||
|
}
|
||||||
|
|
||||||
|
void arch_send_call_function_single_ipi(int cpu)
|
||||||
|
{
|
||||||
|
smp_ext_bitcall(cpu, ec_call_function_single);
|
||||||
|
}
|
||||||
|
|
||||||
#ifndef CONFIG_64BIT
|
#ifndef CONFIG_64BIT
|
||||||
/*
|
/*
|
||||||
* this function sends a 'purge tlb' signal to another CPU.
|
* this function sends a 'purge tlb' signal to another CPU.
|
||||||
|
@ -388,8 +254,8 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
|
||||||
if (ipl_info.type != IPL_TYPE_FCP_DUMP)
|
if (ipl_info.type != IPL_TYPE_FCP_DUMP)
|
||||||
return;
|
return;
|
||||||
if (cpu >= NR_CPUS) {
|
if (cpu >= NR_CPUS) {
|
||||||
printk(KERN_WARNING "Registers for cpu %i not saved since dump "
|
pr_warning("CPU %i exceeds the maximum %i and is excluded from "
|
||||||
"kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
|
"the dump\n", cpu, NR_CPUS - 1);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
|
zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
|
||||||
|
@ -562,7 +428,7 @@ static void __init smp_detect_cpus(void)
|
||||||
}
|
}
|
||||||
out:
|
out:
|
||||||
kfree(info);
|
kfree(info);
|
||||||
printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus);
|
pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
|
||||||
get_online_cpus();
|
get_online_cpus();
|
||||||
__smp_rescan_cpus();
|
__smp_rescan_cpus();
|
||||||
put_online_cpus();
|
put_online_cpus();
|
||||||
|
@ -578,19 +444,17 @@ int __cpuinit start_secondary(void *cpuvoid)
|
||||||
preempt_disable();
|
preempt_disable();
|
||||||
/* Enable TOD clock interrupts on the secondary cpu. */
|
/* Enable TOD clock interrupts on the secondary cpu. */
|
||||||
init_cpu_timer();
|
init_cpu_timer();
|
||||||
#ifdef CONFIG_VIRT_TIMER
|
|
||||||
/* Enable cpu timer interrupts on the secondary cpu. */
|
/* Enable cpu timer interrupts on the secondary cpu. */
|
||||||
init_cpu_vtimer();
|
init_cpu_vtimer();
|
||||||
#endif
|
|
||||||
/* Enable pfault pseudo page faults on this cpu. */
|
/* Enable pfault pseudo page faults on this cpu. */
|
||||||
pfault_init();
|
pfault_init();
|
||||||
|
|
||||||
/* call cpu notifiers */
|
/* call cpu notifiers */
|
||||||
notify_cpu_starting(smp_processor_id());
|
notify_cpu_starting(smp_processor_id());
|
||||||
/* Mark this cpu as online */
|
/* Mark this cpu as online */
|
||||||
spin_lock(&call_lock);
|
ipi_call_lock();
|
||||||
cpu_set(smp_processor_id(), cpu_online_map);
|
cpu_set(smp_processor_id(), cpu_online_map);
|
||||||
spin_unlock(&call_lock);
|
ipi_call_unlock();
|
||||||
/* Switch on interrupts */
|
/* Switch on interrupts */
|
||||||
local_irq_enable();
|
local_irq_enable();
|
||||||
/* Print info about this processor */
|
/* Print info about this processor */
|
||||||
|
@ -639,18 +503,15 @@ static int __cpuinit smp_alloc_lowcore(int cpu)
|
||||||
|
|
||||||
save_area = get_zeroed_page(GFP_KERNEL);
|
save_area = get_zeroed_page(GFP_KERNEL);
|
||||||
if (!save_area)
|
if (!save_area)
|
||||||
goto out_save_area;
|
goto out;
|
||||||
lowcore->extended_save_area_addr = (u32) save_area;
|
lowcore->extended_save_area_addr = (u32) save_area;
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
lowcore_ptr[cpu] = lowcore;
|
lowcore_ptr[cpu] = lowcore;
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
#ifndef CONFIG_64BIT
|
|
||||||
out_save_area:
|
|
||||||
free_page(panic_stack);
|
|
||||||
#endif
|
|
||||||
out:
|
out:
|
||||||
|
free_page(panic_stack);
|
||||||
free_pages(async_stack, ASYNC_ORDER);
|
free_pages(async_stack, ASYNC_ORDER);
|
||||||
free_pages((unsigned long) lowcore, lc_order);
|
free_pages((unsigned long) lowcore, lc_order);
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
@ -690,12 +551,8 @@ int __cpuinit __cpu_up(unsigned int cpu)
|
||||||
|
|
||||||
ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
|
ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
|
||||||
cpu, sigp_set_prefix);
|
cpu, sigp_set_prefix);
|
||||||
if (ccode) {
|
if (ccode)
|
||||||
printk("sigp_set_prefix failed for cpu %d "
|
|
||||||
"with condition code %d\n",
|
|
||||||
(int) cpu, (int) ccode);
|
|
||||||
return -EIO;
|
return -EIO;
|
||||||
}
|
|
||||||
|
|
||||||
idle = current_set[cpu];
|
idle = current_set[cpu];
|
||||||
cpu_lowcore = lowcore_ptr[cpu];
|
cpu_lowcore = lowcore_ptr[cpu];
|
||||||
|
@ -778,7 +635,7 @@ void __cpu_die(unsigned int cpu)
|
||||||
while (!smp_cpu_not_running(cpu))
|
while (!smp_cpu_not_running(cpu))
|
||||||
cpu_relax();
|
cpu_relax();
|
||||||
smp_free_lowcore(cpu);
|
smp_free_lowcore(cpu);
|
||||||
printk(KERN_INFO "Processor %d spun down\n", cpu);
|
pr_info("Processor %d stopped\n", cpu);
|
||||||
}
|
}
|
||||||
|
|
||||||
void cpu_die(void)
|
void cpu_die(void)
|
||||||
|
|
|
@ -12,6 +12,9 @@
|
||||||
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
|
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
#define KMSG_COMPONENT "time"
|
||||||
|
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
|
||||||
|
|
||||||
#include <linux/errno.h>
|
#include <linux/errno.h>
|
||||||
#include <linux/module.h>
|
#include <linux/module.h>
|
||||||
#include <linux/sched.h>
|
#include <linux/sched.h>
|
||||||
|
@ -20,6 +23,8 @@
|
||||||
#include <linux/string.h>
|
#include <linux/string.h>
|
||||||
#include <linux/mm.h>
|
#include <linux/mm.h>
|
||||||
#include <linux/interrupt.h>
|
#include <linux/interrupt.h>
|
||||||
|
#include <linux/cpu.h>
|
||||||
|
#include <linux/stop_machine.h>
|
||||||
#include <linux/time.h>
|
#include <linux/time.h>
|
||||||
#include <linux/sysdev.h>
|
#include <linux/sysdev.h>
|
||||||
#include <linux/delay.h>
|
#include <linux/delay.h>
|
||||||
|
@ -36,6 +41,7 @@
|
||||||
#include <asm/delay.h>
|
#include <asm/delay.h>
|
||||||
#include <asm/s390_ext.h>
|
#include <asm/s390_ext.h>
|
||||||
#include <asm/div64.h>
|
#include <asm/div64.h>
|
||||||
|
#include <asm/vdso.h>
|
||||||
#include <asm/irq.h>
|
#include <asm/irq.h>
|
||||||
#include <asm/irq_regs.h>
|
#include <asm/irq_regs.h>
|
||||||
#include <asm/timer.h>
|
#include <asm/timer.h>
|
||||||
|
@ -223,6 +229,36 @@ static struct clocksource clocksource_tod = {
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
|
void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
|
||||||
|
{
|
||||||
|
if (clock != &clocksource_tod)
|
||||||
|
return;
|
||||||
|
|
||||||
|
/* Make userspace gettimeofday spin until we're done. */
|
||||||
|
++vdso_data->tb_update_count;
|
||||||
|
smp_wmb();
|
||||||
|
vdso_data->xtime_tod_stamp = clock->cycle_last;
|
||||||
|
vdso_data->xtime_clock_sec = xtime.tv_sec;
|
||||||
|
vdso_data->xtime_clock_nsec = xtime.tv_nsec;
|
||||||
|
vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
|
||||||
|
vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
|
||||||
|
smp_wmb();
|
||||||
|
++vdso_data->tb_update_count;
|
||||||
|
}
|
||||||
|
|
||||||
|
extern struct timezone sys_tz;
|
||||||
|
|
||||||
|
void update_vsyscall_tz(void)
|
||||||
|
{
|
||||||
|
/* Make userspace gettimeofday spin until we're done. */
|
||||||
|
++vdso_data->tb_update_count;
|
||||||
|
smp_wmb();
|
||||||
|
vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
|
||||||
|
vdso_data->tz_dsttime = sys_tz.tz_dsttime;
|
||||||
|
smp_wmb();
|
||||||
|
++vdso_data->tb_update_count;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Initialize the TOD clock and the CPU timer of
|
* Initialize the TOD clock and the CPU timer of
|
||||||
* the boot cpu.
|
* the boot cpu.
|
||||||
|
@ -253,10 +289,8 @@ void __init time_init(void)
|
||||||
|
|
||||||
/* Enable TOD clock interrupts on the boot cpu. */
|
/* Enable TOD clock interrupts on the boot cpu. */
|
||||||
init_cpu_timer();
|
init_cpu_timer();
|
||||||
|
/* Enable cpu timer interrupts on the boot cpu. */
|
||||||
#ifdef CONFIG_VIRT_TIMER
|
|
||||||
vtime_init();
|
vtime_init();
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -288,8 +322,8 @@ static unsigned long long adjust_time(unsigned long long old,
|
||||||
}
|
}
|
||||||
sched_clock_base_cc += delta;
|
sched_clock_base_cc += delta;
|
||||||
if (adjust.offset != 0) {
|
if (adjust.offset != 0) {
|
||||||
printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
|
pr_notice("The ETR interface has adjusted the clock "
|
||||||
adjust.offset);
|
"by %li microseconds\n", adjust.offset);
|
||||||
adjust.modes = ADJ_OFFSET_SINGLESHOT;
|
adjust.modes = ADJ_OFFSET_SINGLESHOT;
|
||||||
do_adjtimex(&adjust);
|
do_adjtimex(&adjust);
|
||||||
}
|
}
|
||||||
|
@ -360,6 +394,15 @@ static void enable_sync_clock(void)
|
||||||
atomic_set_mask(0x80000000, sw_ptr);
|
atomic_set_mask(0x80000000, sw_ptr);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Single threaded workqueue used for etr and stp sync events */
|
||||||
|
static struct workqueue_struct *time_sync_wq;
|
||||||
|
|
||||||
|
static void __init time_init_wq(void)
|
||||||
|
{
|
||||||
|
if (!time_sync_wq)
|
||||||
|
time_sync_wq = create_singlethread_workqueue("timesync");
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* External Time Reference (ETR) code.
|
* External Time Reference (ETR) code.
|
||||||
*/
|
*/
|
||||||
|
@ -425,6 +468,7 @@ static struct timer_list etr_timer;
|
||||||
|
|
||||||
static void etr_timeout(unsigned long dummy);
|
static void etr_timeout(unsigned long dummy);
|
||||||
static void etr_work_fn(struct work_struct *work);
|
static void etr_work_fn(struct work_struct *work);
|
||||||
|
static DEFINE_MUTEX(etr_work_mutex);
|
||||||
static DECLARE_WORK(etr_work, etr_work_fn);
|
static DECLARE_WORK(etr_work, etr_work_fn);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -440,8 +484,8 @@ static void etr_reset(void)
|
||||||
etr_tolec = get_clock();
|
etr_tolec = get_clock();
|
||||||
set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags);
|
set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags);
|
||||||
} else if (etr_port0_online || etr_port1_online) {
|
} else if (etr_port0_online || etr_port1_online) {
|
||||||
printk(KERN_WARNING "Running on non ETR capable "
|
pr_warning("The real or virtual hardware system does "
|
||||||
"machine, only local mode available.\n");
|
"not provide an ETR interface\n");
|
||||||
etr_port0_online = etr_port1_online = 0;
|
etr_port0_online = etr_port1_online = 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -452,17 +496,18 @@ static int __init etr_init(void)
|
||||||
|
|
||||||
if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags))
|
if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags))
|
||||||
return 0;
|
return 0;
|
||||||
|
time_init_wq();
|
||||||
/* Check if this machine has the steai instruction. */
|
/* Check if this machine has the steai instruction. */
|
||||||
if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
|
if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
|
||||||
etr_steai_available = 1;
|
etr_steai_available = 1;
|
||||||
setup_timer(&etr_timer, etr_timeout, 0UL);
|
setup_timer(&etr_timer, etr_timeout, 0UL);
|
||||||
if (etr_port0_online) {
|
if (etr_port0_online) {
|
||||||
set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
|
set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
|
||||||
schedule_work(&etr_work);
|
queue_work(time_sync_wq, &etr_work);
|
||||||
}
|
}
|
||||||
if (etr_port1_online) {
|
if (etr_port1_online) {
|
||||||
set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
|
set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
|
||||||
schedule_work(&etr_work);
|
queue_work(time_sync_wq, &etr_work);
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -489,7 +534,7 @@ void etr_switch_to_local(void)
|
||||||
if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
|
if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
|
||||||
disable_sync_clock(NULL);
|
disable_sync_clock(NULL);
|
||||||
set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
|
set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
|
||||||
schedule_work(&etr_work);
|
queue_work(time_sync_wq, &etr_work);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -505,7 +550,7 @@ void etr_sync_check(void)
|
||||||
if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
|
if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
|
||||||
disable_sync_clock(NULL);
|
disable_sync_clock(NULL);
|
||||||
set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
|
set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
|
||||||
schedule_work(&etr_work);
|
queue_work(time_sync_wq, &etr_work);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -529,13 +574,13 @@ static void etr_timing_alert(struct etr_irq_parm *intparm)
|
||||||
* Both ports are not up-to-date now.
|
* Both ports are not up-to-date now.
|
||||||
*/
|
*/
|
||||||
set_bit(ETR_EVENT_PORT_ALERT, &etr_events);
|
set_bit(ETR_EVENT_PORT_ALERT, &etr_events);
|
||||||
schedule_work(&etr_work);
|
queue_work(time_sync_wq, &etr_work);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void etr_timeout(unsigned long dummy)
|
static void etr_timeout(unsigned long dummy)
|
||||||
{
|
{
|
||||||
set_bit(ETR_EVENT_UPDATE, &etr_events);
|
set_bit(ETR_EVENT_UPDATE, &etr_events);
|
||||||
schedule_work(&etr_work);
|
queue_work(time_sync_wq, &etr_work);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -642,14 +687,16 @@ static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
|
||||||
}
|
}
|
||||||
|
|
||||||
struct clock_sync_data {
|
struct clock_sync_data {
|
||||||
|
atomic_t cpus;
|
||||||
int in_sync;
|
int in_sync;
|
||||||
unsigned long long fixup_cc;
|
unsigned long long fixup_cc;
|
||||||
|
int etr_port;
|
||||||
|
struct etr_aib *etr_aib;
|
||||||
};
|
};
|
||||||
|
|
||||||
static void clock_sync_cpu_start(void *dummy)
|
static void clock_sync_cpu(struct clock_sync_data *sync)
|
||||||
{
|
{
|
||||||
struct clock_sync_data *sync = dummy;
|
atomic_dec(&sync->cpus);
|
||||||
|
|
||||||
enable_sync_clock();
|
enable_sync_clock();
|
||||||
/*
|
/*
|
||||||
* This looks like a busy wait loop but it isn't. etr_sync_cpus
|
* This looks like a busy wait loop but it isn't. etr_sync_cpus
|
||||||
|
@ -675,39 +722,35 @@ static void clock_sync_cpu_start(void *dummy)
|
||||||
fixup_clock_comparator(sync->fixup_cc);
|
fixup_clock_comparator(sync->fixup_cc);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void clock_sync_cpu_end(void *dummy)
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Sync the TOD clock using the port refered to by aibp. This port
|
* Sync the TOD clock using the port refered to by aibp. This port
|
||||||
* has to be enabled and the other port has to be disabled. The
|
* has to be enabled and the other port has to be disabled. The
|
||||||
* last eacr update has to be more than 1.6 seconds in the past.
|
* last eacr update has to be more than 1.6 seconds in the past.
|
||||||
*/
|
*/
|
||||||
static int etr_sync_clock(struct etr_aib *aib, int port)
|
static int etr_sync_clock(void *data)
|
||||||
{
|
{
|
||||||
struct etr_aib *sync_port;
|
static int first;
|
||||||
struct clock_sync_data etr_sync;
|
|
||||||
unsigned long long clock, old_clock, delay, delta;
|
unsigned long long clock, old_clock, delay, delta;
|
||||||
int follows;
|
struct clock_sync_data *etr_sync;
|
||||||
|
struct etr_aib *sync_port, *aib;
|
||||||
|
int port;
|
||||||
int rc;
|
int rc;
|
||||||
|
|
||||||
/* Check if the current aib is adjacent to the sync port aib. */
|
etr_sync = data;
|
||||||
sync_port = (port == 0) ? &etr_port0 : &etr_port1;
|
|
||||||
follows = etr_aib_follows(sync_port, aib, port);
|
|
||||||
memcpy(sync_port, aib, sizeof(*aib));
|
|
||||||
if (!follows)
|
|
||||||
return -EAGAIN;
|
|
||||||
|
|
||||||
/*
|
if (xchg(&first, 1) == 1) {
|
||||||
* Catch all other cpus and make them wait until we have
|
/* Slave */
|
||||||
* successfully synced the clock. smp_call_function will
|
clock_sync_cpu(etr_sync);
|
||||||
* return after all other cpus are in etr_sync_cpu_start.
|
return 0;
|
||||||
*/
|
}
|
||||||
memset(&etr_sync, 0, sizeof(etr_sync));
|
|
||||||
preempt_disable();
|
/* Wait until all other cpus entered the sync function. */
|
||||||
smp_call_function(clock_sync_cpu_start, &etr_sync, 0);
|
while (atomic_read(&etr_sync->cpus) != 0)
|
||||||
local_irq_disable();
|
cpu_relax();
|
||||||
|
|
||||||
|
port = etr_sync->etr_port;
|
||||||
|
aib = etr_sync->etr_aib;
|
||||||
|
sync_port = (port == 0) ? &etr_port0 : &etr_port1;
|
||||||
enable_sync_clock();
|
enable_sync_clock();
|
||||||
|
|
||||||
/* Set clock to next OTE. */
|
/* Set clock to next OTE. */
|
||||||
|
@ -724,16 +767,16 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
|
||||||
delay = (unsigned long long)
|
delay = (unsigned long long)
|
||||||
(aib->edf2.etv - sync_port->edf2.etv) << 32;
|
(aib->edf2.etv - sync_port->edf2.etv) << 32;
|
||||||
delta = adjust_time(old_clock, clock, delay);
|
delta = adjust_time(old_clock, clock, delay);
|
||||||
etr_sync.fixup_cc = delta;
|
etr_sync->fixup_cc = delta;
|
||||||
fixup_clock_comparator(delta);
|
fixup_clock_comparator(delta);
|
||||||
/* Verify that the clock is properly set. */
|
/* Verify that the clock is properly set. */
|
||||||
if (!etr_aib_follows(sync_port, aib, port)) {
|
if (!etr_aib_follows(sync_port, aib, port)) {
|
||||||
/* Didn't work. */
|
/* Didn't work. */
|
||||||
disable_sync_clock(NULL);
|
disable_sync_clock(NULL);
|
||||||
etr_sync.in_sync = -EAGAIN;
|
etr_sync->in_sync = -EAGAIN;
|
||||||
rc = -EAGAIN;
|
rc = -EAGAIN;
|
||||||
} else {
|
} else {
|
||||||
etr_sync.in_sync = 1;
|
etr_sync->in_sync = 1;
|
||||||
rc = 0;
|
rc = 0;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
@ -741,12 +784,33 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
|
||||||
__ctl_clear_bit(0, 29);
|
__ctl_clear_bit(0, 29);
|
||||||
__ctl_clear_bit(14, 21);
|
__ctl_clear_bit(14, 21);
|
||||||
disable_sync_clock(NULL);
|
disable_sync_clock(NULL);
|
||||||
etr_sync.in_sync = -EAGAIN;
|
etr_sync->in_sync = -EAGAIN;
|
||||||
rc = -EAGAIN;
|
rc = -EAGAIN;
|
||||||
}
|
}
|
||||||
local_irq_enable();
|
xchg(&first, 0);
|
||||||
smp_call_function(clock_sync_cpu_end, NULL, 0);
|
return rc;
|
||||||
preempt_enable();
|
}
|
||||||
|
|
||||||
|
static int etr_sync_clock_stop(struct etr_aib *aib, int port)
|
||||||
|
{
|
||||||
|
struct clock_sync_data etr_sync;
|
||||||
|
struct etr_aib *sync_port;
|
||||||
|
int follows;
|
||||||
|
int rc;
|
||||||
|
|
||||||
|
/* Check if the current aib is adjacent to the sync port aib. */
|
||||||
|
sync_port = (port == 0) ? &etr_port0 : &etr_port1;
|
||||||
|
follows = etr_aib_follows(sync_port, aib, port);
|
||||||
|
memcpy(sync_port, aib, sizeof(*aib));
|
||||||
|
if (!follows)
|
||||||
|
return -EAGAIN;
|
||||||
|
memset(&etr_sync, 0, sizeof(etr_sync));
|
||||||
|
etr_sync.etr_aib = aib;
|
||||||
|
etr_sync.etr_port = port;
|
||||||
|
get_online_cpus();
|
||||||
|
atomic_set(&etr_sync.cpus, num_online_cpus() - 1);
|
||||||
|
rc = stop_machine(etr_sync_clock, &etr_sync, &cpu_online_map);
|
||||||
|
put_online_cpus();
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -903,7 +967,7 @@ static void etr_update_eacr(struct etr_eacr eacr)
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* ETR tasklet. In this function you'll find the main logic. In
|
* ETR work. In this function you'll find the main logic. In
|
||||||
* particular this is the only function that calls etr_update_eacr(),
|
* particular this is the only function that calls etr_update_eacr(),
|
||||||
* it "controls" the etr control register.
|
* it "controls" the etr control register.
|
||||||
*/
|
*/
|
||||||
|
@ -914,6 +978,9 @@ static void etr_work_fn(struct work_struct *work)
|
||||||
struct etr_aib aib;
|
struct etr_aib aib;
|
||||||
int sync_port;
|
int sync_port;
|
||||||
|
|
||||||
|
/* prevent multiple execution. */
|
||||||
|
mutex_lock(&etr_work_mutex);
|
||||||
|
|
||||||
/* Create working copy of etr_eacr. */
|
/* Create working copy of etr_eacr. */
|
||||||
eacr = etr_eacr;
|
eacr = etr_eacr;
|
||||||
|
|
||||||
|
@ -929,7 +996,7 @@ static void etr_work_fn(struct work_struct *work)
|
||||||
del_timer_sync(&etr_timer);
|
del_timer_sync(&etr_timer);
|
||||||
etr_update_eacr(eacr);
|
etr_update_eacr(eacr);
|
||||||
clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
|
clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
|
||||||
return;
|
goto out_unlock;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Store aib to get the current ETR status word. */
|
/* Store aib to get the current ETR status word. */
|
||||||
|
@ -1016,7 +1083,7 @@ static void etr_work_fn(struct work_struct *work)
|
||||||
eacr.es || sync_port < 0) {
|
eacr.es || sync_port < 0) {
|
||||||
etr_update_eacr(eacr);
|
etr_update_eacr(eacr);
|
||||||
etr_set_tolec_timeout(now);
|
etr_set_tolec_timeout(now);
|
||||||
return;
|
goto out_unlock;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1036,7 +1103,7 @@ static void etr_work_fn(struct work_struct *work)
|
||||||
etr_update_eacr(eacr);
|
etr_update_eacr(eacr);
|
||||||
set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
|
set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
|
||||||
if (now < etr_tolec + (1600000 << 12) ||
|
if (now < etr_tolec + (1600000 << 12) ||
|
||||||
etr_sync_clock(&aib, sync_port) != 0) {
|
etr_sync_clock_stop(&aib, sync_port) != 0) {
|
||||||
/* Sync failed. Try again in 1/2 second. */
|
/* Sync failed. Try again in 1/2 second. */
|
||||||
eacr.es = 0;
|
eacr.es = 0;
|
||||||
etr_update_eacr(eacr);
|
etr_update_eacr(eacr);
|
||||||
|
@ -1044,6 +1111,8 @@ static void etr_work_fn(struct work_struct *work)
|
||||||
etr_set_sync_timeout();
|
etr_set_sync_timeout();
|
||||||
} else
|
} else
|
||||||
etr_set_tolec_timeout(now);
|
etr_set_tolec_timeout(now);
|
||||||
|
out_unlock:
|
||||||
|
mutex_unlock(&etr_work_mutex);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1125,13 +1194,13 @@ static ssize_t etr_online_store(struct sys_device *dev,
|
||||||
return count; /* Nothing to do. */
|
return count; /* Nothing to do. */
|
||||||
etr_port0_online = value;
|
etr_port0_online = value;
|
||||||
set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
|
set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
|
||||||
schedule_work(&etr_work);
|
queue_work(time_sync_wq, &etr_work);
|
||||||
} else {
|
} else {
|
||||||
if (etr_port1_online == value)
|
if (etr_port1_online == value)
|
||||||
return count; /* Nothing to do. */
|
return count; /* Nothing to do. */
|
||||||
etr_port1_online = value;
|
etr_port1_online = value;
|
||||||
set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
|
set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
|
||||||
schedule_work(&etr_work);
|
queue_work(time_sync_wq, &etr_work);
|
||||||
}
|
}
|
||||||
return count;
|
return count;
|
||||||
}
|
}
|
||||||
|
@ -1332,6 +1401,7 @@ static struct stp_sstpi stp_info;
|
||||||
static void *stp_page;
|
static void *stp_page;
|
||||||
|
|
||||||
static void stp_work_fn(struct work_struct *work);
|
static void stp_work_fn(struct work_struct *work);
|
||||||
|
static DEFINE_MUTEX(stp_work_mutex);
|
||||||
static DECLARE_WORK(stp_work, stp_work_fn);
|
static DECLARE_WORK(stp_work, stp_work_fn);
|
||||||
|
|
||||||
static int __init early_parse_stp(char *p)
|
static int __init early_parse_stp(char *p)
|
||||||
|
@ -1356,7 +1426,8 @@ static void __init stp_reset(void)
|
||||||
if (rc == 0)
|
if (rc == 0)
|
||||||
set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
|
set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
|
||||||
else if (stp_online) {
|
else if (stp_online) {
|
||||||
printk(KERN_WARNING "Running on non STP capable machine.\n");
|
pr_warning("The real or virtual hardware system does "
|
||||||
|
"not provide an STP interface\n");
|
||||||
free_bootmem((unsigned long) stp_page, PAGE_SIZE);
|
free_bootmem((unsigned long) stp_page, PAGE_SIZE);
|
||||||
stp_page = NULL;
|
stp_page = NULL;
|
||||||
stp_online = 0;
|
stp_online = 0;
|
||||||
|
@ -1365,8 +1436,12 @@ static void __init stp_reset(void)
|
||||||
|
|
||||||
static int __init stp_init(void)
|
static int __init stp_init(void)
|
||||||
{
|
{
|
||||||
if (test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags) && stp_online)
|
if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
|
||||||
schedule_work(&stp_work);
|
return 0;
|
||||||
|
time_init_wq();
|
||||||
|
if (!stp_online)
|
||||||
|
return 0;
|
||||||
|
queue_work(time_sync_wq, &stp_work);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1383,7 +1458,7 @@ arch_initcall(stp_init);
|
||||||
static void stp_timing_alert(struct stp_irq_parm *intparm)
|
static void stp_timing_alert(struct stp_irq_parm *intparm)
|
||||||
{
|
{
|
||||||
if (intparm->tsc || intparm->lac || intparm->tcpc)
|
if (intparm->tsc || intparm->lac || intparm->tcpc)
|
||||||
schedule_work(&stp_work);
|
queue_work(time_sync_wq, &stp_work);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1397,7 +1472,7 @@ void stp_sync_check(void)
|
||||||
if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
|
if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
|
||||||
return;
|
return;
|
||||||
disable_sync_clock(NULL);
|
disable_sync_clock(NULL);
|
||||||
schedule_work(&stp_work);
|
queue_work(time_sync_wq, &stp_work);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1411,46 +1486,34 @@ void stp_island_check(void)
|
||||||
if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
|
if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
|
||||||
return;
|
return;
|
||||||
disable_sync_clock(NULL);
|
disable_sync_clock(NULL);
|
||||||
schedule_work(&stp_work);
|
queue_work(time_sync_wq, &stp_work);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* STP tasklet. Check for the STP state and take over the clock
|
static int stp_sync_clock(void *data)
|
||||||
* synchronization if the STP clock source is usable.
|
|
||||||
*/
|
|
||||||
static void stp_work_fn(struct work_struct *work)
|
|
||||||
{
|
{
|
||||||
struct clock_sync_data stp_sync;
|
static int first;
|
||||||
unsigned long long old_clock, delta;
|
unsigned long long old_clock, delta;
|
||||||
|
struct clock_sync_data *stp_sync;
|
||||||
int rc;
|
int rc;
|
||||||
|
|
||||||
if (!stp_online) {
|
stp_sync = data;
|
||||||
chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
|
|
||||||
return;
|
if (xchg(&first, 1) == 1) {
|
||||||
|
/* Slave */
|
||||||
|
clock_sync_cpu(stp_sync);
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0);
|
/* Wait until all other cpus entered the sync function. */
|
||||||
if (rc)
|
while (atomic_read(&stp_sync->cpus) != 0)
|
||||||
return;
|
cpu_relax();
|
||||||
|
|
||||||
rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
|
|
||||||
if (rc || stp_info.c == 0)
|
|
||||||
return;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Catch all other cpus and make them wait until we have
|
|
||||||
* successfully synced the clock. smp_call_function will
|
|
||||||
* return after all other cpus are in clock_sync_cpu_start.
|
|
||||||
*/
|
|
||||||
memset(&stp_sync, 0, sizeof(stp_sync));
|
|
||||||
preempt_disable();
|
|
||||||
smp_call_function(clock_sync_cpu_start, &stp_sync, 0);
|
|
||||||
local_irq_disable();
|
|
||||||
enable_sync_clock();
|
enable_sync_clock();
|
||||||
|
|
||||||
set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
|
set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
|
||||||
if (test_and_clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
|
if (test_and_clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
|
||||||
schedule_work(&etr_work);
|
queue_work(time_sync_wq, &etr_work);
|
||||||
|
|
||||||
rc = 0;
|
rc = 0;
|
||||||
if (stp_info.todoff[0] || stp_info.todoff[1] ||
|
if (stp_info.todoff[0] || stp_info.todoff[1] ||
|
||||||
|
@ -1469,16 +1532,49 @@ static void stp_work_fn(struct work_struct *work)
|
||||||
}
|
}
|
||||||
if (rc) {
|
if (rc) {
|
||||||
disable_sync_clock(NULL);
|
disable_sync_clock(NULL);
|
||||||
stp_sync.in_sync = -EAGAIN;
|
stp_sync->in_sync = -EAGAIN;
|
||||||
clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
|
clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
|
||||||
if (etr_port0_online || etr_port1_online)
|
if (etr_port0_online || etr_port1_online)
|
||||||
schedule_work(&etr_work);
|
queue_work(time_sync_wq, &etr_work);
|
||||||
} else
|
} else
|
||||||
stp_sync.in_sync = 1;
|
stp_sync->in_sync = 1;
|
||||||
|
xchg(&first, 0);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
local_irq_enable();
|
/*
|
||||||
smp_call_function(clock_sync_cpu_end, NULL, 0);
|
* STP work. Check for the STP state and take over the clock
|
||||||
preempt_enable();
|
* synchronization if the STP clock source is usable.
|
||||||
|
*/
|
||||||
|
static void stp_work_fn(struct work_struct *work)
|
||||||
|
{
|
||||||
|
struct clock_sync_data stp_sync;
|
||||||
|
int rc;
|
||||||
|
|
||||||
|
/* prevent multiple execution. */
|
||||||
|
mutex_lock(&stp_work_mutex);
|
||||||
|
|
||||||
|
if (!stp_online) {
|
||||||
|
chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
|
||||||
|
goto out_unlock;
|
||||||
|
}
|
||||||
|
|
||||||
|
rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0);
|
||||||
|
if (rc)
|
||||||
|
goto out_unlock;
|
||||||
|
|
||||||
|
rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
|
||||||
|
if (rc || stp_info.c == 0)
|
||||||
|
goto out_unlock;
|
||||||
|
|
||||||
|
memset(&stp_sync, 0, sizeof(stp_sync));
|
||||||
|
get_online_cpus();
|
||||||
|
atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
|
||||||
|
stop_machine(stp_sync_clock, &stp_sync, &cpu_online_map);
|
||||||
|
put_online_cpus();
|
||||||
|
|
||||||
|
out_unlock:
|
||||||
|
mutex_unlock(&stp_work_mutex);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1587,7 +1683,7 @@ static ssize_t stp_online_store(struct sysdev_class *class,
|
||||||
if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
|
if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
|
||||||
return -EOPNOTSUPP;
|
return -EOPNOTSUPP;
|
||||||
stp_online = value;
|
stp_online = value;
|
||||||
schedule_work(&stp_work);
|
queue_work(time_sync_wq, &stp_work);
|
||||||
return count;
|
return count;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -3,6 +3,9 @@
|
||||||
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
|
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
#define KMSG_COMPONENT "cpu"
|
||||||
|
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
|
||||||
|
|
||||||
#include <linux/kernel.h>
|
#include <linux/kernel.h>
|
||||||
#include <linux/mm.h>
|
#include <linux/mm.h>
|
||||||
#include <linux/init.h>
|
#include <linux/init.h>
|
||||||
|
@ -12,6 +15,7 @@
|
||||||
#include <linux/workqueue.h>
|
#include <linux/workqueue.h>
|
||||||
#include <linux/cpu.h>
|
#include <linux/cpu.h>
|
||||||
#include <linux/smp.h>
|
#include <linux/smp.h>
|
||||||
|
#include <linux/cpuset.h>
|
||||||
#include <asm/delay.h>
|
#include <asm/delay.h>
|
||||||
#include <asm/s390_ext.h>
|
#include <asm/s390_ext.h>
|
||||||
#include <asm/sysinfo.h>
|
#include <asm/sysinfo.h>
|
||||||
|
@ -57,11 +61,11 @@ struct core_info {
|
||||||
cpumask_t mask;
|
cpumask_t mask;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
static int topology_enabled;
|
||||||
static void topology_work_fn(struct work_struct *work);
|
static void topology_work_fn(struct work_struct *work);
|
||||||
static struct tl_info *tl_info;
|
static struct tl_info *tl_info;
|
||||||
static struct core_info core_info;
|
static struct core_info core_info;
|
||||||
static int machine_has_topology;
|
static int machine_has_topology;
|
||||||
static int machine_has_topology_irq;
|
|
||||||
static struct timer_list topology_timer;
|
static struct timer_list topology_timer;
|
||||||
static void set_topology_timer(void);
|
static void set_topology_timer(void);
|
||||||
static DECLARE_WORK(topology_work, topology_work_fn);
|
static DECLARE_WORK(topology_work, topology_work_fn);
|
||||||
|
@ -77,8 +81,8 @@ cpumask_t cpu_coregroup_map(unsigned int cpu)
|
||||||
cpumask_t mask;
|
cpumask_t mask;
|
||||||
|
|
||||||
cpus_clear(mask);
|
cpus_clear(mask);
|
||||||
if (!machine_has_topology)
|
if (!topology_enabled || !machine_has_topology)
|
||||||
return cpu_present_map;
|
return cpu_possible_map;
|
||||||
spin_lock_irqsave(&topology_lock, flags);
|
spin_lock_irqsave(&topology_lock, flags);
|
||||||
while (core) {
|
while (core) {
|
||||||
if (cpu_isset(cpu, core->mask)) {
|
if (cpu_isset(cpu, core->mask)) {
|
||||||
|
@ -168,7 +172,7 @@ static void topology_update_polarization_simple(void)
|
||||||
int cpu;
|
int cpu;
|
||||||
|
|
||||||
mutex_lock(&smp_cpu_state_mutex);
|
mutex_lock(&smp_cpu_state_mutex);
|
||||||
for_each_present_cpu(cpu)
|
for_each_possible_cpu(cpu)
|
||||||
smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
|
smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
|
||||||
mutex_unlock(&smp_cpu_state_mutex);
|
mutex_unlock(&smp_cpu_state_mutex);
|
||||||
}
|
}
|
||||||
|
@ -199,7 +203,7 @@ int topology_set_cpu_management(int fc)
|
||||||
rc = ptf(PTF_HORIZONTAL);
|
rc = ptf(PTF_HORIZONTAL);
|
||||||
if (rc)
|
if (rc)
|
||||||
return -EBUSY;
|
return -EBUSY;
|
||||||
for_each_present_cpu(cpu)
|
for_each_possible_cpu(cpu)
|
||||||
smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
|
smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
@ -208,7 +212,7 @@ static void update_cpu_core_map(void)
|
||||||
{
|
{
|
||||||
int cpu;
|
int cpu;
|
||||||
|
|
||||||
for_each_present_cpu(cpu)
|
for_each_possible_cpu(cpu)
|
||||||
cpu_core_map[cpu] = cpu_coregroup_map(cpu);
|
cpu_core_map[cpu] = cpu_coregroup_map(cpu);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -235,7 +239,7 @@ int arch_update_cpu_topology(void)
|
||||||
|
|
||||||
static void topology_work_fn(struct work_struct *work)
|
static void topology_work_fn(struct work_struct *work)
|
||||||
{
|
{
|
||||||
arch_reinit_sched_domains();
|
rebuild_sched_domains();
|
||||||
}
|
}
|
||||||
|
|
||||||
void topology_schedule_update(void)
|
void topology_schedule_update(void)
|
||||||
|
@ -258,10 +262,14 @@ static void set_topology_timer(void)
|
||||||
add_timer(&topology_timer);
|
add_timer(&topology_timer);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void topology_interrupt(__u16 code)
|
static int __init early_parse_topology(char *p)
|
||||||
{
|
{
|
||||||
schedule_work(&topology_work);
|
if (strncmp(p, "on", 2))
|
||||||
|
return 0;
|
||||||
|
topology_enabled = 1;
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
early_param("topology", early_parse_topology);
|
||||||
|
|
||||||
static int __init init_topology_update(void)
|
static int __init init_topology_update(void)
|
||||||
{
|
{
|
||||||
|
@ -273,14 +281,7 @@ static int __init init_topology_update(void)
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
init_timer_deferrable(&topology_timer);
|
init_timer_deferrable(&topology_timer);
|
||||||
if (machine_has_topology_irq) {
|
set_topology_timer();
|
||||||
rc = register_external_interrupt(0x2005, topology_interrupt);
|
|
||||||
if (rc)
|
|
||||||
goto out;
|
|
||||||
ctl_set_bit(0, 8);
|
|
||||||
}
|
|
||||||
else
|
|
||||||
set_topology_timer();
|
|
||||||
out:
|
out:
|
||||||
update_cpu_core_map();
|
update_cpu_core_map();
|
||||||
return rc;
|
return rc;
|
||||||
|
@ -301,9 +302,6 @@ void __init s390_init_cpu_topology(void)
|
||||||
return;
|
return;
|
||||||
machine_has_topology = 1;
|
machine_has_topology = 1;
|
||||||
|
|
||||||
if (facility_bits & (1ULL << 51))
|
|
||||||
machine_has_topology_irq = 1;
|
|
||||||
|
|
||||||
tl_info = alloc_bootmem_pages(PAGE_SIZE);
|
tl_info = alloc_bootmem_pages(PAGE_SIZE);
|
||||||
info = tl_info;
|
info = tl_info;
|
||||||
stsi(info, 15, 1, 2);
|
stsi(info, 15, 1, 2);
|
||||||
|
@ -312,7 +310,7 @@ void __init s390_init_cpu_topology(void)
|
||||||
for (i = 0; i < info->mnest - 2; i++)
|
for (i = 0; i < info->mnest - 2; i++)
|
||||||
nr_cores *= info->mag[NR_MAG - 3 - i];
|
nr_cores *= info->mag[NR_MAG - 3 - i];
|
||||||
|
|
||||||
printk(KERN_INFO "CPU topology:");
|
pr_info("The CPU configuration topology of the machine is:");
|
||||||
for (i = 0; i < NR_MAG; i++)
|
for (i = 0; i < NR_MAG; i++)
|
||||||
printk(" %d", info->mag[i]);
|
printk(" %d", info->mag[i]);
|
||||||
printk(" / %d\n", info->mnest);
|
printk(" / %d\n", info->mnest);
|
||||||
|
@ -327,5 +325,4 @@ void __init s390_init_cpu_topology(void)
|
||||||
return;
|
return;
|
||||||
error:
|
error:
|
||||||
machine_has_topology = 0;
|
machine_has_topology = 0;
|
||||||
machine_has_topology_irq = 0;
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,234 @@
|
||||||
|
/*
|
||||||
|
* vdso setup for s390
|
||||||
|
*
|
||||||
|
* Copyright IBM Corp. 2008
|
||||||
|
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
|
||||||
|
*
|
||||||
|
* This program is free software; you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU General Public License (version 2 only)
|
||||||
|
* as published by the Free Software Foundation.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <linux/module.h>
|
||||||
|
#include <linux/errno.h>
|
||||||
|
#include <linux/sched.h>
|
||||||
|
#include <linux/kernel.h>
|
||||||
|
#include <linux/mm.h>
|
||||||
|
#include <linux/smp.h>
|
||||||
|
#include <linux/stddef.h>
|
||||||
|
#include <linux/unistd.h>
|
||||||
|
#include <linux/slab.h>
|
||||||
|
#include <linux/user.h>
|
||||||
|
#include <linux/elf.h>
|
||||||
|
#include <linux/security.h>
|
||||||
|
#include <linux/bootmem.h>
|
||||||
|
|
||||||
|
#include <asm/pgtable.h>
|
||||||
|
#include <asm/system.h>
|
||||||
|
#include <asm/processor.h>
|
||||||
|
#include <asm/mmu.h>
|
||||||
|
#include <asm/mmu_context.h>
|
||||||
|
#include <asm/sections.h>
|
||||||
|
#include <asm/vdso.h>
|
||||||
|
|
||||||
|
/* Max supported size for symbol names */
|
||||||
|
#define MAX_SYMNAME 64
|
||||||
|
|
||||||
|
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
|
||||||
|
extern char vdso32_start, vdso32_end;
|
||||||
|
static void *vdso32_kbase = &vdso32_start;
|
||||||
|
static unsigned int vdso32_pages;
|
||||||
|
static struct page **vdso32_pagelist;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_64BIT
|
||||||
|
extern char vdso64_start, vdso64_end;
|
||||||
|
static void *vdso64_kbase = &vdso64_start;
|
||||||
|
static unsigned int vdso64_pages;
|
||||||
|
static struct page **vdso64_pagelist;
|
||||||
|
#endif /* CONFIG_64BIT */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Should the kernel map a VDSO page into processes and pass its
|
||||||
|
* address down to glibc upon exec()?
|
||||||
|
*/
|
||||||
|
unsigned int __read_mostly vdso_enabled = 1;
|
||||||
|
|
||||||
|
static int __init vdso_setup(char *s)
|
||||||
|
{
|
||||||
|
vdso_enabled = simple_strtoul(s, NULL, 0);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
__setup("vdso=", vdso_setup);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The vdso data page
|
||||||
|
*/
|
||||||
|
static union {
|
||||||
|
struct vdso_data data;
|
||||||
|
u8 page[PAGE_SIZE];
|
||||||
|
} vdso_data_store __attribute__((__section__(".data.page_aligned")));
|
||||||
|
struct vdso_data *vdso_data = &vdso_data_store.data;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This is called from binfmt_elf, we create the special vma for the
|
||||||
|
* vDSO and insert it into the mm struct tree
|
||||||
|
*/
|
||||||
|
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
|
||||||
|
{
|
||||||
|
struct mm_struct *mm = current->mm;
|
||||||
|
struct page **vdso_pagelist;
|
||||||
|
unsigned long vdso_pages;
|
||||||
|
unsigned long vdso_base;
|
||||||
|
int rc;
|
||||||
|
|
||||||
|
if (!vdso_enabled)
|
||||||
|
return 0;
|
||||||
|
/*
|
||||||
|
* Only map the vdso for dynamically linked elf binaries.
|
||||||
|
*/
|
||||||
|
if (!uses_interp)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
vdso_base = mm->mmap_base;
|
||||||
|
#ifdef CONFIG_64BIT
|
||||||
|
vdso_pagelist = vdso64_pagelist;
|
||||||
|
vdso_pages = vdso64_pages;
|
||||||
|
#ifdef CONFIG_COMPAT
|
||||||
|
if (test_thread_flag(TIF_31BIT)) {
|
||||||
|
vdso_pagelist = vdso32_pagelist;
|
||||||
|
vdso_pages = vdso32_pages;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
#else
|
||||||
|
vdso_pagelist = vdso32_pagelist;
|
||||||
|
vdso_pages = vdso32_pages;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* vDSO has a problem and was disabled, just don't "enable" it for
|
||||||
|
* the process
|
||||||
|
*/
|
||||||
|
if (vdso_pages == 0)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
current->mm->context.vdso_base = 0;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* pick a base address for the vDSO in process space. We try to put
|
||||||
|
* it at vdso_base which is the "natural" base for it, but we might
|
||||||
|
* fail and end up putting it elsewhere.
|
||||||
|
*/
|
||||||
|
down_write(&mm->mmap_sem);
|
||||||
|
vdso_base = get_unmapped_area(NULL, vdso_base,
|
||||||
|
vdso_pages << PAGE_SHIFT, 0, 0);
|
||||||
|
if (IS_ERR_VALUE(vdso_base)) {
|
||||||
|
rc = vdso_base;
|
||||||
|
goto out_up;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* our vma flags don't have VM_WRITE so by default, the process
|
||||||
|
* isn't allowed to write those pages.
|
||||||
|
* gdb can break that with ptrace interface, and thus trigger COW
|
||||||
|
* on those pages but it's then your responsibility to never do that
|
||||||
|
* on the "data" page of the vDSO or you'll stop getting kernel
|
||||||
|
* updates and your nice userland gettimeofday will be totally dead.
|
||||||
|
* It's fine to use that for setting breakpoints in the vDSO code
|
||||||
|
* pages though
|
||||||
|
*
|
||||||
|
* Make sure the vDSO gets into every core dump.
|
||||||
|
* Dumping its contents makes post-mortem fully interpretable later
|
||||||
|
* without matching up the same kernel and hardware config to see
|
||||||
|
* what PC values meant.
|
||||||
|
*/
|
||||||
|
rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
|
||||||
|
VM_READ|VM_EXEC|
|
||||||
|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
|
||||||
|
VM_ALWAYSDUMP,
|
||||||
|
vdso_pagelist);
|
||||||
|
if (rc)
|
||||||
|
goto out_up;
|
||||||
|
|
||||||
|
/* Put vDSO base into mm struct */
|
||||||
|
current->mm->context.vdso_base = vdso_base;
|
||||||
|
|
||||||
|
up_write(&mm->mmap_sem);
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
out_up:
|
||||||
|
up_write(&mm->mmap_sem);
|
||||||
|
return rc;
|
||||||
|
}
|
||||||
|
|
||||||
|
const char *arch_vma_name(struct vm_area_struct *vma)
|
||||||
|
{
|
||||||
|
if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
|
||||||
|
return "[vdso]";
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int __init vdso_init(void)
|
||||||
|
{
|
||||||
|
int i;
|
||||||
|
|
||||||
|
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
|
||||||
|
/* Calculate the size of the 32 bit vDSO */
|
||||||
|
vdso32_pages = ((&vdso32_end - &vdso32_start
|
||||||
|
+ PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
|
||||||
|
|
||||||
|
/* Make sure pages are in the correct state */
|
||||||
|
vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
|
||||||
|
GFP_KERNEL);
|
||||||
|
BUG_ON(vdso32_pagelist == NULL);
|
||||||
|
for (i = 0; i < vdso32_pages - 1; i++) {
|
||||||
|
struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
|
||||||
|
ClearPageReserved(pg);
|
||||||
|
get_page(pg);
|
||||||
|
vdso32_pagelist[i] = pg;
|
||||||
|
}
|
||||||
|
vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
|
||||||
|
vdso32_pagelist[vdso32_pages] = NULL;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_64BIT
|
||||||
|
/* Calculate the size of the 64 bit vDSO */
|
||||||
|
vdso64_pages = ((&vdso64_end - &vdso64_start
|
||||||
|
+ PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
|
||||||
|
|
||||||
|
/* Make sure pages are in the correct state */
|
||||||
|
vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
|
||||||
|
GFP_KERNEL);
|
||||||
|
BUG_ON(vdso64_pagelist == NULL);
|
||||||
|
for (i = 0; i < vdso64_pages - 1; i++) {
|
||||||
|
struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
|
||||||
|
ClearPageReserved(pg);
|
||||||
|
get_page(pg);
|
||||||
|
vdso64_pagelist[i] = pg;
|
||||||
|
}
|
||||||
|
vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
|
||||||
|
vdso64_pagelist[vdso64_pages] = NULL;
|
||||||
|
#endif /* CONFIG_64BIT */
|
||||||
|
|
||||||
|
get_page(virt_to_page(vdso_data));
|
||||||
|
|
||||||
|
smp_wmb();
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
arch_initcall(vdso_init);
|
||||||
|
|
||||||
|
int in_gate_area_no_task(unsigned long addr)
|
||||||
|
{
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int in_gate_area(struct task_struct *task, unsigned long addr)
|
||||||
|
{
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
|
||||||
|
{
|
||||||
|
return NULL;
|
||||||
|
}
|
|
@ -0,0 +1,55 @@
|
||||||
|
# List of files in the vdso, has to be asm only for now
|
||||||
|
|
||||||
|
obj-vdso32 = gettimeofday.o clock_getres.o clock_gettime.o note.o
|
||||||
|
|
||||||
|
# Build rules
|
||||||
|
|
||||||
|
targets := $(obj-vdso32) vdso32.so vdso32.so.dbg
|
||||||
|
obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
|
||||||
|
|
||||||
|
KBUILD_AFLAGS_31 := $(filter-out -m64,$(KBUILD_AFLAGS))
|
||||||
|
KBUILD_AFLAGS_31 += -m31 -s
|
||||||
|
|
||||||
|
KBUILD_CFLAGS_31 := $(filter-out -m64,$(KBUILD_CFLAGS))
|
||||||
|
KBUILD_CFLAGS_31 += -m31 -fPIC -shared -fno-common -fno-builtin
|
||||||
|
KBUILD_CFLAGS_31 += -nostdlib -Wl,-soname=linux-vdso32.so.1 \
|
||||||
|
$(call ld-option, -Wl$(comma)--hash-style=sysv)
|
||||||
|
|
||||||
|
$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_31)
|
||||||
|
$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_31)
|
||||||
|
|
||||||
|
obj-y += vdso32_wrapper.o
|
||||||
|
extra-y += vdso32.lds
|
||||||
|
CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
|
||||||
|
|
||||||
|
# Force dependency (incbin is bad)
|
||||||
|
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
|
||||||
|
|
||||||
|
# link rule for the .so file, .lds has to be first
|
||||||
|
$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32)
|
||||||
|
$(call if_changed,vdso32ld)
|
||||||
|
|
||||||
|
# strip rule for the .so file
|
||||||
|
$(obj)/%.so: OBJCOPYFLAGS := -S
|
||||||
|
$(obj)/%.so: $(obj)/%.so.dbg FORCE
|
||||||
|
$(call if_changed,objcopy)
|
||||||
|
|
||||||
|
# assembly rules for the .S files
|
||||||
|
$(obj-vdso32): %.o: %.S
|
||||||
|
$(call if_changed_dep,vdso32as)
|
||||||
|
|
||||||
|
# actual build commands
|
||||||
|
quiet_cmd_vdso32ld = VDSO32L $@
|
||||||
|
cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
|
||||||
|
quiet_cmd_vdso32as = VDSO32A $@
|
||||||
|
cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $<
|
||||||
|
|
||||||
|
# install commands for the unstripped file
|
||||||
|
quiet_cmd_vdso_install = INSTALL $@
|
||||||
|
cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
|
||||||
|
|
||||||
|
vdso32.so: $(obj)/vdso32.so.dbg
|
||||||
|
@mkdir -p $(MODLIB)/vdso
|
||||||
|
$(call cmd,vdso_install)
|
||||||
|
|
||||||
|
vdso_install: vdso32.so
|
|
@ -0,0 +1,39 @@
|
||||||
|
/*
|
||||||
|
* Userland implementation of clock_getres() for 32 bits processes in a
|
||||||
|
* s390 kernel for use in the vDSO
|
||||||
|
*
|
||||||
|
* Copyright IBM Corp. 2008
|
||||||
|
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
|
||||||
|
*
|
||||||
|
* This program is free software; you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU General Public License (version 2 only)
|
||||||
|
* as published by the Free Software Foundation.
|
||||||
|
*/
|
||||||
|
#include <asm/vdso.h>
|
||||||
|
#include <asm/asm-offsets.h>
|
||||||
|
#include <asm/unistd.h>
|
||||||
|
|
||||||
|
.text
|
||||||
|
.align 4
|
||||||
|
.globl __kernel_clock_getres
|
||||||
|
.type __kernel_clock_getres,@function
|
||||||
|
__kernel_clock_getres:
|
||||||
|
.cfi_startproc
|
||||||
|
chi %r2,CLOCK_REALTIME
|
||||||
|
je 0f
|
||||||
|
chi %r2,CLOCK_MONOTONIC
|
||||||
|
jne 3f
|
||||||
|
0: ltr %r3,%r3
|
||||||
|
jz 2f /* res == NULL */
|
||||||
|
basr %r1,0
|
||||||
|
1: l %r0,4f-1b(%r1)
|
||||||
|
xc 0(4,%r3),0(%r3) /* set tp->tv_sec to zero */
|
||||||
|
st %r0,4(%r3) /* store tp->tv_usec */
|
||||||
|
2: lhi %r2,0
|
||||||
|
br %r14
|
||||||
|
3: lhi %r1,__NR_clock_getres /* fallback to svc */
|
||||||
|
svc 0
|
||||||
|
br %r14
|
||||||
|
4: .long CLOCK_REALTIME_RES
|
||||||
|
.cfi_endproc
|
||||||
|
.size __kernel_clock_getres,.-__kernel_clock_getres
|
|
@ -0,0 +1,128 @@
|
||||||
|
/*
|
||||||
|
* Userland implementation of clock_gettime() for 32 bits processes in a
|
||||||
|
* s390 kernel for use in the vDSO
|
||||||
|
*
|
||||||
|
* Copyright IBM Corp. 2008
|
||||||
|
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
|
||||||
|
*
|
||||||
|
* This program is free software; you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU General Public License (version 2 only)
|
||||||
|
* as published by the Free Software Foundation.
|
||||||
|
*/
|
||||||
|
#include <asm/vdso.h>
|
||||||
|
#include <asm/asm-offsets.h>
|
||||||
|
#include <asm/unistd.h>
|
||||||
|
|
||||||
|
.text
|
||||||
|
.align 4
|
||||||
|
.globl __kernel_clock_gettime
|
||||||
|
.type __kernel_clock_gettime,@function
|
||||||
|
__kernel_clock_gettime:
|
||||||
|
.cfi_startproc
|
||||||
|
basr %r5,0
|
||||||
|
0: al %r5,21f-0b(%r5) /* get &_vdso_data */
|
||||||
|
chi %r2,CLOCK_REALTIME
|
||||||
|
je 10f
|
||||||
|
chi %r2,CLOCK_MONOTONIC
|
||||||
|
jne 19f
|
||||||
|
|
||||||
|
/* CLOCK_MONOTONIC */
|
||||||
|
ltr %r3,%r3
|
||||||
|
jz 9f /* tp == NULL */
|
||||||
|
1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
|
||||||
|
tml %r4,0x0001 /* pending update ? loop */
|
||||||
|
jnz 1b
|
||||||
|
stck 24(%r15) /* Store TOD clock */
|
||||||
|
lm %r0,%r1,24(%r15)
|
||||||
|
s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
|
||||||
|
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
|
||||||
|
brc 3,2f
|
||||||
|
ahi %r0,-1
|
||||||
|
2: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */
|
||||||
|
lr %r2,%r0
|
||||||
|
lhi %r0,1000
|
||||||
|
ltr %r1,%r1
|
||||||
|
mr %r0,%r0
|
||||||
|
jnm 3f
|
||||||
|
ahi %r0,1000
|
||||||
|
3: alr %r0,%r2
|
||||||
|
srdl %r0,12
|
||||||
|
al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
|
||||||
|
al %r1,__VDSO_XTIME_NSEC+4(%r5)
|
||||||
|
brc 12,4f
|
||||||
|
ahi %r0,1
|
||||||
|
4: l %r2,__VDSO_XTIME_SEC+4(%r5)
|
||||||
|
al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */
|
||||||
|
al %r1,__VDSO_WTOM_NSEC+4(%r5)
|
||||||
|
brc 12,5f
|
||||||
|
ahi %r0,1
|
||||||
|
5: al %r2,__VDSO_WTOM_SEC+4(%r5)
|
||||||
|
cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
|
||||||
|
jne 1b
|
||||||
|
basr %r5,0
|
||||||
|
6: ltr %r0,%r0
|
||||||
|
jnz 7f
|
||||||
|
cl %r1,20f-6b(%r5)
|
||||||
|
jl 8f
|
||||||
|
7: ahi %r2,1
|
||||||
|
sl %r1,20f-6b(%r5)
|
||||||
|
brc 3,6b
|
||||||
|
ahi %r0,-1
|
||||||
|
j 6b
|
||||||
|
8: st %r2,0(%r3) /* store tp->tv_sec */
|
||||||
|
st %r1,4(%r3) /* store tp->tv_nsec */
|
||||||
|
9: lhi %r2,0
|
||||||
|
br %r14
|
||||||
|
|
||||||
|
/* CLOCK_REALTIME */
|
||||||
|
10: ltr %r3,%r3 /* tp == NULL */
|
||||||
|
jz 18f
|
||||||
|
11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
|
||||||
|
tml %r4,0x0001 /* pending update ? loop */
|
||||||
|
jnz 11b
|
||||||
|
stck 24(%r15) /* Store TOD clock */
|
||||||
|
lm %r0,%r1,24(%r15)
|
||||||
|
s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
|
||||||
|
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
|
||||||
|
brc 3,12f
|
||||||
|
ahi %r0,-1
|
||||||
|
12: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */
|
||||||
|
lr %r2,%r0
|
||||||
|
lhi %r0,1000
|
||||||
|
ltr %r1,%r1
|
||||||
|
mr %r0,%r0
|
||||||
|
jnm 13f
|
||||||
|
ahi %r0,1000
|
||||||
|
13: alr %r0,%r2
|
||||||
|
srdl %r0,12
|
||||||
|
al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
|
||||||
|
al %r1,__VDSO_XTIME_NSEC+4(%r5)
|
||||||
|
brc 12,14f
|
||||||
|
ahi %r0,1
|
||||||
|
14: l %r2,__VDSO_XTIME_SEC+4(%r5)
|
||||||
|
cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
|
||||||
|
jne 11b
|
||||||
|
basr %r5,0
|
||||||
|
15: ltr %r0,%r0
|
||||||
|
jnz 16f
|
||||||
|
cl %r1,20f-15b(%r5)
|
||||||
|
jl 17f
|
||||||
|
16: ahi %r2,1
|
||||||
|
sl %r1,20f-15b(%r5)
|
||||||
|
brc 3,15b
|
||||||
|
ahi %r0,-1
|
||||||
|
j 15b
|
||||||
|
17: st %r2,0(%r3) /* store tp->tv_sec */
|
||||||
|
st %r1,4(%r3) /* store tp->tv_nsec */
|
||||||
|
18: lhi %r2,0
|
||||||
|
br %r14
|
||||||
|
|
||||||
|
/* Fallback to system call */
|
||||||
|
19: lhi %r1,__NR_clock_gettime
|
||||||
|
svc 0
|
||||||
|
br %r14
|
||||||
|
|
||||||
|
20: .long 1000000000
|
||||||
|
21: .long _vdso_data - 0b
|
||||||
|
.cfi_endproc
|
||||||
|
.size __kernel_clock_gettime,.-__kernel_clock_gettime
|
|
@ -0,0 +1,82 @@
|
||||||
|
/*
|
||||||
|
* Userland implementation of gettimeofday() for 32 bits processes in a
|
||||||
|
* s390 kernel for use in the vDSO
|
||||||
|
*
|
||||||
|
* Copyright IBM Corp. 2008
|
||||||
|
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
|
||||||
|
*
|
||||||
|
* This program is free software; you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU General Public License (version 2 only)
|
||||||
|
* as published by the Free Software Foundation.
|
||||||
|
*/
|
||||||
|
#include <asm/vdso.h>
|
||||||
|
#include <asm/asm-offsets.h>
|
||||||
|
#include <asm/unistd.h>
|
||||||
|
|
||||||
|
#include <asm/vdso.h>
|
||||||
|
#include <asm/asm-offsets.h>
|
||||||
|
#include <asm/unistd.h>
|
||||||
|
|
||||||
|
.text
|
||||||
|
.align 4
|
||||||
|
.globl __kernel_gettimeofday
|
||||||
|
.type __kernel_gettimeofday,@function
|
||||||
|
__kernel_gettimeofday:
|
||||||
|
.cfi_startproc
|
||||||
|
basr %r5,0
|
||||||
|
0: al %r5,13f-0b(%r5) /* get &_vdso_data */
|
||||||
|
1: ltr %r3,%r3 /* check if tz is NULL */
|
||||||
|
je 2f
|
||||||
|
mvc 0(8,%r3),__VDSO_TIMEZONE(%r5)
|
||||||
|
2: ltr %r2,%r2 /* check if tv is NULL */
|
||||||
|
je 10f
|
||||||
|
l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
|
||||||
|
tml %r4,0x0001 /* pending update ? loop */
|
||||||
|
jnz 1b
|
||||||
|
stck 24(%r15) /* Store TOD clock */
|
||||||
|
lm %r0,%r1,24(%r15)
|
||||||
|
s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
|
||||||
|
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
|
||||||
|
brc 3,3f
|
||||||
|
ahi %r0,-1
|
||||||
|
3: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */
|
||||||
|
st %r0,24(%r15)
|
||||||
|
lhi %r0,1000
|
||||||
|
ltr %r1,%r1
|
||||||
|
mr %r0,%r0
|
||||||
|
jnm 4f
|
||||||
|
ahi %r0,1000
|
||||||
|
4: al %r0,24(%r15)
|
||||||
|
srdl %r0,12
|
||||||
|
al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
|
||||||
|
al %r1,__VDSO_XTIME_NSEC+4(%r5)
|
||||||
|
brc 12,5f
|
||||||
|
ahi %r0,1
|
||||||
|
5: mvc 24(4,%r15),__VDSO_XTIME_SEC+4(%r5)
|
||||||
|
cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
|
||||||
|
jne 1b
|
||||||
|
l %r4,24(%r15) /* get tv_sec from stack */
|
||||||
|
basr %r5,0
|
||||||
|
6: ltr %r0,%r0
|
||||||
|
jnz 7f
|
||||||
|
cl %r1,11f-6b(%r5)
|
||||||
|
jl 8f
|
||||||
|
7: ahi %r4,1
|
||||||
|
sl %r1,11f-6b(%r5)
|
||||||
|
brc 3,6b
|
||||||
|
ahi %r0,-1
|
||||||
|
j 6b
|
||||||
|
8: st %r4,0(%r2) /* store tv->tv_sec */
|
||||||
|
ltr %r1,%r1
|
||||||
|
m %r0,12f-6b(%r5)
|
||||||
|
jnm 9f
|
||||||
|
al %r0,12f-6b(%r5)
|
||||||
|
9: srl %r0,6
|
||||||
|
st %r0,4(%r2) /* store tv->tv_usec */
|
||||||
|
10: slr %r2,%r2
|
||||||
|
br %r14
|
||||||
|
11: .long 1000000000
|
||||||
|
12: .long 274877907
|
||||||
|
13: .long _vdso_data - 0b
|
||||||
|
.cfi_endproc
|
||||||
|
.size __kernel_gettimeofday,.-__kernel_gettimeofday
|
|
@ -0,0 +1,12 @@
|
||||||
|
/*
|
||||||
|
* This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
|
||||||
|
* Here we can supply some information useful to userland.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <linux/uts.h>
|
||||||
|
#include <linux/version.h>
|
||||||
|
#include <linux/elfnote.h>
|
||||||
|
|
||||||
|
ELFNOTE_START(Linux, 0, "a")
|
||||||
|
.long LINUX_VERSION_CODE
|
||||||
|
ELFNOTE_END
|
|
@ -0,0 +1,138 @@
|
||||||
|
/*
|
||||||
|
* This is the infamous ld script for the 32 bits vdso
|
||||||
|
* library
|
||||||
|
*/
|
||||||
|
#include <asm/vdso.h>
|
||||||
|
|
||||||
|
OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
|
||||||
|
OUTPUT_ARCH(s390:31-bit)
|
||||||
|
ENTRY(_start)
|
||||||
|
|
||||||
|
SECTIONS
|
||||||
|
{
|
||||||
|
. = VDSO32_LBASE + SIZEOF_HEADERS;
|
||||||
|
|
||||||
|
.hash : { *(.hash) } :text
|
||||||
|
.gnu.hash : { *(.gnu.hash) }
|
||||||
|
.dynsym : { *(.dynsym) }
|
||||||
|
.dynstr : { *(.dynstr) }
|
||||||
|
.gnu.version : { *(.gnu.version) }
|
||||||
|
.gnu.version_d : { *(.gnu.version_d) }
|
||||||
|
.gnu.version_r : { *(.gnu.version_r) }
|
||||||
|
|
||||||
|
.note : { *(.note.*) } :text :note
|
||||||
|
|
||||||
|
. = ALIGN(16);
|
||||||
|
.text : {
|
||||||
|
*(.text .stub .text.* .gnu.linkonce.t.*)
|
||||||
|
} :text
|
||||||
|
PROVIDE(__etext = .);
|
||||||
|
PROVIDE(_etext = .);
|
||||||
|
PROVIDE(etext = .);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Other stuff is appended to the text segment:
|
||||||
|
*/
|
||||||
|
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
|
||||||
|
.rodata1 : { *(.rodata1) }
|
||||||
|
|
||||||
|
.dynamic : { *(.dynamic) } :text :dynamic
|
||||||
|
|
||||||
|
.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
|
||||||
|
.eh_frame : { KEEP (*(.eh_frame)) } :text
|
||||||
|
.gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) }
|
||||||
|
|
||||||
|
.rela.dyn ALIGN(8) : { *(.rela.dyn) }
|
||||||
|
.got ALIGN(8) : { *(.got .toc) }
|
||||||
|
|
||||||
|
_end = .;
|
||||||
|
PROVIDE(end = .);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Stabs debugging sections are here too.
|
||||||
|
*/
|
||||||
|
.stab 0 : { *(.stab) }
|
||||||
|
.stabstr 0 : { *(.stabstr) }
|
||||||
|
.stab.excl 0 : { *(.stab.excl) }
|
||||||
|
.stab.exclstr 0 : { *(.stab.exclstr) }
|
||||||
|
.stab.index 0 : { *(.stab.index) }
|
||||||
|
.stab.indexstr 0 : { *(.stab.indexstr) }
|
||||||
|
.comment 0 : { *(.comment) }
|
||||||
|
|
||||||
|
/*
|
||||||
|
* DWARF debug sections.
|
||||||
|
* Symbols in the DWARF debugging sections are relative to the
|
||||||
|
* beginning of the section so we begin them at 0.
|
||||||
|
*/
|
||||||
|
/* DWARF 1 */
|
||||||
|
.debug 0 : { *(.debug) }
|
||||||
|
.line 0 : { *(.line) }
|
||||||
|
/* GNU DWARF 1 extensions */
|
||||||
|
.debug_srcinfo 0 : { *(.debug_srcinfo) }
|
||||||
|
.debug_sfnames 0 : { *(.debug_sfnames) }
|
||||||
|
/* DWARF 1.1 and DWARF 2 */
|
||||||
|
.debug_aranges 0 : { *(.debug_aranges) }
|
||||||
|
.debug_pubnames 0 : { *(.debug_pubnames) }
|
||||||
|
/* DWARF 2 */
|
||||||
|
.debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
|
||||||
|
.debug_abbrev 0 : { *(.debug_abbrev) }
|
||||||
|
.debug_line 0 : { *(.debug_line) }
|
||||||
|
.debug_frame 0 : { *(.debug_frame) }
|
||||||
|
.debug_str 0 : { *(.debug_str) }
|
||||||
|
.debug_loc 0 : { *(.debug_loc) }
|
||||||
|
.debug_macinfo 0 : { *(.debug_macinfo) }
|
||||||
|
/* SGI/MIPS DWARF 2 extensions */
|
||||||
|
.debug_weaknames 0 : { *(.debug_weaknames) }
|
||||||
|
.debug_funcnames 0 : { *(.debug_funcnames) }
|
||||||
|
.debug_typenames 0 : { *(.debug_typenames) }
|
||||||
|
.debug_varnames 0 : { *(.debug_varnames) }
|
||||||
|
/* DWARF 3 */
|
||||||
|
.debug_pubtypes 0 : { *(.debug_pubtypes) }
|
||||||
|
.debug_ranges 0 : { *(.debug_ranges) }
|
||||||
|
.gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
|
||||||
|
|
||||||
|
. = ALIGN(4096);
|
||||||
|
PROVIDE(_vdso_data = .);
|
||||||
|
|
||||||
|
/DISCARD/ : {
|
||||||
|
*(.note.GNU-stack)
|
||||||
|
*(.branch_lt)
|
||||||
|
*(.data .data.* .gnu.linkonce.d.* .sdata*)
|
||||||
|
*(.bss .sbss .dynbss .dynsbss)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Very old versions of ld do not recognize this name token; use the constant.
|
||||||
|
*/
|
||||||
|
#define PT_GNU_EH_FRAME 0x6474e550
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We must supply the ELF program headers explicitly to get just one
|
||||||
|
* PT_LOAD segment, and set the flags explicitly to make segments read-only.
|
||||||
|
*/
|
||||||
|
PHDRS
|
||||||
|
{
|
||||||
|
text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
|
||||||
|
dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
|
||||||
|
note PT_NOTE FLAGS(4); /* PF_R */
|
||||||
|
eh_frame_hdr PT_GNU_EH_FRAME;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This controls what symbols we export from the DSO.
|
||||||
|
*/
|
||||||
|
VERSION
|
||||||
|
{
|
||||||
|
VDSO_VERSION_STRING {
|
||||||
|
global:
|
||||||
|
/*
|
||||||
|
* Has to be there for the kernel to find
|
||||||
|
*/
|
||||||
|
__kernel_gettimeofday;
|
||||||
|
__kernel_clock_gettime;
|
||||||
|
__kernel_clock_getres;
|
||||||
|
|
||||||
|
local: *;
|
||||||
|
};
|
||||||
|
}
|
|
@ -0,0 +1,13 @@
|
||||||
|
#include <linux/init.h>
|
||||||
|
#include <asm/page.h>
|
||||||
|
|
||||||
|
.section ".data.page_aligned"
|
||||||
|
|
||||||
|
.globl vdso32_start, vdso32_end
|
||||||
|
.balign PAGE_SIZE
|
||||||
|
vdso32_start:
|
||||||
|
.incbin "arch/s390/kernel/vdso32/vdso32.so"
|
||||||
|
.balign PAGE_SIZE
|
||||||
|
vdso32_end:
|
||||||
|
|
||||||
|
.previous
|
|
@ -0,0 +1,55 @@
|
||||||
|
# List of files in the vdso, has to be asm only for now
|
||||||
|
|
||||||
|
obj-vdso64 = gettimeofday.o clock_getres.o clock_gettime.o note.o
|
||||||
|
|
||||||
|
# Build rules
|
||||||
|
|
||||||
|
targets := $(obj-vdso64) vdso64.so vdso64.so.dbg
|
||||||
|
obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
|
||||||
|
|
||||||
|
KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS))
|
||||||
|
KBUILD_AFLAGS_64 += -m64 -s
|
||||||
|
|
||||||
|
KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
|
||||||
|
KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin
|
||||||
|
KBUILD_CFLAGS_64 += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
|
||||||
|
$(call ld-option, -Wl$(comma)--hash-style=sysv)
|
||||||
|
|
||||||
|
$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)
|
||||||
|
$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64)
|
||||||
|
|
||||||
|
obj-y += vdso64_wrapper.o
|
||||||
|
extra-y += vdso64.lds
|
||||||
|
CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
|
||||||
|
|
||||||
|
# Force dependency (incbin is bad)
|
||||||
|
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
|
||||||
|
|
||||||
|
# link rule for the .so file, .lds has to be first
|
||||||
|
$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64)
|
||||||
|
$(call if_changed,vdso64ld)
|
||||||
|
|
||||||
|
# strip rule for the .so file
|
||||||
|
$(obj)/%.so: OBJCOPYFLAGS := -S
|
||||||
|
$(obj)/%.so: $(obj)/%.so.dbg FORCE
|
||||||
|
$(call if_changed,objcopy)
|
||||||
|
|
||||||
|
# assembly rules for the .S files
|
||||||
|
$(obj-vdso64): %.o: %.S
|
||||||
|
$(call if_changed_dep,vdso64as)
|
||||||
|
|
||||||
|
# actual build commands
|
||||||
|
quiet_cmd_vdso64ld = VDSO64L $@
|
||||||
|
cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
|
||||||
|
quiet_cmd_vdso64as = VDSO64A $@
|
||||||
|
cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
|
||||||
|
|
||||||
|
# install commands for the unstripped file
|
||||||
|
quiet_cmd_vdso_install = INSTALL $@
|
||||||
|
cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
|
||||||
|
|
||||||
|
vdso64.so: $(obj)/vdso64.so.dbg
|
||||||
|
@mkdir -p $(MODLIB)/vdso
|
||||||
|
$(call cmd,vdso_install)
|
||||||
|
|
||||||
|
vdso_install: vdso64.so
|
|
@ -0,0 +1,39 @@
|
||||||
|
/*
|
||||||
|
* Userland implementation of clock_getres() for 64 bits processes in a
|
||||||
|
* s390 kernel for use in the vDSO
|
||||||
|
*
|
||||||
|
* Copyright IBM Corp. 2008
|
||||||
|
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
|
||||||
|
*
|
||||||
|
* This program is free software; you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU General Public License (version 2 only)
|
||||||
|
* as published by the Free Software Foundation.
|
||||||
|
*/
|
||||||
|
#include <asm/vdso.h>
|
||||||
|
#include <asm/asm-offsets.h>
|
||||||
|
#include <asm/unistd.h>
|
||||||
|
|
||||||
|
.text
|
||||||
|
.align 4
|
||||||
|
.globl __kernel_clock_getres
|
||||||
|
.type __kernel_clock_getres,@function
|
||||||
|
__kernel_clock_getres:
|
||||||
|
.cfi_startproc
|
||||||
|
cghi %r2,CLOCK_REALTIME
|
||||||
|
je 0f
|
||||||
|
cghi %r2,CLOCK_MONOTONIC
|
||||||
|
jne 2f
|
||||||
|
0: ltgr %r3,%r3
|
||||||
|
jz 1f /* res == NULL */
|
||||||
|
larl %r1,3f
|
||||||
|
lg %r0,0(%r1)
|
||||||
|
xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */
|
||||||
|
stg %r0,8(%r3) /* store tp->tv_usec */
|
||||||
|
1: lghi %r2,0
|
||||||
|
br %r14
|
||||||
|
2: lghi %r1,__NR_clock_getres /* fallback to svc */
|
||||||
|
svc 0
|
||||||
|
br %r14
|
||||||
|
3: .quad CLOCK_REALTIME_RES
|
||||||
|
.cfi_endproc
|
||||||
|
.size __kernel_clock_getres,.-__kernel_clock_getres
|
|
@ -0,0 +1,89 @@
|
||||||
|
/*
|
||||||
|
* Userland implementation of clock_gettime() for 64 bits processes in a
|
||||||
|
* s390 kernel for use in the vDSO
|
||||||
|
*
|
||||||
|
* Copyright IBM Corp. 2008
|
||||||
|
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
|
||||||
|
*
|
||||||
|
* This program is free software; you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU General Public License (version 2 only)
|
||||||
|
* as published by the Free Software Foundation.
|
||||||
|
*/
|
||||||
|
#include <asm/vdso.h>
|
||||||
|
#include <asm/asm-offsets.h>
|
||||||
|
#include <asm/unistd.h>
|
||||||
|
|
||||||
|
.text
|
||||||
|
.align 4
|
||||||
|
.globl __kernel_clock_gettime
|
||||||
|
.type __kernel_clock_gettime,@function
|
||||||
|
__kernel_clock_gettime:
|
||||||
|
.cfi_startproc
|
||||||
|
larl %r5,_vdso_data
|
||||||
|
cghi %r2,CLOCK_REALTIME
|
||||||
|
je 4f
|
||||||
|
cghi %r2,CLOCK_MONOTONIC
|
||||||
|
jne 9f
|
||||||
|
|
||||||
|
/* CLOCK_MONOTONIC */
|
||||||
|
ltgr %r3,%r3
|
||||||
|
jz 3f /* tp == NULL */
|
||||||
|
0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
|
||||||
|
tmll %r4,0x0001 /* pending update ? loop */
|
||||||
|
jnz 0b
|
||||||
|
stck 48(%r15) /* Store TOD clock */
|
||||||
|
lg %r1,48(%r15)
|
||||||
|
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
|
||||||
|
mghi %r1,1000
|
||||||
|
srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
|
||||||
|
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
|
||||||
|
lg %r0,__VDSO_XTIME_SEC(%r5)
|
||||||
|
alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */
|
||||||
|
alg %r0,__VDSO_WTOM_SEC(%r5)
|
||||||
|
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
|
||||||
|
jne 0b
|
||||||
|
larl %r5,10f
|
||||||
|
1: clg %r1,0(%r5)
|
||||||
|
jl 2f
|
||||||
|
slg %r1,0(%r5)
|
||||||
|
aghi %r0,1
|
||||||
|
j 1b
|
||||||
|
2: stg %r0,0(%r3) /* store tp->tv_sec */
|
||||||
|
stg %r1,8(%r3) /* store tp->tv_nsec */
|
||||||
|
3: lghi %r2,0
|
||||||
|
br %r14
|
||||||
|
|
||||||
|
/* CLOCK_REALTIME */
|
||||||
|
4: ltr %r3,%r3 /* tp == NULL */
|
||||||
|
jz 8f
|
||||||
|
5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
|
||||||
|
tmll %r4,0x0001 /* pending update ? loop */
|
||||||
|
jnz 5b
|
||||||
|
stck 48(%r15) /* Store TOD clock */
|
||||||
|
lg %r1,48(%r15)
|
||||||
|
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
|
||||||
|
mghi %r1,1000
|
||||||
|
srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
|
||||||
|
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
|
||||||
|
lg %r0,__VDSO_XTIME_SEC(%r5)
|
||||||
|
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
|
||||||
|
jne 5b
|
||||||
|
larl %r5,10f
|
||||||
|
6: clg %r1,0(%r5)
|
||||||
|
jl 7f
|
||||||
|
slg %r1,0(%r5)
|
||||||
|
aghi %r0,1
|
||||||
|
j 6b
|
||||||
|
7: stg %r0,0(%r3) /* store tp->tv_sec */
|
||||||
|
stg %r1,8(%r3) /* store tp->tv_nsec */
|
||||||
|
8: lghi %r2,0
|
||||||
|
br %r14
|
||||||
|
|
||||||
|
/* Fallback to system call */
|
||||||
|
9: lghi %r1,__NR_clock_gettime
|
||||||
|
svc 0
|
||||||
|
br %r14
|
||||||
|
|
||||||
|
10: .quad 1000000000
|
||||||
|
.cfi_endproc
|
||||||
|
.size __kernel_clock_gettime,.-__kernel_clock_gettime
|
|
@ -0,0 +1,56 @@
|
||||||
|
/*
|
||||||
|
* Userland implementation of gettimeofday() for 64 bits processes in a
|
||||||
|
* s390 kernel for use in the vDSO
|
||||||
|
*
|
||||||
|
* Copyright IBM Corp. 2008
|
||||||
|
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
|
||||||
|
*
|
||||||
|
* This program is free software; you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU General Public License (version 2 only)
|
||||||
|
* as published by the Free Software Foundation.
|
||||||
|
*/
|
||||||
|
#include <asm/vdso.h>
|
||||||
|
#include <asm/asm-offsets.h>
|
||||||
|
#include <asm/unistd.h>
|
||||||
|
|
||||||
|
.text
|
||||||
|
.align 4
|
||||||
|
.globl __kernel_gettimeofday
|
||||||
|
.type __kernel_gettimeofday,@function
|
||||||
|
__kernel_gettimeofday:
|
||||||
|
.cfi_startproc
|
||||||
|
larl %r5,_vdso_data
|
||||||
|
0: ltgr %r3,%r3 /* check if tz is NULL */
|
||||||
|
je 1f
|
||||||
|
mvc 0(8,%r3),__VDSO_TIMEZONE(%r5)
|
||||||
|
1: ltgr %r2,%r2 /* check if tv is NULL */
|
||||||
|
je 4f
|
||||||
|
lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
|
||||||
|
tmll %r4,0x0001 /* pending update ? loop */
|
||||||
|
jnz 0b
|
||||||
|
stck 48(%r15) /* Store TOD clock */
|
||||||
|
lg %r1,48(%r15)
|
||||||
|
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
|
||||||
|
mghi %r1,1000
|
||||||
|
srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
|
||||||
|
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */
|
||||||
|
lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */
|
||||||
|
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
|
||||||
|
jne 0b
|
||||||
|
larl %r5,5f
|
||||||
|
2: clg %r1,0(%r5)
|
||||||
|
jl 3f
|
||||||
|
slg %r1,0(%r5)
|
||||||
|
aghi %r0,1
|
||||||
|
j 2b
|
||||||
|
3: stg %r0,0(%r2) /* store tv->tv_sec */
|
||||||
|
slgr %r0,%r0 /* tv_nsec -> tv_usec */
|
||||||
|
ml %r0,8(%r5)
|
||||||
|
srlg %r0,%r0,6
|
||||||
|
stg %r0,8(%r2) /* store tv->tv_usec */
|
||||||
|
4: lghi %r2,0
|
||||||
|
br %r14
|
||||||
|
5: .quad 1000000000
|
||||||
|
.long 274877907
|
||||||
|
.cfi_endproc
|
||||||
|
.size __kernel_gettimeofday,.-__kernel_gettimeofday
|
|
@ -0,0 +1,12 @@
|
||||||
|
/*
|
||||||
|
* This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
|
||||||
|
* Here we can supply some information useful to userland.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <linux/uts.h>
|
||||||
|
#include <linux/version.h>
|
||||||
|
#include <linux/elfnote.h>
|
||||||
|
|
||||||
|
ELFNOTE_START(Linux, 0, "a")
|
||||||
|
.long LINUX_VERSION_CODE
|
||||||
|
ELFNOTE_END
|
|
@ -0,0 +1,138 @@
|
||||||
|
/*
|
||||||
|
* This is the infamous ld script for the 64 bits vdso
|
||||||
|
* library
|
||||||
|
*/
|
||||||
|
#include <asm/vdso.h>
|
||||||
|
|
||||||
|
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
|
||||||
|
OUTPUT_ARCH(s390:64-bit)
|
||||||
|
ENTRY(_start)
|
||||||
|
|
||||||
|
SECTIONS
|
||||||
|
{
|
||||||
|
. = VDSO64_LBASE + SIZEOF_HEADERS;
|
||||||
|
|
||||||
|
.hash : { *(.hash) } :text
|
||||||
|
.gnu.hash : { *(.gnu.hash) }
|
||||||
|
.dynsym : { *(.dynsym) }
|
||||||
|
.dynstr : { *(.dynstr) }
|
||||||
|
.gnu.version : { *(.gnu.version) }
|
||||||
|
.gnu.version_d : { *(.gnu.version_d) }
|
||||||
|
.gnu.version_r : { *(.gnu.version_r) }
|
||||||
|
|
||||||
|
.note : { *(.note.*) } :text :note
|
||||||
|
|
||||||
|
. = ALIGN(16);
|
||||||
|
.text : {
|
||||||
|
*(.text .stub .text.* .gnu.linkonce.t.*)
|
||||||
|
} :text
|
||||||
|
PROVIDE(__etext = .);
|
||||||
|
PROVIDE(_etext = .);
|
||||||
|
PROVIDE(etext = .);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Other stuff is appended to the text segment:
|
||||||
|
*/
|
||||||
|
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
|
||||||
|
.rodata1 : { *(.rodata1) }
|
||||||
|
|
||||||
|
.dynamic : { *(.dynamic) } :text :dynamic
|
||||||
|
|
||||||
|
.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
|
||||||
|
.eh_frame : { KEEP (*(.eh_frame)) } :text
|
||||||
|
.gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) }
|
||||||
|
|
||||||
|
.rela.dyn ALIGN(8) : { *(.rela.dyn) }
|
||||||
|
.got ALIGN(8) : { *(.got .toc) }
|
||||||
|
|
||||||
|
_end = .;
|
||||||
|
PROVIDE(end = .);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Stabs debugging sections are here too.
|
||||||
|
*/
|
||||||
|
.stab 0 : { *(.stab) }
|
||||||
|
.stabstr 0 : { *(.stabstr) }
|
||||||
|
.stab.excl 0 : { *(.stab.excl) }
|
||||||
|
.stab.exclstr 0 : { *(.stab.exclstr) }
|
||||||
|
.stab.index 0 : { *(.stab.index) }
|
||||||
|
.stab.indexstr 0 : { *(.stab.indexstr) }
|
||||||
|
.comment 0 : { *(.comment) }
|
||||||
|
|
||||||
|
/*
|
||||||
|
* DWARF debug sections.
|
||||||
|
* Symbols in the DWARF debugging sections are relative to the
|
||||||
|
* beginning of the section so we begin them at 0.
|
||||||
|
*/
|
||||||
|
/* DWARF 1 */
|
||||||
|
.debug 0 : { *(.debug) }
|
||||||
|
.line 0 : { *(.line) }
|
||||||
|
/* GNU DWARF 1 extensions */
|
||||||
|
.debug_srcinfo 0 : { *(.debug_srcinfo) }
|
||||||
|
.debug_sfnames 0 : { *(.debug_sfnames) }
|
||||||
|
/* DWARF 1.1 and DWARF 2 */
|
||||||
|
.debug_aranges 0 : { *(.debug_aranges) }
|
||||||
|
.debug_pubnames 0 : { *(.debug_pubnames) }
|
||||||
|
/* DWARF 2 */
|
||||||
|
.debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
|
||||||
|
.debug_abbrev 0 : { *(.debug_abbrev) }
|
||||||
|
.debug_line 0 : { *(.debug_line) }
|
||||||
|
.debug_frame 0 : { *(.debug_frame) }
|
||||||
|
.debug_str 0 : { *(.debug_str) }
|
||||||
|
.debug_loc 0 : { *(.debug_loc) }
|
||||||
|
.debug_macinfo 0 : { *(.debug_macinfo) }
|
||||||
|
/* SGI/MIPS DWARF 2 extensions */
|
||||||
|
.debug_weaknames 0 : { *(.debug_weaknames) }
|
||||||
|
.debug_funcnames 0 : { *(.debug_funcnames) }
|
||||||
|
.debug_typenames 0 : { *(.debug_typenames) }
|
||||||
|
.debug_varnames 0 : { *(.debug_varnames) }
|
||||||
|
/* DWARF 3 */
|
||||||
|
.debug_pubtypes 0 : { *(.debug_pubtypes) }
|
||||||
|
.debug_ranges 0 : { *(.debug_ranges) }
|
||||||
|
.gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
|
||||||
|
|
||||||
|
. = ALIGN(4096);
|
||||||
|
PROVIDE(_vdso_data = .);
|
||||||
|
|
||||||
|
/DISCARD/ : {
|
||||||
|
*(.note.GNU-stack)
|
||||||
|
*(.branch_lt)
|
||||||
|
*(.data .data.* .gnu.linkonce.d.* .sdata*)
|
||||||
|
*(.bss .sbss .dynbss .dynsbss)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Very old versions of ld do not recognize this name token; use the constant.
|
||||||
|
*/
|
||||||
|
#define PT_GNU_EH_FRAME 0x6474e550
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We must supply the ELF program headers explicitly to get just one
|
||||||
|
* PT_LOAD segment, and set the flags explicitly to make segments read-only.
|
||||||
|
*/
|
||||||
|
PHDRS
|
||||||
|
{
|
||||||
|
text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
|
||||||
|
dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
|
||||||
|
note PT_NOTE FLAGS(4); /* PF_R */
|
||||||
|
eh_frame_hdr PT_GNU_EH_FRAME;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This controls what symbols we export from the DSO.
|
||||||
|
*/
|
||||||
|
VERSION
|
||||||
|
{
|
||||||
|
VDSO_VERSION_STRING {
|
||||||
|
global:
|
||||||
|
/*
|
||||||
|
* Has to be there for the kernel to find
|
||||||
|
*/
|
||||||
|
__kernel_gettimeofday;
|
||||||
|
__kernel_clock_gettime;
|
||||||
|
__kernel_clock_getres;
|
||||||
|
|
||||||
|
local: *;
|
||||||
|
};
|
||||||
|
}
|
|
@ -0,0 +1,13 @@
|
||||||
|
#include <linux/init.h>
|
||||||
|
#include <asm/page.h>
|
||||||
|
|
||||||
|
.section ".data.page_aligned"
|
||||||
|
|
||||||
|
.globl vdso64_start, vdso64_end
|
||||||
|
.balign PAGE_SIZE
|
||||||
|
vdso64_start:
|
||||||
|
.incbin "arch/s390/kernel/vdso64/vdso64.so"
|
||||||
|
.balign PAGE_SIZE
|
||||||
|
vdso64_end:
|
||||||
|
|
||||||
|
.previous
|
|
@ -27,7 +27,6 @@
|
||||||
static ext_int_info_t ext_int_info_timer;
|
static ext_int_info_t ext_int_info_timer;
|
||||||
static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
|
static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
|
||||||
|
|
||||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
|
||||||
/*
|
/*
|
||||||
* Update process times based on virtual cpu times stored by entry.S
|
* Update process times based on virtual cpu times stored by entry.S
|
||||||
* to the lowcore fields user_timer, system_timer & steal_clock.
|
* to the lowcore fields user_timer, system_timer & steal_clock.
|
||||||
|
@ -125,16 +124,6 @@ static inline void set_vtimer(__u64 expires)
|
||||||
/* store expire time for this CPU timer */
|
/* store expire time for this CPU timer */
|
||||||
__get_cpu_var(virt_cpu_timer).to_expire = expires;
|
__get_cpu_var(virt_cpu_timer).to_expire = expires;
|
||||||
}
|
}
|
||||||
#else
|
|
||||||
static inline void set_vtimer(__u64 expires)
|
|
||||||
{
|
|
||||||
S390_lowcore.last_update_timer = expires;
|
|
||||||
asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
|
|
||||||
|
|
||||||
/* store expire time for this CPU timer */
|
|
||||||
__get_cpu_var(virt_cpu_timer).to_expire = expires;
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
void vtime_start_cpu_timer(void)
|
void vtime_start_cpu_timer(void)
|
||||||
{
|
{
|
||||||
|
|
|
@ -7,6 +7,9 @@
|
||||||
* (C) IBM Corporation 2002-2004
|
* (C) IBM Corporation 2002-2004
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
#define KMSG_COMPONENT "extmem"
|
||||||
|
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
|
||||||
|
|
||||||
#include <linux/kernel.h>
|
#include <linux/kernel.h>
|
||||||
#include <linux/string.h>
|
#include <linux/string.h>
|
||||||
#include <linux/spinlock.h>
|
#include <linux/spinlock.h>
|
||||||
|
@ -24,19 +27,6 @@
|
||||||
#include <asm/cpcmd.h>
|
#include <asm/cpcmd.h>
|
||||||
#include <asm/setup.h>
|
#include <asm/setup.h>
|
||||||
|
|
||||||
#define DCSS_DEBUG /* Debug messages on/off */
|
|
||||||
|
|
||||||
#define DCSS_NAME "extmem"
|
|
||||||
#ifdef DCSS_DEBUG
|
|
||||||
#define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSS_NAME " debug:" x)
|
|
||||||
#else
|
|
||||||
#define PRINT_DEBUG(x...) do {} while (0)
|
|
||||||
#endif
|
|
||||||
#define PRINT_INFO(x...) printk(KERN_INFO DCSS_NAME " info:" x)
|
|
||||||
#define PRINT_WARN(x...) printk(KERN_WARNING DCSS_NAME " warning:" x)
|
|
||||||
#define PRINT_ERR(x...) printk(KERN_ERR DCSS_NAME " error:" x)
|
|
||||||
|
|
||||||
|
|
||||||
#define DCSS_LOADSHR 0x00
|
#define DCSS_LOADSHR 0x00
|
||||||
#define DCSS_LOADNSR 0x04
|
#define DCSS_LOADNSR 0x04
|
||||||
#define DCSS_PURGESEG 0x08
|
#define DCSS_PURGESEG 0x08
|
||||||
|
@ -286,7 +276,7 @@ query_segment_type (struct dcss_segment *seg)
|
||||||
goto out_free;
|
goto out_free;
|
||||||
}
|
}
|
||||||
if (diag_cc > 1) {
|
if (diag_cc > 1) {
|
||||||
PRINT_WARN ("segment_type: diag returned error %ld\n", vmrc);
|
pr_warning("Querying a DCSS type failed with rc=%ld\n", vmrc);
|
||||||
rc = dcss_diag_translate_rc (vmrc);
|
rc = dcss_diag_translate_rc (vmrc);
|
||||||
goto out_free;
|
goto out_free;
|
||||||
}
|
}
|
||||||
|
@ -368,7 +358,6 @@ query_segment_type (struct dcss_segment *seg)
|
||||||
* -EIO : could not perform query diagnose
|
* -EIO : could not perform query diagnose
|
||||||
* -ENOENT : no such segment
|
* -ENOENT : no such segment
|
||||||
* -ENOTSUPP: multi-part segment cannot be used with linux
|
* -ENOTSUPP: multi-part segment cannot be used with linux
|
||||||
* -ENOSPC : segment cannot be used (overlaps with storage)
|
|
||||||
* -ENOMEM : out of memory
|
* -ENOMEM : out of memory
|
||||||
* 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h
|
* 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h
|
||||||
*/
|
*/
|
||||||
|
@ -480,9 +469,8 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
|
||||||
goto out_resource;
|
goto out_resource;
|
||||||
}
|
}
|
||||||
if (diag_cc > 1) {
|
if (diag_cc > 1) {
|
||||||
PRINT_WARN ("segment_load: could not load segment %s - "
|
pr_warning("Loading DCSS %s failed with rc=%ld\n", name,
|
||||||
"diag returned error (%ld)\n",
|
end_addr);
|
||||||
name, end_addr);
|
|
||||||
rc = dcss_diag_translate_rc(end_addr);
|
rc = dcss_diag_translate_rc(end_addr);
|
||||||
dcss_diag(&purgeseg_scode, seg->dcss_name,
|
dcss_diag(&purgeseg_scode, seg->dcss_name,
|
||||||
&dummy, &dummy);
|
&dummy, &dummy);
|
||||||
|
@ -496,15 +484,13 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
|
||||||
*addr = seg->start_addr;
|
*addr = seg->start_addr;
|
||||||
*end = seg->end;
|
*end = seg->end;
|
||||||
if (do_nonshared)
|
if (do_nonshared)
|
||||||
PRINT_INFO ("segment_load: loaded segment %s range %p .. %p "
|
pr_info("DCSS %s of range %p to %p and type %s loaded as "
|
||||||
"type %s in non-shared mode\n", name,
|
"exclusive-writable\n", name, (void*) seg->start_addr,
|
||||||
(void*)seg->start_addr, (void*)seg->end,
|
(void*) seg->end, segtype_string[seg->vm_segtype]);
|
||||||
segtype_string[seg->vm_segtype]);
|
|
||||||
else {
|
else {
|
||||||
PRINT_INFO ("segment_load: loaded segment %s range %p .. %p "
|
pr_info("DCSS %s of range %p to %p and type %s loaded in "
|
||||||
"type %s in shared mode\n", name,
|
"shared access mode\n", name, (void*) seg->start_addr,
|
||||||
(void*)seg->start_addr, (void*)seg->end,
|
(void*) seg->end, segtype_string[seg->vm_segtype]);
|
||||||
segtype_string[seg->vm_segtype]);
|
|
||||||
}
|
}
|
||||||
goto out;
|
goto out;
|
||||||
out_resource:
|
out_resource:
|
||||||
|
@ -593,14 +579,14 @@ segment_modify_shared (char *name, int do_nonshared)
|
||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
}
|
}
|
||||||
if (do_nonshared == seg->do_nonshared) {
|
if (do_nonshared == seg->do_nonshared) {
|
||||||
PRINT_INFO ("segment_modify_shared: not reloading segment %s"
|
pr_info("DCSS %s is already in the requested access "
|
||||||
" - already in requested mode\n",name);
|
"mode\n", name);
|
||||||
rc = 0;
|
rc = 0;
|
||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
}
|
}
|
||||||
if (atomic_read (&seg->ref_count) != 1) {
|
if (atomic_read (&seg->ref_count) != 1) {
|
||||||
PRINT_WARN ("segment_modify_shared: not reloading segment %s - "
|
pr_warning("DCSS %s is in use and cannot be reloaded\n",
|
||||||
"segment is in use by other driver(s)\n",name);
|
name);
|
||||||
rc = -EAGAIN;
|
rc = -EAGAIN;
|
||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
}
|
}
|
||||||
|
@ -613,8 +599,8 @@ segment_modify_shared (char *name, int do_nonshared)
|
||||||
seg->res->flags |= IORESOURCE_READONLY;
|
seg->res->flags |= IORESOURCE_READONLY;
|
||||||
|
|
||||||
if (request_resource(&iomem_resource, seg->res)) {
|
if (request_resource(&iomem_resource, seg->res)) {
|
||||||
PRINT_WARN("segment_modify_shared: could not reload segment %s"
|
pr_warning("DCSS %s overlaps with used memory resources "
|
||||||
" - overlapping resources\n", name);
|
"and cannot be reloaded\n", name);
|
||||||
rc = -EBUSY;
|
rc = -EBUSY;
|
||||||
kfree(seg->res);
|
kfree(seg->res);
|
||||||
goto out_del_mem;
|
goto out_del_mem;
|
||||||
|
@ -632,9 +618,8 @@ segment_modify_shared (char *name, int do_nonshared)
|
||||||
goto out_del_res;
|
goto out_del_res;
|
||||||
}
|
}
|
||||||
if (diag_cc > 1) {
|
if (diag_cc > 1) {
|
||||||
PRINT_WARN ("segment_modify_shared: could not reload segment %s"
|
pr_warning("Reloading DCSS %s failed with rc=%ld\n", name,
|
||||||
" - diag returned error (%ld)\n",
|
end_addr);
|
||||||
name, end_addr);
|
|
||||||
rc = dcss_diag_translate_rc(end_addr);
|
rc = dcss_diag_translate_rc(end_addr);
|
||||||
goto out_del_res;
|
goto out_del_res;
|
||||||
}
|
}
|
||||||
|
@ -673,8 +658,7 @@ segment_unload(char *name)
|
||||||
mutex_lock(&dcss_lock);
|
mutex_lock(&dcss_lock);
|
||||||
seg = segment_by_name (name);
|
seg = segment_by_name (name);
|
||||||
if (seg == NULL) {
|
if (seg == NULL) {
|
||||||
PRINT_ERR ("could not find segment %s in segment_unload, "
|
pr_err("Unloading unknown DCSS %s failed\n", name);
|
||||||
"please report to linux390@de.ibm.com\n",name);
|
|
||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
}
|
}
|
||||||
if (atomic_dec_return(&seg->ref_count) != 0)
|
if (atomic_dec_return(&seg->ref_count) != 0)
|
||||||
|
@ -709,8 +693,7 @@ segment_save(char *name)
|
||||||
seg = segment_by_name (name);
|
seg = segment_by_name (name);
|
||||||
|
|
||||||
if (seg == NULL) {
|
if (seg == NULL) {
|
||||||
PRINT_ERR("could not find segment %s in segment_save, please "
|
pr_err("Saving unknown DCSS %s failed\n", name);
|
||||||
"report to linux390@de.ibm.com\n", name);
|
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -727,14 +710,14 @@ segment_save(char *name)
|
||||||
response = 0;
|
response = 0;
|
||||||
cpcmd(cmd1, NULL, 0, &response);
|
cpcmd(cmd1, NULL, 0, &response);
|
||||||
if (response) {
|
if (response) {
|
||||||
PRINT_ERR("segment_save: DEFSEG failed with response code %i\n",
|
pr_err("Saving a DCSS failed with DEFSEG response code "
|
||||||
response);
|
"%i\n", response);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
cpcmd(cmd2, NULL, 0, &response);
|
cpcmd(cmd2, NULL, 0, &response);
|
||||||
if (response) {
|
if (response) {
|
||||||
PRINT_ERR("segment_save: SAVESEG failed with response code %i\n",
|
pr_err("Saving a DCSS failed with SAVESEG response code "
|
||||||
response);
|
"%i\n", response);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
out:
|
out:
|
||||||
|
@ -749,44 +732,41 @@ void segment_warning(int rc, char *seg_name)
|
||||||
{
|
{
|
||||||
switch (rc) {
|
switch (rc) {
|
||||||
case -ENOENT:
|
case -ENOENT:
|
||||||
PRINT_WARN("cannot load/query segment %s, "
|
pr_err("DCSS %s cannot be loaded or queried\n", seg_name);
|
||||||
"does not exist\n", seg_name);
|
|
||||||
break;
|
break;
|
||||||
case -ENOSYS:
|
case -ENOSYS:
|
||||||
PRINT_WARN("cannot load/query segment %s, "
|
pr_err("DCSS %s cannot be loaded or queried without "
|
||||||
"not running on VM\n", seg_name);
|
"z/VM\n", seg_name);
|
||||||
break;
|
break;
|
||||||
case -EIO:
|
case -EIO:
|
||||||
PRINT_WARN("cannot load/query segment %s, "
|
pr_err("Loading or querying DCSS %s resulted in a "
|
||||||
"hardware error\n", seg_name);
|
"hardware error\n", seg_name);
|
||||||
break;
|
break;
|
||||||
case -ENOTSUPP:
|
case -ENOTSUPP:
|
||||||
PRINT_WARN("cannot load/query segment %s, "
|
pr_err("DCSS %s has multiple page ranges and cannot be "
|
||||||
"is a multi-part segment\n", seg_name);
|
"loaded or queried\n", seg_name);
|
||||||
break;
|
break;
|
||||||
case -ENOSPC:
|
case -ENOSPC:
|
||||||
PRINT_WARN("cannot load/query segment %s, "
|
pr_err("DCSS %s overlaps with used storage and cannot "
|
||||||
"overlaps with storage\n", seg_name);
|
"be loaded\n", seg_name);
|
||||||
break;
|
break;
|
||||||
case -EBUSY:
|
case -EBUSY:
|
||||||
PRINT_WARN("cannot load/query segment %s, "
|
pr_err("%s needs used memory resources and cannot be "
|
||||||
"overlaps with already loaded dcss\n", seg_name);
|
"loaded or queried\n", seg_name);
|
||||||
break;
|
break;
|
||||||
case -EPERM:
|
case -EPERM:
|
||||||
PRINT_WARN("cannot load/query segment %s, "
|
pr_err("DCSS %s is already loaded in a different access "
|
||||||
"already loaded in incompatible mode\n", seg_name);
|
"mode\n", seg_name);
|
||||||
break;
|
break;
|
||||||
case -ENOMEM:
|
case -ENOMEM:
|
||||||
PRINT_WARN("cannot load/query segment %s, "
|
pr_err("There is not enough memory to load or query "
|
||||||
"out of memory\n", seg_name);
|
"DCSS %s\n", seg_name);
|
||||||
break;
|
break;
|
||||||
case -ERANGE:
|
case -ERANGE:
|
||||||
PRINT_WARN("cannot load/query segment %s, "
|
pr_err("DCSS %s exceeds the kernel mapping range (%lu) "
|
||||||
"exceeds kernel mapping range\n", seg_name);
|
"and cannot be loaded\n", seg_name, VMEM_MAX_PHYS);
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
PRINT_WARN("cannot load/query segment %s, "
|
|
||||||
"return value %i\n", seg_name, rc);
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -202,7 +202,7 @@ do { \
|
||||||
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
|
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
|
||||||
struct linux_binprm;
|
struct linux_binprm;
|
||||||
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
|
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
|
||||||
int executable_stack);
|
int uses_interp);
|
||||||
|
|
||||||
extern unsigned int vdso_enabled;
|
extern unsigned int vdso_enabled;
|
||||||
extern void __kernel_vsyscall;
|
extern void __kernel_vsyscall;
|
||||||
|
|
|
@ -59,8 +59,7 @@ int __init vsyscall_init(void)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Setup a VMA at program startup for the vsyscall page */
|
/* Setup a VMA at program startup for the vsyscall page */
|
||||||
int arch_setup_additional_pages(struct linux_binprm *bprm,
|
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
|
||||||
int executable_stack)
|
|
||||||
{
|
{
|
||||||
struct mm_struct *mm = current->mm;
|
struct mm_struct *mm = current->mm;
|
||||||
unsigned long addr;
|
unsigned long addr;
|
||||||
|
|
|
@ -325,7 +325,7 @@ struct linux_binprm;
|
||||||
|
|
||||||
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
|
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
|
||||||
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
|
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
|
||||||
int executable_stack);
|
int uses_interp);
|
||||||
|
|
||||||
extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
|
extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
|
||||||
#define compat_arch_setup_additional_pages syscall32_setup_pages
|
#define compat_arch_setup_additional_pages syscall32_setup_pages
|
||||||
|
|
|
@ -310,7 +310,7 @@ int __init sysenter_setup(void)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Setup a VMA at program startup for the vsyscall page */
|
/* Setup a VMA at program startup for the vsyscall page */
|
||||||
int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
|
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
|
||||||
{
|
{
|
||||||
struct mm_struct *mm = current->mm;
|
struct mm_struct *mm = current->mm;
|
||||||
unsigned long addr;
|
unsigned long addr;
|
||||||
|
|
|
@ -98,7 +98,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
|
||||||
|
|
||||||
/* Setup a VMA at program startup for the vsyscall page.
|
/* Setup a VMA at program startup for the vsyscall page.
|
||||||
Not called for compat tasks */
|
Not called for compat tasks */
|
||||||
int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
|
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
|
||||||
{
|
{
|
||||||
struct mm_struct *mm = current->mm;
|
struct mm_struct *mm = current->mm;
|
||||||
unsigned long addr;
|
unsigned long addr;
|
||||||
|
|
|
@ -622,6 +622,16 @@ config HVC_BEAT
|
||||||
help
|
help
|
||||||
Toshiba's Cell Reference Set Beat Console device driver
|
Toshiba's Cell Reference Set Beat Console device driver
|
||||||
|
|
||||||
|
config HVC_IUCV
|
||||||
|
bool "z/VM IUCV Hypervisor console support (VM only)"
|
||||||
|
depends on S390
|
||||||
|
select HVC_DRIVER
|
||||||
|
select IUCV
|
||||||
|
default y
|
||||||
|
help
|
||||||
|
This driver provides a Hypervisor console (HVC) back-end to access
|
||||||
|
a Linux (console) terminal via a z/VM IUCV communication path.
|
||||||
|
|
||||||
config HVC_XEN
|
config HVC_XEN
|
||||||
bool "Xen Hypervisor Console support"
|
bool "Xen Hypervisor Console support"
|
||||||
depends on XEN
|
depends on XEN
|
||||||
|
|
|
@ -50,6 +50,7 @@ obj-$(CONFIG_HVC_BEAT) += hvc_beat.o
|
||||||
obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
|
obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
|
||||||
obj-$(CONFIG_HVC_IRQ) += hvc_irq.o
|
obj-$(CONFIG_HVC_IRQ) += hvc_irq.o
|
||||||
obj-$(CONFIG_HVC_XEN) += hvc_xen.o
|
obj-$(CONFIG_HVC_XEN) += hvc_xen.o
|
||||||
|
obj-$(CONFIG_HVC_IUCV) += hvc_iucv.o
|
||||||
obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
|
obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
|
||||||
obj-$(CONFIG_RAW_DRIVER) += raw.o
|
obj-$(CONFIG_RAW_DRIVER) += raw.o
|
||||||
obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
|
obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
|
||||||
|
|
|
@ -0,0 +1,850 @@
|
||||||
|
/*
|
||||||
|
* hvc_iucv.c - z/VM IUCV back-end for the Hypervisor Console (HVC)
|
||||||
|
*
|
||||||
|
* This back-end for HVC provides terminal access via
|
||||||
|
* z/VM IUCV communication paths.
|
||||||
|
*
|
||||||
|
* Copyright IBM Corp. 2008.
|
||||||
|
*
|
||||||
|
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
|
||||||
|
*/
|
||||||
|
#define KMSG_COMPONENT "hvc_iucv"
|
||||||
|
|
||||||
|
#include <linux/types.h>
|
||||||
|
#include <asm/ebcdic.h>
|
||||||
|
#include <linux/mempool.h>
|
||||||
|
#include <linux/module.h>
|
||||||
|
#include <linux/tty.h>
|
||||||
|
#include <net/iucv/iucv.h>
|
||||||
|
|
||||||
|
#include "hvc_console.h"
|
||||||
|
|
||||||
|
|
||||||
|
/* HVC backend for z/VM IUCV */
|
||||||
|
#define HVC_IUCV_MAGIC 0xc9e4c3e5
|
||||||
|
#define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS
|
||||||
|
#define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)
|
||||||
|
|
||||||
|
/* IUCV TTY message */
|
||||||
|
#define MSG_VERSION 0x02 /* Message version */
|
||||||
|
#define MSG_TYPE_ERROR 0x01 /* Error message */
|
||||||
|
#define MSG_TYPE_TERMENV 0x02 /* Terminal environment variable */
|
||||||
|
#define MSG_TYPE_TERMIOS 0x04 /* Terminal IO struct update */
|
||||||
|
#define MSG_TYPE_WINSIZE 0x08 /* Terminal window size update */
|
||||||
|
#define MSG_TYPE_DATA 0x10 /* Terminal data */
|
||||||
|
|
||||||
|
#define MSG_SIZE(s) ((s) + offsetof(struct iucv_tty_msg, data))
|
||||||
|
struct iucv_tty_msg {
|
||||||
|
u8 version; /* Message version */
|
||||||
|
u8 type; /* Message type */
|
||||||
|
#define MSG_MAX_DATALEN (~(u16)0)
|
||||||
|
u16 datalen; /* Payload length */
|
||||||
|
u8 data[]; /* Payload buffer */
|
||||||
|
} __attribute__((packed));
|
||||||
|
|
||||||
|
enum iucv_state_t {
|
||||||
|
IUCV_DISCONN = 0,
|
||||||
|
IUCV_CONNECTED = 1,
|
||||||
|
IUCV_SEVERED = 2,
|
||||||
|
};
|
||||||
|
|
||||||
|
enum tty_state_t {
|
||||||
|
TTY_CLOSED = 0,
|
||||||
|
TTY_OPENED = 1,
|
||||||
|
};
|
||||||
|
|
||||||
|
struct hvc_iucv_private {
|
||||||
|
struct hvc_struct *hvc; /* HVC console struct reference */
|
||||||
|
u8 srv_name[8]; /* IUCV service name (ebcdic) */
|
||||||
|
enum iucv_state_t iucv_state; /* IUCV connection status */
|
||||||
|
enum tty_state_t tty_state; /* TTY status */
|
||||||
|
struct iucv_path *path; /* IUCV path pointer */
|
||||||
|
spinlock_t lock; /* hvc_iucv_private lock */
|
||||||
|
struct list_head tty_outqueue; /* outgoing IUCV messages */
|
||||||
|
struct list_head tty_inqueue; /* incoming IUCV messages */
|
||||||
|
};
|
||||||
|
|
||||||
|
struct iucv_tty_buffer {
|
||||||
|
struct list_head list; /* list pointer */
|
||||||
|
struct iucv_message msg; /* store an incoming IUCV message */
|
||||||
|
size_t offset; /* data buffer offset */
|
||||||
|
struct iucv_tty_msg *mbuf; /* buffer to store input/output data */
|
||||||
|
};
|
||||||
|
|
||||||
|
/* IUCV callback handler */
|
||||||
|
static int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]);
|
||||||
|
static void hvc_iucv_path_severed(struct iucv_path *, u8[16]);
|
||||||
|
static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
|
||||||
|
static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);
|
||||||
|
|
||||||
|
|
||||||
|
/* Kernel module parameters */
|
||||||
|
static unsigned long hvc_iucv_devices;
|
||||||
|
|
||||||
|
/* Array of allocated hvc iucv tty lines... */
|
||||||
|
static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
|
||||||
|
|
||||||
|
/* Kmem cache and mempool for iucv_tty_buffer elements */
|
||||||
|
static struct kmem_cache *hvc_iucv_buffer_cache;
|
||||||
|
static mempool_t *hvc_iucv_mempool;
|
||||||
|
|
||||||
|
/* IUCV handler callback functions */
|
||||||
|
static struct iucv_handler hvc_iucv_handler = {
|
||||||
|
.path_pending = hvc_iucv_path_pending,
|
||||||
|
.path_severed = hvc_iucv_path_severed,
|
||||||
|
.message_complete = hvc_iucv_msg_complete,
|
||||||
|
.message_pending = hvc_iucv_msg_pending,
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
|
||||||
|
* @num: The HVC virtual terminal number (vtermno)
|
||||||
|
*
|
||||||
|
* This function returns the struct hvc_iucv_private instance that corresponds
|
||||||
|
* to the HVC virtual terminal number specified as parameter @num.
|
||||||
|
*/
|
||||||
|
struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
|
||||||
|
{
|
||||||
|
if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
|
||||||
|
return NULL;
|
||||||
|
return hvc_iucv_table[num - HVC_IUCV_MAGIC];
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* alloc_tty_buffer() - Returns a new struct iucv_tty_buffer element.
|
||||||
|
* @size: Size of the internal buffer used to store data.
|
||||||
|
* @flags: Memory allocation flags passed to mempool.
|
||||||
|
*
|
||||||
|
* This function allocates a new struct iucv_tty_buffer element and, optionally,
|
||||||
|
* allocates an internal data buffer with the specified size @size.
|
||||||
|
* Note: The total message size arises from the internal buffer size and the
|
||||||
|
* members of the iucv_tty_msg structure.
|
||||||
|
*
|
||||||
|
* The function returns NULL if memory allocation has failed.
|
||||||
|
*/
|
||||||
|
static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
|
||||||
|
{
|
||||||
|
struct iucv_tty_buffer *bufp;
|
||||||
|
|
||||||
|
bufp = mempool_alloc(hvc_iucv_mempool, flags);
|
||||||
|
if (!bufp)
|
||||||
|
return NULL;
|
||||||
|
memset(bufp, 0, sizeof(struct iucv_tty_buffer));
|
||||||
|
|
||||||
|
if (size > 0) {
|
||||||
|
bufp->msg.length = MSG_SIZE(size);
|
||||||
|
bufp->mbuf = kmalloc(bufp->msg.length, flags);
|
||||||
|
if (!bufp->mbuf) {
|
||||||
|
mempool_free(bufp, hvc_iucv_mempool);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
bufp->mbuf->version = MSG_VERSION;
|
||||||
|
bufp->mbuf->type = MSG_TYPE_DATA;
|
||||||
|
bufp->mbuf->datalen = (u16) size;
|
||||||
|
}
|
||||||
|
return bufp;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
|
||||||
|
* @bufp: Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
|
||||||
|
*
|
||||||
|
* The destroy_tty_buffer() function frees the internal data buffer and returns
|
||||||
|
* the struct iucv_tty_buffer element back to the mempool for freeing.
|
||||||
|
*/
|
||||||
|
static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
|
||||||
|
{
|
||||||
|
kfree(bufp->mbuf);
|
||||||
|
mempool_free(bufp, hvc_iucv_mempool);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
|
||||||
|
* @list: List head pointer to a list containing struct iucv_tty_buffer
|
||||||
|
* elements.
|
||||||
|
*
|
||||||
|
* Calls destroy_tty_buffer() for each struct iucv_tty_buffer element in the
|
||||||
|
* list @list.
|
||||||
|
*/
|
||||||
|
static void destroy_tty_buffer_list(struct list_head *list)
|
||||||
|
{
|
||||||
|
struct iucv_tty_buffer *ent, *next;
|
||||||
|
|
||||||
|
list_for_each_entry_safe(ent, next, list, list) {
|
||||||
|
list_del(&ent->list);
|
||||||
|
destroy_tty_buffer(ent);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* hvc_iucv_write() - Receive IUCV message write data to HVC console buffer.
|
||||||
|
* @priv: Pointer to hvc_iucv_private structure.
|
||||||
|
* @buf: HVC console buffer for writing received terminal data.
|
||||||
|
* @count: HVC console buffer size.
|
||||||
|
* @has_more_data: Pointer to an int variable.
|
||||||
|
*
|
||||||
|
* The function picks up pending messages from the input queue and receives
|
||||||
|
* the message data that is then written to the specified buffer @buf.
|
||||||
|
* If the buffer size @count is less than the data message size, then the
|
||||||
|
* message is kept on the input queue and @has_more_data is set to 1.
|
||||||
|
* If the message data has been entirely written, the message is removed from
|
||||||
|
* the input queue.
|
||||||
|
*
|
||||||
|
* The function returns the number of bytes written to the terminal, zero if
|
||||||
|
* there are no pending data messages available or if there is no established
|
||||||
|
* IUCV path.
|
||||||
|
* If the IUCV path has been severed, then -EPIPE is returned to cause a
|
||||||
|
* hang up (that is issued by the HVC console layer).
|
||||||
|
*/
|
||||||
|
static int hvc_iucv_write(struct hvc_iucv_private *priv,
|
||||||
|
char *buf, int count, int *has_more_data)
|
||||||
|
{
|
||||||
|
struct iucv_tty_buffer *rb;
|
||||||
|
int written;
|
||||||
|
int rc;
|
||||||
|
|
||||||
|
/* Immediately return if there is no IUCV connection */
|
||||||
|
if (priv->iucv_state == IUCV_DISCONN)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
/* If the IUCV path has been severed, return -EPIPE to inform the
|
||||||
|
* hvc console layer to hang up the tty device. */
|
||||||
|
if (priv->iucv_state == IUCV_SEVERED)
|
||||||
|
return -EPIPE;
|
||||||
|
|
||||||
|
/* check if there are pending messages */
|
||||||
|
if (list_empty(&priv->tty_inqueue))
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
/* receive a iucv message and flip data to the tty (ldisc) */
|
||||||
|
rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);
|
||||||
|
|
||||||
|
written = 0;
|
||||||
|
if (!rb->mbuf) { /* message not yet received ... */
|
||||||
|
/* allocate mem to store msg data; if no memory is available
|
||||||
|
* then leave the buffer on the list and re-try later */
|
||||||
|
rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC);
|
||||||
|
if (!rb->mbuf)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
|
rc = __iucv_message_receive(priv->path, &rb->msg, 0,
|
||||||
|
rb->mbuf, rb->msg.length, NULL);
|
||||||
|
switch (rc) {
|
||||||
|
case 0: /* Successful */
|
||||||
|
break;
|
||||||
|
case 2: /* No message found */
|
||||||
|
case 9: /* Message purged */
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
written = -EIO;
|
||||||
|
}
|
||||||
|
/* remove buffer if an error has occured or received data
|
||||||
|
* is not correct */
|
||||||
|
if (rc || (rb->mbuf->version != MSG_VERSION) ||
|
||||||
|
(rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
|
||||||
|
goto out_remove_buffer;
|
||||||
|
}
|
||||||
|
|
||||||
|
switch (rb->mbuf->type) {
|
||||||
|
case MSG_TYPE_DATA:
|
||||||
|
written = min_t(int, rb->mbuf->datalen - rb->offset, count);
|
||||||
|
memcpy(buf, rb->mbuf->data + rb->offset, written);
|
||||||
|
if (written < (rb->mbuf->datalen - rb->offset)) {
|
||||||
|
rb->offset += written;
|
||||||
|
*has_more_data = 1;
|
||||||
|
goto out_written;
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
|
||||||
|
case MSG_TYPE_WINSIZE:
|
||||||
|
if (rb->mbuf->datalen != sizeof(struct winsize))
|
||||||
|
break;
|
||||||
|
hvc_resize(priv->hvc, *((struct winsize *)rb->mbuf->data));
|
||||||
|
break;
|
||||||
|
|
||||||
|
case MSG_TYPE_ERROR: /* ignored ... */
|
||||||
|
case MSG_TYPE_TERMENV: /* ignored ... */
|
||||||
|
case MSG_TYPE_TERMIOS: /* ignored ... */
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
out_remove_buffer:
|
||||||
|
list_del(&rb->list);
|
||||||
|
destroy_tty_buffer(rb);
|
||||||
|
*has_more_data = !list_empty(&priv->tty_inqueue);
|
||||||
|
|
||||||
|
out_written:
|
||||||
|
return written;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* hvc_iucv_get_chars() - HVC get_chars operation.
|
||||||
|
* @vtermno: HVC virtual terminal number.
|
||||||
|
* @buf: Pointer to a buffer to store data
|
||||||
|
* @count: Size of buffer available for writing
|
||||||
|
*
|
||||||
|
* The hvc_console thread calls this method to read characters from
|
||||||
|
* the terminal backend. If an IUCV communication path has been established,
|
||||||
|
* pending IUCV messages are received and data is copied into buffer @buf
|
||||||
|
* up to @count bytes.
|
||||||
|
*
|
||||||
|
* Locking: The routine gets called under an irqsave() spinlock; and
|
||||||
|
* the routine locks the struct hvc_iucv_private->lock to call
|
||||||
|
* helper functions.
|
||||||
|
*/
|
||||||
|
static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
|
||||||
|
{
|
||||||
|
struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
|
||||||
|
int written;
|
||||||
|
int has_more_data;
|
||||||
|
|
||||||
|
if (count <= 0)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
if (!priv)
|
||||||
|
return -ENODEV;
|
||||||
|
|
||||||
|
spin_lock(&priv->lock);
|
||||||
|
has_more_data = 0;
|
||||||
|
written = hvc_iucv_write(priv, buf, count, &has_more_data);
|
||||||
|
spin_unlock(&priv->lock);
|
||||||
|
|
||||||
|
/* if there are still messages on the queue... schedule another run */
|
||||||
|
if (has_more_data)
|
||||||
|
hvc_kick();
|
||||||
|
|
||||||
|
return written;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * hvc_iucv_send() - Send an IUCV message containing terminal data.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 * @buf:	Buffer containing data to send.
 * @count:	Size of buffer and amount of data to send.
 *
 * If an IUCV communication path is established, the function copies the buffer
 * data to a newly allocated struct iucv_tty_buffer element, sends the data and
 * puts the element to the outqueue.
 *
 * If there is no IUCV communication path established, the function returns 0.
 * If an existing IUCV communication path has been severed, the function returns
 * -EPIPE (can be passed to HVC layer to cause a tty hangup).
 */
static int hvc_iucv_send(struct hvc_iucv_private *priv, const char *buf,
			 int count)
{
	struct iucv_tty_buffer *sb;
	int rc;
	u16 len;

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	if (priv->iucv_state == IUCV_DISCONN)
		return 0;

	len = min_t(u16, MSG_MAX_DATALEN, count);

	/* allocate internal buffer to store msg data and also compute total
	 * message length */
	sb = alloc_tty_buffer(len, GFP_ATOMIC);
	if (!sb)
		return -ENOMEM;

	sb->mbuf->datalen = len;
	memcpy(sb->mbuf->data, buf, len);

	list_add_tail(&sb->list, &priv->tty_outqueue);

	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
				 (void *) sb->mbuf, sb->msg.length);
	if (rc) {
		list_del(&sb->list);
		destroy_tty_buffer(sb);
		len = 0;
	}

	return len;
}

/**
 * hvc_iucv_put_chars() - HVC put_chars operation.
 * @vtermno:	HVC virtual terminal number.
 * @buf:	Pointer to a buffer to read data from
 * @count:	Size of buffer available for reading
 *
 * The hvc_console thread calls this method to write characters to the
 * terminal backend.
 * The function calls hvc_iucv_send() under the lock of the
 * struct hvc_iucv_private instance that corresponds to the tty @vtermno.
 *
 * Locking:	The method gets called under an irqsave() spinlock; and
 *		locks struct hvc_iucv_private->lock.
 */
static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
{
	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
	int sent;

	if (count <= 0)
		return 0;

	if (!priv)
		return -ENODEV;

	spin_lock(&priv->lock);
	sent = hvc_iucv_send(priv, buf, count);
	spin_unlock(&priv->lock);

	return sent;
}

/**
 * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc): the index of a
 *	struct hvc_iucv_private instance.
 *
 * The function sets the tty state to TTY_OPENED for the struct
 * hvc_iucv_private instance that is derived from @id. Always returns 0.
 *
 * Locking:	struct hvc_iucv_private->lock, spin_lock_bh
 */
static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return 0;

	spin_lock_bh(&priv->lock);
	priv->tty_state = TTY_OPENED;
	spin_unlock_bh(&priv->lock);

	return 0;
}

/**
 * hvc_iucv_cleanup() - Clean up function if the tty portion is finally closed.
 * @priv:	Pointer to the struct hvc_iucv_private instance.
 *
 * The function severs the established IUCV communication path (if any),
 * destroys the struct iucv_tty_buffer elements from the in- and outqueue,
 * and finally resets the states to TTY_CLOSED and IUCV_DISCONN.
 */
static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
{
	destroy_tty_buffer_list(&priv->tty_outqueue);
	destroy_tty_buffer_list(&priv->tty_inqueue);

	priv->tty_state = TTY_CLOSED;
	priv->iucv_state = IUCV_DISCONN;
}

/**
 * hvc_iucv_notifier_hangup() - HVC notifier for tty hangups.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc): the index of a
 *	struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC backend that a tty hangup (carrier loss,
 * virtual or otherwise) has occurred.
 *
 * The HVC backend for z/VM IUCV ignores virtual hangups (vhangup()), to keep
 * an existing IUCV communication path established.
 * (Background: vhangup() is called from user space (by getty or login) to
 *  disable writing to the tty by other applications.)
 *
 * If the tty has been opened (e.g. getty) and an established IUCV path has
 * been severed (we caused the tty hangup in that case), then the function
 * invokes hvc_iucv_cleanup() to clean up.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	spin_lock_bh(&priv->lock);
	/* NOTE: If the hangup was scheduled by ourself (from the iucv
	 *	 path_severed callback [IUCV_SEVERED]), then we have to
	 *	 finally clean up the tty backend structure and set state to
	 *	 TTY_CLOSED.
	 *
	 *	 If the tty was hung up otherwise (e.g. vhangup()), then we
	 *	 ignore this hangup and keep an established IUCV path open...
	 *	 (...the reason is that we are not able to connect back to the
	 *	 client if we disconnect on hang up) */
	priv->tty_state = TTY_CLOSED;

	if (priv->iucv_state == IUCV_SEVERED)
		hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC backend that the last tty device file
 * descriptor has been closed.
 * The function calls hvc_iucv_cleanup() to clean up the struct
 * hvc_iucv_private instance.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;
	struct iucv_path	*path;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	spin_lock_bh(&priv->lock);
	path = priv->path;		/* save reference to IUCV path */
	priv->path = NULL;
	hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);

	/* sever IUCV path outside of priv->lock due to lock ordering of:
	 * priv->lock <--> iucv_table_lock */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/**
 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
 * @path:	Pending path (struct iucv_path)
 * @ipvmid:	Originator z/VM system identifier
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * The function uses the @ipuser data to determine whether the pending
 * path belongs to a terminal managed by this HVC backend.
 * If the check is successful, then an additional check is done to ensure
 * that a terminal cannot be accessed multiple times (only one connection
 * to a terminal is allowed). In that particular case, the pending path is
 * severed. If it is the first connection, the pending path is accepted and
 * associated to the struct hvc_iucv_private. The iucv state is updated to
 * reflect that a communication path has been established.
 *
 * Returns 0 if the path belongs to a terminal managed by this HVC backend;
 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static int hvc_iucv_path_pending(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	struct hvc_iucv_private *priv;
	u8 nuser_data[16];
	int i, rc;

	priv = NULL;
	for (i = 0; i < hvc_iucv_devices; i++)
		if (hvc_iucv_table[i] &&
		    (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) {
			priv = hvc_iucv_table[i];
			break;
		}

	if (!priv)
		return -ENODEV;

	spin_lock(&priv->lock);

	/* If the terminal is already connected or being severed, then sever
	 * this path to enforce that there is only ONE established
	 * communication path per terminal. */
	if (priv->iucv_state != IUCV_DISCONN) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}

	/* accept path */
	memcpy(nuser_data, ipuser + 8, 8);  /* remote service (for af_iucv) */
	memcpy(nuser_data + 8, ipuser, 8);  /* local service  (for af_iucv) */
	path->msglim = 0xffff;		    /* IUCV MSGLIMIT */
	path->flags &= ~IUCV_IPRMDATA;	    /* TODO: use IUCV_IPRMDATA */
	rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}
	priv->path = path;
	priv->iucv_state = IUCV_CONNECTED;

out_path_handled:
	spin_unlock(&priv->lock);
	return 0;
}

/**
 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
 * @path:	Pending path (struct iucv_path)
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * The function also severs the path (as required by the IUCV protocol) and
 * sets the iucv state to IUCV_SEVERED for the associated struct
 * hvc_iucv_private instance. Later, the IUCV_SEVERED state triggers a tty
 * hangup (hvc_iucv_get_chars() / hvc_iucv_write()).
 *
 * If the tty portion of the HVC is closed, the outqueue is cleaned up
 * in addition.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
	struct hvc_iucv_private *priv = path->private;

	spin_lock(&priv->lock);
	priv->iucv_state = IUCV_SEVERED;

	/* NOTE: If the tty has not yet been opened by a getty program
	 *	 (e.g. to see console messages), then cleanup the
	 *	 hvc_iucv_private structure to allow re-connects.
	 *
	 *	 If the tty has been opened, the get_chars() callback returns
	 *	 -EPIPE to signal the hvc console layer to hang up the tty. */
	priv->path = NULL;
	if (priv->tty_state == TTY_CLOSED)
		hvc_iucv_cleanup(priv);
	spin_unlock(&priv->lock);

	/* finally sever path (outside of priv->lock due to lock ordering) */
	iucv_path_sever(path, ipuser);
	iucv_path_free(path);
}

/**
 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function stores an incoming message on the input queue for later
 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
 * However, if the tty has not yet been opened, the message is rejected.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_pending(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer *rb;

	spin_lock(&priv->lock);

	/* reject messages if tty has not yet been opened */
	if (priv->tty_state == TTY_CLOSED) {
		iucv_message_reject(path, msg);
		goto unlock_return;
	}

	/* allocate an empty buffer element */
	rb = alloc_tty_buffer(0, GFP_ATOMIC);
	if (!rb) {
		iucv_message_reject(path, msg);
		goto unlock_return;	/* -ENOMEM */
	}
	rb->msg = *msg;

	list_add_tail(&rb->list, &priv->tty_inqueue);

	hvc_kick();	/* wake up hvc console thread */

unlock_return:
	spin_unlock(&priv->lock);
}

/**
 * hvc_iucv_msg_complete() - IUCV handler to process message completion
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function is called upon completion of message delivery and the
 * message is removed from the outqueue. Additional delivery information
 * can be found in msg->audit: rejected messages (0x040000 (IPADRJCT)) and
 * purged messages (0x010000 (IPADPGNR)).
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_complete(struct iucv_path *path,
				  struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer	*ent, *next;
	LIST_HEAD(list_remove);

	spin_lock(&priv->lock);
	list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
		if (ent->msg.id == msg->id) {
			list_move(&ent->list, &list_remove);
			break;
		}
	spin_unlock(&priv->lock);
	destroy_tty_buffer_list(&list_remove);
}

/* HVC operations */
static struct hv_ops hvc_iucv_ops = {
	.get_chars = hvc_iucv_get_chars,
	.put_chars = hvc_iucv_put_chars,
	.notifier_add = hvc_iucv_notifier_add,
	.notifier_del = hvc_iucv_notifier_del,
	.notifier_hangup = hvc_iucv_notifier_hangup,
};

/**
 * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
 * @id:	hvc_iucv_table index
 *
 * This function allocates a new hvc_iucv_private struct and puts the
 * instance into hvc_iucv_table at index @id.
 * Returns 0 on success; otherwise non-zero.
 */
static int __init hvc_iucv_alloc(int id)
{
	struct hvc_iucv_private *priv;
	char name[9];
	int rc;

	priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->lock);
	INIT_LIST_HEAD(&priv->tty_outqueue);
	INIT_LIST_HEAD(&priv->tty_inqueue);

	/* Finally allocate hvc */
	priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id,
			      HVC_IUCV_MAGIC + id, &hvc_iucv_ops, PAGE_SIZE);
	if (IS_ERR(priv->hvc)) {
		rc = PTR_ERR(priv->hvc);
		kfree(priv);
		return rc;
	}

	/* setup iucv related information */
	snprintf(name, 9, "ihvc%-4d", id);
	memcpy(priv->srv_name, name, 8);
	ASCEBC(priv->srv_name, 8);

	hvc_iucv_table[id] = priv;
	return 0;
}
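/*
 * Editor's note (illustration, not part of the original source): with the
 * "ihvc%-4d" format above, terminal 0 registers the blank-padded 8-byte IUCV
 * service name "ihvc0   " (then converted to EBCDIC by ASCEBC()), terminal 1
 * registers "ihvc1   ", and so on.  An incoming connection is matched against
 * exactly this name in hvc_iucv_path_pending() via the first 8 bytes of the
 * ipuser data.
 */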
/**
 * hvc_iucv_init() - Initialization of HVC backend for z/VM IUCV
 */
static int __init hvc_iucv_init(void)
{
	int rc, i;

	if (!MACHINE_IS_VM) {
		pr_warning("The z/VM IUCV Hypervisor console cannot be "
			   "used without z/VM.\n");
		return -ENODEV;
	}

	if (!hvc_iucv_devices)
		return -ENODEV;

	if (hvc_iucv_devices > MAX_HVC_IUCV_LINES)
		return -EINVAL;

	hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
					   sizeof(struct iucv_tty_buffer),
					   0, 0, NULL);
	if (!hvc_iucv_buffer_cache) {
		pr_err("Not enough memory for driver initialization "
		       "(rs=%d).\n", 1);
		return -ENOMEM;
	}

	hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
						    hvc_iucv_buffer_cache);
	if (!hvc_iucv_mempool) {
		pr_err("Not enough memory for driver initialization "
		       "(rs=%d).\n", 2);
		kmem_cache_destroy(hvc_iucv_buffer_cache);
		return -ENOMEM;
	}

	/* allocate hvc_iucv_private structs */
	for (i = 0; i < hvc_iucv_devices; i++) {
		rc = hvc_iucv_alloc(i);
		if (rc) {
			pr_err("Could not create new z/VM IUCV HVC backend "
			       "rc=%d.\n", rc);
			goto out_error_hvc;
		}
	}

	/* register IUCV callback handler */
	rc = iucv_register(&hvc_iucv_handler, 0);
	if (rc) {
		pr_err("Could not register iucv handler (rc=%d).\n", rc);
		goto out_error_iucv;
	}

	return 0;

out_error_iucv:
	iucv_unregister(&hvc_iucv_handler, 0);
out_error_hvc:
	for (i = 0; i < hvc_iucv_devices; i++)
		if (hvc_iucv_table[i]) {
			if (hvc_iucv_table[i]->hvc)
				hvc_remove(hvc_iucv_table[i]->hvc);
			kfree(hvc_iucv_table[i]);
		}
	mempool_destroy(hvc_iucv_mempool);
	kmem_cache_destroy(hvc_iucv_buffer_cache);
	return rc;
}

/**
 * hvc_iucv_console_init() - Early console initialization
 */
static int __init hvc_iucv_console_init(void)
{
	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;
	return hvc_instantiate(HVC_IUCV_MAGIC, 0, &hvc_iucv_ops);
}

/**
 * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter
 * @val:	Parameter value (numeric)
 */
static int __init hvc_iucv_config(char *val)
{
	return strict_strtoul(val, 10, &hvc_iucv_devices);
}

module_init(hvc_iucv_init);
console_initcall(hvc_iucv_console_init);
__setup("hvc_iucv=", hvc_iucv_config);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("HVC back-end for z/VM IUCV.");
MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>");
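A usage sketch (editor's illustration, not part of the original patch): booting a z/VM guest with, for example,

	hvc_iucv=2

asks this back-end for two terminal devices. hvc_iucv_config() above simply parses the numeric value into hvc_iucv_devices, hvc_iucv_init() rejects values above MAX_HVC_IUCV_LINES, and a value of 0 makes both hvc_iucv_init() and hvc_iucv_console_init() bail out with -ENODEV, leaving the back-end disabled.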
@@ -1898,15 +1898,19 @@ restart_cb:
 		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
 		/* Process finished ERP request. */
 		if (cqr->refers) {
+			spin_lock_bh(&block->queue_lock);
 			__dasd_block_process_erp(block, cqr);
+			spin_unlock_bh(&block->queue_lock);
 			/* restart list_for_xx loop since dasd_process_erp
 			 * might remove multiple elements */
 			goto restart_cb;
 		}
 		/* call the callback function */
+		spin_lock_irq(&block->request_queue_lock);
 		cqr->endclk = get_clock();
 		list_del_init(&cqr->blocklist);
 		__dasd_cleanup_cqr(cqr);
+		spin_unlock_irq(&block->request_queue_lock);
 	}
 	return rc;
 }
@@ -23,6 +23,7 @@
 
 /* This is ugly... */
 #define PRINTK_HEADER "dasd_devmap:"
+#define DASD_BUS_ID_SIZE 20
 
 #include "dasd_int.h"
 
@@ -41,7 +42,7 @@ EXPORT_SYMBOL_GPL(dasd_page_cache);
  */
 struct dasd_devmap {
 	struct list_head list;
-	char bus_id[BUS_ID_SIZE];
+	char bus_id[DASD_BUS_ID_SIZE];
 	unsigned int devindex;
 	unsigned short features;
 	struct dasd_device *device;
@@ -94,7 +95,7 @@ dasd_hash_busid(const char *bus_id)
 	int hash, i;
 
 	hash = 0;
-	for (i = 0; (i < BUS_ID_SIZE) && *bus_id; i++, bus_id++)
+	for (i = 0; (i < DASD_BUS_ID_SIZE) && *bus_id; i++, bus_id++)
 		hash += *bus_id;
 	return hash & 0xff;
 }
@@ -301,7 +302,7 @@ dasd_parse_range( char *parsestring ) {
 	int from, from_id0, from_id1;
 	int to, to_id0, to_id1;
 	int features, rc;
-	char bus_id[BUS_ID_SIZE+1], *str;
+	char bus_id[DASD_BUS_ID_SIZE+1], *str;
 
 	str = parsestring;
 	rc = dasd_busid(&str, &from_id0, &from_id1, &from);
@@ -407,14 +408,14 @@ dasd_add_busid(const char *bus_id, int features)
 	devmap = NULL;
 	hash = dasd_hash_busid(bus_id);
 	list_for_each_entry(tmp, &dasd_hashlists[hash], list)
-		if (strncmp(tmp->bus_id, bus_id, BUS_ID_SIZE) == 0) {
+		if (strncmp(tmp->bus_id, bus_id, DASD_BUS_ID_SIZE) == 0) {
 			devmap = tmp;
 			break;
 		}
 	if (!devmap) {
 		/* This bus_id is new. */
 		new->devindex = dasd_max_devindex++;
-		strncpy(new->bus_id, bus_id, BUS_ID_SIZE);
+		strncpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE);
 		new->features = features;
 		new->device = NULL;
 		list_add(&new->list, &dasd_hashlists[hash]);
@@ -439,7 +440,7 @@ dasd_find_busid(const char *bus_id)
 	devmap = ERR_PTR(-ENODEV);
 	hash = dasd_hash_busid(bus_id);
 	list_for_each_entry(tmp, &dasd_hashlists[hash], list) {
-		if (strncmp(tmp->bus_id, bus_id, BUS_ID_SIZE) == 0) {
+		if (strncmp(tmp->bus_id, bus_id, DASD_BUS_ID_SIZE) == 0) {
 			devmap = tmp;
 			break;
 		}
@@ -561,7 +562,7 @@ dasd_create_device(struct ccw_device *cdev)
 	}
 
 	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
-	cdev->dev.driver_data = device;
+	dev_set_drvdata(&cdev->dev, device);
 	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 
 	return device;
@@ -597,7 +598,7 @@ dasd_delete_device(struct dasd_device *device)
 
 	/* Disconnect dasd_device structure from ccw_device structure. */
 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-	device->cdev->dev.driver_data = NULL;
+	dev_set_drvdata(&device->cdev->dev, NULL);
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 
 	/*
@@ -638,7 +639,7 @@ dasd_put_device_wake(struct dasd_device *device)
 struct dasd_device *
 dasd_device_from_cdev_locked(struct ccw_device *cdev)
 {
-	struct dasd_device *device = cdev->dev.driver_data;
+	struct dasd_device *device = dev_get_drvdata(&cdev->dev);
 
 	if (!device)
 		return ERR_PTR(-ENODEV);
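The last three hunks above stop poking cdev->dev.driver_data directly and use the generic driver-core accessors instead. A minimal sketch of that accessor pattern (illustrative only, not taken from this patch; the function and variable names are made up):

/* Illustrative only: stash and retrieve per-device private data through the
 * driver-core accessors instead of touching dev->driver_data directly. */
#include <linux/device.h>

static void example_bind(struct device *dev, void *priv)
{
	dev_set_drvdata(dev, priv);
}

static void *example_lookup(struct device *dev)
{
	return dev_get_drvdata(dev);
}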
@@ -1496,7 +1496,7 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
 
 
 	/* service information message SIM */
-	if (irb->esw.esw0.erw.cons && (irb->ecw[27] & DASD_SENSE_BIT_0) &&
+	if (irb->esw.esw0.erw.cons && !(irb->ecw[27] & DASD_SENSE_BIT_0) &&
 	    ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
 		dasd_3990_erp_handle_sim(device, irb->ecw);
 		dasd_schedule_device_bh(device);
@@ -180,12 +180,12 @@ dasd_calc_metrics(char *page, char **start, off_t off,
 
 #ifdef CONFIG_DASD_PROFILE
 static char *
-dasd_statistics_array(char *str, unsigned int *array, int shift)
+dasd_statistics_array(char *str, unsigned int *array, int factor)
 {
 	int i;
 
 	for (i = 0; i < 32; i++) {
-		str += sprintf(str, "%7d ", array[i] >> shift);
+		str += sprintf(str, "%7d ", array[i] / factor);
 		if (i == 15)
 			str += sprintf(str, "\n");
 	}
@@ -202,7 +202,7 @@ dasd_statistics_read(char *page, char **start, off_t off,
 #ifdef CONFIG_DASD_PROFILE
 	struct dasd_profile_info_t *prof;
 	char *str;
-	int shift;
+	int factor;
 
 	/* check for active profiling */
 	if (dasd_profile_level == DASD_PROFILE_OFF) {
@@ -214,12 +214,14 @@ dasd_statistics_read(char *page, char **start, off_t off,
 
 	prof = &dasd_global_profile;
 	/* prevent couter 'overflow' on output */
-	for (shift = 0; (prof->dasd_io_reqs >> shift) > 9999999; shift++);
+	for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999;
+	     factor *= 10);
 
 	str = page;
 	str += sprintf(str, "%d dasd I/O requests\n", prof->dasd_io_reqs);
-	str += sprintf(str, "with %d sectors(512B each)\n",
+	str += sprintf(str, "with %u sectors(512B each)\n",
 		       prof->dasd_io_sects);
+	str += sprintf(str, "Scale Factor is %d\n", factor);
 	str += sprintf(str,
 		       "   __<4    ___8    __16    __32    __64    _128 "
 		       "   _256    _512    __1k    __2k    __4k    __8k "
@@ -230,22 +232,22 @@ dasd_statistics_read(char *page, char **start, off_t off,
 		       "   __1G    __2G    __4G " "   _>4G\n");
 
 	str += sprintf(str, "Histogram of sizes (512B secs)\n");
-	str = dasd_statistics_array(str, prof->dasd_io_secs, shift);
+	str = dasd_statistics_array(str, prof->dasd_io_secs, factor);
 	str += sprintf(str, "Histogram of I/O times (microseconds)\n");
-	str = dasd_statistics_array(str, prof->dasd_io_times, shift);
+	str = dasd_statistics_array(str, prof->dasd_io_times, factor);
 	str += sprintf(str, "Histogram of I/O times per sector\n");
-	str = dasd_statistics_array(str, prof->dasd_io_timps, shift);
+	str = dasd_statistics_array(str, prof->dasd_io_timps, factor);
 	str += sprintf(str, "Histogram of I/O time till ssch\n");
-	str = dasd_statistics_array(str, prof->dasd_io_time1, shift);
+	str = dasd_statistics_array(str, prof->dasd_io_time1, factor);
 	str += sprintf(str, "Histogram of I/O time between ssch and irq\n");
-	str = dasd_statistics_array(str, prof->dasd_io_time2, shift);
+	str = dasd_statistics_array(str, prof->dasd_io_time2, factor);
 	str += sprintf(str, "Histogram of I/O time between ssch "
 		       "and irq per sector\n");
-	str = dasd_statistics_array(str, prof->dasd_io_time2ps, shift);
+	str = dasd_statistics_array(str, prof->dasd_io_time2ps, factor);
 	str += sprintf(str, "Histogram of I/O time between irq and end\n");
-	str = dasd_statistics_array(str, prof->dasd_io_time3, shift);
+	str = dasd_statistics_array(str, prof->dasd_io_time3, factor);
 	str += sprintf(str, "# of req in chanq at enqueuing (1..32) \n");
-	str = dasd_statistics_array(str, prof->dasd_io_nr_req, shift);
+	str = dasd_statistics_array(str, prof->dasd_io_nr_req, factor);
 	len = str - page;
 #else
 	len = sprintf(page, "Statistics are not activated in this kernel\n");
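The hunks above replace the binary shift that kept the histogram counters printable with a decimal scale factor, so the reported values stay in round powers of ten. A small stand-alone sketch of the same scaling loop (illustrative user-space C, not part of the patch):

/* Illustrative only: derive a decimal scale factor so that the scaled value
 * fits into seven digits, mirroring the loop introduced by the hunk above. */
#include <stdio.h>

int main(void)
{
	unsigned int reqs = 123456789;
	unsigned int factor;

	for (factor = 1; (reqs / factor) > 9999999; factor *= 10)
		;
	printf("scale factor %u, scaled value %u\n", factor, reqs / factor);
	return 0;
}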
@@ -4,6 +4,9 @@
  * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer
  */
 
+#define KMSG_COMPONENT "dcssblk"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/ctype.h>
@@ -17,19 +20,10 @@
 #include <linux/interrupt.h>
 #include <asm/s390_rdev.h>
 
-//#define DCSSBLK_DEBUG		/* Debug messages on/off */
 #define DCSSBLK_NAME "dcssblk"
 #define DCSSBLK_MINORS_PER_DISK 1
 #define DCSSBLK_PARM_LEN 400
-
-#ifdef DCSSBLK_DEBUG
-#define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSSBLK_NAME " debug: " x)
-#else
-#define PRINT_DEBUG(x...) do {} while (0)
-#endif
-#define PRINT_INFO(x...) printk(KERN_INFO DCSSBLK_NAME " info: " x)
-#define PRINT_WARN(x...) printk(KERN_WARNING DCSSBLK_NAME " warning: " x)
-#define PRINT_ERR(x...) printk(KERN_ERR DCSSBLK_NAME " error: " x)
+#define DCSS_BUS_ID_SIZE 20
 
 static int dcssblk_open(struct block_device *bdev, fmode_t mode);
 static int dcssblk_release(struct gendisk *disk, fmode_t mode);
@@ -50,7 +44,7 @@ static struct block_device_operations dcssblk_devops = {
 struct dcssblk_dev_info {
 	struct list_head lh;
 	struct device dev;
-	char segment_name[BUS_ID_SIZE];
+	char segment_name[DCSS_BUS_ID_SIZE];
 	atomic_t use_count;
 	struct gendisk *gd;
 	unsigned long start;
@@ -65,7 +59,7 @@ struct dcssblk_dev_info {
 
 struct segment_info {
 	struct list_head lh;
-	char segment_name[BUS_ID_SIZE];
+	char segment_name[DCSS_BUS_ID_SIZE];
 	unsigned long start;
 	unsigned long end;
 	int segment_type;
@@ -261,10 +255,9 @@ dcssblk_is_continuous(struct dcssblk_dev_info *dev_info)
 	/* check continuity */
 	for (i = 0; i < dev_info->num_of_segments - 1; i++) {
 		if ((sort_list[i].end + 1) != sort_list[i+1].start) {
-			PRINT_ERR("Segment %s is not contiguous with "
-				  "segment %s\n",
-				  sort_list[i].segment_name,
-				  sort_list[i+1].segment_name);
+			pr_err("Adjacent DCSSs %s and %s are not "
+			       "contiguous\n", sort_list[i].segment_name,
+			       sort_list[i+1].segment_name);
 			rc = -EINVAL;
 			goto out;
 		}
@@ -275,10 +268,10 @@ dcssblk_is_continuous(struct dcssblk_dev_info *dev_info)
 		    !(sort_list[i+1].segment_type &
 		      SEGMENT_EXCLUSIVE) ||
 		    (sort_list[i+1].segment_type == SEG_TYPE_ER)) {
-			PRINT_ERR("Segment %s has different type from "
-				  "segment %s\n",
-				  sort_list[i].segment_name,
-				  sort_list[i+1].segment_name);
+			pr_err("DCSS %s and DCSS %s have "
+			       "incompatible types\n",
+			       sort_list[i].segment_name,
+			       sort_list[i+1].segment_name);
 			rc = -EINVAL;
 			goto out;
 		}
@@ -380,8 +373,9 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
 	} else if (inbuf[0] == '0') {
 		/* reload segments in exclusive mode */
 		if (dev_info->segment_type == SEG_TYPE_SC) {
-			PRINT_ERR("Segment type SC (%s) cannot be loaded in "
-				  "non-shared mode\n", dev_info->segment_name);
+			pr_err("DCSS %s is of type SC and cannot be "
+			       "loaded as exclusive-writable\n",
+			       dev_info->segment_name);
 			rc = -EINVAL;
 			goto out;
 		}
@@ -404,9 +398,8 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
 	goto out;
 
 removeseg:
-	PRINT_ERR("Could not reload segment(s) of the device %s, removing "
-		  "segment(s) now!\n",
-		  dev_info->segment_name);
+	pr_err("DCSS device %s is removed after a failed access mode "
+	       "change\n", dev_info->segment_name);
 	temp = entry;
 	list_for_each_entry(entry, &dev_info->seg_list, lh) {
 		if (entry != temp)
@@ -454,17 +447,17 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
 	if (inbuf[0] == '1') {
 		if (atomic_read(&dev_info->use_count) == 0) {
 			// device is idle => we save immediately
-			PRINT_INFO("Saving segment(s) of the device %s\n",
-				   dev_info->segment_name);
+			pr_info("All DCSSs that map to device %s are "
+				"saved\n", dev_info->segment_name);
 			list_for_each_entry(entry, &dev_info->seg_list, lh) {
 				segment_save(entry->segment_name);
 			}
 		}  else {
 			// device is busy => we save it when it becomes
 			// idle in dcssblk_release
-			PRINT_INFO("Device %s is currently busy, segment(s) "
-				   "will be saved when it becomes idle...\n",
-				   dev_info->segment_name);
+			pr_info("Device %s is in use, its DCSSs will be "
+				"saved when it becomes idle\n",
+				dev_info->segment_name);
 			dev_info->save_pending = 1;
 		}
 	} else if (inbuf[0] == '0') {
@@ -472,9 +465,9 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
 			// device is busy & the user wants to undo his save
 			// request
 			dev_info->save_pending = 0;
-			PRINT_INFO("Pending save for segment(s) of the device "
-				   "%s deactivated\n",
-				   dev_info->segment_name);
+			pr_info("A pending save request for device %s "
+				"has been canceled\n",
+				dev_info->segment_name);
 		}
 	} else {
 		up_write(&dcssblk_devices_sem);
@@ -614,9 +607,8 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
 
 	seg_byte_size = (dev_info->end - dev_info->start + 1);
 	set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
-	PRINT_INFO("Loaded segment(s) %s, size = %lu Byte, "
-		   "capacity = %lu (512 Byte) sectors\n", local_buf,
-		   seg_byte_size, seg_byte_size >> 9);
+	pr_info("Loaded %s with total size %lu bytes and capacity %lu "
+		"sectors\n", local_buf, seg_byte_size, seg_byte_size >> 9);
 
 	dev_info->save_pending = 0;
 	dev_info->is_shared = 1;
@@ -744,13 +736,15 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
 	dev_info = dcssblk_get_device_by_name(local_buf);
 	if (dev_info == NULL) {
 		up_write(&dcssblk_devices_sem);
-		PRINT_WARN("Device %s is not loaded!\n", local_buf);
+		pr_warning("Device %s cannot be removed because it is not a "
+			   "known device\n", local_buf);
 		rc = -ENODEV;
 		goto out_buf;
 	}
 	if (atomic_read(&dev_info->use_count) != 0) {
 		up_write(&dcssblk_devices_sem);
-		PRINT_WARN("Device %s is in use!\n", local_buf);
+		pr_warning("Device %s cannot be removed while it is in "
+			   "use\n", local_buf);
 		rc = -EBUSY;
 		goto out_buf;
 	}
@@ -807,8 +801,8 @@ dcssblk_release(struct gendisk *disk, fmode_t mode)
 	down_write(&dcssblk_devices_sem);
 	if (atomic_dec_and_test(&dev_info->use_count)
 	    && (dev_info->save_pending)) {
-		PRINT_INFO("Device %s became idle and is being saved now\n",
-			   dev_info->segment_name);
+		pr_info("Device %s has become idle and is being saved "
+			"now\n", dev_info->segment_name);
 		list_for_each_entry(entry, &dev_info->seg_list, lh) {
 			segment_save(entry->segment_name);
 		}
@@ -851,7 +845,8 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
 	case SEG_TYPE_SC:
 		/* cannot write to these segments */
 		if (bio_data_dir(bio) == WRITE) {
-			PRINT_WARN("rejecting write to ro device %s\n",
+			pr_warning("Writing to %s failed because it "
+				   "is a read-only device\n",
 				   dev_name(&dev_info->dev));
 			goto fail;
 		}
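All of these message conversions lean on the same KMSG_COMPONENT/pr_fmt() pattern that the first hunk introduces. A minimal module skeleton showing how the prefixing works (editor's sketch, not part of this patch; the demo names are made up):

/* Illustrative only: pr_err(fmt, ...) expands to printk(KERN_ERR pr_fmt(fmt), ...),
 * so defining pr_fmt() before the includes makes every message in this
 * translation unit automatically read "dcssblk: ...". */
#define KMSG_COMPONENT "dcssblk"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init pr_fmt_demo_init(void)
{
	pr_info("messages from this module carry the component prefix\n");
	return 0;
}

static void __exit pr_fmt_demo_exit(void)
{
}

module_init(pr_fmt_demo_init);
module_exit(pr_fmt_demo_exit);
MODULE_LICENSE("GPL");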
@@ -25,6 +25,9 @@
  * generic hard disk support to replace ad-hoc partitioning
  */
 
+#define KMSG_COMPONENT "xpram"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/ctype.h>	/* isdigit, isxdigit */
@@ -42,12 +45,6 @@
 #define XPRAM_DEVS	1	/* one partition */
 #define XPRAM_MAX_DEVS	32	/* maximal number of devices (partitions) */
 
-#define PRINT_DEBUG(x...)	printk(KERN_DEBUG XPRAM_NAME " debug:" x)
-#define PRINT_INFO(x...)	printk(KERN_INFO XPRAM_NAME " info:" x)
-#define PRINT_WARN(x...)	printk(KERN_WARNING XPRAM_NAME " warning:" x)
-#define PRINT_ERR(x...)		printk(KERN_ERR XPRAM_NAME " error:" x)
-
-
 typedef struct {
 	unsigned int	size;		/* size of xpram segment in pages */
 	unsigned int	offset;		/* start page of xpram segment */
@@ -264,7 +261,7 @@ static int __init xpram_setup_sizes(unsigned long pages)
 
 	/* Check number of devices. */
 	if (devs <= 0 || devs > XPRAM_MAX_DEVS) {
-		PRINT_ERR("invalid number %d of devices\n",devs);
+		pr_err("%d is not a valid number of XPRAM devices\n",devs);
 		return -EINVAL;
 	}
 	xpram_devs = devs;
@@ -295,22 +292,22 @@ static int __init xpram_setup_sizes(unsigned long pages)
 		mem_auto_no++;
 	}
 
-	PRINT_INFO("  number of devices (partitions): %d \n", xpram_devs);
+	pr_info("  number of devices (partitions): %d \n", xpram_devs);
 	for (i = 0; i < xpram_devs; i++) {
 		if (xpram_sizes[i])
-			PRINT_INFO("  size of partition %d: %u kB\n",
-				   i, xpram_sizes[i]);
+			pr_info("  size of partition %d: %u kB\n",
+				i, xpram_sizes[i]);
 		else
-			PRINT_INFO("  size of partition %d to be set "
-				   "automatically\n",i);
+			pr_info("  size of partition %d to be set "
+				"automatically\n",i);
 	}
-	PRINT_DEBUG("  memory needed (for sized partitions): %lu kB\n",
-		    mem_needed);
-	PRINT_DEBUG("  partitions to be sized automatically: %d\n",
-		    mem_auto_no);
+	pr_info("  memory needed (for sized partitions): %lu kB\n",
+		mem_needed);
+	pr_info("  partitions to be sized automatically: %d\n",
+		mem_auto_no);
 
 	if (mem_needed > pages * 4) {
-		PRINT_ERR("Not enough expanded memory available\n");
+		pr_err("Not enough expanded memory available\n");
 		return -EINVAL;
 	}
 
@@ -322,8 +319,8 @@ static int __init xpram_setup_sizes(unsigned long pages)
 	 */
 	if (mem_auto_no) {
 		mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4;
-		PRINT_INFO("  automatically determined "
-			   "partition size: %lu kB\n", mem_auto);
+		pr_info("  automatically determined "
+			"partition size: %lu kB\n", mem_auto);
 		for (i = 0; i < xpram_devs; i++)
 			if (xpram_sizes[i] == 0)
 				xpram_sizes[i] = mem_auto;
@@ -405,12 +402,12 @@ static int __init xpram_init(void)
 
 	/* Find out size of expanded memory. */
 	if (xpram_present() != 0) {
-		PRINT_WARN("No expanded memory available\n");
+		pr_err("No expanded memory available\n");
 		return -ENODEV;
 	}
 	xpram_pages = xpram_highest_page_index() + 1;
-	PRINT_INFO("  %u pages expanded memory found (%lu KB).\n",
+	pr_info("  %u pages expanded memory found (%lu KB).\n",
 		xpram_pages, (unsigned long) xpram_pages*4);
 	rc = xpram_setup_sizes(xpram_pages);
 	if (rc)
 		return rc;
@@ -7,6 +7,9 @@
  * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
  */
 
+#define KMSG_COMPONENT "monreader"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/init.h>
@@ -24,19 +27,6 @@
 #include <asm/ebcdic.h>
 #include <asm/extmem.h>
 
-//#define MON_DEBUG			/* Debug messages on/off */
-
-#define MON_NAME "monreader"
-
-#define P_INFO(x...)	printk(KERN_INFO MON_NAME " info: " x)
-#define P_ERROR(x...)	printk(KERN_ERR MON_NAME " error: " x)
-#define P_WARNING(x...)	printk(KERN_WARNING MON_NAME " warning: " x)
-
-#ifdef MON_DEBUG
-#define P_DEBUG(x...)	printk(KERN_DEBUG MON_NAME " debug: " x)
-#else
-#define P_DEBUG(x...)	do {} while (0)
-#endif
 
 #define MON_COLLECT_SAMPLE 0x80
 #define MON_COLLECT_EVENT  0x40
@@ -172,7 +162,7 @@ static int mon_send_reply(struct mon_msg *monmsg,
 	} else
 		monmsg->replied_msglim = 1;
 	if (rc) {
-		P_ERROR("read, IUCV reply failed with rc = %i\n\n", rc);
+		pr_err("Reading monitor data failed with rc=%i\n", rc);
 		return -EIO;
 	}
 	return 0;
@@ -251,7 +241,8 @@ static void mon_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
 {
 	struct mon_private *monpriv = path->private;
 
-	P_ERROR("IUCV connection severed with rc = 0x%X\n", ipuser[0]);
+	pr_err("z/VM *MONITOR system service disconnected with rc=%i\n",
+	       ipuser[0]);
 	iucv_path_sever(path, NULL);
 	atomic_set(&monpriv->iucv_severed, 1);
 	wake_up(&mon_conn_wait_queue);
@@ -266,8 +257,7 @@ static void mon_iucv_message_pending(struct iucv_path *path,
 	memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
 	       msg, sizeof(*msg));
 	if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
-		P_WARNING("IUCV message pending, message limit (%i) reached\n",
-			  MON_MSGLIM);
+		pr_warning("The read queue for monitor data is full\n");
 		monpriv->msg_array[monpriv->write_index]->msglim_reached = 1;
 	}
 	monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM;
@@ -311,8 +301,8 @@ static int mon_open(struct inode *inode, struct file *filp)
 	rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
 			       MON_SERVICE, NULL, user_data_connect, monpriv);
 	if (rc) {
-		P_ERROR("iucv connection to *MONITOR failed with "
-			"IPUSER SEVER code = %i\n", rc);
+		pr_err("Connecting to the z/VM *MONITOR system service "
+		       "failed with rc=%i\n", rc);
 		rc = -EIO;
 		goto out_path;
 	}
@@ -353,7 +343,8 @@ static int mon_close(struct inode *inode, struct file *filp)
 	 */
 	rc = iucv_path_sever(monpriv->path, user_data_sever);
 	if (rc)
-		P_ERROR("close, iucv_sever failed with rc = %i\n", rc);
+		pr_warning("Disconnecting the z/VM *MONITOR system service "
+			   "failed with rc=%i\n", rc);
 
 	atomic_set(&monpriv->iucv_severed, 0);
 	atomic_set(&monpriv->iucv_connected, 0);
@@ -469,7 +460,8 @@ static int __init mon_init(void)
 	int rc;
 
 	if (!MACHINE_IS_VM) {
-		P_ERROR("not running under z/VM, driver not loaded\n");
+		pr_err("The z/VM *MONITOR record device driver cannot be "
+		       "loaded without z/VM\n");
 		return -ENODEV;
 	}
 
@@ -478,7 +470,8 @@ static int __init mon_init(void)
 	 */
 	rc = iucv_register(&monreader_iucv_handler, 1);
 	if (rc) {
-		P_ERROR("failed to register with iucv driver\n");
+		pr_err("The z/VM *MONITOR record device driver failed to "
+		       "register with IUCV\n");
 		return rc;
 	}
 
@@ -488,8 +481,8 @@ static int __init mon_init(void)
 		goto out_iucv;
 	}
 	if (rc != SEG_TYPE_SC) {
-		P_ERROR("segment %s has unsupported type, should be SC\n",
-			mon_dcss_name);
+		pr_err("The specified *MONITOR DCSS %s does not have the "
+		       "required type SC\n", mon_dcss_name);
 		rc = -EINVAL;
 		goto out_iucv;
 	}
@@ -8,6 +8,9 @@
  * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com>
  */
 
+#define KMSG_COMPONENT "monwriter"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/init.h>
@@ -64,9 +67,9 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
 	rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen);
 	if (rc <= 0)
 		return rc;
+	pr_err("Writing monitor data failed with rc=%i\n", rc);
 	if (rc == 5)
 		return -EPERM;
-	printk("DIAG X'DC' error with return code: %i\n", rc);
 	return -EINVAL;
 }
 
@@ -6,6 +6,9 @@
  *		 Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
  */
 
+#define KMSG_COMPONENT "sclp_cmd"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/completion.h>
 #include <linux/init.h>
 #include <linux/errno.h>
@@ -16,9 +19,8 @@
 #include <linux/memory.h>
 #include <asm/chpid.h>
 #include <asm/sclp.h>
-#include "sclp.h"
 
-#define TAG	"sclp_cmd: "
+#include "sclp.h"
 
 #define SCLP_CMDW_READ_SCP_INFO		0x00020001
 #define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001
@@ -169,8 +171,8 @@ static int do_sync_request(sclp_cmdw_t cmd, void *sccb)
 
 	/* Check response. */
 	if (request->status != SCLP_REQ_DONE) {
-		printk(KERN_WARNING TAG "sync request failed "
-		       "(cmd=0x%08x, status=0x%02x)\n", cmd, request->status);
+		pr_warning("sync request failed (cmd=0x%08x, "
+			   "status=0x%02x)\n", cmd, request->status);
 		rc = -EIO;
 	}
 out:
@@ -224,8 +226,8 @@ int sclp_get_cpu_info(struct sclp_cpu_info *info)
 	if (rc)
 		goto out;
 	if (sccb->header.response_code != 0x0010) {
-		printk(KERN_WARNING TAG "readcpuinfo failed "
-		       "(response=0x%04x)\n", sccb->header.response_code);
+		pr_warning("readcpuinfo failed (response=0x%04x)\n",
+			   sccb->header.response_code);
 		rc = -EIO;
 		goto out;
 	}
@@ -262,8 +264,9 @@ static int do_cpu_configure(sclp_cmdw_t cmd)
 	case 0x0120:
 		break;
 	default:
-		printk(KERN_WARNING TAG "configure cpu failed (cmd=0x%08x, "
-		       "response=0x%04x)\n", cmd, sccb->header.response_code);
+		pr_warning("configure cpu failed (cmd=0x%08x, "
+			   "response=0x%04x)\n", cmd,
+			   sccb->header.response_code);
 		rc = -EIO;
 		break;
 	}
@@ -626,9 +629,9 @@ static int do_chp_configure(sclp_cmdw_t cmd)
 	case 0x0450:
 		break;
 	default:
-		printk(KERN_WARNING TAG "configure channel-path failed "
-		       "(cmd=0x%08x, response=0x%04x)\n", cmd,
-		       sccb->header.response_code);
+		pr_warning("configure channel-path failed "
+			   "(cmd=0x%08x, response=0x%04x)\n", cmd,
+			   sccb->header.response_code);
 		rc = -EIO;
 		break;
 	}
@@ -695,8 +698,8 @@ int sclp_chp_read_info(struct sclp_chp_info *info)
 	if (rc)
 		goto out;
 	if (sccb->header.response_code != 0x0010) {
-		printk(KERN_WARNING TAG "read channel-path info failed "
-		       "(response=0x%04x)\n", sccb->header.response_code);
+		pr_warning("read channel-path info failed "
+			   "(response=0x%04x)\n", sccb->header.response_code);
 		rc = -EIO;
 		goto out;
 	}
@@ -5,15 +5,17 @@
  * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
  */
 
+#define KMSG_COMPONENT "sclp_config"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/cpu.h>
 #include <linux/sysdev.h>
 #include <linux/workqueue.h>
 #include <asm/smp.h>
-#include "sclp.h"
 
-#define TAG	"sclp_config: "
+#include "sclp.h"
 
 struct conf_mgm_data {
 	u8 reserved;
@@ -31,7 +33,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
 	int cpu;
 	struct sys_device *sysdev;
 
-	printk(KERN_WARNING TAG "cpu capability changed.\n");
+	pr_warning("cpu capability changed.\n");
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		sysdev = get_cpu_sysdev(cpu);
@@ -78,7 +80,7 @@ static int __init sclp_conf_init(void)
 		return rc;
 
 	if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) {
-		printk(KERN_WARNING TAG "no configuration management.\n");
+		pr_warning("no configuration management.\n");
 		sclp_unregister(&sclp_conf_register);
 		rc = -ENOSYS;
 	}
@@ -7,6 +7,9 @@
  * Michael Ernst <mernst@de.ibm.com>
  */
 
+#define KMSG_COMPONENT "sclp_cpi"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/stat.h>

@@ -20,6 +23,7 @@
 #include <linux/completion.h>
 #include <asm/ebcdic.h>
 #include <asm/sclp.h>
+
 #include "sclp.h"
 #include "sclp_rw.h"
 #include "sclp_cpi_sys.h"

@@ -150,16 +154,16 @@ static int cpi_req(void)
 	wait_for_completion(&completion);
 
 	if (req->status != SCLP_REQ_DONE) {
-		printk(KERN_WARNING "cpi: request failed (status=0x%02x)\n",
-		       req->status);
+		pr_warning("request failed (status=0x%02x)\n",
+			   req->status);
 		rc = -EIO;
 		goto out_free_req;
 	}
 
 	response = ((struct cpi_sccb *) req->sccb)->header.response_code;
 	if (response != 0x0020) {
-		printk(KERN_WARNING "cpi: failed with "
-		       "response code 0x%x\n", response);
+		pr_warning("request failed with response code 0x%x\n",
+			   response);
 		rc = -EIO;
 	}

@@ -5,15 +5,18 @@
  * Author(s): Michael Holzheu
  */
 
+#define KMSG_COMPONENT "sclp_sdias"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/sched.h>
 #include <asm/sclp.h>
 #include <asm/debug.h>
 #include <asm/ipl.h>
+
 #include "sclp.h"
 #include "sclp_rw.h"
 
 #define TRACE(x...)		debug_sprintf_event(sdias_dbf, 1, x)
-#define ERROR_MSG(x...)	printk ( KERN_ALERT "SDIAS: " x )
 
 #define SDIAS_RETRIES 300
 #define SDIAS_SLEEP_TICKS 50

@@ -131,7 +134,7 @@ int sclp_sdias_blk_count(void)
 
 	rc = sdias_sclp_send(&request);
 	if (rc) {
-		ERROR_MSG("sclp_send failed for get_nr_blocks\n");
+		pr_err("sclp_send failed for get_nr_blocks\n");
 		goto out;
 	}
 	if (sccb.hdr.response_code != 0x0020) {

@@ -145,7 +148,8 @@ int sclp_sdias_blk_count(void)
 		rc = sccb.evbuf.blk_cnt;
 		break;
 	default:
-		ERROR_MSG("SCLP error: %x\n", sccb.evbuf.event_status);
+		pr_err("SCLP error: %x\n",
+		       sccb.evbuf.event_status);
 		rc = -EIO;
 		goto out;
 	}

@@ -201,7 +205,7 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
 
 	rc = sdias_sclp_send(&request);
 	if (rc) {
-		ERROR_MSG("sclp_send failed: %x\n", rc);
+		pr_err("sclp_send failed: %x\n", rc);
 		goto out;
 	}
 	if (sccb.hdr.response_code != 0x0020) {

@@ -219,9 +223,9 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
 	case EVSTATE_NO_DATA:
 		TRACE("no data\n");
 	default:
-		ERROR_MSG("Error from SCLP while copying hsa. "
-			  "Event status = %x\n",
-			  sccb.evbuf.event_status);
+		pr_err("Error from SCLP while copying hsa. "
+		       "Event status = %x\n",
+		       sccb.evbuf.event_status);
 		rc = -EIO;
 	}
 out:

@@ -583,23 +583,6 @@ sclp_vt220_chars_in_buffer(struct tty_struct *tty)
 	return count;
 }
 
-static void
-__sclp_vt220_flush_buffer(void)
-{
-	unsigned long flags;
-
-	sclp_vt220_emit_current();
-	spin_lock_irqsave(&sclp_vt220_lock, flags);
-	if (timer_pending(&sclp_vt220_timer))
-		del_timer(&sclp_vt220_timer);
-	while (sclp_vt220_outqueue_count > 0) {
-		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
-		sclp_sync_wait();
-		spin_lock_irqsave(&sclp_vt220_lock, flags);
-	}
-	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
-}
-
 /*
  * Pass on all buffers to the hardware. Return only when there are no more
  * buffers pending.

@@ -745,6 +728,22 @@ sclp_vt220_con_device(struct console *c, int *index)
 	return sclp_vt220_driver;
 }
 
+static void __sclp_vt220_flush_buffer(void)
+{
+	unsigned long flags;
+
+	sclp_vt220_emit_current();
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	if (timer_pending(&sclp_vt220_timer))
+		del_timer(&sclp_vt220_timer);
+	while (sclp_vt220_outqueue_count > 0) {
+		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+		sclp_sync_wait();
+		spin_lock_irqsave(&sclp_vt220_lock, flags);
+	}
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+}
+
 static int
 sclp_vt220_notify(struct notifier_block *self,
 		  unsigned long event, void *data)

@@ -11,12 +11,14 @@
  * The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS
  */
 
+#define KMSG_COMPONENT "vmcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/fs.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
 #include <linux/module.h>
-#include <linux/smp_lock.h>
 #include <asm/cpcmd.h>
 #include <asm/debug.h>
 #include <asm/uaccess.h>

@@ -26,8 +28,6 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Christian Borntraeger <borntraeger@de.ibm.com>");
 MODULE_DESCRIPTION("z/VM CP interface");
 
-#define PRINTK_HEADER "vmcp: "
-
 static debug_info_t *vmcp_debug;
 
 static int vmcp_open(struct inode *inode, struct file *file)

@@ -41,13 +41,11 @@ static int vmcp_open(struct inode *inode, struct file *file)
 	if (!session)
 		return -ENOMEM;
 
-	lock_kernel();
 	session->bufsize = PAGE_SIZE;
 	session->response = NULL;
 	session->resp_size = 0;
 	mutex_init(&session->mutex);
 	file->private_data = session;
-	unlock_kernel();
 	return nonseekable_open(inode, file);
 }

@@ -193,7 +191,8 @@ static int __init vmcp_init(void)
 	int ret;
 
 	if (!MACHINE_IS_VM) {
-		PRINT_WARN("z/VM CP interface is only available under z/VM\n");
+		pr_warning("The z/VM CP interface device driver cannot be "
+			   "loaded without z/VM\n");
 		return -ENODEV;
 	}

@@ -10,6 +10,10 @@
  *	Stefan Weinhuber <wein@de.ibm.com>
  *
  */
+
+#define KMSG_COMPONENT "vmlogrdr"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/errno.h>

@@ -28,8 +32,6 @@
 #include <linux/smp_lock.h>
 #include <linux/string.h>
 
-
-
 MODULE_AUTHOR
 	("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
 	 "	    Stefan Weinhuber (wein@de.ibm.com)");

@@ -174,8 +176,7 @@ static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
 	struct vmlogrdr_priv_t * logptr = path->private;
 	u8 reason = (u8) ipuser[8];
 
-	printk (KERN_ERR "vmlogrdr: connection severed with"
-		" reason %i\n", reason);
+	pr_err("vmlogrdr: connection severed with reason %i\n", reason);
 
 	iucv_path_sever(path, NULL);
 	kfree(path);

@@ -333,8 +334,8 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
 	if (logptr->autorecording) {
 		ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
 		if (ret)
-			printk (KERN_WARNING "vmlogrdr: failed to start "
-				"recording automatically\n");
+			pr_warning("vmlogrdr: failed to start "
+				   "recording automatically\n");
 	}
 
 	/* create connection to the system service */

@@ -345,9 +346,9 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
 				       logptr->system_service, NULL, NULL,
 				       logptr);
 	if (connect_rc) {
-		printk (KERN_ERR "vmlogrdr: iucv connection to %s "
-			"failed with rc %i \n", logptr->system_service,
-			connect_rc);
+		pr_err("vmlogrdr: iucv connection to %s "
+		       "failed with rc %i \n",
+		       logptr->system_service, connect_rc);
 		goto out_path;
 	}

@@ -388,8 +389,8 @@ static int vmlogrdr_release (struct inode *inode, struct file *filp)
 	if (logptr->autorecording) {
 		ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
 		if (ret)
-			printk (KERN_WARNING "vmlogrdr: failed to stop "
-				"recording automatically\n");
+			pr_warning("vmlogrdr: failed to stop "
+				   "recording automatically\n");
 	}
 	logptr->dev_in_use = 0;

@@ -823,8 +824,7 @@ static int __init vmlogrdr_init(void)
 	dev_t dev;
 
 	if (! MACHINE_IS_VM) {
-		printk (KERN_ERR "vmlogrdr: not running under VM, "
-			"driver not loaded.\n");
+		pr_err("not running under VM, driver not loaded.\n");
 		return -ENODEV;
 	}

@@ -8,6 +8,9 @@
  * Frank Munzert <munzert@de.ibm.com>
  */
 
+#define KMSG_COMPONENT "vmur"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/cdev.h>
 #include <linux/smp_lock.h>

@@ -40,8 +43,6 @@ MODULE_AUTHOR("IBM Corporation");
 MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
 MODULE_LICENSE("GPL");
 
-#define PRINTK_HEADER "vmur: "
-
 static dev_t ur_first_dev_maj_min;
 static struct class *vmur_class;
 static struct debug_info *vmur_dbf;

@@ -987,7 +988,8 @@ static int __init ur_init(void)
 	dev_t dev;
 
 	if (!MACHINE_IS_VM) {
-		PRINT_ERR("%s is only available under z/VM.\n", ur_banner);
+		pr_err("The %s cannot be loaded without z/VM\n",
+		       ur_banner);
 		return -ENODEV;
 	}

@@ -1006,7 +1008,8 @@ static int __init ur_init(void)
 
 	rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
 	if (rc) {
-		PRINT_ERR("alloc_chrdev_region failed: err = %d\n", rc);
+		pr_err("Kernel function alloc_chrdev_region failed with "
+		       "error code %d\n", rc);
 		goto fail_unregister_driver;
 	}
 	ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);

@@ -1016,7 +1019,7 @@ static int __init ur_init(void)
 		rc = PTR_ERR(vmur_class);
 		goto fail_unregister_region;
 	}
-	PRINT_INFO("%s loaded.\n", ur_banner);
+	pr_info("%s loaded.\n", ur_banner);
 	return 0;
 
 fail_unregister_region:

@@ -1034,7 +1037,7 @@ static void __exit ur_exit(void)
 	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
 	ccw_driver_unregister(&ur_driver);
 	debug_unregister(vmur_dbf);
-	PRINT_INFO("%s unloaded.\n", ur_banner);
+	pr_info("%s unloaded.\n", ur_banner);
 }
 
 module_init(ur_init);

@@ -9,6 +9,9 @@
  * Author(s): Michael Holzheu
  */
 
+#define KMSG_COMPONENT "zdump"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/init.h>
 #include <linux/miscdevice.h>
 #include <linux/utsname.h>

@@ -24,8 +27,6 @@
 #include "sclp.h"
 
 #define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
-#define MSG(x...) printk( KERN_ALERT x )
-#define ERROR_MSG(x...) printk ( KERN_ALERT "DUMP: " x )
 
 #define TO_USER		0
 #define TO_KERNEL	1

@@ -563,19 +564,19 @@ static int __init sys_info_init(enum arch_id arch)
 
 	switch (arch) {
 	case ARCH_S390X:
-		MSG("DETECTED 'S390X (64 bit) OS'\n");
+		pr_alert("DETECTED 'S390X (64 bit) OS'\n");
 		sys_info.sa_base = SAVE_AREA_BASE_S390X;
 		sys_info.sa_size = sizeof(struct save_area_s390x);
 		set_s390x_lc_mask(&sys_info.lc_mask);
 		break;
 	case ARCH_S390:
-		MSG("DETECTED 'S390 (32 bit) OS'\n");
+		pr_alert("DETECTED 'S390 (32 bit) OS'\n");
 		sys_info.sa_base = SAVE_AREA_BASE_S390;
 		sys_info.sa_size = sizeof(struct save_area_s390);
 		set_s390_lc_mask(&sys_info.lc_mask);
 		break;
 	default:
-		ERROR_MSG("unknown architecture 0x%x.\n",arch);
+		pr_alert("0x%x is an unknown architecture.\n",arch);
 		return -EINVAL;
 	}
 	sys_info.arch = arch;

@@ -674,7 +675,8 @@ static int __init zcore_init(void)
 
 #ifndef __s390x__
 	if (arch == ARCH_S390X) {
-		ERROR_MSG("32 bit dumper can't dump 64 bit system!\n");
+		pr_alert("The 32-bit dump tool cannot be used for a "
+			 "64-bit system\n");
 		rc = -EINVAL;
 		goto fail;
 	}

@@ -9,6 +9,9 @@
  * Arnd Bergmann (arndb@de.ibm.com)
  */
 
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/init.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>

@@ -50,9 +53,10 @@ static int blacklist_range(range_action action, unsigned int from_ssid,
 {
 	if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) {
 		if (msgtrigger)
-			printk(KERN_WARNING "cio: Invalid cio_ignore range "
-			       "0.%x.%04x-0.%x.%04x\n", from_ssid, from,
-			       to_ssid, to);
+			pr_warning("0.%x.%04x to 0.%x.%04x is not a valid "
+				   "range for cio_ignore\n", from_ssid, from,
+				   to_ssid, to);
+
 		return 1;
 	}

@@ -140,8 +144,8 @@ static int parse_busid(char *str, unsigned int *cssid, unsigned int *ssid,
 	rc = 0;
 out:
 	if (rc && msgtrigger)
-		printk(KERN_WARNING "cio: Invalid cio_ignore device '%s'\n",
-		       str);
+		pr_warning("%s is not a valid device for the cio_ignore "
+			   "kernel parameter\n", str);
 
 	return rc;
 }

@@ -19,6 +19,8 @@
 #include <asm/ccwdev.h>
 #include <asm/ccwgroup.h>
 
+#define CCW_BUS_ID_SIZE		20
+
 /* In Linux 2.4, we had a channel device layer called "chandev"
  * that did all sorts of obscure stuff for networking devices.
  * This is another driver that serves as a replacement for just

@@ -89,15 +91,23 @@ ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const
 
 	gdev = to_ccwgroupdev(dev);
 
-	if (gdev->state != CCWGROUP_OFFLINE)
-		return -EINVAL;
+	/* Prevent concurrent online/offline processing and ungrouping. */
+	if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
+		return -EAGAIN;
+	if (gdev->state != CCWGROUP_OFFLINE) {
+		rc = -EINVAL;
+		goto out;
+	}
 	/* Note that we cannot unregister the device from one of its
 	 * attribute methods, so we have to use this roundabout approach.
 	 */
 	rc = device_schedule_callback(dev, ccwgroup_ungroup_callback);
-	if (rc)
-		count = rc;
+out:
+	if (rc) {
+		/* Release onoff "lock" when ungrouping failed. */
+		atomic_set(&gdev->onoff, 0);
+		return rc;
+	}
 	return count;
 }

@@ -172,7 +182,7 @@ static int __get_next_bus_id(const char **buf, char *bus_id)
 		len = end - start + 1;
 		end++;
 	}
-	if (len < BUS_ID_SIZE) {
+	if (len < CCW_BUS_ID_SIZE) {
 		strlcpy(bus_id, start, len);
 		rc = 0;
 	} else

@@ -181,7 +191,7 @@ static int __get_next_bus_id(const char **buf, char *bus_id)
 	return rc;
 }
 
-static int __is_valid_bus_id(char bus_id[BUS_ID_SIZE])
+static int __is_valid_bus_id(char bus_id[CCW_BUS_ID_SIZE])
 {
 	int cssid, ssid, devno;

@@ -213,7 +223,7 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
 {
 	struct ccwgroup_device *gdev;
 	int rc, i;
-	char tmp_bus_id[BUS_ID_SIZE];
+	char tmp_bus_id[CCW_BUS_ID_SIZE];
 	const char *curr_buf;
 
 	gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),

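The ccwgroup_ungroup_store() hunk above turns gdev->onoff into a try-lock: atomic_cmpxchg() lets exactly one caller claim the flag, concurrent callers get -EAGAIN, and the flag is cleared again on the failure path. A minimal sketch of the same idiom with illustrative names that are not taken from the driver:

static atomic_t onoff = ATOMIC_INIT(0);

static int do_exclusive_work(void)
{
	int rc = 0;

	/* try-lock: only one caller at a time may proceed */
	if (atomic_cmpxchg(&onoff, 0, 1) != 0)
		return -EAGAIN;

	/* ... work that must not run concurrently ... */

	/* release the "lock" again */
	atomic_set(&onoff, 0);
	return rc;
}
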
@@ -8,6 +8,9 @@
  * Arnd Bergmann (arndb@de.ibm.com)
  */
 
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/init.h>

@@ -333,6 +336,7 @@ static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
 	struct chp_config_data *data;
 	struct chp_id chpid;
 	int num;
+	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};
 
 	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
 	if (sei_area->rs != 0)

@@ -343,8 +347,8 @@ static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
 		if (!chp_test_bit(data->map, num))
 			continue;
 		chpid.id = num;
-		printk(KERN_WARNING "cio: processing configure event %d for "
-		       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
+		pr_notice("Processing %s for channel path %x.%02x\n",
+			  events[data->op], chpid.cssid, chpid.id);
 		switch (data->op) {
 		case 0:
 			chp_cfg_schedule(chpid, 1);

@@ -61,7 +61,7 @@ static void chsc_subchannel_irq(struct subchannel *sch)
 	}
 	private->request = NULL;
 	memcpy(&request->irb, irb, sizeof(*irb));
-	stsch(sch->schid, &sch->schib);
+	cio_update_schib(sch);
 	complete(&request->completion);
 	put_device(&sch->dev);
 }

@@ -9,6 +9,9 @@
  * Martin Schwidefsky (schwidefsky@de.ibm.com)
  */
 
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>

@@ -104,44 +107,6 @@ cio_get_options (struct subchannel *sch)
 	return flags;
 }
 
-/*
- * Use tpi to get a pending interrupt, call the interrupt handler and
- * return a pointer to the subchannel structure.
- */
-static int
-cio_tpi(void)
-{
-	struct tpi_info *tpi_info;
-	struct subchannel *sch;
-	struct irb *irb;
-	int irq_context;
-
-	tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
-	if (tpi (NULL) != 1)
-		return 0;
-	irb = (struct irb *) __LC_IRB;
-	/* Store interrupt response block to lowcore. */
-	if (tsch (tpi_info->schid, irb) != 0)
-		/* Not status pending or not operational. */
-		return 1;
-	sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
-	if (!sch)
-		return 1;
-	irq_context = in_interrupt();
-	if (!irq_context)
-		local_bh_disable();
-	irq_enter ();
-	spin_lock(sch->lock);
-	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
-	if (sch->driver && sch->driver->irq)
-		sch->driver->irq(sch);
-	spin_unlock(sch->lock);
-	irq_exit ();
-	if (!irq_context)
-		_local_bh_enable();
-	return 1;
-}
-
 static int
 cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
 {

@@ -152,11 +117,13 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
 	else
 		sch->lpm = 0;
 
-	stsch (sch->schid, &sch->schib);
-
 	CIO_MSG_EVENT(2, "cio_start: 'not oper' status for "
 		      "subchannel 0.%x.%04x!\n", sch->schid.ssid,
 		      sch->schid.sch_no);
+
+	if (cio_update_schib(sch))
+		return -ENODEV;
+
 	sprintf(dbf_text, "no%s", dev_name(&sch->dev));
 	CIO_TRACE_EVENT(0, dbf_text);
 	CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));

@@ -354,7 +321,8 @@ cio_cancel (struct subchannel *sch)
 	switch (ccode) {
 	case 0:		/* success */
 		/* Update information in scsw. */
-		stsch (sch->schid, &sch->schib);
+		if (cio_update_schib(sch))
+			return -ENODEV;
 		return 0;
 	case 1:		/* status pending */
 		return -EBUSY;

@@ -365,36 +333,93 @@ cio_cancel (struct subchannel *sch)
 	}
 }
 
-/*
- * Function: cio_modify
- * Issues a "Modify Subchannel" on the specified subchannel
- */
-int
-cio_modify (struct subchannel *sch)
-{
-	int ccode, retry, ret;
-
-	ret = 0;
+static void cio_apply_config(struct subchannel *sch, struct schib *schib)
+{
+	schib->pmcw.intparm = sch->config.intparm;
+	schib->pmcw.mbi = sch->config.mbi;
+	schib->pmcw.isc = sch->config.isc;
+	schib->pmcw.ena = sch->config.ena;
+	schib->pmcw.mme = sch->config.mme;
+	schib->pmcw.mp = sch->config.mp;
+	schib->pmcw.csense = sch->config.csense;
+	schib->pmcw.mbfc = sch->config.mbfc;
+	if (sch->config.mbfc)
+		schib->mba = sch->config.mba;
+}
+
+static int cio_check_config(struct subchannel *sch, struct schib *schib)
+{
+	return (schib->pmcw.intparm == sch->config.intparm) &&
+		(schib->pmcw.mbi == sch->config.mbi) &&
+		(schib->pmcw.isc == sch->config.isc) &&
+		(schib->pmcw.ena == sch->config.ena) &&
+		(schib->pmcw.mme == sch->config.mme) &&
+		(schib->pmcw.mp == sch->config.mp) &&
+		(schib->pmcw.csense == sch->config.csense) &&
+		(schib->pmcw.mbfc == sch->config.mbfc) &&
+		(!sch->config.mbfc || (schib->mba == sch->config.mba));
+}
+
+/*
+ * cio_commit_config - apply configuration to the subchannel
+ */
+int cio_commit_config(struct subchannel *sch)
+{
+	struct schib schib;
+	int ccode, retry, ret = 0;
+
+	if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
+		return -ENODEV;
+
 	for (retry = 0; retry < 5; retry++) {
-		ccode = msch_err (sch->schid, &sch->schib);
-		if (ccode < 0)	/* -EIO if msch gets a program check. */
+		/* copy desired changes to local schib */
+		cio_apply_config(sch, &schib);
+		ccode = msch_err(sch->schid, &schib);
+		if (ccode < 0) /* -EIO if msch gets a program check. */
 			return ccode;
 		switch (ccode) {
 		case 0: /* successfull */
-			return 0;
-		case 1:	/* status pending */
+			if (stsch(sch->schid, &schib) ||
+			    !css_sch_is_valid(&schib))
+				return -ENODEV;
+			if (cio_check_config(sch, &schib)) {
+				/* commit changes from local schib */
+				memcpy(&sch->schib, &schib, sizeof(schib));
+				return 0;
+			}
+			ret = -EAGAIN;
+			break;
+		case 1: /* status pending */
 			return -EBUSY;
 		case 2: /* busy */
-			udelay (100);	/* allow for recovery */
+			udelay(100); /* allow for recovery */
 			ret = -EBUSY;
 			break;
 		case 3: /* not operational */
 			return -ENODEV;
 		}
 	}
 	return ret;
 }
 
+/**
+ * cio_update_schib - Perform stsch and update schib if subchannel is valid.
+ * @sch: subchannel on which to perform stsch
+ * Return zero on success, -ENODEV otherwise.
+ */
+int cio_update_schib(struct subchannel *sch)
+{
+	struct schib schib;
+
+	if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
+		return -ENODEV;
+
+	memcpy(&sch->schib, &schib, sizeof(schib));
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cio_update_schib);
+
 /**
  * cio_enable_subchannel - enable a subchannel.
  * @sch: subchannel to be enabled

@@ -403,7 +428,6 @@ cio_modify (struct subchannel *sch)
 int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
 {
 	char dbf_txt[15];
-	int ccode;
 	int retry;
 	int ret;

@@ -412,33 +436,27 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
 
 	if (sch_is_pseudo_sch(sch))
 		return -EINVAL;
-	ccode = stsch (sch->schid, &sch->schib);
-	if (ccode)
+	if (cio_update_schib(sch))
 		return -ENODEV;
 
-	for (retry = 5, ret = 0; retry > 0; retry--) {
-		sch->schib.pmcw.ena = 1;
-		sch->schib.pmcw.isc = sch->isc;
-		sch->schib.pmcw.intparm = intparm;
-		ret = cio_modify(sch);
-		if (ret == -ENODEV)
-			break;
-		if (ret == -EIO)
+	sch->config.ena = 1;
+	sch->config.isc = sch->isc;
+	sch->config.intparm = intparm;
+
+	for (retry = 0; retry < 3; retry++) {
+		ret = cio_commit_config(sch);
+		if (ret == -EIO) {
 			/*
-			 * Got a program check in cio_modify. Try without
+			 * Got a program check in msch. Try without
 			 * the concurrent sense bit the next time.
 			 */
-			sch->schib.pmcw.csense = 0;
-		if (ret == 0) {
-			stsch (sch->schid, &sch->schib);
-			if (sch->schib.pmcw.ena)
-				break;
-		}
-		if (ret == -EBUSY) {
+			sch->config.csense = 0;
+		} else if (ret == -EBUSY) {
 			struct irb irb;
 			if (tsch(sch->schid, &irb) != 0)
 				break;
-		}
+		} else
+			break;
 	}
 	sprintf (dbf_txt, "ret:%d", ret);
 	CIO_TRACE_EVENT (2, dbf_txt);

@@ -453,8 +471,6 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel);
 int cio_disable_subchannel(struct subchannel *sch)
 {
 	char dbf_txt[15];
-	int ccode;
-	int retry;
 	int ret;
 
 	CIO_TRACE_EVENT (2, "dissch");

@@ -462,8 +478,7 @@ int cio_disable_subchannel(struct subchannel *sch)
 
 	if (sch_is_pseudo_sch(sch))
 		return 0;
-	ccode = stsch (sch->schid, &sch->schib);
-	if (ccode == 3)		/* Not operational. */
+	if (cio_update_schib(sch))
 		return -ENODEV;
 
 	if (scsw_actl(&sch->schib.scsw) != 0)

@@ -473,24 +488,9 @@ int cio_disable_subchannel(struct subchannel *sch)
 		 */
 		return -EBUSY;
 
-	for (retry = 5, ret = 0; retry > 0; retry--) {
-		sch->schib.pmcw.ena = 0;
-		ret = cio_modify(sch);
-		if (ret == -ENODEV)
-			break;
-		if (ret == -EBUSY)
-			/*
-			 * The subchannel is busy or status pending.
-			 * We'll disable when the next interrupt was delivered
-			 * via the state machine.
-			 */
-			break;
-		if (ret == 0) {
-			stsch (sch->schid, &sch->schib);
-			if (!sch->schib.pmcw.ena)
-				break;
-		}
-	}
+	sch->config.ena = 0;
+	ret = cio_commit_config(sch);
+
 	sprintf (dbf_txt, "ret:%d", ret);
 	CIO_TRACE_EVENT (2, dbf_txt);
 	return ret;

@@ -687,6 +687,43 @@ static char console_sch_name[10] = "0.x.xxxx";
 static struct io_subchannel_private console_priv;
 static int console_subchannel_in_use;
 
+/*
+ * Use tpi to get a pending interrupt, call the interrupt handler and
+ * return a pointer to the subchannel structure.
+ */
+static int cio_tpi(void)
+{
+	struct tpi_info *tpi_info;
+	struct subchannel *sch;
+	struct irb *irb;
+	int irq_context;
+
+	tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
+	if (tpi(NULL) != 1)
+		return 0;
+	irb = (struct irb *) __LC_IRB;
+	/* Store interrupt response block to lowcore. */
+	if (tsch(tpi_info->schid, irb) != 0)
+		/* Not status pending or not operational. */
+		return 1;
+	sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
+	if (!sch)
+		return 1;
+	irq_context = in_interrupt();
+	if (!irq_context)
+		local_bh_disable();
+	irq_enter();
+	spin_lock(sch->lock);
+	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
+	if (sch->driver && sch->driver->irq)
+		sch->driver->irq(sch);
+	spin_unlock(sch->lock);
+	irq_exit();
+	if (!irq_context)
+		_local_bh_enable();
+	return 1;
+}
+
 void *cio_get_console_priv(void)
 {
 	return &console_priv;

@@ -780,7 +817,7 @@ cio_probe_console(void)
 	sch_no = cio_get_console_sch_no();
 	if (sch_no == -1) {
 		console_subchannel_in_use = 0;
-		printk(KERN_WARNING "cio: No ccw console found!\n");
+		pr_warning("No CCW console was found\n");
 		return ERR_PTR(-ENODEV);
 	}
 	memset(&console_subchannel, 0, sizeof(struct subchannel));

@@ -796,10 +833,9 @@ cio_probe_console(void)
 	 * enable console I/O-interrupt subclass
 	 */
 	isc_register(CONSOLE_ISC);
-	console_subchannel.schib.pmcw.isc = CONSOLE_ISC;
-	console_subchannel.schib.pmcw.intparm =
-		(u32)(addr_t)&console_subchannel;
-	ret = cio_modify(&console_subchannel);
+	console_subchannel.config.isc = CONSOLE_ISC;
+	console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel;
+	ret = cio_commit_config(&console_subchannel);
 	if (ret) {
 		isc_unregister(CONSOLE_ISC);
 		console_subchannel_in_use = 0;

@@ -811,8 +847,8 @@ cio_probe_console(void)
 void
 cio_release_console(void)
 {
-	console_subchannel.schib.pmcw.intparm = 0;
-	cio_modify(&console_subchannel);
+	console_subchannel.config.intparm = 0;
+	cio_commit_config(&console_subchannel);
 	isc_unregister(CONSOLE_ISC);
 	console_subchannel_in_use = 0;
 }

@@ -852,7 +888,8 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
 		cc = msch(schid, schib);
 		if (cc)
 			return (cc==3?-ENODEV:-EBUSY);
-		stsch(schid, schib);
+		if (stsch(schid, schib) || !css_sch_is_valid(schib))
+			return -ENODEV;
 		if (!schib->pmcw.ena)
 			return 0;
 	}

@@ -45,6 +45,19 @@ struct pmcw {
 				/*  ... in an operand exception.       */
 } __attribute__ ((packed));
 
+/* Target SCHIB configuration. */
+struct schib_config {
+	u64 mba;
+	u32 intparm;
+	u16 mbi;
+	u32 isc:3;
+	u32 ena:1;
+	u32 mme:2;
+	u32 mp:1;
+	u32 csense:1;
+	u32 mbfc:1;
+} __attribute__ ((packed));
+
 /*
  * subchannel information block
  */

@@ -82,6 +95,8 @@ struct subchannel {
 	struct device dev;	/* entry in device tree */
 	struct css_driver *driver;
 	void *private; /* private per subchannel type data */
+	struct work_struct work;
+	struct schib_config config;
 } __attribute__ ((aligned(8)));
 
 #define IO_INTERRUPT_TYPE	  0 /* I/O interrupt type */

@@ -100,7 +115,8 @@ extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8);
 extern int cio_cancel (struct subchannel *);
 extern int cio_set_options (struct subchannel *, int);
 extern int cio_get_options (struct subchannel *);
-extern int cio_modify (struct subchannel *);
+extern int cio_update_schib(struct subchannel *sch);
+extern int cio_commit_config(struct subchannel *sch);
 
 int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
 int cio_tm_intrg(struct subchannel *sch);

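The hunks above replace cio_modify() and open-coded stsch()/msch() calls with a small state model: drivers describe the desired subchannel state in sch->config, cio_commit_config() issues msch and verifies the result, and cio_update_schib() refreshes the cached schib only while the subchannel is still valid. A hedged sketch of how a caller looks after this refactoring; it mirrors the cio_enable_subchannel() hunk, assumes the common I/O layer's private "cio.h" header, and is illustrative rather than code from the tree:

/* illustrative only: enable a subchannel with a given interruption parameter */
static int example_enable(struct subchannel *sch, u32 intparm)
{
	if (cio_update_schib(sch))	/* refresh cached schib, check validity */
		return -ENODEV;

	sch->config.ena = 1;		/* describe the target state ...         */
	sch->config.isc = sch->isc;
	sch->config.intparm = intparm;

	return cio_commit_config(sch);	/* ... and let cio issue and verify msch */
}
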
@@ -25,6 +25,9 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/bootmem.h>
 #include <linux/device.h>
 #include <linux/init.h>

@@ -185,56 +188,19 @@ static inline void cmf_activate(void *area, unsigned int onoff)
 static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
 		     unsigned long address)
 {
-	int ret;
-	int retry;
 	struct subchannel *sch;
-	struct schib *schib;
 
 	sch = to_subchannel(cdev->dev.parent);
-	schib = &sch->schib;
-	/* msch can silently fail, so do it again if necessary */
-	for (retry = 0; retry < 3; retry++) {
-		/* prepare schib */
-		stsch(sch->schid, schib);
-		schib->pmcw.mme  = mme;
-		schib->pmcw.mbfc = mbfc;
-		/* address can be either a block address or a block index */
-		if (mbfc)
-			schib->mba = address;
-		else
-			schib->pmcw.mbi = address;
-
-		/* try to submit it */
-		switch(ret = msch_err(sch->schid, schib)) {
-			case 0:
-				break;
-			case 1:
-			case 2: /* in I/O or status pending */
-				ret = -EBUSY;
-				break;
-			case 3: /* subchannel is no longer valid */
-				ret = -ENODEV;
-				break;
-			default: /* msch caught an exception */
-				ret = -EINVAL;
-				break;
-		}
-		stsch(sch->schid, schib); /* restore the schib */
-
-		if (ret)
-			break;
-
-		/* check if it worked */
-		if (schib->pmcw.mme  == mme &&
-		    schib->pmcw.mbfc == mbfc &&
-		    (mbfc ? (schib->mba == address)
-			  : (schib->pmcw.mbi == address)))
-			return 0;
-
-		ret = -EINVAL;
-	}
-
-	return ret;
+	sch->config.mme = mme;
+	sch->config.mbfc = mbfc;
+	/* address can be either a block address or a block index */
+	if (mbfc)
+		sch->config.mba = address;
+	else
+		sch->config.mbi = address;
+
+	return cio_commit_config(sch);
 }
 
 struct set_schib_struct {

@@ -338,7 +304,7 @@ static int cmf_copy_block(struct ccw_device *cdev)
 
 	sch = to_subchannel(cdev->dev.parent);
 
-	if (stsch(sch->schid, &sch->schib))
+	if (cio_update_schib(sch))
 		return -ENODEV;
 
 	if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) {

@@ -1359,9 +1325,8 @@ static int __init init_cmf(void)
 	default:
 		return 1;
 	}
-
-	printk(KERN_INFO "cio: Channel measurement facility using %s "
-	       "format (%s)\n", format_string, detect_string);
+	pr_info("Channel measurement facility initialized using format "
+		"%s (mode %s)\n", format_string, detect_string);
 	return 0;
 }

@@ -6,6 +6,10 @@
  * Author(s): Arnd Bergmann (arndb@de.ibm.com)
  *	      Cornelia Huck (cornelia.huck@de.ibm.com)
  */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/device.h>

@@ -128,8 +132,8 @@ css_free_subchannel(struct subchannel *sch)
 {
 	if (sch) {
 		/* Reset intparm to zeroes. */
-		sch->schib.pmcw.intparm = 0;
-		cio_modify(sch);
+		sch->config.intparm = 0;
+		cio_commit_config(sch);
 		kfree(sch->lock);
 		kfree(sch);
 	}

@@ -844,8 +848,8 @@ out:
 	s390_unregister_crw_handler(CRW_RSC_CSS);
 	chsc_free_sei_area();
 	kfree(slow_subchannel_set);
-	printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n",
-	       ret);
+	pr_alert("The CSS device driver initialization failed with "
+		 "errno=%d\n", ret);
 	return ret;
 }

@@ -376,19 +376,23 @@ int ccw_device_set_offline(struct ccw_device *cdev)
 			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
 		}
 		spin_unlock_irq(cdev->ccwlock);
+		/* Give up reference from ccw_device_set_online(). */
+		put_device(&cdev->dev);
 		return ret;
 	}
 	spin_unlock_irq(cdev->ccwlock);
-	if (ret == 0)
+	if (ret == 0) {
 		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
-	else {
+		/* Give up reference from ccw_device_set_online(). */
+		put_device(&cdev->dev);
+	} else {
 		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
 			      "device 0.%x.%04x\n",
 			      ret, cdev->private->dev_id.ssid,
 			      cdev->private->dev_id.devno);
 		cdev->online = 1;
 	}
 	return ret;
 }
 
 /**

@@ -411,6 +415,9 @@ int ccw_device_set_online(struct ccw_device *cdev)
 		return -ENODEV;
 	if (cdev->online || !cdev->drv)
 		return -EINVAL;
+	/* Hold on to an extra reference while device is online. */
+	if (!get_device(&cdev->dev))
+		return -ENODEV;
 
 	spin_lock_irq(cdev->ccwlock);
 	ret = ccw_device_online(cdev);

@@ -422,10 +429,15 @@ int ccw_device_set_online(struct ccw_device *cdev)
 			      "device 0.%x.%04x\n",
 			      ret, cdev->private->dev_id.ssid,
 			      cdev->private->dev_id.devno);
+		/* Give up online reference since onlining failed. */
+		put_device(&cdev->dev);
 		return ret;
 	}
-	if (cdev->private->state != DEV_STATE_ONLINE)
+	if (cdev->private->state != DEV_STATE_ONLINE) {
+		/* Give up online reference since onlining failed. */
+		put_device(&cdev->dev);
 		return -ENODEV;
+	}
 	if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) {
 		cdev->online = 1;
 		return 0;

@@ -440,6 +452,8 @@ int ccw_device_set_online(struct ccw_device *cdev)
 		      "device 0.%x.%04x\n",
 		      ret, cdev->private->dev_id.ssid,
 		      cdev->private->dev_id.devno);
+	/* Give up online reference since onlining failed. */
+	put_device(&cdev->dev);
 	return (ret == 0) ? -ENODEV : ret;
 }

@@ -704,6 +718,8 @@ ccw_device_release(struct device *dev)
 	struct ccw_device *cdev;
 
 	cdev = to_ccwdev(dev);
+	/* Release reference of parent subchannel. */
+	put_device(cdev->dev.parent);
 	kfree(cdev->private);
 	kfree(cdev);
 }

@@ -735,8 +751,8 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
 	/* Do first half of device_register. */
 	device_initialize(&cdev->dev);
 	if (!get_device(&sch->dev)) {
-		if (cdev->dev.release)
-			cdev->dev.release(&cdev->dev);
+		/* Release reference from device_initialize(). */
+		put_device(&cdev->dev);
 		return -ENODEV;
 	}
 	return 0;

@@ -778,37 +794,55 @@ static void sch_attach_disconnected_device(struct subchannel *sch,
 	struct subchannel *other_sch;
 	int ret;
 
-	other_sch = to_subchannel(get_device(cdev->dev.parent));
+	/* Get reference for new parent. */
+	if (!get_device(&sch->dev))
+		return;
+	other_sch = to_subchannel(cdev->dev.parent);
+	/* Note: device_move() changes cdev->dev.parent */
 	ret = device_move(&cdev->dev, &sch->dev);
 	if (ret) {
 		CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed "
 			      "(ret=%d)!\n", cdev->private->dev_id.ssid,
 			      cdev->private->dev_id.devno, ret);
-		put_device(&other_sch->dev);
+		/* Put reference for new parent. */
+		put_device(&sch->dev);
 		return;
 	}
 	sch_set_cdev(other_sch, NULL);
 	/* No need to keep a subchannel without ccw device around. */
 	css_sch_device_unregister(other_sch);
-	put_device(&other_sch->dev);
 	sch_attach_device(sch, cdev);
+	/* Put reference for old parent. */
+	put_device(&other_sch->dev);
 }
 
 static void sch_attach_orphaned_device(struct subchannel *sch,
 				       struct ccw_device *cdev)
 {
 	int ret;
+	struct subchannel *pseudo_sch;
 
-	/* Try to move the ccw device to its new subchannel. */
+	/* Get reference for new parent. */
+	if (!get_device(&sch->dev))
+		return;
+	pseudo_sch = to_subchannel(cdev->dev.parent);
+	/*
+	 * Try to move the ccw device to its new subchannel.
+	 * Note: device_move() changes cdev->dev.parent
+	 */
 	ret = device_move(&cdev->dev, &sch->dev);
 	if (ret) {
 		CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage "
 			      "failed (ret=%d)!\n",
 			      cdev->private->dev_id.ssid,
 			      cdev->private->dev_id.devno, ret);
+		/* Put reference for new parent. */
+		put_device(&sch->dev);
 		return;
 	}
 	sch_attach_device(sch, cdev);
+	/* Put reference on pseudo subchannel. */
+	put_device(&pseudo_sch->dev);
 }
 
 static void sch_create_and_recog_new_device(struct subchannel *sch)

@@ -830,9 +864,11 @@ static void sch_create_and_recog_new_device(struct subchannel *sch)
 		spin_lock_irq(sch->lock);
 		sch_set_cdev(sch, NULL);
 		spin_unlock_irq(sch->lock);
-		if (cdev->dev.release)
-			cdev->dev.release(&cdev->dev);
 		css_sch_device_unregister(sch);
+		/* Put reference from io_subchannel_create_ccwdev(). */
+		put_device(&sch->dev);
+		/* Give up initial reference. */
+		put_device(&cdev->dev);
 	}
 }

@@ -854,15 +890,20 @@ void ccw_device_move_to_orphanage(struct work_struct *work)
 	dev_id.devno = sch->schib.pmcw.dev;
 	dev_id.ssid = sch->schid.ssid;
 
+	/* Increase refcount for pseudo subchannel. */
+	get_device(&css->pseudo_subchannel->dev);
 	/*
 	 * Move the orphaned ccw device to the orphanage so the replacing
 	 * ccw device can take its place on the subchannel.
+	 * Note: device_move() changes cdev->dev.parent
 	 */
 	ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev);
 	if (ret) {
 		CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed "
 			      "(ret=%d)!\n", cdev->private->dev_id.ssid,
 			      cdev->private->dev_id.devno, ret);
+		/* Decrease refcount for pseudo subchannel again. */
+		put_device(&css->pseudo_subchannel->dev);
 		return;
 	}
 	cdev->ccwlock = css->pseudo_subchannel->lock;

@@ -875,17 +916,23 @@ void ccw_device_move_to_orphanage(struct work_struct *work)
 	if (replacing_cdev) {
 		sch_attach_disconnected_device(sch, replacing_cdev);
 		/* Release reference from get_disc_ccwdev_by_dev_id() */
-		put_device(&cdev->dev);
+		put_device(&replacing_cdev->dev);
+		/* Release reference of subchannel from old cdev. */
+		put_device(&sch->dev);
 		return;
 	}
 	replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id);
 	if (replacing_cdev) {
 		sch_attach_orphaned_device(sch, replacing_cdev);
 		/* Release reference from get_orphaned_ccwdev_by_dev_id() */
-		put_device(&cdev->dev);
+		put_device(&replacing_cdev->dev);
+		/* Release reference of subchannel from old cdev. */
+		put_device(&sch->dev);
 		return;
 	}
 	sch_create_and_recog_new_device(sch);
+	/* Release reference of subchannel from old cdev. */
+	put_device(&sch->dev);
 }
 
 /*

@@ -903,6 +950,14 @@ io_subchannel_register(struct work_struct *work)
 	priv = container_of(work, struct ccw_device_private, kick_work);
 	cdev = priv->cdev;
 	sch = to_subchannel(cdev->dev.parent);
+	/*
+	 * Check if subchannel is still registered. It may have become
+	 * unregistered if a machine check hit us after finishing
+	 * device recognition but before the register work could be
|
||||||
|
* queued.
|
||||||
|
*/
|
||||||
|
if (!device_is_registered(&sch->dev))
|
||||||
|
goto out_err;
|
||||||
css_update_ssd_info(sch);
|
css_update_ssd_info(sch);
|
||||||
/*
|
/*
|
||||||
* io_subchannel_register() will also be called after device
|
* io_subchannel_register() will also be called after device
|
||||||
|
@ -910,7 +965,7 @@ io_subchannel_register(struct work_struct *work)
|
||||||
* be registered). We need to reprobe since we may now have sense id
|
* be registered). We need to reprobe since we may now have sense id
|
||||||
* information.
|
* information.
|
||||||
*/
|
*/
|
||||||
if (klist_node_attached(&cdev->dev.knode_parent)) {
|
if (device_is_registered(&cdev->dev)) {
|
||||||
if (!cdev->drv) {
|
if (!cdev->drv) {
|
||||||
ret = device_reprobe(&cdev->dev);
|
ret = device_reprobe(&cdev->dev);
|
||||||
if (ret)
|
if (ret)
|
||||||
|
@ -934,22 +989,19 @@ io_subchannel_register(struct work_struct *work)
|
||||||
CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
|
CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
|
||||||
cdev->private->dev_id.ssid,
|
cdev->private->dev_id.ssid,
|
||||||
cdev->private->dev_id.devno, ret);
|
cdev->private->dev_id.devno, ret);
|
||||||
put_device(&cdev->dev);
|
|
||||||
spin_lock_irqsave(sch->lock, flags);
|
spin_lock_irqsave(sch->lock, flags);
|
||||||
sch_set_cdev(sch, NULL);
|
sch_set_cdev(sch, NULL);
|
||||||
spin_unlock_irqrestore(sch->lock, flags);
|
spin_unlock_irqrestore(sch->lock, flags);
|
||||||
kfree (cdev->private);
|
/* Release initial device reference. */
|
||||||
kfree (cdev);
|
put_device(&cdev->dev);
|
||||||
put_device(&sch->dev);
|
goto out_err;
|
||||||
if (atomic_dec_and_test(&ccw_device_init_count))
|
|
||||||
wake_up(&ccw_device_init_wq);
|
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
put_device(&cdev->dev);
|
|
||||||
out:
|
out:
|
||||||
cdev->private->flags.recog_done = 1;
|
cdev->private->flags.recog_done = 1;
|
||||||
put_device(&sch->dev);
|
|
||||||
wake_up(&cdev->private->wait_q);
|
wake_up(&cdev->private->wait_q);
|
||||||
|
out_err:
|
||||||
|
/* Release reference for workqueue processing. */
|
||||||
|
put_device(&cdev->dev);
|
||||||
if (atomic_dec_and_test(&ccw_device_init_count))
|
if (atomic_dec_and_test(&ccw_device_init_count))
|
||||||
wake_up(&ccw_device_init_wq);
|
wake_up(&ccw_device_init_wq);
|
||||||
}
|
}
|
||||||
|
@ -968,8 +1020,8 @@ static void ccw_device_call_sch_unregister(struct work_struct *work)
|
||||||
sch = to_subchannel(cdev->dev.parent);
|
sch = to_subchannel(cdev->dev.parent);
|
||||||
css_sch_device_unregister(sch);
|
css_sch_device_unregister(sch);
|
||||||
/* Reset intparm to zeroes. */
|
/* Reset intparm to zeroes. */
|
||||||
sch->schib.pmcw.intparm = 0;
|
sch->config.intparm = 0;
|
||||||
cio_modify(sch);
|
cio_commit_config(sch);
|
||||||
/* Release cdev reference for workqueue processing.*/
|
/* Release cdev reference for workqueue processing.*/
|
||||||
put_device(&cdev->dev);
|
put_device(&cdev->dev);
|
||||||
/* Release subchannel reference for local processing. */
|
/* Release subchannel reference for local processing. */
|
||||||
|
@ -998,8 +1050,6 @@ io_subchannel_recog_done(struct ccw_device *cdev)
|
||||||
PREPARE_WORK(&cdev->private->kick_work,
|
PREPARE_WORK(&cdev->private->kick_work,
|
||||||
ccw_device_call_sch_unregister);
|
ccw_device_call_sch_unregister);
|
||||||
queue_work(slow_path_wq, &cdev->private->kick_work);
|
queue_work(slow_path_wq, &cdev->private->kick_work);
|
||||||
/* Release subchannel reference for asynchronous recognition. */
|
|
||||||
put_device(&sch->dev);
|
|
||||||
if (atomic_dec_and_test(&ccw_device_init_count))
|
if (atomic_dec_and_test(&ccw_device_init_count))
|
||||||
wake_up(&ccw_device_init_wq);
|
wake_up(&ccw_device_init_wq);
|
||||||
break;
|
break;
|
||||||
|
@ -1070,10 +1120,15 @@ static void ccw_device_move_to_sch(struct work_struct *work)
|
||||||
priv = container_of(work, struct ccw_device_private, kick_work);
|
priv = container_of(work, struct ccw_device_private, kick_work);
|
||||||
sch = priv->sch;
|
sch = priv->sch;
|
||||||
cdev = priv->cdev;
|
cdev = priv->cdev;
|
||||||
former_parent = ccw_device_is_orphan(cdev) ?
|
former_parent = to_subchannel(cdev->dev.parent);
|
||||||
NULL : to_subchannel(get_device(cdev->dev.parent));
|
/* Get reference for new parent. */
|
||||||
|
if (!get_device(&sch->dev))
|
||||||
|
return;
|
||||||
mutex_lock(&sch->reg_mutex);
|
mutex_lock(&sch->reg_mutex);
|
||||||
/* Try to move the ccw device to its new subchannel. */
|
/*
|
||||||
|
* Try to move the ccw device to its new subchannel.
|
||||||
|
* Note: device_move() changes cdev->dev.parent
|
||||||
|
*/
|
||||||
rc = device_move(&cdev->dev, &sch->dev);
|
rc = device_move(&cdev->dev, &sch->dev);
|
||||||
mutex_unlock(&sch->reg_mutex);
|
mutex_unlock(&sch->reg_mutex);
|
||||||
if (rc) {
|
if (rc) {
|
||||||
|
@ -1083,21 +1138,23 @@ static void ccw_device_move_to_sch(struct work_struct *work)
|
||||||
cdev->private->dev_id.devno, sch->schid.ssid,
|
cdev->private->dev_id.devno, sch->schid.ssid,
|
||||||
sch->schid.sch_no, rc);
|
sch->schid.sch_no, rc);
|
||||||
css_sch_device_unregister(sch);
|
css_sch_device_unregister(sch);
|
||||||
|
/* Put reference for new parent again. */
|
||||||
|
put_device(&sch->dev);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
if (former_parent) {
|
if (!sch_is_pseudo_sch(former_parent)) {
|
||||||
spin_lock_irq(former_parent->lock);
|
spin_lock_irq(former_parent->lock);
|
||||||
sch_set_cdev(former_parent, NULL);
|
sch_set_cdev(former_parent, NULL);
|
||||||
spin_unlock_irq(former_parent->lock);
|
spin_unlock_irq(former_parent->lock);
|
||||||
css_sch_device_unregister(former_parent);
|
css_sch_device_unregister(former_parent);
|
||||||
/* Reset intparm to zeroes. */
|
/* Reset intparm to zeroes. */
|
||||||
former_parent->schib.pmcw.intparm = 0;
|
former_parent->config.intparm = 0;
|
||||||
cio_modify(former_parent);
|
cio_commit_config(former_parent);
|
||||||
}
|
}
|
||||||
sch_attach_device(sch, cdev);
|
sch_attach_device(sch, cdev);
|
||||||
out:
|
out:
|
||||||
if (former_parent)
|
/* Put reference for old parent. */
|
||||||
put_device(&former_parent->dev);
|
put_device(&former_parent->dev);
|
||||||
put_device(&cdev->dev);
|
put_device(&cdev->dev);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1113,6 +1170,15 @@ static void io_subchannel_irq(struct subchannel *sch)
|
||||||
dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
|
dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void io_subchannel_init_config(struct subchannel *sch)
|
||||||
|
{
|
||||||
|
memset(&sch->config, 0, sizeof(sch->config));
|
||||||
|
sch->config.csense = 1;
|
||||||
|
/* Use subchannel mp mode when there is more than 1 installed CHPID. */
|
||||||
|
if ((sch->schib.pmcw.pim & (sch->schib.pmcw.pim - 1)) != 0)
|
||||||
|
sch->config.mp = 1;
|
||||||
|
}
|
||||||
|
|
||||||
static void io_subchannel_init_fields(struct subchannel *sch)
|
static void io_subchannel_init_fields(struct subchannel *sch)
|
||||||
{
|
{
|
||||||
if (cio_is_console(sch->schid))
|
if (cio_is_console(sch->schid))
|
||||||
|
@ -1127,18 +1193,34 @@ static void io_subchannel_init_fields(struct subchannel *sch)
|
||||||
sch->schib.pmcw.dev, sch->schid.ssid,
|
sch->schib.pmcw.dev, sch->schid.ssid,
|
||||||
sch->schid.sch_no, sch->schib.pmcw.pim,
|
sch->schid.sch_no, sch->schib.pmcw.pim,
|
||||||
sch->schib.pmcw.pam, sch->schib.pmcw.pom);
|
sch->schib.pmcw.pam, sch->schib.pmcw.pom);
|
||||||
/* Initially set up some fields in the pmcw. */
|
|
||||||
sch->schib.pmcw.ena = 0;
|
io_subchannel_init_config(sch);
|
||||||
sch->schib.pmcw.csense = 1; /* concurrent sense */
|
|
||||||
if ((sch->lpm & (sch->lpm - 1)) != 0)
|
|
||||||
sch->schib.pmcw.mp = 1; /* multipath mode */
|
|
||||||
/* clean up possible residual cmf stuff */
|
|
||||||
sch->schib.pmcw.mme = 0;
|
|
||||||
sch->schib.pmcw.mbfc = 0;
|
|
||||||
sch->schib.pmcw.mbi = 0;
|
|
||||||
sch->schib.mba = 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void io_subchannel_do_unreg(struct work_struct *work)
|
||||||
|
{
|
||||||
|
struct subchannel *sch;
|
||||||
|
|
||||||
|
sch = container_of(work, struct subchannel, work);
|
||||||
|
css_sch_device_unregister(sch);
|
||||||
|
/* Reset intparm to zeroes. */
|
||||||
|
sch->config.intparm = 0;
|
||||||
|
cio_commit_config(sch);
|
||||||
|
put_device(&sch->dev);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Schedule unregister if we have no cdev. */
|
||||||
|
static void io_subchannel_schedule_removal(struct subchannel *sch)
|
||||||
|
{
|
||||||
|
get_device(&sch->dev);
|
||||||
|
INIT_WORK(&sch->work, io_subchannel_do_unreg);
|
||||||
|
queue_work(slow_path_wq, &sch->work);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Note: We always return 0 so that we bind to the device even on error.
|
||||||
|
* This is needed so that our remove function is called on unregister.
|
||||||
|
*/
|
||||||
static int io_subchannel_probe(struct subchannel *sch)
|
static int io_subchannel_probe(struct subchannel *sch)
|
||||||
{
|
{
|
||||||
struct ccw_device *cdev;
|
struct ccw_device *cdev;
|
||||||
|
@ -1168,9 +1250,8 @@ static int io_subchannel_probe(struct subchannel *sch)
|
||||||
ccw_device_register(cdev);
|
ccw_device_register(cdev);
|
||||||
/*
|
/*
|
||||||
* Check if the device is already online. If it is
|
* Check if the device is already online. If it is
|
||||||
* the reference count needs to be corrected
|
* the reference count needs to be corrected since we
|
||||||
* (see ccw_device_online and css_init_done for the
|
* didn't obtain a reference in ccw_device_set_online.
|
||||||
* ugly details).
|
|
||||||
*/
|
*/
|
||||||
if (cdev->private->state != DEV_STATE_NOT_OPER &&
|
if (cdev->private->state != DEV_STATE_NOT_OPER &&
|
||||||
cdev->private->state != DEV_STATE_OFFLINE &&
|
cdev->private->state != DEV_STATE_OFFLINE &&
|
||||||
|
@ -1179,23 +1260,24 @@ static int io_subchannel_probe(struct subchannel *sch)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
io_subchannel_init_fields(sch);
|
io_subchannel_init_fields(sch);
|
||||||
|
rc = cio_commit_config(sch);
|
||||||
|
if (rc)
|
||||||
|
goto out_schedule;
|
||||||
|
rc = sysfs_create_group(&sch->dev.kobj,
|
||||||
|
&io_subchannel_attr_group);
|
||||||
|
if (rc)
|
||||||
|
goto out_schedule;
|
||||||
|
/* Allocate I/O subchannel private data. */
|
||||||
|
sch->private = kzalloc(sizeof(struct io_subchannel_private),
|
||||||
|
GFP_KERNEL | GFP_DMA);
|
||||||
|
if (!sch->private)
|
||||||
|
goto out_err;
|
||||||
/*
|
/*
|
||||||
* First check if a fitting device may be found amongst the
|
* First check if a fitting device may be found amongst the
|
||||||
* disconnected devices or in the orphanage.
|
* disconnected devices or in the orphanage.
|
||||||
*/
|
*/
|
||||||
dev_id.devno = sch->schib.pmcw.dev;
|
dev_id.devno = sch->schib.pmcw.dev;
|
||||||
dev_id.ssid = sch->schid.ssid;
|
dev_id.ssid = sch->schid.ssid;
|
||||||
rc = sysfs_create_group(&sch->dev.kobj,
|
|
||||||
&io_subchannel_attr_group);
|
|
||||||
if (rc)
|
|
||||||
return rc;
|
|
||||||
/* Allocate I/O subchannel private data. */
|
|
||||||
sch->private = kzalloc(sizeof(struct io_subchannel_private),
|
|
||||||
GFP_KERNEL | GFP_DMA);
|
|
||||||
if (!sch->private) {
|
|
||||||
rc = -ENOMEM;
|
|
||||||
goto out_err;
|
|
||||||
}
|
|
||||||
cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
|
cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
|
||||||
if (!cdev)
|
if (!cdev)
|
||||||
cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
|
cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
|
||||||
|
@ -1213,24 +1295,21 @@ static int io_subchannel_probe(struct subchannel *sch)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
cdev = io_subchannel_create_ccwdev(sch);
|
cdev = io_subchannel_create_ccwdev(sch);
|
||||||
if (IS_ERR(cdev)) {
|
if (IS_ERR(cdev))
|
||||||
rc = PTR_ERR(cdev);
|
|
||||||
goto out_err;
|
goto out_err;
|
||||||
}
|
|
||||||
rc = io_subchannel_recog(cdev, sch);
|
rc = io_subchannel_recog(cdev, sch);
|
||||||
if (rc) {
|
if (rc) {
|
||||||
spin_lock_irqsave(sch->lock, flags);
|
spin_lock_irqsave(sch->lock, flags);
|
||||||
sch_set_cdev(sch, NULL);
|
io_subchannel_recog_done(cdev);
|
||||||
spin_unlock_irqrestore(sch->lock, flags);
|
spin_unlock_irqrestore(sch->lock, flags);
|
||||||
if (cdev->dev.release)
|
|
||||||
cdev->dev.release(&cdev->dev);
|
|
||||||
goto out_err;
|
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
out_err:
|
out_err:
|
||||||
kfree(sch->private);
|
kfree(sch->private);
|
||||||
sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
|
sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
|
||||||
return rc;
|
out_schedule:
|
||||||
|
io_subchannel_schedule_removal(sch);
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
|
@ -1275,10 +1354,7 @@ static void io_subchannel_verify(struct subchannel *sch)
|
||||||
|
|
||||||
static int check_for_io_on_path(struct subchannel *sch, int mask)
|
static int check_for_io_on_path(struct subchannel *sch, int mask)
|
||||||
{
|
{
|
||||||
int cc;
|
if (cio_update_schib(sch))
|
||||||
|
|
||||||
cc = stsch(sch->schid, &sch->schib);
|
|
||||||
if (cc)
|
|
||||||
return 0;
|
return 0;
|
||||||
if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask)
|
if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask)
|
||||||
return 1;
|
return 1;
|
||||||
|
@ -1347,15 +1423,13 @@ static int io_subchannel_chp_event(struct subchannel *sch,
|
||||||
io_subchannel_verify(sch);
|
io_subchannel_verify(sch);
|
||||||
break;
|
break;
|
||||||
case CHP_OFFLINE:
|
case CHP_OFFLINE:
|
||||||
if (stsch(sch->schid, &sch->schib))
|
if (cio_update_schib(sch))
|
||||||
return -ENXIO;
|
|
||||||
if (!css_sch_is_valid(&sch->schib))
|
|
||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
io_subchannel_terminate_path(sch, mask);
|
io_subchannel_terminate_path(sch, mask);
|
||||||
break;
|
break;
|
||||||
case CHP_ONLINE:
|
case CHP_ONLINE:
|
||||||
if (stsch(sch->schid, &sch->schib))
|
if (cio_update_schib(sch))
|
||||||
return -ENXIO;
|
return -ENODEV;
|
||||||
sch->lpm |= mask & sch->opm;
|
sch->lpm |= mask & sch->opm;
|
||||||
io_subchannel_verify(sch);
|
io_subchannel_verify(sch);
|
||||||
break;
|
break;
|
||||||
|
@ -1610,8 +1684,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow)
|
||||||
spin_lock_irqsave(sch->lock, flags);
|
spin_lock_irqsave(sch->lock, flags);
|
||||||
|
|
||||||
/* Reset intparm to zeroes. */
|
/* Reset intparm to zeroes. */
|
||||||
sch->schib.pmcw.intparm = 0;
|
sch->config.intparm = 0;
|
||||||
cio_modify(sch);
|
cio_commit_config(sch);
|
||||||
break;
|
break;
|
||||||
case REPROBE:
|
case REPROBE:
|
||||||
ccw_device_trigger_reprobe(cdev);
|
ccw_device_trigger_reprobe(cdev);
|
||||||
|
@ -1652,6 +1726,9 @@ static int ccw_device_console_enable(struct ccw_device *cdev,
|
||||||
sch->private = cio_get_console_priv();
|
sch->private = cio_get_console_priv();
|
||||||
memset(sch->private, 0, sizeof(struct io_subchannel_private));
|
memset(sch->private, 0, sizeof(struct io_subchannel_private));
|
||||||
io_subchannel_init_fields(sch);
|
io_subchannel_init_fields(sch);
|
||||||
|
rc = cio_commit_config(sch);
|
||||||
|
if (rc)
|
||||||
|
return rc;
|
||||||
sch->driver = &io_subchannel_driver;
|
sch->driver = &io_subchannel_driver;
|
||||||
/* Initialize the ccw_device structure. */
|
/* Initialize the ccw_device structure. */
|
||||||
cdev->dev.parent= &sch->dev;
|
cdev->dev.parent= &sch->dev;
|
||||||
|
@ -1723,7 +1800,7 @@ __ccwdev_check_busid(struct device *dev, void *id)
|
||||||
|
|
||||||
bus_id = id;
|
bus_id = id;
|
||||||
|
|
||||||
return (strncmp(bus_id, dev_name(dev), BUS_ID_SIZE) == 0);
|
return (strcmp(bus_id, dev_name(dev)) == 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -1806,6 +1883,8 @@ ccw_device_remove (struct device *dev)
|
||||||
"device 0.%x.%04x\n",
|
"device 0.%x.%04x\n",
|
||||||
ret, cdev->private->dev_id.ssid,
|
ret, cdev->private->dev_id.ssid,
|
||||||
cdev->private->dev_id.devno);
|
cdev->private->dev_id.devno);
|
||||||
|
/* Give up reference obtained in ccw_device_set_online(). */
|
||||||
|
put_device(&cdev->dev);
|
||||||
}
|
}
|
||||||
ccw_device_set_timeout(cdev, 0);
|
ccw_device_set_timeout(cdev, 0);
|
||||||
cdev->drv = NULL;
|
cdev->drv = NULL;
|
||||||
|
|
|
@@ -76,6 +76,7 @@ extern wait_queue_head_t ccw_device_init_wq
 extern atomic_t ccw_device_init_count;
 
 void io_subchannel_recog_done(struct ccw_device *cdev);
+void io_subchannel_init_config(struct subchannel *sch);
 
 int ccw_device_cancel_halt_clear(struct ccw_device *);
 

@@ -140,8 +140,7 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
 	int ret;
 
 	sch = to_subchannel(cdev->dev.parent);
-	ret = stsch(sch->schid, &sch->schib);
-	if (ret || !sch->schib.pmcw.dnv)
+	if (cio_update_schib(sch))
 		return -ENODEV;
 	if (!sch->schib.pmcw.ena)
 		/* Not operational -> done. */
@@ -245,11 +244,13 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
 	 * through ssch() and the path information is up to date.
 	 */
 	old_lpm = sch->lpm;
-	stsch(sch->schid, &sch->schib);
-	sch->lpm = sch->schib.pmcw.pam & sch->opm;
+
 	/* Check since device may again have become not operational. */
-	if (!sch->schib.pmcw.dnv)
+	if (cio_update_schib(sch))
 		state = DEV_STATE_NOT_OPER;
+	else
+		sch->lpm = sch->schib.pmcw.pam & sch->opm;
+
 	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
 		/* Force reprobe on all chpids. */
 		old_lpm = 0;
@@ -399,9 +400,6 @@ ccw_device_done(struct ccw_device *cdev, int state)
 		ccw_device_oper_notify(cdev);
 	}
 	wake_up(&cdev->private->wait_q);
-
-	if (css_init_done && state != DEV_STATE_ONLINE)
-		put_device (&cdev->dev);
 }
 
 static int cmp_pgid(struct pgid *p1, struct pgid *p2)
@@ -552,7 +550,11 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
 
 	sch = to_subchannel(cdev->dev.parent);
 	/* Update schib - pom may have changed. */
-	stsch(sch->schid, &sch->schib);
+	if (cio_update_schib(sch)) {
+		cdev->private->flags.donotify = 0;
+		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+		return;
+	}
 	/* Update lpm with verified path mask. */
 	sch->lpm = sch->vpm;
 	/* Repeat path verification? */
@@ -611,8 +613,6 @@ ccw_device_online(struct ccw_device *cdev)
 	    (cdev->private->state != DEV_STATE_BOXED))
 		return -EINVAL;
 	sch = to_subchannel(cdev->dev.parent);
-	if (css_init_done && !get_device(&cdev->dev))
-		return -ENODEV;
 	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
 	if (ret != 0) {
 		/* Couldn't enable the subchannel for i/o. Sick device. */
@@ -672,7 +672,7 @@ ccw_device_offline(struct ccw_device *cdev)
 		return 0;
 	}
 	sch = to_subchannel(cdev->dev.parent);
-	if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
+	if (cio_update_schib(sch))
 		return -ENODEV;
 	if (scsw_actl(&sch->schib.scsw) != 0)
 		return -EBUSY;
@@ -750,7 +750,10 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
 	 * Since we might not just be coming from an interrupt from the
 	 * subchannel we have to update the schib.
 	 */
-	stsch(sch->schid, &sch->schib);
+	if (cio_update_schib(sch)) {
+		ccw_device_verify_done(cdev, -ENODEV);
+		return;
+	}
 
 	if (scsw_actl(&sch->schib.scsw) != 0 ||
 	    (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
@@ -1016,20 +1019,21 @@ void ccw_device_trigger_reprobe(struct ccw_device *cdev)
 
 	sch = to_subchannel(cdev->dev.parent);
 	/* Update some values. */
-	if (stsch(sch->schid, &sch->schib))
-		return;
-	if (!sch->schib.pmcw.dnv)
+	if (cio_update_schib(sch))
 		return;
 	/*
 	 * The pim, pam, pom values may not be accurate, but they are the best
 	 * we have before performing device selection :/
 	 */
 	sch->lpm = sch->schib.pmcw.pam & sch->opm;
-	/* Re-set some bits in the pmcw that were lost. */
-	sch->schib.pmcw.csense = 1;
-	sch->schib.pmcw.ena = 0;
-	if ((sch->lpm & (sch->lpm - 1)) != 0)
-		sch->schib.pmcw.mp = 1;
+	/*
+	 * Use the initial configuration since we can't be shure that the old
+	 * paths are valid.
+	 */
+	io_subchannel_init_config(sch);
+	if (cio_commit_config(sch))
+		return;
+
 	/* We should also udate ssd info, but this has to wait. */
 	/* Check if this is another device which appeared on the same sch. */
 	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {

@@ -504,7 +504,7 @@ ccw_device_verify_start(struct ccw_device *cdev)
 	sch->vpm = 0;
 
 	/* Get current pam. */
-	if (stsch(sch->schid, &sch->schib)) {
+	if (cio_update_schib(sch)) {
 		ccw_device_verify_done(cdev, -ENODEV);
 		return;
 	}
 
Some files were not shown because too many files changed in this diff.