/*
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <asm/elf.h>
#include <asm/lowcore.h>
#include <asm/param.h>
#include <asm/smp.h>

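/* Per-CPU copy of the CPU identification (version, identification, machine). */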
static DEFINE_PER_CPU(struct cpuid, cpu_id);
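/*
 * cpu_relax() hands the remaining time slice back to the hypervisor with
 * diagnose 0x44, but only when the CPUs do not run with multiple threads
 * (smp_cpu_mtid == 0) and the machine supports the diagnose.
 */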
void notrace cpu_relax(void)
{
	if (!smp_cpu_mtid && MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
	barrier();
}
EXPORT_SYMBOL(cpu_relax);

/*
 * cpu_init - initializes state that is per-CPU.
 */
void cpu_init(void)
{
	struct cpuid *id = this_cpu_ptr(&cpu_id);

	get_cpu_id(id);
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
}

/*
 * cpu_have_feature - Test CPU features on module initialization
 */
int cpu_have_feature(unsigned int num)
{
	return elf_hwcap & (1UL << num);
}
EXPORT_SYMBOL(cpu_have_feature);
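
/*
 * The feature number is an ELF hwcap bit: cpu_have_feature(n) tests bit n of
 * elf_hwcap, so the numbering matches hwcap_str[] in show_cpuinfo() below
 * (e.g. bit 10 corresponds to "te").
 */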

/*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	static const char *hwcap_str[] = {
		"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
		"edat", "etf3eh", "highgprs", "te", "vx"
	};
	unsigned long n = (unsigned long) v - 1;
	int i;

	if (!n) {
		s390_adjust_jiffies();
seq_printf(m, "vendor_id : IBM/S390\n"
|
|
|
|
"# processors : %i\n"
|
|
|
|
"bogomips per cpu: %lu.%02lu\n",
|
|
|
|
num_online_cpus(), loops_per_jiffy/(500000/HZ),
|
|
|
|
(loops_per_jiffy/(5000/HZ))%100);
|
|
|
|
seq_puts(m, "features\t: ");
		for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
			if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
				seq_printf(m, "%s ", hwcap_str[i]);
		seq_puts(m, "\n");
		show_cacheinfo(m);
	}
	get_online_cpus();
	if (cpu_online(n)) {
		struct cpuid *id = &per_cpu(cpu_id, n);
		seq_printf(m, "processor %li: "
			   "version = %02X, "
			   "identification = %06X, "
			   "machine = %04X\n",
			   n, id->version, id->ident, id->machine);
	}
	put_online_cpus();
	return 0;
}
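
/*
 * seq_file iterator for /proc/cpuinfo: position p (1-based) stands for
 * CPU p - 1, and the walk ends once p reaches nr_cpu_ids.
 */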

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}
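
/*
 * cpuinfo_op is picked up by the generic /proc/cpuinfo code in
 * fs/proc/cpuinfo.c, which hands it to seq_open() when the file is opened.
 */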

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};