/*
 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * Based on alpha version.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _ASM_POWERPC_OPROFILE_IMPL_H
#define _ASM_POWERPC_OPROFILE_IMPL_H
#ifdef __KERNEL__

#define OP_MAX_COUNTER 8

/* Per-counter configuration as set via oprofilefs. */
struct op_counter_config {
        unsigned long enabled;
        unsigned long event;
        unsigned long count;
        /* Classic doesn't support per-counter user/kernel selection */
        unsigned long kernel;
        unsigned long user;
        unsigned long unit_mask;
};
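
/*
 * Note (a hedged description of the usual flow, not text from the original
 * header): each of these fields is exposed through oprofilefs, e.g.
 * <mount>/0/{enabled,event,count,kernel,user,unit_mask} for counter 0, and
 * the values written there by the oprofile daemon are what reg_setup()
 * later consumes.
 */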

/* System-wide configuration as set via oprofilefs. */
struct op_system_config {
#ifdef CONFIG_PPC64
        unsigned long mmcr0;
        unsigned long mmcr1;
        unsigned long mmcra;
#endif
        unsigned long enable_kernel;
        unsigned long enable_user;
};
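
/*
 * Note: on 64-bit parts mmcr0/mmcr1/mmcra seed the performance monitor
 * control SPRs of the same names, while enable_kernel/enable_user are the
 * global kernel-space/user-space profiling switches (classic parts have no
 * per-counter selection, see above).
 */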

/* Per-arch configuration */
struct op_powerpc_model {
        int (*reg_setup) (struct op_counter_config *,
                          struct op_system_config *,
                          int num_counters);
        int (*cpu_setup) (struct op_counter_config *);
        int (*start) (struct op_counter_config *);
        int (*global_start) (struct op_counter_config *);
        void (*stop) (void);
        void (*global_stop) (void);
        int (*sync_start)(void);
        int (*sync_stop)(void);
        void (*handle_interrupt) (struct pt_regs *,
                                  struct op_counter_config *);
        int num_counters;
};
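
/*
 * Minimal sketch (guarded out, not part of the build) of how a CPU-family
 * driver under arch/powerpc/oprofile/ might fill in struct op_powerpc_model.
 * Only a subset of the hooks is shown, and every "example_*" name is an
 * assumption for illustration; see op_model_power4, op_model_7450 etc. for
 * the real implementations.
 */
#if 0
static int example_reg_setup(struct op_counter_config *ctr,
                             struct op_system_config *sys,
                             int num_ctrs)
{
        /* Translate the oprofilefs settings into PMU register values. */
        return 0;
}

static int example_start(struct op_counter_config *ctr)
{
        /* Program the counters and un-freeze the PMU on this CPU. */
        return 0;
}

static void example_stop(void)
{
        /* Freeze the counters on this CPU. */
}

static void example_handle_interrupt(struct pt_regs *regs,
                                     struct op_counter_config *ctr)
{
        /* Record a sample and re-arm whichever counter overflowed. */
}

struct op_powerpc_model op_model_example = {
        .reg_setup        = example_reg_setup,
        .start            = example_start,
        .stop             = example_stop,
        .handle_interrupt = example_handle_interrupt,
        .num_counters     = 6,
};
#endif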

extern struct op_powerpc_model op_model_fsl_booke;
extern struct op_powerpc_model op_model_rs64;
extern struct op_powerpc_model op_model_power4;
extern struct op_powerpc_model op_model_7450;
extern struct op_powerpc_model op_model_cell;
extern struct op_powerpc_model op_model_pa6t;

/* All the classic PPC parts use these */
static inline unsigned int classic_ctr_read(unsigned int i)
{
        switch (i) {
        case 0:
                return mfspr(SPRN_PMC1);
        case 1:
                return mfspr(SPRN_PMC2);
        case 2:
                return mfspr(SPRN_PMC3);
        case 3:
                return mfspr(SPRN_PMC4);
        case 4:
                return mfspr(SPRN_PMC5);
        case 5:
                return mfspr(SPRN_PMC6);

        /* No PPC32 chip has more than 6 so far */
#ifdef CONFIG_PPC64
        case 6:
                return mfspr(SPRN_PMC7);
        case 7:
                return mfspr(SPRN_PMC8);
#endif
        default:
                return 0;
        }
}

static inline void classic_ctr_write(unsigned int i, unsigned int val)
{
        switch (i) {
        case 0:
                mtspr(SPRN_PMC1, val);
                break;
        case 1:
                mtspr(SPRN_PMC2, val);
                break;
        case 2:
                mtspr(SPRN_PMC3, val);
                break;
        case 3:
                mtspr(SPRN_PMC4, val);
                break;
        case 4:
                mtspr(SPRN_PMC5, val);
                break;
        case 5:
                mtspr(SPRN_PMC6, val);
                break;

        /* No PPC32 chip has more than 6, yet */
#ifdef CONFIG_PPC64
        case 6:
                mtspr(SPRN_PMC7, val);
                break;
        case 7:
                mtspr(SPRN_PMC8, val);
                break;
#endif
        default:
                break;
        }
}
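
/*
 * Minimal sketch (guarded out, not part of the build) of the usual way a
 * classic model uses the helpers above: each counter is seeded so that it
 * overflows into bit 31 after "count" events, and the interrupt handler
 * re-arms any counter that has wrapped.  "reset_value",
 * "example_arm_counter" and "example_check_counter" are assumed names for
 * illustration only.
 */
#if 0
static unsigned long reset_value[OP_MAX_COUNTER];

static void example_arm_counter(int i, struct op_counter_config *ctr)
{
        reset_value[i] = 0x80000000UL - ctr[i].count;
        classic_ctr_write(i, reset_value[i]);
}

static void example_check_counter(int i)
{
        int val = classic_ctr_read(i);

        if (val < 0)    /* the counter has wrapped past bit 31 */
                classic_ctr_write(i, reset_value[i]);
}
#endif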

extern void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth);

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_OPROFILE_IMPL_H */