More ACPI and power management updates for v3.20-rc1

  - Revert two ACPI EC driver commits, one that broke system suspend
    on Acer Aspire S5 and one that depends on it (Rafael J Wysocki).
 
  - Fix a typo leading to an incorrect check in the exynos-ppmu devfreq
    driver (Dan Carpenter).
 
  - Add support for one more Broadwell CPU model to intel_idle (Len Brown).
 
  - Fix an obscure problem with state transitions related to interrupts
    in the speedstep-smi cpufreq driver (Mikulas Patocka).
 
  - Remove some unnecessary messages related to the "out of memory"
    condition from the core PM code (Quentin Lambert).
 
  - Update turbostat parameters and documentation, add support for
    one more Broadwell CPU model to it and modify it to skip
    printing disabled package C-states (Len Brown).
 
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2.0.22 (GNU/Linux)
 
 iQIcBAABCAAGBQJU3mwEAAoJEILEb/54YlRxts0P/04Z7E9xv01yQFV1rdGRfWcI
 FEOljYtoZXiChhXNb9oB0obnAxE37aUXmKY/RL2WVnNnfvt/AQx3Ue3wvuVwmv95
 Cef1hYJw34J4P9VnrCoyN086Z41FA0n6Yuxd85aUPKYXxRqFedhLSlXPMrV7otdU
 nyK599dHZl+6AfFfmV+rcLdVQEwNVZoLQ1GgpGTRD93gHJW//XcL4zB4q/4ZjKWD
 jfsfdv9mWmhHpk3dGup7vFDg6BHc/qSbXWPTLWLtdjHqG4DJdhfkLilPOlrDN7wM
 SNpsdLB4Q+dFrLHock3pX7K0ApgPB6B3+nJQm9DaetiboQKKy9Ju4lg/oguygLky
 dvZSLrbo8e2TB9njEV0TXDh/C9QXO2k6vwUB4N3oMx4e+TVFrWOtxbvnzRK4bKzi
 fWaZEDVACboFNO4orM5aGQSGeylrF51jX6DBmNN4e49RwBdqOqZyyKBbfn/NI+Zt
 vJh0vSI4jaGPW6trtNUyfZuaZH8TsD8N7jzzfI/H3Rh+wOSbDTpHY3Pha8t0yI+C
 Gn71ryMyKX+9G/1e1uysZJ9SQxyJJDCfGQ8iU34p7p6bEj2Iknh7rP9Bfer3Ymj5
 wGy1DNKSA+tIcDG2mcw7yb7/PtDX5zG4uUsGB4v5qqTNHFyNgHoqQ5XHphK513aD
 QbjoDeciau1vQpY4zZdv
 =y7ew
 -----END PGP SIGNATURE-----

Merge tag 'pm+acpi-3.20-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull more ACPI and power management updates from Rafael Wysocki:
 "These are two reverts related to a system suspend breakage caused by one
  of the recent commits, a fix for a recently introduced bug in devfreq,
  and a bunch of other things that didn't make it into my previous pull
  request but otherwise are ready to go.

  Specifics:

   - Revert two ACPI EC driver commits, one that broke system suspend on
     Acer Aspire S5 and one that depends on it (Rafael J Wysocki).

   - Fix a typo leading to an incorrect check in the exynos-ppmu devfreq
     driver (Dan Carpenter).

   - Add support for one more Broadwell CPU model to intel_idle (Len Brown).

   - Fix an obscure problem with state transitions related to interrupts
     in the speedstep-smi cpufreq driver (Mikulas Patocka).

   - Remove some unnecessary messages related to the "out of memory"
     condition from the core PM code (Quentin Lambert).

   - Update turbostat parameters and documentation, add support for one
     more Broadwell CPU model to it and modify it to skip printing
     disabled package C-states (Len Brown)"

* tag 'pm+acpi-3.20-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  PM / devfreq: event: testing the wrong variable
  cpufreq: speedstep-smi: enable interrupts when waiting
  PM / OPP / clk: Remove unnecessary OOM message
  Revert "ACPI / EC: Add query flushing support"
  Revert "ACPI / EC: Add GPE reference counting debugging messages"
  tools/power turbostat: support additional Broadwell model
  intel_idle: support additional Broadwell model
  tools/power turbostat: update parameters, documentation
  tools/power turbostat: Skip printing disabled package C-states
Linus Torvalds 2015-02-13 13:45:57 -08:00
Parents: db3ecdee1c c7fb90dfbe
Commit: 18320f2a68
10 changed files with 302 additions and 286 deletions

View file

@ -31,7 +31,6 @@
/* Uncomment next line to get verbose printout */
/* #define DEBUG */
#define DEBUG_REF 0
#define pr_fmt(fmt) "ACPI : EC: " fmt
#include <linux/kernel.h>
@ -77,9 +76,7 @@ enum ec_command {
* when trying to clear the EC */
enum {
EC_FLAGS_EVENT_ENABLED, /* Event is enabled */
EC_FLAGS_EVENT_PENDING, /* Event is pending */
EC_FLAGS_EVENT_DETECTED, /* Event is detected */
EC_FLAGS_QUERY_PENDING, /* Query is pending */
EC_FLAGS_HANDLERS_INSTALLED, /* Handlers for GPE and
* OpReg are installed */
EC_FLAGS_STARTED, /* Driver is started */
@ -91,13 +88,6 @@ enum {
#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
#define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */
#define ec_debug_ref(ec, fmt, ...) \
do { \
if (DEBUG_REF) \
pr_debug("%lu: " fmt, ec->reference_count, \
## __VA_ARGS__); \
} while (0)
/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
module_param(ec_delay, uint, 0644);
@ -161,12 +151,6 @@ static bool acpi_ec_flushed(struct acpi_ec *ec)
return ec->reference_count == 1;
}
static bool acpi_ec_has_pending_event(struct acpi_ec *ec)
{
return test_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags) ||
test_bit(EC_FLAGS_EVENT_PENDING, &ec->flags);
}
/* --------------------------------------------------------------------------
* EC Registers
* -------------------------------------------------------------------------- */
@ -334,99 +318,36 @@ static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
* the flush operation is not in
* progress
* @ec: the EC device
* @allow_event: whether event should be handled
*
* This function must be used before taking a new action that should hold
* the reference count. If this function returns false, then the action
* must be discarded or it will prevent the flush operation from being
* completed.
*
* During flushing, QR_EC command need to pass this check when there is a
* pending event, so that the reference count held for the pending event
* can be decreased by the completion of the QR_EC command.
*/
static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec,
bool allow_event)
static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
{
if (!acpi_ec_started(ec)) {
if (!allow_event || !acpi_ec_has_pending_event(ec))
if (!acpi_ec_started(ec))
return false;
}
acpi_ec_submit_request(ec);
return true;
}
static void acpi_ec_submit_event(struct acpi_ec *ec)
static void acpi_ec_submit_query(struct acpi_ec *ec)
{
if (!test_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags) ||
!test_bit(EC_FLAGS_EVENT_ENABLED, &ec->flags))
return;
/* Hold reference for pending event */
if (!acpi_ec_submit_flushable_request(ec, true))
return;
ec_debug_ref(ec, "Increase event\n");
if (!test_and_set_bit(EC_FLAGS_EVENT_PENDING, &ec->flags)) {
pr_debug("***** Event query started *****\n");
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
pr_debug("***** Event started *****\n");
schedule_work(&ec->work);
return;
}
acpi_ec_complete_request(ec);
ec_debug_ref(ec, "Decrease event\n");
}
static void acpi_ec_complete_event(struct acpi_ec *ec)
static void acpi_ec_complete_query(struct acpi_ec *ec)
{
if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
clear_bit(EC_FLAGS_EVENT_PENDING, &ec->flags);
pr_debug("***** Event query stopped *****\n");
/* Unhold reference for pending event */
acpi_ec_complete_request(ec);
ec_debug_ref(ec, "Decrease event\n");
/* Check if there is another SCI_EVT detected */
acpi_ec_submit_event(ec);
clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
pr_debug("***** Event stopped *****\n");
}
}
static void acpi_ec_submit_detection(struct acpi_ec *ec)
{
/* Hold reference for query submission */
if (!acpi_ec_submit_flushable_request(ec, false))
return;
ec_debug_ref(ec, "Increase query\n");
if (!test_and_set_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags)) {
pr_debug("***** Event detection blocked *****\n");
acpi_ec_submit_event(ec);
return;
}
acpi_ec_complete_request(ec);
ec_debug_ref(ec, "Decrease query\n");
}
static void acpi_ec_complete_detection(struct acpi_ec *ec)
{
if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
clear_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags);
pr_debug("***** Event detetion unblocked *****\n");
/* Unhold reference for query submission */
acpi_ec_complete_request(ec);
ec_debug_ref(ec, "Decrease query\n");
}
}
static void acpi_ec_enable_event(struct acpi_ec *ec)
{
unsigned long flags;
spin_lock_irqsave(&ec->lock, flags);
set_bit(EC_FLAGS_EVENT_ENABLED, &ec->flags);
/*
* An event may be pending even with SCI_EVT=0, so QR_EC should
* always be issued right after started.
*/
acpi_ec_submit_detection(ec);
spin_unlock_irqrestore(&ec->lock, flags);
}
static int ec_transaction_completed(struct acpi_ec *ec)
{
unsigned long flags;
@ -468,7 +389,6 @@ static void advance_transaction(struct acpi_ec *ec)
t->rdata[t->ri++] = acpi_ec_read_data(ec);
if (t->rlen == t->ri) {
t->flags |= ACPI_EC_COMMAND_COMPLETE;
acpi_ec_complete_event(ec);
if (t->command == ACPI_EC_COMMAND_QUERY)
pr_debug("***** Command(%s) hardware completion *****\n",
acpi_ec_cmd_string(t->command));
@ -479,7 +399,6 @@ static void advance_transaction(struct acpi_ec *ec)
} else if (t->wlen == t->wi &&
(status & ACPI_EC_FLAG_IBF) == 0) {
t->flags |= ACPI_EC_COMMAND_COMPLETE;
acpi_ec_complete_event(ec);
wakeup = true;
}
goto out;
@ -488,17 +407,16 @@ static void advance_transaction(struct acpi_ec *ec)
!(status & ACPI_EC_FLAG_SCI) &&
(t->command == ACPI_EC_COMMAND_QUERY)) {
t->flags |= ACPI_EC_COMMAND_POLL;
acpi_ec_complete_detection(ec);
acpi_ec_complete_query(ec);
t->rdata[t->ri++] = 0x00;
t->flags |= ACPI_EC_COMMAND_COMPLETE;
acpi_ec_complete_event(ec);
pr_debug("***** Command(%s) software completion *****\n",
acpi_ec_cmd_string(t->command));
wakeup = true;
} else if ((status & ACPI_EC_FLAG_IBF) == 0) {
acpi_ec_write_cmd(ec, t->command);
t->flags |= ACPI_EC_COMMAND_POLL;
acpi_ec_complete_detection(ec);
acpi_ec_complete_query(ec);
} else
goto err;
goto out;
@ -519,7 +437,7 @@ err:
}
out:
if (status & ACPI_EC_FLAG_SCI)
acpi_ec_submit_detection(ec);
acpi_ec_submit_query(ec);
if (wakeup && in_interrupt())
wake_up(&ec->wait);
}
@ -580,11 +498,10 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
/* start transaction */
spin_lock_irqsave(&ec->lock, tmp);
/* Enable GPE for command processing (IBF=0/OBF=1) */
if (!acpi_ec_submit_flushable_request(ec, true)) {
if (!acpi_ec_submit_flushable_request(ec)) {
ret = -EINVAL;
goto unlock;
}
ec_debug_ref(ec, "Increase command\n");
/* following two actions should be kept atomic */
ec->curr = t;
pr_debug("***** Command(%s) started *****\n",
@ -600,7 +517,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
ec->curr = NULL;
/* Disable GPE for command processing (IBF=0/OBF=1) */
acpi_ec_complete_request(ec);
ec_debug_ref(ec, "Decrease command\n");
unlock:
spin_unlock_irqrestore(&ec->lock, tmp);
return ret;
@ -762,10 +678,8 @@ static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
pr_debug("+++++ Starting EC +++++\n");
/* Enable GPE for event processing (SCI_EVT=1) */
if (!resuming) {
if (!resuming)
acpi_ec_submit_request(ec);
ec_debug_ref(ec, "Increase driver\n");
}
pr_info("+++++ EC started +++++\n");
}
spin_unlock_irqrestore(&ec->lock, flags);
@ -794,10 +708,8 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
wait_event(ec->wait, acpi_ec_stopped(ec));
spin_lock_irqsave(&ec->lock, flags);
/* Disable GPE for event processing (SCI_EVT=1) */
if (!suspending) {
if (!suspending)
acpi_ec_complete_request(ec);
ec_debug_ref(ec, "Decrease driver\n");
}
clear_bit(EC_FLAGS_STARTED, &ec->flags);
clear_bit(EC_FLAGS_STOPPED, &ec->flags);
pr_info("+++++ EC stopped +++++\n");
@ -967,9 +879,7 @@ static void acpi_ec_gpe_poller(struct work_struct *work)
{
struct acpi_ec *ec = container_of(work, struct acpi_ec, work);
pr_debug("***** Event poller started *****\n");
acpi_ec_query(ec, NULL);
pr_debug("***** Event poller stopped *****\n");
}
static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
@ -1039,6 +949,7 @@ static struct acpi_ec *make_acpi_ec(void)
if (!ec)
return NULL;
ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
mutex_init(&ec->mutex);
init_waitqueue_head(&ec->wait);
INIT_LIST_HEAD(&ec->list);
@ -1189,7 +1100,7 @@ static int acpi_ec_add(struct acpi_device *device)
ret = ec_install_handlers(ec);
/* EC is fully operational, allow queries */
acpi_ec_enable_event(ec);
clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
/* Clear stale _Q events if hardware might require that */
if (EC_FLAGS_CLEAR_ON_RESUME)
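
A user-space sketch of the reference-counting pattern the revert falls back
to may help when reading the hunks above: acpi_ec_submit_flushable_request()
only takes a reference while the driver is started, acpi_ec_complete_request()
drops it, and the stop path waits until only the driver's own reference is
left.  The names below (fake_ec, submit_request, and so on) are illustrative
stand-ins, not the kernel API, and the locking is omitted.

#include <stdbool.h>
#include <stdio.h>

struct fake_ec {
        unsigned long reference_count;
        bool started;
};

static void submit_request(struct fake_ec *ec)   { ec->reference_count++; }
static void complete_request(struct fake_ec *ec) { ec->reference_count--; }
static bool flushed(struct fake_ec *ec)          { return ec->reference_count == 1; }

/* Mirrors acpi_ec_submit_flushable_request() after the revert: a new
 * transaction is refused once the driver is no longer started, so the
 * flush in the stop path can complete. */
static bool submit_flushable_request(struct fake_ec *ec)
{
        if (!ec->started)
                return false;
        submit_request(ec);
        return true;
}

int main(void)
{
        struct fake_ec ec = { .reference_count = 0, .started = false };

        ec.started = true;
        submit_request(&ec);                /* driver's own reference, as in acpi_ec_start() */

        if (submit_flushable_request(&ec))  /* a command transaction takes a reference ... */
                complete_request(&ec);      /* ... and drops it when it completes */

        ec.started = false;                 /* acpi_ec_stop(): no new requests accepted */
        printf("new request accepted? %d, flushed? %d\n",
               submit_flushable_request(&ec), flushed(&ec));
        return 0;
}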

View file

@ -855,7 +855,6 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
if (!fw_priv) {
dev_err(device, "%s: kmalloc failed\n", __func__);
fw_priv = ERR_PTR(-ENOMEM);
goto exit;
}

View file

@ -81,10 +81,8 @@ static int __pm_clk_add(struct device *dev, const char *con_id,
return -EINVAL;
ce = kzalloc(sizeof(*ce), GFP_KERNEL);
if (!ce) {
dev_err(dev, "Not enough memory for clock entry.\n");
if (!ce)
return -ENOMEM;
}
if (con_id) {
ce->con_id = kstrdup(con_id, GFP_KERNEL);

View file

@ -474,10 +474,8 @@ static int _opp_add_dynamic(struct device *dev, unsigned long freq,
/* allocate new OPP node */
new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
if (!new_opp) {
dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
if (!new_opp)
return -ENOMEM;
}
/* Hold our list modification lock here */
mutex_lock(&dev_opp_list_lock);
@ -695,10 +693,8 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
/* keep the node allocated */
new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
if (!new_opp) {
dev_warn(dev, "%s: Unable to create OPP\n", __func__);
if (!new_opp)
return -ENOMEM;
}
mutex_lock(&dev_opp_list_lock);
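
The hunks above all converge on the same shape: when kzalloc()/kmalloc()
fails there is nothing useful to add with dev_err()/dev_warn(), because the
kernel's allocator already logs the failure, so the function simply
propagates -ENOMEM.  A small self-contained user-space analogue of that
shape (the struct and helper names are made up for illustration):

#include <errno.h>
#include <stdlib.h>

struct opp { unsigned long freq_hz; unsigned long u_volt; };

/* Returns 0 or a negative errno, like _opp_add_dynamic() above: on
 * allocation failure just propagate -ENOMEM, without printing an extra
 * "unable to create OPP" style message. */
static int add_opp(struct opp **out, unsigned long freq_hz, unsigned long u_volt)
{
        struct opp *new_opp = calloc(1, sizeof(*new_opp));

        if (!new_opp)
                return -ENOMEM;

        new_opp->freq_hz = freq_hz;
        new_opp->u_volt = u_volt;
        *out = new_opp;
        return 0;
}

int main(void)
{
        struct opp *opp;

        return add_opp(&opp, 1200000000UL, 1100000UL) ? 1 : 0;
}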

View file

@ -400,6 +400,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
pr_debug("previous speed is %u\n", prev_speed);
preempt_disable();
local_irq_save(flags);
/* switch to low state */
@ -464,6 +465,8 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
out:
local_irq_restore(flags);
preempt_enable();
return ret;
}
EXPORT_SYMBOL_GPL(speedstep_get_freqs);

View file

@ -156,6 +156,7 @@ static void speedstep_set_state(unsigned int state)
return;
/* Disable IRQs */
preempt_disable();
local_irq_save(flags);
command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
@ -166,9 +167,19 @@ static void speedstep_set_state(unsigned int state)
do {
if (retry) {
/*
* We need to enable interrupts, otherwise the blockage
* won't resolve.
*
* We disable preemption so that other processes don't
* run. If other processes were running, they could
* submit more DMA requests, making the blockage worse.
*/
pr_debug("retry %u, previous result %u, waiting...\n",
retry, result);
local_irq_enable();
mdelay(retry * 50);
local_irq_disable();
}
retry++;
__asm__ __volatile__(
@ -185,6 +196,7 @@ static void speedstep_set_state(unsigned int state)
/* enable IRQs */
local_irq_restore(flags);
preempt_enable();
if (new_state == state)
pr_debug("change to %u MHz succeeded after %u tries "
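
The comment added in this hunk is the heart of the fix: the SMI transition
can stall behind pending DMA, and that blockage only clears if interrupts
are allowed to run, so the retry loop re-enables IRQs around its back-off
delay while preemption stays disabled throughout.  Below is a compilable
user-space sketch of that ordering; the preempt_*/local_irq_*/mdelay macros
are no-op stand-ins for the real kernel helpers, and smi_transition() is a
made-up stub that "succeeds" on the third try.

#include <stdbool.h>
#include <stdio.h>

#define preempt_disable()       do { } while (0)
#define preempt_enable()        do { } while (0)
#define local_irq_save(f)       do { (f) = 0; } while (0)
#define local_irq_restore(f)    ((void)(f))
#define local_irq_enable()      do { } while (0)
#define local_irq_disable()     do { } while (0)
#define mdelay(ms)              ((void)(ms))

static int attempts;

static bool smi_transition(void)
{
        return ++attempts >= 3;
}

static void set_state(void)
{
        unsigned long flags;
        unsigned int retry = 0;

        preempt_disable();              /* keep other tasks from queueing more DMA */
        local_irq_save(flags);
        do {
                if (retry) {
                        /* Interrupts must run while we back off, otherwise
                         * the blockage the SMI is waiting on never resolves. */
                        local_irq_enable();
                        mdelay(retry * 50);
                        local_irq_disable();
                }
                retry++;
        } while (!smi_transition() && retry < 5);
        local_irq_restore(flags);
        preempt_enable();

        printf("transition done after %u tries\n", retry);
}

int main(void)
{
        set_state();
        return 0;
}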

View file

@ -327,8 +327,8 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
for (i = 0; i < info->num_events; i++) {
edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
if (IS_ERR(edev)) {
ret = PTR_ERR(edev);
if (IS_ERR(edev[i])) {
ret = PTR_ERR(edev[i]);
dev_err(&pdev->dev,
"failed to add devfreq-event device\n");
goto err;
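
The devfreq fix above is a one-character change that is easy to miss:
IS_ERR() was applied to the array pointer edev, which is never an error
value, instead of to the element edev[i] that the call just filled in, so
registration failures went unnoticed.  Here is a stand-alone sketch of the
corrected check, with simplified user-space stand-ins for the kernel's
IS_ERR()/PTR_ERR()/ERR_PTR() helpers:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO       4095
#define IS_ERR(p)       ((unsigned long)(void *)(p) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(p)      ((long)(void *)(p))
#define ERR_PTR(e)      ((void *)(long)(e))

int main(void)
{
        void *edev[3];
        int i, ret = 0;

        for (i = 0; i < 3; i++) {
                /* pretend the second registration fails */
                edev[i] = (i == 1) ? ERR_PTR(-ENOMEM) : (void *)&edev[i];

                /* The bug tested IS_ERR(edev); the fix tests the element: */
                if (IS_ERR(edev[i])) {
                        ret = (int)PTR_ERR(edev[i]);
                        fprintf(stderr, "failed to add devfreq-event device %d: %d\n",
                                i, ret);
                        break;
                }
        }
        return ret ? 1 : 0;
}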

View file

@ -727,6 +727,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
ICPU(0x46, idle_cpu_hsw),
ICPU(0x4d, idle_cpu_avn),
ICPU(0x3d, idle_cpu_bdw),
ICPU(0x47, idle_cpu_bdw),
ICPU(0x4f, idle_cpu_bdw),
ICPU(0x56, idle_cpu_bdw),
{}

View file

@ -9,7 +9,7 @@ turbostat \- Report processor frequency and idle statistics
.br
.B turbostat
.RB [ Options ]
.RB [ "\-i interval_sec" ]
.RB [ "\--interval seconds" ]
.SH DESCRIPTION
\fBturbostat \fP reports processor topology, frequency,
idle power-state statistics, temperature and power on X86 processors.
@ -18,31 +18,41 @@ The first method is to supply a
\fBcommand\fP, which is forked and statistics are printed
upon its completion.
The second method is to omit the command,
and turbodstat will print statistics every 5 seconds.
The 5-second interval can changed using the -i option.
and turbostat displays statistics every 5 seconds.
The 5-second interval can be changed using the --interval option.
Some information is not availalbe on older processors.
Some information is not available on older processors.
.SS Options
The \fB-p\fP option limits output to the 1st thread in 1st core of each package.
\fB--Counter MSR#\fP shows the delta of the specified 64-bit MSR counter.
.PP
The \fB-P\fP option limits output to the 1st thread in each Package.
\fB--counter MSR#\fP shows the delta of the specified 32-bit MSR counter.
.PP
The \fB-S\fP option limits output to a 1-line System Summary for each interval.
\fB--Dump\fP displays the raw counter values.
.PP
The \fB-v\fP option increases verbosity.
\fB--debug\fP displays additional system configuration information. Invoking this parameter
more than once may also enable internal turbostat debug information.
.PP
The \fB-c MSR#\fP option includes the delta of the specified 32-bit MSR counter.
\fB--interval seconds\fP overrides the default 5-second measurement interval.
.PP
The \fB-C MSR#\fP option includes the delta of the specified 64-bit MSR counter.
\fB--help\fP displays usage for the most common parameters.
.PP
The \fB-m MSR#\fP option includes the the specified 32-bit MSR value.
\fB--Joules\fP displays energy in Joules, rather than dividing Joules by time to print power in Watts.
.PP
The \fB-M MSR#\fP option includes the the specified 64-bit MSR value.
\fB--MSR MSR#\fP shows the specified 64-bit MSR value.
.PP
The \fB-i interval_sec\fP option prints statistics every \fiinterval_sec\fP seconds.
The default is 5 seconds.
\fB--msr MSR#\fP shows the specified 32-bit MSR value.
.PP
The \fBcommand\fP parameter forks \fBcommand\fP and upon its exit,
\fB--Package\fP limits output to the system summary plus the 1st thread in each Package.
.PP
\fB--processor\fP limits output to the system summary plus the 1st thread in each processor of each package. Ie. it skips hyper-threaded siblings.
.PP
\fB--Summary\fP limits output to a 1-line System Summary for each interval.
.PP
\fB--TCC temperature\fP sets the Thermal Control Circuit temperature for systems which do not export that value. This is used for making sense of the Digital Thermal Sensor outputs, as they return degrees Celsius below the TCC activation temperature.
.PP
\fB--version\fP displays the version.
.PP
The \fBcommand\fP parameter forks \fBcommand\fP, and upon its exit,
displays the statistics gathered since it was forked.
.PP
.SH FIELD DESCRIPTIONS
@ -52,7 +62,7 @@ displays the statistics gathered since it was forked.
\fBCPU\fP Linux CPU (logical processor) number.
Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading Technology.
\fBAVG_MHz\fP number of cycles executed divided by time elapsed.
\fB%Buzy\fP percent of the interval that the CPU retired instructions, aka. % of time in "C0" state.
\fB%Busy\fP percent of the interval that the CPU retired instructions, aka. % of time in "C0" state.
\fBBzy_MHz\fP average clock rate while the CPU was busy (in "c0" state).
\fBTSC_MHz\fP average MHz that the TSC ran during the entire interval.
\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states.
@ -68,7 +78,7 @@ Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading T
.fi
.PP
.SH EXAMPLE
Without any parameters, turbostat prints out counters ever 5 seconds.
Without any parameters, turbostat displays statistics ever 5 seconds.
(override interval with "-i sec" option, or specify a command
for turbostat to fork).
@ -91,19 +101,19 @@ Subsequent rows show per-CPU statistics.
3 3 3 0.20 1596 3492 0 0.44 0.00 99.37 0.00 23
3 7 5 0.31 1596 3492 0 0.33
.fi
.SH VERBOSE EXAMPLE
The "-v" option adds verbosity to the output:
.SH DEBUG EXAMPLE
The "--debug" option prints additional system information before measurements:
.nf
[root@ivy]# turbostat -v
turbostat v3.0 November 23, 2012 - Len Brown <lenb@kernel.org>
turbostat version 4.0 10-Feb, 2015 - Len Brown <lenb@kernel.org>
CPUID(0): GenuineIntel 13 CPUID levels; family:model:stepping 0x6:3a:9 (6:58:9)
CPUID(6): APERF, DTS, PTM, EPB
RAPL: 851 sec. Joule Counter Range
RAPL: 851 sec. Joule Counter Range, at 77 Watts
cpu0: MSR_NHM_PLATFORM_INFO: 0x81010f0012300
16 * 100 = 1600 MHz max efficiency
35 * 100 = 3500 MHz TSC frequency
cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x1e008402 (UNdemote-C3, UNdemote-C1, demote-C3, demote-C1, locked: pkg-cstate-limit=2: pc6-noret)
cpu0: MSR_IA32_POWER_CTL: 0x0014005d (C1E auto-promotion: DISabled)
cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x1e008402 (UNdemote-C3, UNdemote-C1, demote-C3, demote-C1, locked: pkg-cstate-limit=2: pc6n)
cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x25262727
37 * 100 = 3700 MHz max turbo 4 active cores
38 * 100 = 3800 MHz max turbo 3 active cores
@ -112,9 +122,9 @@ cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x25262727
cpu0: MSR_IA32_ENERGY_PERF_BIAS: 0x00000006 (balanced)
cpu0: MSR_RAPL_POWER_UNIT: 0x000a1003 (0.125000 Watts, 0.000015 Joules, 0.000977 sec.)
cpu0: MSR_PKG_POWER_INFO: 0x01e00268 (77 W TDP, RAPL 60 - 0 W, 0.000000 sec.)
cpu0: MSR_PKG_POWER_LIMIT: 0x830000148268 (UNlocked)
cpu0: MSR_PKG_POWER_LIMIT: 0x30000148268 (UNlocked)
cpu0: PKG Limit #1: ENabled (77.000000 Watts, 1.000000 sec, clamp DISabled)
cpu0: PKG Limit #2: ENabled (96.000000 Watts, 0.000977* sec, clamp DISabled)
cpu0: PKG Limit #2: DISabled (96.000000 Watts, 0.000977* sec, clamp DISabled)
cpu0: MSR_PP0_POLICY: 0
cpu0: MSR_PP0_POWER_LIMIT: 0x00000000 (UNlocked)
cpu0: Cores Limit: DISabled (0.000000 Watts, 0.000977 sec, clamp DISabled)
@ -123,9 +133,9 @@ cpu0: MSR_PP1_POWER_LIMIT: 0x00000000 (UNlocked)
cpu0: GFX Limit: DISabled (0.000000 Watts, 0.000977 sec, clamp DISabled)
cpu0: MSR_IA32_TEMPERATURE_TARGET: 0x00691400 (105 C)
cpu0: MSR_IA32_PACKAGE_THERM_STATUS: 0x884e0000 (27 C)
cpu0: MSR_IA32_THERM_STATUS: 0x88560000 (19 C +/- 1)
cpu1: MSR_IA32_THERM_STATUS: 0x88560000 (19 C +/- 1)
cpu2: MSR_IA32_THERM_STATUS: 0x88540000 (21 C +/- 1)
cpu0: MSR_IA32_THERM_STATUS: 0x88580000 (17 C +/- 1)
cpu1: MSR_IA32_THERM_STATUS: 0x885a0000 (15 C +/- 1)
cpu2: MSR_IA32_THERM_STATUS: 0x88570000 (18 C +/- 1)
cpu3: MSR_IA32_THERM_STATUS: 0x884e0000 (27 C +/- 1)
...
.fi
@ -195,7 +205,7 @@ in those kernels.
AVG_MHz = APERF_delta/measurement_interval. This is the actual
number of elapsed cycles divided by the entire sample interval --
including idle time. Note that this calculation is resiliant
including idle time. Note that this calculation is resilient
to systems lacking a non-stop TSC.
TSC_MHz = TSC_delta/measurement_interval.
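
The field descriptions above amount to simple arithmetic over the
APERF/MPERF/TSC deltas gathered each interval.  A worked example with
made-up sample numbers follows; AVG_MHz and TSC_MHz use the formulas stated
above, while the %Busy and Bzy_MHz derivations use the conventional
MPERF/TSC and APERF/MPERF ratios, which is an assumption about turbostat's
internals rather than something spelled out in this page.

#include <stdio.h>

int main(void)
{
        double interval_sec = 5.0;
        double aperf_delta  = 1.25e9;   /* cycles actually executed */
        double mperf_delta  = 2.5e9;    /* reference cycles while not halted */
        double tsc_delta    = 17.5e9;   /* always-running timestamp counter */

        double tsc_mhz = tsc_delta / interval_sec / 1e6;
        double avg_mhz = aperf_delta / interval_sec / 1e6;      /* AVG_MHz */
        double busy    = 100.0 * mperf_delta / tsc_delta;       /* %Busy   */
        double bzy_mhz = tsc_mhz * aperf_delta / mperf_delta;   /* Bzy_MHz */

        printf("AVG_MHz %.0f  %%Busy %.2f  Bzy_MHz %.0f  TSC_MHz %.0f\n",
               avg_mhz, busy, bzy_mhz, tsc_mhz);
        return 0;
}

With these numbers the CPU is busy about 14.3% of the time at roughly
1750 MHz, giving an average of 250 MHz over the whole interval.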

View file

@ -33,6 +33,7 @@
#include <signal.h>
#include <sys/time.h>
#include <stdlib.h>
#include <getopt.h>
#include <dirent.h>
#include <string.h>
#include <ctype.h>
@ -42,17 +43,19 @@
#include <errno.h>
char *proc_stat = "/proc/stat";
unsigned int interval_sec = 5; /* set with -i interval_sec */
unsigned int verbose; /* set with -v */
unsigned int rapl_verbose; /* set with -R */
unsigned int rapl_joules; /* set with -J */
unsigned int thermal_verbose; /* set with -T */
unsigned int summary_only; /* set with -S */
unsigned int dump_only; /* set with -s */
unsigned int interval_sec = 5;
unsigned int debug;
unsigned int rapl_joules;
unsigned int summary_only;
unsigned int dump_only;
unsigned int skip_c0;
unsigned int skip_c1;
unsigned int do_nhm_cstates;
unsigned int do_snb_cstates;
unsigned int do_pc2;
unsigned int do_pc3;
unsigned int do_pc6;
unsigned int do_pc7;
unsigned int do_c8_c9_c10;
unsigned int do_slm_cstates;
unsigned int use_c1_residency_msr;
@ -313,13 +316,13 @@ void print_header(void)
if (do_ptm)
outp += sprintf(outp, " PkgTmp");
if (do_snb_cstates)
if (do_pc2)
outp += sprintf(outp, " Pkg%%pc2");
if (do_nhm_cstates && !do_slm_cstates)
if (do_pc3)
outp += sprintf(outp, " Pkg%%pc3");
if (do_nhm_cstates && !do_slm_cstates)
if (do_pc6)
outp += sprintf(outp, " Pkg%%pc6");
if (do_snb_cstates)
if (do_pc7)
outp += sprintf(outp, " Pkg%%pc7");
if (do_c8_c9_c10) {
outp += sprintf(outp, " Pkg%%pc8");
@ -394,8 +397,11 @@ int dump_counters(struct thread_data *t, struct core_data *c,
if (p) {
outp += sprintf(outp, "package: %d\n", p->package_id);
outp += sprintf(outp, "pc2: %016llX\n", p->pc2);
if (do_pc3)
outp += sprintf(outp, "pc3: %016llX\n", p->pc3);
if (do_pc6)
outp += sprintf(outp, "pc6: %016llX\n", p->pc6);
if (do_pc7)
outp += sprintf(outp, "pc7: %016llX\n", p->pc7);
outp += sprintf(outp, "pc8: %016llX\n", p->pc8);
outp += sprintf(outp, "pc9: %016llX\n", p->pc9);
@ -528,13 +534,13 @@ int format_counters(struct thread_data *t, struct core_data *c,
if (do_ptm)
outp += sprintf(outp, "%8d", p->pkg_temp_c);
if (do_snb_cstates)
if (do_pc2)
outp += sprintf(outp, "%8.2f", 100.0 * p->pc2/t->tsc);
if (do_nhm_cstates && !do_slm_cstates)
if (do_pc3)
outp += sprintf(outp, "%8.2f", 100.0 * p->pc3/t->tsc);
if (do_nhm_cstates && !do_slm_cstates)
if (do_pc6)
outp += sprintf(outp, "%8.2f", 100.0 * p->pc6/t->tsc);
if (do_snb_cstates)
if (do_pc7)
outp += sprintf(outp, "%8.2f", 100.0 * p->pc7/t->tsc);
if (do_c8_c9_c10) {
outp += sprintf(outp, "%8.2f", 100.0 * p->pc8/t->tsc);
@ -631,8 +637,11 @@ void
delta_package(struct pkg_data *new, struct pkg_data *old)
{
old->pc2 = new->pc2 - old->pc2;
if (do_pc3)
old->pc3 = new->pc3 - old->pc3;
if (do_pc6)
old->pc6 = new->pc6 - old->pc6;
if (do_pc7)
old->pc7 = new->pc7 - old->pc7;
old->pc8 = new->pc8 - old->pc8;
old->pc9 = new->pc9 - old->pc9;
@ -717,7 +726,7 @@ delta_thread(struct thread_data *new, struct thread_data *old,
}
if (old->mperf == 0) {
if (verbose > 1) fprintf(stderr, "cpu%d MPERF 0!\n", old->cpu_id);
if (debug > 1) fprintf(stderr, "cpu%d MPERF 0!\n", old->cpu_id);
old->mperf = 1; /* divide by 0 protection */
}
@ -774,8 +783,11 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
c->core_temp_c = 0;
p->pc2 = 0;
if (do_pc3)
p->pc3 = 0;
if (do_pc6)
p->pc6 = 0;
if (do_pc7)
p->pc7 = 0;
p->pc8 = 0;
p->pc9 = 0;
@ -815,8 +827,11 @@ int sum_counters(struct thread_data *t, struct core_data *c,
return 0;
average.packages.pc2 += p->pc2;
if (do_pc3)
average.packages.pc3 += p->pc3;
if (do_pc6)
average.packages.pc6 += p->pc6;
if (do_pc7)
average.packages.pc7 += p->pc7;
average.packages.pc8 += p->pc8;
average.packages.pc9 += p->pc9;
@ -859,8 +874,11 @@ void compute_average(struct thread_data *t, struct core_data *c,
average.cores.c7 /= topo.num_cores;
average.packages.pc2 /= topo.num_packages;
if (do_pc3)
average.packages.pc3 /= topo.num_packages;
if (do_pc6)
average.packages.pc6 /= topo.num_packages;
if (do_pc7)
average.packages.pc7 /= topo.num_packages;
average.packages.pc8 /= topo.num_packages;
@ -961,18 +979,18 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
return 0;
if (do_nhm_cstates && !do_slm_cstates) {
if (do_pc3)
if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
return -9;
if (do_pc6)
if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
return -10;
}
if (do_snb_cstates) {
if (do_pc2)
if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2))
return -11;
if (do_pc7)
if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
return -12;
}
if (do_c8_c9_c10) {
if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
return -13;
@ -1019,6 +1037,37 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
return 0;
}
/*
* MSR_PKG_CST_CONFIG_CONTROL decoding for pkg_cstate_limit:
* If you change the values, note they are used both in comparisons
* (>= PCL__7) and to index pkg_cstate_limit_strings[].
*/
#define PCLUKN 0 /* Unknown */
#define PCLRSV 1 /* Reserved */
#define PCL__0 2 /* PC0 */
#define PCL__1 3 /* PC1 */
#define PCL__2 4 /* PC2 */
#define PCL__3 5 /* PC3 */
#define PCL__4 6 /* PC4 */
#define PCL__6 7 /* PC6 */
#define PCL_6N 8 /* PC6 No Retention */
#define PCL_6R 9 /* PC6 Retention */
#define PCL__7 10 /* PC7 */
#define PCL_7S 11 /* PC7 Shrink */
#define PCLUNL 12 /* Unlimited */
int pkg_cstate_limit = PCLUKN;
char *pkg_cstate_limit_strings[] = { "reserved", "unknown", "pc0", "pc1", "pc2",
"pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "unlimited"};
int nhm_pkg_cstate_limits[8] = {PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL};
int snb_pkg_cstate_limits[8] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL};
int hsw_pkg_cstate_limits[8] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCLRSV, PCLUNL};
int slv_pkg_cstate_limits[8] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7};
int amt_pkg_cstate_limits[8] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7};
int phi_pkg_cstate_limits[8] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL};
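
The block above replaces turbostat's old open-coded switch with data: the
low three bits of MSR_NHM_SNB_PKG_CST_CFG_CTL index a per-model table, and
the resulting PCL__* value is used both in >= comparisons (deciding which
package C-state columns are worth printing) and as an index into
pkg_cstate_limit_strings[].  Below is a stand-alone decode of the Sandy
Bridge example value shown in the man page earlier; the tables are copied
from the code just introduced, and only the surrounding program is made up.

#include <stdio.h>

#define PCLUKN 0
#define PCLRSV 1
#define PCL__0 2
#define PCL__1 3
#define PCL__2 4
#define PCL__3 5
#define PCL__4 6
#define PCL__6 7
#define PCL_6N 8
#define PCL_6R 9
#define PCL__7 10
#define PCL_7S 11
#define PCLUNL 12

static char *pkg_cstate_limit_strings[] = { "reserved", "unknown", "pc0", "pc1",
        "pc2", "pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "unlimited"};
static int snb_pkg_cstate_limits[8] = {PCL__0, PCL__2, PCL_6N, PCL_6R,
                                       PCL__7, PCL_7S, PCLRSV, PCLUNL};

int main(void)
{
        unsigned long long msr = 0x1e008402ULL;  /* example raw register value */
        int limit = snb_pkg_cstate_limits[msr & 0x7];

        printf("pkg-cstate-limit=%u: %s\n",
               (unsigned int)(msr & 0x7), pkg_cstate_limit_strings[limit]);
        printf("print Pkg%%pc6 column? %s\n", limit >= PCL__6 ? "yes" : "no");
        return 0;
}
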
void print_verbose_header(void)
{
unsigned long long msr;
@ -1098,44 +1147,14 @@ print_nhm_turbo_ratio_limits:
fprintf(stderr, "cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", msr);
fprintf(stderr, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: ",
fprintf(stderr, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: %s)\n",
(msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "",
(msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "",
(msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "",
(msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "",
(msr & (1 << 15)) ? "" : "UN",
(unsigned int)msr & 7);
switch(msr & 0x7) {
case 0:
fprintf(stderr, do_slm_cstates ? "no pkg states" : "pc0");
break;
case 1:
fprintf(stderr, do_slm_cstates ? "no pkg states" : do_snb_cstates ? "pc2" : "pc0");
break;
case 2:
fprintf(stderr, do_slm_cstates ? "invalid" : do_snb_cstates ? "pc6-noret" : "pc3");
break;
case 3:
fprintf(stderr, do_slm_cstates ? "invalid" : "pc6");
break;
case 4:
fprintf(stderr, do_slm_cstates ? "pc4" : "pc7");
break;
case 5:
fprintf(stderr, do_slm_cstates ? "invalid" : do_snb_cstates ? "pc7s" : "invalid");
break;
case 6:
fprintf(stderr, do_slm_cstates ? "pc6" : "invalid");
break;
case 7:
fprintf(stderr, do_slm_cstates ? "pc7" : "unlimited");
break;
default:
fprintf(stderr, "invalid");
}
fprintf(stderr, ")\n");
(unsigned int)msr & 7,
pkg_cstate_limit_strings[pkg_cstate_limit]);
if (!do_nhm_turbo_ratio_limit)
return;
@ -1516,9 +1535,14 @@ void check_permissions()
* MSR_CORE_C3_RESIDENCY 0x000003fc
* MSR_CORE_C6_RESIDENCY 0x000003fd
*
* Side effect:
* sets global pkg_cstate_limit to decode MSR_NHM_SNB_PKG_CST_CFG_CTL
*/
int has_nhm_msrs(unsigned int family, unsigned int model)
int probe_nhm_msrs(unsigned int family, unsigned int model)
{
unsigned long long msr;
int *pkg_cstate_limits;
if (!genuine_intel)
return 0;
@ -1531,31 +1555,47 @@ int has_nhm_msrs(unsigned int family, unsigned int model)
case 0x1F: /* Core i7 and i5 Processor - Nehalem */
case 0x25: /* Westmere Client - Clarkdale, Arrandale */
case 0x2C: /* Westmere EP - Gulftown */
case 0x2E: /* Nehalem-EX Xeon - Beckton */
case 0x2F: /* Westmere-EX Xeon - Eagleton */
pkg_cstate_limits = nhm_pkg_cstate_limits;
break;
case 0x2A: /* SNB */
case 0x2D: /* SNB Xeon */
case 0x3A: /* IVB */
case 0x3E: /* IVB Xeon */
pkg_cstate_limits = snb_pkg_cstate_limits;
break;
case 0x3C: /* HSW */
case 0x3F: /* HSX */
case 0x45: /* HSW */
case 0x46: /* HSW */
case 0x37: /* BYT */
case 0x4D: /* AVN */
case 0x3D: /* BDW */
case 0x47: /* BDW */
case 0x4F: /* BDX */
case 0x56: /* BDX-DE */
case 0x2E: /* Nehalem-EX Xeon - Beckton */
case 0x2F: /* Westmere-EX Xeon - Eagleton */
return 1;
pkg_cstate_limits = hsw_pkg_cstate_limits;
break;
case 0x37: /* BYT */
case 0x4D: /* AVN */
pkg_cstate_limits = slv_pkg_cstate_limits;
break;
case 0x4C: /* AMT */
pkg_cstate_limits = amt_pkg_cstate_limits;
break;
case 0x57: /* PHI */
pkg_cstate_limits = phi_pkg_cstate_limits;
break;
default:
return 0;
}
get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
pkg_cstate_limit = pkg_cstate_limits[msr & 0x7];
return 1;
}
int has_nhm_turbo_ratio_limit(unsigned int family, unsigned int model)
{
if (!has_nhm_msrs(family, model))
return 0;
switch (model) {
/* Nehalem compatible, but do not include turbo-ratio limit support */
case 0x2E: /* Nehalem-EX Xeon - Beckton */
@ -1769,6 +1809,7 @@ void rapl_probe(unsigned int family, unsigned int model)
case 0x45: /* HSW */
case 0x46: /* HSW */
case 0x3D: /* BDW */
case 0x47: /* BDW */
do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
break;
case 0x3F: /* HSX */
@ -1807,7 +1848,7 @@ void rapl_probe(unsigned int family, unsigned int model)
tdp = get_tdp(model);
rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
if (verbose)
if (debug)
fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
return;
@ -1932,7 +1973,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
return -1;
if (verbose) {
if (debug) {
fprintf(stderr, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx "
"(%f Watts, %f Joules, %f sec.)\n", cpu, msr,
rapl_power_units, rapl_energy_units, rapl_time_units);
@ -1989,7 +2030,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
print_power_limit_msr(cpu, msr, "DRAM Limit");
}
if (do_rapl & RAPL_CORE_POLICY) {
if (verbose) {
if (debug) {
if (get_msr(cpu, MSR_PP0_POLICY, &msr))
return -7;
@ -1997,7 +2038,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
}
}
if (do_rapl & RAPL_CORES) {
if (verbose) {
if (debug) {
if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
return -9;
@ -2007,7 +2048,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
}
}
if (do_rapl & RAPL_GFX) {
if (verbose) {
if (debug) {
if (get_msr(cpu, MSR_PP1_POLICY, &msr))
return -8;
@ -2046,6 +2087,7 @@ int has_snb_msrs(unsigned int family, unsigned int model)
case 0x45: /* HSW */
case 0x46: /* HSW */
case 0x3D: /* BDW */
case 0x47: /* BDW */
case 0x4F: /* BDX */
case 0x56: /* BDX-DE */
return 1;
@ -2168,7 +2210,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
target_c_local = (msr >> 16) & 0xFF;
if (verbose)
if (debug)
fprintf(stderr, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
cpu, msr, target_c_local);
@ -2198,7 +2240,7 @@ void check_cpuid()
if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
genuine_intel = 1;
if (verbose)
if (debug)
fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
(char *)&ebx, (char *)&edx, (char *)&ecx);
@ -2209,7 +2251,7 @@ void check_cpuid()
if (family == 6 || family == 0xf)
model += ((fms >> 16) & 0xf) << 4;
if (verbose)
if (debug)
fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
max_level, family, model, stepping, family, model, stepping);
@ -2245,20 +2287,24 @@ void check_cpuid()
do_ptm = eax & (1 << 6);
has_epb = ecx & (1 << 3);
if (verbose)
if (debug)
fprintf(stderr, "CPUID(6): %sAPERF, %sDTS, %sPTM, %sEPB\n",
has_aperf ? "" : "No ",
do_dts ? "" : "No ",
do_ptm ? "" : "No ",
has_epb ? "" : "No ");
do_nhm_platform_info = do_nhm_cstates = do_smi = has_nhm_msrs(family, model);
do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model);
do_snb_cstates = has_snb_msrs(family, model);
do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2);
do_pc3 = (pkg_cstate_limit >= PCL__3);
do_pc6 = (pkg_cstate_limit >= PCL__6);
do_pc7 = do_snb_cstates && (pkg_cstate_limit >= PCL__7);
do_c8_c9_c10 = has_hsw_msrs(family, model);
do_slm_cstates = is_slm(family, model);
bclk = discover_bclk(family, model);
do_nhm_turbo_ratio_limit = has_nhm_turbo_ratio_limit(family, model);
do_nhm_turbo_ratio_limit = do_nhm_platform_info && has_nhm_turbo_ratio_limit(family, model);
do_ivt_turbo_ratio_limit = has_ivt_turbo_ratio_limit(family, model);
rapl_probe(family, model);
perf_limit_reasons_probe(family, model);
@ -2267,10 +2313,25 @@ void check_cpuid()
}
void usage()
void help()
{
errx(1, "%s: [-v][-R][-T][-p|-P|-S][-c MSR#][-C MSR#][-m MSR#][-M MSR#][-i interval_sec | command ...]\n",
progname);
fprintf(stderr,
"Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n"
"\n"
"Turbostat forks the specified COMMAND and prints statistics\n"
"when COMMAND completes.\n"
"If no COMMAND is specified, turbostat wakes every 5-seconds\n"
"to print statistics, until interrupted.\n"
"--debug run in \"debug\" mode\n"
"--interval sec Override default 5-second measurement interval\n"
"--help print this help message\n"
"--counter msr print 32-bit counter at address \"msr\"\n"
"--Counter msr print 64-bit Counter at address \"msr\"\n"
"--msr msr print 32-bit value at address \"msr\"\n"
"--MSR msr print 64-bit Value at address \"msr\"\n"
"--version print version information\n"
"\n"
"For more help, run \"man turbostat\"\n");
}
@ -2309,7 +2370,7 @@ void topology_probe()
if (!summary_only && topo.num_cpus > 1)
show_cpu = 1;
if (verbose > 1)
if (debug > 1)
fprintf(stderr, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);
cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology));
@ -2344,7 +2405,7 @@ void topology_probe()
int siblings;
if (cpu_is_not_present(i)) {
if (verbose > 1)
if (debug > 1)
fprintf(stderr, "cpu%d NOT PRESENT\n", i);
continue;
}
@ -2359,26 +2420,26 @@ void topology_probe()
siblings = get_num_ht_siblings(i);
if (siblings > max_siblings)
max_siblings = siblings;
if (verbose > 1)
if (debug > 1)
fprintf(stderr, "cpu %d pkg %d core %d\n",
i, cpus[i].physical_package_id, cpus[i].core_id);
}
topo.num_cores_per_pkg = max_core_id + 1;
if (verbose > 1)
if (debug > 1)
fprintf(stderr, "max_core_id %d, sizing for %d cores per package\n",
max_core_id, topo.num_cores_per_pkg);
if (!summary_only && topo.num_cores_per_pkg > 1)
show_core = 1;
topo.num_packages = max_package_id + 1;
if (verbose > 1)
if (debug > 1)
fprintf(stderr, "max_package_id %d, sizing for %d packages\n",
max_package_id, topo.num_packages);
if (!summary_only && topo.num_packages > 1)
show_pkg = 1;
topo.num_threads_per_core = max_siblings;
if (verbose > 1)
if (debug > 1)
fprintf(stderr, "max_siblings %d\n", max_siblings);
free(cpus);
@ -2493,21 +2554,21 @@ void turbostat_init()
setup_all_buffers();
if (verbose)
if (debug)
print_verbose_header();
if (verbose)
if (debug)
for_all_cpus(print_epb, ODD_COUNTERS);
if (verbose)
if (debug)
for_all_cpus(print_perf_limit, ODD_COUNTERS);
if (verbose)
if (debug)
for_all_cpus(print_rapl, ODD_COUNTERS);
for_all_cpus(set_temperature_target, ODD_COUNTERS);
if (verbose)
if (debug)
for_all_cpus(print_thermal, ODD_COUNTERS);
}
@ -2572,56 +2633,82 @@ int get_and_dump_counters(void)
return status;
}
void print_version() {
fprintf(stderr, "turbostat version 4.1 10-Feb, 2015"
" - Len Brown <lenb@kernel.org>\n");
}
void cmdline(int argc, char **argv)
{
int opt;
int option_index = 0;
static struct option long_options[] = {
{"Counter", required_argument, 0, 'C'},
{"counter", required_argument, 0, 'c'},
{"Dump", no_argument, 0, 'D'},
{"debug", no_argument, 0, 'd'},
{"interval", required_argument, 0, 'i'},
{"help", no_argument, 0, 'h'},
{"Joules", no_argument, 0, 'J'},
{"MSR", required_argument, 0, 'M'},
{"msr", required_argument, 0, 'm'},
{"Package", no_argument, 0, 'p'},
{"processor", no_argument, 0, 'p'},
{"Summary", no_argument, 0, 'S'},
{"TCC", required_argument, 0, 'T'},
{"version", no_argument, 0, 'v' },
{0, 0, 0, 0 }
};
progname = argv[0];
while ((opt = getopt(argc, argv, "+pPsSvi:c:C:m:M:RJT:")) != -1) {
while ((opt = getopt_long_only(argc, argv, "C:c:Ddhi:JM:m:PpST:v",
long_options, &option_index)) != -1) {
switch (opt) {
case 'p':
show_core_only++;
break;
case 'P':
show_pkg_only++;
break;
case 's':
dump_only++;
break;
case 'S':
summary_only++;
break;
case 'v':
verbose++;
break;
case 'i':
interval_sec = atoi(optarg);
case 'C':
sscanf(optarg, "%x", &extra_delta_offset64);
break;
case 'c':
sscanf(optarg, "%x", &extra_delta_offset32);
break;
case 'C':
sscanf(optarg, "%x", &extra_delta_offset64);
case 'D':
dump_only++;
break;
case 'm':
sscanf(optarg, "%x", &extra_msr_offset32);
case 'd':
debug++;
break;
case 'M':
sscanf(optarg, "%x", &extra_msr_offset64);
break;
case 'R':
rapl_verbose++;
break;
case 'T':
tcc_activation_temp_override = atoi(optarg);
case 'h':
default:
help();
exit(1);
case 'i':
interval_sec = atoi(optarg);
break;
case 'J':
rapl_joules++;
break;
default:
usage();
case 'M':
sscanf(optarg, "%x", &extra_msr_offset64);
break;
case 'm':
sscanf(optarg, "%x", &extra_msr_offset32);
break;
case 'P':
show_pkg_only++;
break;
case 'p':
show_core_only++;
break;
case 'S':
summary_only++;
break;
case 'T':
tcc_activation_temp_override = atoi(optarg);
break;
case 'v':
print_version();
exit(0);
break;
}
}
}
@ -2630,9 +2717,8 @@ int main(int argc, char **argv)
{
cmdline(argc, argv);
if (verbose)
fprintf(stderr, "turbostat v3.9 23-Jan, 2015"
" - Len Brown <lenb@kernel.org>\n");
if (debug)
print_version();
turbostat_init();
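
The cmdline() rework above moves turbostat from single-letter getopt()
parsing to getopt_long_only(), with the long names mapping back onto the
old letter values.  A minimal, self-contained illustration of that style,
modeling only two of the options (--interval and --debug); it sketches the
approach rather than copying turbostat's table:

#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
        static struct option long_options[] = {
                {"interval", required_argument, 0, 'i'},
                {"debug",    no_argument,       0, 'd'},
                {0, 0, 0, 0}
        };
        int interval_sec = 5, debug = 0, opt;

        while ((opt = getopt_long_only(argc, argv, "di:", long_options, NULL)) != -1) {
                switch (opt) {
                case 'd':
                        debug++;
                        break;
                case 'i':
                        interval_sec = atoi(optarg);
                        break;
                default:
                        fprintf(stderr, "usage: %s [--debug] [--interval sec]\n", argv[0]);
                        return 1;
                }
        }
        printf("interval %d sec, debug level %d\n", interval_sec, debug);
        return 0;
}

Because getopt_long_only() also matches long names given with a single
dash, "-interval 10", "--interval 10" and "-i 10" all parse the same way
in this sketch.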