#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-only
#
# top-like utility for displaying kvm statistics
#
# Copyright 2006-2008 Qumranet Technologies
# Copyright 2008-2011 Red Hat, Inc.
#
# Authors:
#  Avi Kivity <avi@redhat.com>
#
"""The kvm_stat module outputs statistics about running KVM VMs

Three different ways of output formatting are available:
- as a top-like text ui
- in a key -> value format
- in an all keys, all values format

The data is sampled from the KVM's debugfs entries and its perf events.
"""

from __future__ import print_function

import curses
import sys
import locale
import os
import time
import argparse
import ctypes
import fcntl
import resource
import struct
import re
import subprocess
import signal
from collections import defaultdict, namedtuple
from functools import reduce
from datetime import datetime

VMX_EXIT_REASONS = {
    'EXCEPTION_NMI':        0,
    'EXTERNAL_INTERRUPT':   1,
    'TRIPLE_FAULT':         2,
    'PENDING_INTERRUPT':    7,
    'NMI_WINDOW':           8,
    'TASK_SWITCH':          9,
    'CPUID':                10,
    'HLT':                  12,
    'INVLPG':               14,
    'RDPMC':                15,
    'RDTSC':                16,
    'VMCALL':               18,
    'VMCLEAR':              19,
    'VMLAUNCH':             20,
    'VMPTRLD':              21,
    'VMPTRST':              22,
    'VMREAD':               23,
    'VMRESUME':             24,
    'VMWRITE':              25,
    'VMOFF':                26,
    'VMON':                 27,
    'CR_ACCESS':            28,
    'DR_ACCESS':            29,
    'IO_INSTRUCTION':       30,
    'MSR_READ':             31,
    'MSR_WRITE':            32,
    'INVALID_STATE':        33,
    'MWAIT_INSTRUCTION':    36,
    'MONITOR_INSTRUCTION':  39,
    'PAUSE_INSTRUCTION':    40,
    'MCE_DURING_VMENTRY':   41,
    'TPR_BELOW_THRESHOLD':  43,
    'APIC_ACCESS':          44,
    'EPT_VIOLATION':        48,
    'EPT_MISCONFIG':        49,
    'WBINVD':               54,
    'XSETBV':               55,
    'APIC_WRITE':           56,
    'INVPCID':              58,
}

SVM_EXIT_REASONS = {
    'READ_CR0':       0x000,
    'READ_CR3':       0x003,
    'READ_CR4':       0x004,
    'READ_CR8':       0x008,
    'WRITE_CR0':      0x010,
    'WRITE_CR3':      0x013,
    'WRITE_CR4':      0x014,
    'WRITE_CR8':      0x018,
    'READ_DR0':       0x020,
    'READ_DR1':       0x021,
    'READ_DR2':       0x022,
    'READ_DR3':       0x023,
    'READ_DR4':       0x024,
    'READ_DR5':       0x025,
    'READ_DR6':       0x026,
    'READ_DR7':       0x027,
    'WRITE_DR0':      0x030,
    'WRITE_DR1':      0x031,
    'WRITE_DR2':      0x032,
    'WRITE_DR3':      0x033,
    'WRITE_DR4':      0x034,
    'WRITE_DR5':      0x035,
    'WRITE_DR6':      0x036,
    'WRITE_DR7':      0x037,
    'EXCP_BASE':      0x040,
    'INTR':           0x060,
    'NMI':            0x061,
    'SMI':            0x062,
    'INIT':           0x063,
    'VINTR':          0x064,
    'CR0_SEL_WRITE':  0x065,
    'IDTR_READ':      0x066,
    'GDTR_READ':      0x067,
    'LDTR_READ':      0x068,
    'TR_READ':        0x069,
    'IDTR_WRITE':     0x06a,
    'GDTR_WRITE':     0x06b,
    'LDTR_WRITE':     0x06c,
    'TR_WRITE':       0x06d,
    'RDTSC':          0x06e,
    'RDPMC':          0x06f,
    'PUSHF':          0x070,
    'POPF':           0x071,
    'CPUID':          0x072,
    'RSM':            0x073,
    'IRET':           0x074,
    'SWINT':          0x075,
    'INVD':           0x076,
    'PAUSE':          0x077,
    'HLT':            0x078,
    'INVLPG':         0x079,
    'INVLPGA':        0x07a,
    'IOIO':           0x07b,
    'MSR':            0x07c,
    'TASK_SWITCH':    0x07d,
    'FERR_FREEZE':    0x07e,
    'SHUTDOWN':       0x07f,
    'VMRUN':          0x080,
    'VMMCALL':        0x081,
    'VMLOAD':         0x082,
    'VMSAVE':         0x083,
    'STGI':           0x084,
    'CLGI':           0x085,
    'SKINIT':         0x086,
    'RDTSCP':         0x087,
    'ICEBP':          0x088,
    'WBINVD':         0x089,
    'MONITOR':        0x08a,
    'MWAIT':          0x08b,
    'MWAIT_COND':     0x08c,
    'XSETBV':         0x08d,
    'NPF':            0x400,
}

# EC definition of HSR (from arch/arm64/include/asm/kvm_arm.h)
AARCH64_EXIT_REASONS = {
    'UNKNOWN':      0x00,
    'WFI':          0x01,
    'CP15_32':      0x03,
    'CP15_64':      0x04,
    'CP14_MR':      0x05,
    'CP14_LS':      0x06,
    'FP_ASIMD':     0x07,
    'CP10_ID':      0x08,
    'CP14_64':      0x0C,
    'ILL_ISS':      0x0E,
    'SVC32':        0x11,
    'HVC32':        0x12,
    'SMC32':        0x13,
    'SVC64':        0x15,
    'HVC64':        0x16,
    'SMC64':        0x17,
    'SYS64':        0x18,
    'IABT':         0x20,
    'IABT_HYP':     0x21,
    'PC_ALIGN':     0x22,
    'DABT':         0x24,
    'DABT_HYP':     0x25,
    'SP_ALIGN':     0x26,
    'FP_EXC32':     0x28,
    'FP_EXC64':     0x2C,
    'SERROR':       0x2F,
    'BREAKPT':      0x30,
    'BREAKPT_HYP':  0x31,
    'SOFTSTP':      0x32,
    'SOFTSTP_HYP':  0x33,
    'WATCHPT':      0x34,
    'WATCHPT_HYP':  0x35,
    'BKPT32':       0x38,
    'VECTOR32':     0x3A,
    'BRK64':        0x3C,
}

# From include/uapi/linux/kvm.h, KVM_EXIT_xxx
USERSPACE_EXIT_REASONS = {
    'UNKNOWN':          0,
    'EXCEPTION':        1,
    'IO':               2,
    'HYPERCALL':        3,
    'DEBUG':            4,
    'HLT':              5,
    'MMIO':             6,
    'IRQ_WINDOW_OPEN':  7,
    'SHUTDOWN':         8,
    'FAIL_ENTRY':       9,
    'INTR':             10,
    'SET_TPR':          11,
    'TPR_ACCESS':       12,
    'S390_SIEIC':       13,
    'S390_RESET':       14,
    'DCR':              15,
    'NMI':              16,
    'INTERNAL_ERROR':   17,
    'OSI':              18,
    'PAPR_HCALL':       19,
    'S390_UCONTROL':    20,
    'WATCHDOG':         21,
    'S390_TSCH':        22,
    'EPR':              23,
    'SYSTEM_EVENT':     24,
}

IOCTL_NUMBERS = {
    'SET_FILTER':  0x40082406,
    'ENABLE':      0x00002400,
    'DISABLE':     0x00002401,
    'RESET':       0x00002403,
}
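
# The numbers above follow the kernel's ioctl encoding of the perf ioctls
# (_IO('$', n) and _IOW('$', 6, char *), with '$' == 0x24).  A minimal
# sketch of that encoding, assuming the x86 direction bits:
#
#   def _ioc(direction, size, nr):
#       return direction << 30 | size << 16 | 0x24 << 8 | nr
#
#   _ioc(0, 0, 0) == IOCTL_NUMBERS['ENABLE']        # _IO('$', 0)
#   _ioc(1, 8, 6) == IOCTL_NUMBERS['SET_FILTER']    # _IOW('$', 6, char *)
#
# ArchPPC below overrides them, as powerpc uses different direction bits.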

signal_received = False

ENCODING = locale.getpreferredencoding(False)
TRACE_FILTER = re.compile(r'^[^\(]*$')
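
# TRACE_FILTER matches plain (parent) event names only; a name carrying a
# filter suffix does not match, e.g.:
#   TRACE_FILTER.match('kvm_exit')       # match -> a parent event
#   TRACE_FILTER.match('kvm_exit(HLT)')  # None  -> a child event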


class Arch(object):
    """Encapsulates global architecture specific data.

    Contains the performance event open syscall and ioctl numbers, as
    well as the VM exit reasons for the architecture it runs on.

    """
    @staticmethod
    def get_arch():
        machine = os.uname()[4]

        if machine.startswith('ppc'):
            return ArchPPC()
        elif machine.startswith('aarch64'):
            return ArchA64()
        elif machine.startswith('s390'):
            return ArchS390()
        else:
            # X86_64
            for line in open('/proc/cpuinfo'):
                if not line.startswith('flags'):
                    continue

                flags = line.split()
                if 'vmx' in flags:
                    return ArchX86(VMX_EXIT_REASONS)
                if 'svm' in flags:
                    return ArchX86(SVM_EXIT_REASONS)
                return

    def tracepoint_is_child(self, field):
        if (TRACE_FILTER.match(field)):
            return None
        return field.split('(', 1)[0]


class ArchX86(Arch):
    def __init__(self, exit_reasons):
        self.sc_perf_evt_open = 298
        self.ioctl_numbers = IOCTL_NUMBERS
        self.exit_reason_field = 'exit_reason'
        self.exit_reasons = exit_reasons

    def debugfs_is_child(self, field):
        """ Returns name of parent if 'field' is a child, None otherwise """
        return None


class ArchPPC(Arch):
    def __init__(self):
        self.sc_perf_evt_open = 319
        self.ioctl_numbers = IOCTL_NUMBERS
        self.ioctl_numbers['ENABLE'] = 0x20002400
        self.ioctl_numbers['DISABLE'] = 0x20002401
        self.ioctl_numbers['RESET'] = 0x20002403

        # PPC comes in 32 and 64 bit and some generated ioctl
        # numbers depend on the wordsize.
        char_ptr_size = ctypes.sizeof(ctypes.c_char_p)
        self.ioctl_numbers['SET_FILTER'] = 0x80002406 | char_ptr_size << 16
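        # e.g. on 64-bit PPC ctypes.sizeof(ctypes.c_char_p) == 8, so this
        # yields 0x80002406 | 8 << 16 == 0x80082406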
        self.exit_reason_field = 'exit_nr'
        self.exit_reasons = {}

    def debugfs_is_child(self, field):
        """ Returns name of parent if 'field' is a child, None otherwise """
        return None


class ArchA64(Arch):
    def __init__(self):
        self.sc_perf_evt_open = 241
        self.ioctl_numbers = IOCTL_NUMBERS
        self.exit_reason_field = 'esr_ec'
        self.exit_reasons = AARCH64_EXIT_REASONS

    def debugfs_is_child(self, field):
        """ Returns name of parent if 'field' is a child, None otherwise """
        return None


class ArchS390(Arch):
    def __init__(self):
        self.sc_perf_evt_open = 331
        self.ioctl_numbers = IOCTL_NUMBERS
        self.exit_reason_field = None
        self.exit_reasons = None

    def debugfs_is_child(self, field):
        """ Returns name of parent if 'field' is a child, None otherwise """
        if field.startswith('instruction_'):
            return 'exit_instruction'


ARCH = Arch.get_arch()


class perf_event_attr(ctypes.Structure):
    """Struct that holds the necessary data to set up a trace event.

    For an extensive explanation see perf_event_open(2) and
    include/uapi/linux/perf_event.h, struct perf_event_attr

    All fields that are not initialized in the constructor are 0.

    """
    _fields_ = [('type', ctypes.c_uint32),
                ('size', ctypes.c_uint32),
                ('config', ctypes.c_uint64),
                ('sample_freq', ctypes.c_uint64),
                ('sample_type', ctypes.c_uint64),
                ('read_format', ctypes.c_uint64),
                ('flags', ctypes.c_uint64),
                ('wakeup_events', ctypes.c_uint32),
                ('bp_type', ctypes.c_uint32),
                ('bp_addr', ctypes.c_uint64),
                ('bp_len', ctypes.c_uint64),
                ]

    def __init__(self):
        super(self.__class__, self).__init__()
        self.type = PERF_TYPE_TRACEPOINT
        self.size = ctypes.sizeof(self)
        self.read_format = PERF_FORMAT_GROUP
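
    # The single 'flags' u64 above stands in for the C bitfield (disabled,
    # inherit, ...) of struct perf_event_attr; leaving all of its bits at 0
    # means the event starts enabled and is not inherited by child tasks.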


PERF_TYPE_TRACEPOINT = 2
PERF_FORMAT_GROUP = 1 << 3


class Group(object):
    """Represents a perf event group."""

    def __init__(self):
        self.events = []

    def add_event(self, event):
        self.events.append(event)

    def read(self):
        """Returns a dict with 'event name: value' for all events in the
        group.

        Values are read by reading from the file descriptor of the
        event that is the group leader. See perf_event_open(2) for
        details.

        Read format for the used event configuration is:
        struct read_format {
            u64 nr;         /* The number of events */
            struct {
                u64 value;  /* The value of the event */
            } values[nr];
        };

        """
        length = 8 * (1 + len(self.events))
        read_format = 'xxxxxxxx' + 'Q' * len(self.events)
        return dict(zip([event.name for event in self.events],
                        struct.unpack(read_format,
                                      os.read(self.events[0].fd, length))))
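
    # A worked example of the decode above, for a two-event group: the
    # kernel hands back 24 bytes, 'nr' first, which the format string skips
    # with eight pad bytes ('xxxxxxxx') before unpacking one u64 per event:
    #   buf = struct.pack('QQQ', 2, 42, 7)      # nr=2, values 42 and 7
    #   struct.unpack('xxxxxxxx' + 'QQ', buf)   # -> (42, 7)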


class Event(object):
    """Represents a performance event and manages its life cycle."""
    def __init__(self, name, group, trace_cpu, trace_pid, trace_point,
                 trace_filter, trace_set='kvm'):
        self.libc = ctypes.CDLL('libc.so.6', use_errno=True)
        self.syscall = self.libc.syscall
        self.name = name
        self.fd = None
        self._setup_event(group, trace_cpu, trace_pid, trace_point,
                          trace_filter, trace_set)

    def __del__(self):
        """Closes the event's file descriptor.

        As no python file object was created for the file descriptor,
        python will not reference count the descriptor and will not
        close it itself automatically, so we do it.

        """
        if self.fd:
            os.close(self.fd)

    def _perf_event_open(self, attr, pid, cpu, group_fd, flags):
        """Wrapper for the sys_perf_evt_open() syscall.

        Used to set up performance events, returns a file descriptor or -1
        on error.

        Attributes are:
        - syscall number
        - struct perf_event_attr *
        - pid or -1 to monitor all pids
        - cpu number or -1 to monitor all cpus
        - The file descriptor of the group leader or -1 to create a group.
        - flags

        """
        return self.syscall(ARCH.sc_perf_evt_open, ctypes.pointer(attr),
                            ctypes.c_int(pid), ctypes.c_int(cpu),
                            ctypes.c_int(group_fd), ctypes.c_long(flags))
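
    # This mirrors the C call
    #   fd = syscall(__NR_perf_event_open, &attr, pid, cpu, group_fd, flags);
    # e.g. tracing one tracepoint for every pid on CPU 0, in a new group:
    #   fd = self._perf_event_open(attr, -1, 0, -1, 0)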

    def _setup_event_attribute(self, trace_set, trace_point):
        """Returns an initialized ctype perf_event_attr struct."""

        id_path = os.path.join(PATH_DEBUGFS_TRACING, 'events', trace_set,
                               trace_point, 'id')

        event_attr = perf_event_attr()
        event_attr.config = int(open(id_path).read())
        return event_attr

    def _setup_event(self, group, trace_cpu, trace_pid, trace_point,
                     trace_filter, trace_set):
        """Sets up the perf event in Linux.

        Issues the syscall to register the event in the kernel and
        then sets the optional filter.

        """

        event_attr = self._setup_event_attribute(trace_set, trace_point)

        # First event will be group leader.
        group_leader = -1

        # All others have to pass the leader's descriptor instead.
        if group.events:
            group_leader = group.events[0].fd

        fd = self._perf_event_open(event_attr, trace_pid,
                                   trace_cpu, group_leader, 0)
        if fd == -1:
            err = ctypes.get_errno()
            raise OSError(err, os.strerror(err),
                          'while calling sys_perf_event_open().')

        if trace_filter:
            fcntl.ioctl(fd, ARCH.ioctl_numbers['SET_FILTER'],
                        trace_filter)

        self.fd = fd

    def enable(self):
        """Enables the trace event in the kernel.

        Enabling the group leader makes reading counters from it and the
        events under it possible.

        """
        fcntl.ioctl(self.fd, ARCH.ioctl_numbers['ENABLE'], 0)

    def disable(self):
        """Disables the trace event in the kernel.

        Disabling the group leader makes reading all counters under it
        impossible.

        """
        fcntl.ioctl(self.fd, ARCH.ioctl_numbers['DISABLE'], 0)

    def reset(self):
        """Resets the count of the trace event in the kernel."""
        fcntl.ioctl(self.fd, ARCH.ioctl_numbers['RESET'], 0)


class Provider(object):
    """Encapsulates functionalities used by all providers."""
    def __init__(self, pid):
        self.child_events = False
        self.pid = pid

    @staticmethod
    def is_field_wanted(fields_filter, field):
        """Indicate whether field is valid according to fields_filter."""
        if not fields_filter:
            return True
        return re.match(fields_filter, field) is not None

    @staticmethod
    def walkdir(path):
        """Returns os.walk() data for specified directory.

        As it is only a wrapper it returns the same 3-tuple of (dirpath,
        dirnames, filenames).
        """
        return next(os.walk(path))


class TracepointProvider(Provider):
    """Data provider for the stats class.

    Manages the events/groups from which it acquires its data.

    """
    def __init__(self, pid, fields_filter):
        self.group_leaders = []
        self.filters = self._get_filters()
        self.update_fields(fields_filter)
        super(TracepointProvider, self).__init__(pid)

    @staticmethod
    def _get_filters():
        """Returns a dict of trace events, their filter ids and
        the values that can be filtered.

        Trace events can be filtered for special values by setting a
        filter string via an ioctl. The string normally has the format
        identifier==value. For each filter a new event will be created, to
        be able to distinguish the events.

        """
        filters = {}
        filters['kvm_userspace_exit'] = ('reason', USERSPACE_EXIT_REASONS)
        if ARCH.exit_reason_field and ARCH.exit_reasons:
            filters['kvm_exit'] = (ARCH.exit_reason_field, ARCH.exit_reasons)
        return filters
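
    # For example, on x86/VMX the field 'kvm_exit(HLT)' becomes a separate
    # perf event on the kvm_exit tracepoint carrying the filter string
    # 'exit_reason==12' (VMX_EXIT_REASONS['HLT'] == 12, and the VMX
    # exit_reason_field is 'exit_reason'), so every filtered exit reason
    # gets its own counter.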

    def _get_available_fields(self):
        """Returns a list of available events of format 'event name(filter
        name)'.

        All available events have directories under
        /sys/kernel/debug/tracing/events/ which export information
        about the specific event. Therefore, listing the dirs gives us
        a list of all available events.

        Some events like the vm exit reasons can be filtered for
        specific values. To take account for that, the routine below
        creates special fields with the following format:
        event name(filter name)

        """
        path = os.path.join(PATH_DEBUGFS_TRACING, 'events', 'kvm')
        fields = self.walkdir(path)[1]
        extra = []
        for field in fields:
            if field in self.filters:
                filter_name_, filter_dicts = self.filters[field]
                for name in filter_dicts:
                    extra.append(field + '(' + name + ')')
        fields += extra
        return fields

    def update_fields(self, fields_filter):
        """Refresh fields, applying fields_filter"""
        self.fields = [field for field in self._get_available_fields()
                       if self.is_field_wanted(fields_filter, field)]
        # add parents for child fields - otherwise we won't see any output!
        for field in self._fields:
            parent = ARCH.tracepoint_is_child(field)
            if (parent and parent not in self._fields):
                self.fields.append(parent)

    @staticmethod
    def _get_online_cpus():
        """Returns a list of cpu id integers."""
        def parse_int_list(list_string):
            """Returns an int list from a string of comma separated integers
            and integer ranges."""
            integers = []
            members = list_string.split(',')

            for member in members:
                if '-' not in member:
                    integers.append(int(member))
                else:
                    int_range = member.split('-')
                    integers.extend(range(int(int_range[0]),
                                          int(int_range[1]) + 1))

            return integers
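
        # A worked example of the parser, given a typical online-CPU mask:
        #   parse_int_list('0-3,5,7-8')  ->  [0, 1, 2, 3, 5, 7, 8]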

        with open('/sys/devices/system/cpu/online') as cpu_list:
            cpu_string = cpu_list.readline()
            return parse_int_list(cpu_string)

    def _setup_traces(self):
        """Creates all event and group objects needed to be able to retrieve
        data."""
        fields = self._get_available_fields()
        if self._pid > 0:
            # Fetch list of all threads of the monitored pid, as qemu
            # starts a thread for each vcpu.
            path = os.path.join('/proc', str(self._pid), 'task')
            groupids = self.walkdir(path)[1]
        else:
            groupids = self._get_online_cpus()

        # The constant is needed as a buffer for python libs, std
        # streams and other files that the script opens.
        newlim = len(groupids) * len(fields) + 50
        try:
            softlim_, hardlim = resource.getrlimit(resource.RLIMIT_NOFILE)

            if hardlim < newlim:
                # Now we need CAP_SYS_RESOURCE, to increase the hard limit.
                resource.setrlimit(resource.RLIMIT_NOFILE, (newlim, newlim))
            else:
                # Raising the soft limit is sufficient.
                resource.setrlimit(resource.RLIMIT_NOFILE, (newlim, hardlim))

        except ValueError:
            sys.exit("NOFILE rlimit could not be raised to {0}".format(newlim))

        for groupid in groupids:
            group = Group()
            for name in fields:
                tracepoint = name
                tracefilter = None
                match = re.match(r'(.*)\((.*)\)', name)
                if match:
                    tracepoint, sub = match.groups()
                    tracefilter = ('%s==%d\0' %
                                   (self.filters[tracepoint][0],
                                    self.filters[tracepoint][1][sub]))

                # From perf_event_open(2):
                # pid > 0 and cpu == -1
                #  This measures the specified process/thread on any CPU.
                #
                # pid == -1 and cpu >= 0
                #  This measures all processes/threads on the specified CPU.
                trace_cpu = groupid if self._pid == 0 else -1
                trace_pid = int(groupid) if self._pid != 0 else -1

                group.add_event(Event(name=name,
                                      group=group,
                                      trace_cpu=trace_cpu,
                                      trace_pid=trace_pid,
                                      trace_point=tracepoint,
                                      trace_filter=tracefilter))

            self.group_leaders.append(group)

    @property
    def fields(self):
        return self._fields

    @fields.setter
    def fields(self, fields):
        """Enables/disables the (un)wanted events"""
        self._fields = fields
        for group in self.group_leaders:
            for index, event in enumerate(group.events):
                if event.name in fields:
                    event.reset()
                    event.enable()
                else:
                    # Do not disable the group leader.
                    # It would disable all of its events.
                    if index != 0:
                        event.disable()

    @property
    def pid(self):
        return self._pid

    @pid.setter
    def pid(self, pid):
        """Changes the monitored pid by setting new traces."""
        self._pid = pid
        # The garbage collector will get rid of all Event/Group
        # objects and open files after removing the references.
        self.group_leaders = []
        self._setup_traces()
        self.fields = self._fields

    def read(self, by_guest=0):
        """Returns 'event name: current value' for all enabled events."""
        ret = defaultdict(int)
        for group in self.group_leaders:
            for name, val in group.read().items():
                if name not in self._fields:
                    continue
                parent = ARCH.tracepoint_is_child(name)
                if parent:
                    name += ' ' + parent
                ret[name] += val
        return ret

    def reset(self):
        """Reset all field counters"""
        for group in self.group_leaders:
            for event in group.events:
                event.reset()


class DebugfsProvider(Provider):
    """Provides data from the files that KVM creates in the kvm debugfs
    folder."""
    def __init__(self, pid, fields_filter, include_past):
        self.update_fields(fields_filter)
        self._baseline = {}
        self.do_read = True
        self.paths = []
        super(DebugfsProvider, self).__init__(pid)
        if include_past:
            self._restore()

    def _get_available_fields(self):
        """Returns a list of available fields.

        The fields are all available KVM debugfs files

        """
        exempt_list = ['halt_poll_fail_ns', 'halt_poll_success_ns',
                       'halt_wait_ns']
        fields = [field for field in self.walkdir(PATH_DEBUGFS_KVM)[2]
                  if field not in exempt_list]

        return fields

    def update_fields(self, fields_filter):
        """Refresh fields, applying fields_filter"""
        self._fields = [field for field in self._get_available_fields()
                        if self.is_field_wanted(fields_filter, field)]
        # add parents for child fields - otherwise we won't see any output!
        for field in self._fields:
            parent = ARCH.debugfs_is_child(field)
            if (parent and parent not in self._fields):
                self.fields.append(parent)

    @property
    def fields(self):
        return self._fields

    @fields.setter
    def fields(self, fields):
        self._fields = fields
        self.reset()

    @property
    def pid(self):
        return self._pid

    @pid.setter
    def pid(self, pid):
        self._pid = pid
        if pid != 0:
            vms = self.walkdir(PATH_DEBUGFS_KVM)[1]
            if len(vms) == 0:
                self.do_read = False

            self.paths = list(filter(lambda x: "{}-".format(pid) in x, vms))

        else:
            self.paths = []
            self.do_read = True

    def _verify_paths(self):
        """Remove invalid paths"""
        for path in self.paths:
            if not os.path.exists(os.path.join(PATH_DEBUGFS_KVM, path)):
                self.paths.remove(path)
                continue

    def read(self, reset=0, by_guest=0):
        """Returns a dict with format:'file name / field -> current value'.

        Parameter 'reset':
          0   plain read
          1   reset field counts to 0
          2   restore the original field counts

        """
        results = {}

        # If no debugfs filtering support is available, then don't read.
        if not self.do_read:
            return results
        self._verify_paths()

        paths = self.paths
        if self._pid == 0:
            paths = []
            for entry in os.walk(PATH_DEBUGFS_KVM):
                for dir in entry[1]:
                    paths.append(dir)
        for path in paths:
            for field in self._fields:
                value = self._read_field(field, path)
                key = path + field
                if reset == 1:
                    self._baseline[key] = value
                if reset == 2:
                    self._baseline[key] = 0
                if self._baseline.get(key, -1) == -1:
                    self._baseline[key] = value
                parent = ARCH.debugfs_is_child(field)
                if parent:
                    field = field + ' ' + parent
                else:
                    if by_guest:
                        field = key.split('-')[0]    # set 'field' to 'pid'
                increment = value - self._baseline.get(key, 0)
                if field in results:
                    results[field] += increment
                else:
                    results[field] = increment

        return results

    def _read_field(self, field, path):
        """Returns the value of a single field from a specific VM."""
        try:
            return int(open(os.path.join(PATH_DEBUGFS_KVM,
                                         path,
                                         field))
                       .read())
        except IOError:
            return 0

    def reset(self):
        """Reset field counters"""
        self._baseline = {}
        self.read(1)

    def _restore(self):
        """Restore the original field counts"""
        self._baseline = {}
        self.read(2)


EventStat = namedtuple('EventStat', ['value', 'delta'])


class Stats(object):
    """Manages the data providers and the data they provide.

    It is used to set filters on the provider's data and collect all
    provider data.

    """
    def __init__(self, options):
        self.providers = self._get_providers(options)
        self._pid_filter = options.pid
        self._fields_filter = options.fields
        self.values = {}
        self._child_events = False

    def _get_providers(self, options):
        """Returns a list of data providers depending on the passed options."""
        providers = []

        if options.debugfs:
            providers.append(DebugfsProvider(options.pid, options.fields,
                                             options.debugfs_include_past))
        if options.tracepoints or not providers:
            providers.append(TracepointProvider(options.pid, options.fields))

        return providers

    def _update_provider_filters(self):
        """Propagates fields filters to providers."""
        # As we reset the counters when updating the fields we can
        # also clear the cache of old values.
        self.values = {}
        for provider in self.providers:
            provider.update_fields(self._fields_filter)

    def reset(self):
        self.values = {}
        for provider in self.providers:
            provider.reset()

    @property
    def fields_filter(self):
        return self._fields_filter

    @fields_filter.setter
    def fields_filter(self, fields_filter):
        if fields_filter != self._fields_filter:
            self._fields_filter = fields_filter
            self._update_provider_filters()

    @property
    def pid_filter(self):
        return self._pid_filter

    @pid_filter.setter
    def pid_filter(self, pid):
        if pid != self._pid_filter:
            self._pid_filter = pid
            self.values = {}
            for provider in self.providers:
                provider.pid = self._pid_filter

    @property
    def child_events(self):
        return self._child_events

    @child_events.setter
    def child_events(self, val):
        self._child_events = val
        for provider in self.providers:
            provider.child_events = val

    def get(self, by_guest=0):
        """Returns a dict with field -> (value, delta to last value) of all
        provider data.

        Key formats:
          * plain: 'key' is event name
          * child-parent: 'key' is in format '<child> <parent>'
          * pid: 'key' is the pid of the guest, and the record contains the
            aggregated event data
        These formats are generated by the providers, and handled in class TUI.
        """
        for provider in self.providers:
            new = provider.read(by_guest=by_guest)
            for key in new:
                oldval = self.values.get(key, EventStat(0, 0)).value
                newval = new.get(key, 0)
                newdelta = newval - oldval
                self.values[key] = EventStat(newval, newdelta)
        return self.values
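
    # e.g. with child events enabled, a snapshot of self.values may look
    # like this (counts made up for illustration):
    #   {'kvm_exit':               EventStat(value=42134, delta=50),
    #    'kvm_exit(HLT) kvm_exit': EventStat(value=1002,  delta=10)}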

    def toggle_display_guests(self, to_pid):
        """Toggle between collection of stats by individual event and by
        guest pid

        Events reported by DebugfsProvider change when switching to/from
        reading by guest values. Hence we have to remove the excess event
        names from self.values.

        """
        if any(isinstance(ins, TracepointProvider) for ins in self.providers):
            return 1
        if to_pid:
            for provider in self.providers:
                if isinstance(provider, DebugfsProvider):
                    for key in provider.fields:
                        if key in self.values.keys():
                            del self.values[key]
        else:
            oldvals = self.values.copy()
            for key in oldvals:
                if key.isdigit():
                    del self.values[key]
        # Update oldval (see get())
        self.get(to_pid)
        return 0


DELAY_DEFAULT = 3.0
MAX_GUEST_NAME_LEN = 48
MAX_REGEX_LEN = 44
SORT_DEFAULT = 0
MIN_DELAY = 0.1
MAX_DELAY = 25.5


class Tui(object):
    """Instruments curses to draw a nice text ui."""
    def __init__(self, stats, opts):
        self.stats = stats
        self.screen = None
        self._delay_initial = 0.25
        self._delay_regular = opts.set_delay
        self._sorting = SORT_DEFAULT
        self._display_guests = 0

    def __enter__(self):
        """Initialises curses for later use.  Based on curses.wrapper
           implementation from the Python standard library."""
        self.screen = curses.initscr()
        curses.noecho()
        curses.cbreak()

        # The try/catch works around a minor bit of
        # over-conscientiousness in the curses module, the error
        # return from C start_color() is ignorable.
        try:
            curses.start_color()
        except curses.error:
            pass

        # Hide cursor in extra statement as some monochrome terminals
        # might support hiding but not colors.
        try:
            curses.curs_set(0)
        except curses.error:
            pass

        curses.use_default_colors()
        return self

    def __exit__(self, *exception):
        """Resets the terminal to its normal state.  Based on curses.wrapper
           implementation from the Python standard library."""
        if self.screen:
            self.screen.keypad(0)
            curses.echo()
            curses.nocbreak()
            curses.endwin()

    @staticmethod
    def get_all_gnames():
        """Returns a list of (pid, gname) tuples of all running guests"""
        res = []
        try:
            child = subprocess.Popen(['ps', '-A', '--format', 'pid,args'],
                                     stdout=subprocess.PIPE)
        except:
            raise Exception
        for line in child.stdout:
            line = line.decode(ENCODING).lstrip().split(' ', 1)
            # perform a sanity check before calling the more expensive
            # function to possibly extract the guest name
            if ' -name ' in line[1]:
                res.append((line[0], Tui.get_gname_from_pid(line[0])))
        child.stdout.close()

        return res

    def _print_all_gnames(self, row):
        """Print a list of all running guests along with their pids."""
        self.screen.addstr(row, 2, '%8s  %-60s' %
                           ('Pid', 'Guest Name (fuzzy list, might be '
                            'inaccurate!)'),
                           curses.A_UNDERLINE)
        row += 1
        try:
            for line in self.get_all_gnames():
                self.screen.addstr(row, 2, '%8s  %-60s' % (line[0], line[1]))
                row += 1
                if row >= self.screen.getmaxyx()[0]:
                    break
        except Exception:
            self.screen.addstr(row + 1, 2, 'Not available')

    @staticmethod
    def get_pid_from_gname(gname):
        """Fuzzy function to convert guest name to QEMU process pid.

        Returns a list of potential pids, can be empty if no match found.
        Throws an exception on processing errors.

        """
        pids = []
        for line in Tui.get_all_gnames():
            if gname == line[1]:
                pids.append(int(line[0]))

        return pids

    @staticmethod
    def get_gname_from_pid(pid):
        """Returns the guest name for a QEMU process pid.

        Extracts the guest name from the QEMU command line by processing the
        '-name' option. Will also handle names specified out of sequence.

        """
        name = ''
        try:
            line = open('/proc/{}/cmdline'
                        .format(pid), 'r').read().split('\0')
            parms = line[line.index('-name') + 1].split(',')
            while '' in parms:
                # commas are escaped (i.e. ',,'), hence e.g. 'foo,bar' results
                # in ['foo', '', 'bar'], which we revert here
                idx = parms.index('')
                parms[idx - 1] += ',' + parms[idx + 1]
                del parms[idx:idx+2]
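                # e.g. '-name foo,,bar' arrives here as ['foo', '', 'bar'];
                # the two lines above rejoin it to ['foo,bar']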
            # the '-name' switch allows for two ways to specify the guest
            # name, where the plain name overrides the name specified via
            # 'guest='
            for arg in parms:
                if '=' not in arg:
                    name = arg
                    break
                if arg[:6] == 'guest=':
                    name = arg[6:]
        except (ValueError, IOError, IndexError):
            pass

        return name

    def _update_pid(self, pid):
        """Propagates pid selection to stats object."""
        self.screen.addstr(4, 1, 'Updating pid filter...')
        self.screen.refresh()
        self.stats.pid_filter = pid

    def _refresh_header(self, pid=None):
        """Refreshes the header."""
        if pid is None:
            pid = self.stats.pid_filter
        self.screen.erase()
        gname = self.get_gname_from_pid(pid)
        self._gname = gname
        if gname:
            gname = ('({})'.format(gname[:MAX_GUEST_NAME_LEN] + '...'
                                   if len(gname) > MAX_GUEST_NAME_LEN
                                   else gname))
        if pid > 0:
            self._headline = 'kvm statistics - pid {0} {1}'.format(pid, gname)
        else:
            self._headline = 'kvm statistics - summary'
        self.screen.addstr(0, 0, self._headline, curses.A_BOLD)
        if self.stats.fields_filter:
            regex = self.stats.fields_filter
            if len(regex) > MAX_REGEX_LEN:
                regex = regex[:MAX_REGEX_LEN] + '...'
            self.screen.addstr(1, 17, 'regex filter: {0}'.format(regex))
        if self._display_guests:
            col_name = 'Guest Name'
        else:
            col_name = 'Event'
        self.screen.addstr(2, 1, '%-40s %10s%7s %8s' %
                           (col_name, 'Total', '%Total', 'CurAvg/s'),
                           curses.A_STANDOUT)
        self.screen.addstr(4, 1, 'Collecting data...')
        self.screen.refresh()

    def _refresh_body(self, sleeptime):
        def insert_child(sorted_items, child, values, parent):
            num = len(sorted_items)
            for i in range(0, num):
                # only add child if parent is present
                if parent.startswith(sorted_items[i][0]):
                    sorted_items.insert(i + 1, (' ' + child, values))

        def get_sorted_events(self, stats):
            """ separate parent and child events """
            if self._sorting == SORT_DEFAULT:
                def sortkey(pair):
                    # sort by (delta value, overall value)
                    v = pair[1]
                    return (v.delta, v.value)
            else:
                def sortkey(pair):
                    # sort by overall value
                    v = pair[1]
                    return v.value

            childs = []
            sorted_items = []
            # we can't rule out child events to appear prior to parents even
            # when sorted - separate out all children first, and add in later
            for key, values in sorted(stats.items(), key=sortkey,
                                      reverse=True):
                if values == (0, 0):
                    continue
                if key.find(' ') != -1:
                    if not self.stats.child_events:
                        continue
                    childs.insert(0, (key, values))
                else:
                    sorted_items.append((key, values))
            if self.stats.child_events:
                for key, values in childs:
                    (child, parent) = key.split(' ')
                    insert_child(sorted_items, child, values, parent)

            return sorted_items

        if not self._is_running_guest(self.stats.pid_filter):
            if self._gname:
                try:  # ...to identify the guest by name in case it's back
                    pids = self.get_pid_from_gname(self._gname)
                    if len(pids) == 1:
                        self._refresh_header(pids[0])
                        self._update_pid(pids[0])
                        return
                except:
                    pass
            self._display_guest_dead()
            # leave final data on screen
            return
        row = 3
        self.screen.move(row, 0)
        self.screen.clrtobot()
        stats = self.stats.get(self._display_guests)
        total = 0.
        ctotal = 0.
        for key, values in stats.items():
            if self._display_guests:
                if self.get_gname_from_pid(key):
                    total += values.value
                continue
            if not key.find(' ') != -1:
                total += values.value
            else:
                ctotal += values.value
        if total == 0.:
            # we don't have any fields, or all non-child events are filtered
            total = ctotal

        # print events
        tavg = 0
        tcur = 0
        guest_removed = False
        for key, values in get_sorted_events(self, stats):
            if row >= self.screen.getmaxyx()[0] - 1 or values == (0, 0):
                break
            if self._display_guests:
                key = self.get_gname_from_pid(key)
                if not key:
                    continue
            cur = int(round(values.delta / sleeptime)) if values.delta else 0
            if cur < 0:
                guest_removed = True
                continue
            if key[0] != ' ':
                if values.delta:
                    tcur += values.delta
                ptotal = values.value
                ltotal = total
            else:
                ltotal = ptotal
            self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' % (key,
                               values.value,
                               values.value * 100 / float(ltotal), cur))
            row += 1
        if row == 3:
            if guest_removed:
                self.screen.addstr(4, 1, 'Guest removed, updating...')
            else:
                self.screen.addstr(4, 1, 'No matching events reported yet')
        if row > 4:
            tavg = int(round(tcur / sleeptime)) if tcur > 0 else ''
            self.screen.addstr(row, 1, '%-40s %10d %8s' %
                               ('Total', total, tavg), curses.A_BOLD)
        self.screen.refresh()

    def _display_guest_dead(self):
        marker = ' Guest is DEAD '
        y = min(len(self._headline), 80 - len(marker))
        self.screen.addstr(0, y, marker, curses.A_BLINK | curses.A_STANDOUT)

    def _show_msg(self, text):
        """Display a centered message text and exit on key press"""
        hint = 'Press any key to continue'
        curses.cbreak()
        self.screen.erase()
        (x, term_width) = self.screen.getmaxyx()
        row = 2
        for line in text:
            start = (term_width - len(line)) // 2
            self.screen.addstr(row, start, line)
            row += 1
        self.screen.addstr(row + 1, (term_width - len(hint)) // 2, hint,
                           curses.A_STANDOUT)
        self.screen.getkey()

    def _show_help_interactive(self):
        """Display help with list of interactive commands"""
        msg = ('   b     toggle events by guests (debugfs only, honors'
               ' filters)',
               '   c     clear filter',
               '   f     filter by regular expression',
               '   g     filter by guest name/PID',
               '   h     display interactive commands reference',
               '   o     toggle sorting order (Total vs CurAvg/s)',
               '   p     filter by guest name/PID',
               '   q     quit',
               '   r     reset stats',
               '   s     set delay between refreshes (value range: '
               '%s-%s secs)' % (MIN_DELAY, MAX_DELAY),
               '   x     toggle reporting of stats for individual child trace'
               ' events',
               'Any other key refreshes statistics immediately')
        curses.cbreak()
        self.screen.erase()
        self.screen.addstr(0, 0, "Interactive commands reference",
                           curses.A_BOLD)
        self.screen.addstr(2, 0, "Press any key to exit", curses.A_STANDOUT)
        row = 4
        for line in msg:
            self.screen.addstr(row, 0, line)
            row += 1
        self.screen.getkey()
        self._refresh_header()

    def _show_filter_selection(self):
        """Draws filter selection mask.

        Asks for a valid regex and sets the fields filter accordingly.

        """
        msg = ''
        while True:
            self.screen.erase()
            self.screen.addstr(0, 0,
                               "Show statistics for events matching a regex.",
                               curses.A_BOLD)
            self.screen.addstr(2, 0,
                               "Current regex: {0}"
                               .format(self.stats.fields_filter))
            self.screen.addstr(5, 0, msg)
            self.screen.addstr(3, 0, "New regex: ")
            curses.echo()
            regex = self.screen.getstr().decode(ENCODING)
            curses.noecho()
            if len(regex) == 0:
                self.stats.fields_filter = ''
                self._refresh_header()
                return
            try:
                re.compile(regex)
                self.stats.fields_filter = regex
                self._refresh_header()
                return
            except re.error:
                msg = '"' + regex + '": Not a valid regular expression'
                continue
|
|
|
|
|
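
    # Example: entering 'kvm_exit' (a typical event name) restricts the
    # display to matching fields; an empty input clears the filter again.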

    def _show_set_update_interval(self):
        """Draws update interval selection mask."""
        msg = ''
        while True:
            self.screen.erase()
            self.screen.addstr(0, 0, 'Set update interval (defaults to %.1fs).'
                               % DELAY_DEFAULT, curses.A_BOLD)
            self.screen.addstr(4, 0, msg)
            self.screen.addstr(2, 0, 'Change delay from %.1fs to ' %
                               self._delay_regular)
            curses.echo()
            val = self.screen.getstr().decode(ENCODING)
            curses.noecho()

            try:
                if len(val) > 0:
                    delay = float(val)
                    err = is_delay_valid(delay)
                    if err is not None:
                        msg = err
                        continue
                else:
                    delay = DELAY_DEFAULT
                self._delay_regular = delay
                break

            except ValueError:
                msg = '"' + str(val) + '": Invalid value'
        self._refresh_header()
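
    # An empty input falls back to DELAY_DEFAULT; out-of-range values are
    # rejected via is_delay_valid() and the prompt is redrawn with the error.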

    def _is_running_guest(self, pid):
        """Check if pid is still a running process."""
        if not pid:
            return True
        return os.path.isdir(os.path.join('/proc/', str(pid)))
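
    # Note: a falsy pid (i.e. no pid filter set) is deliberately treated as
    # "running", since there is no specific guest that could have died.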

    def _show_vm_selection_by_guest(self):
        """Draws guest selection mask.

        Asks for a guest name or pid until a valid guest name or '' is entered.
        """
        msg = ''
        while True:
            self.screen.erase()
            self.screen.addstr(0, 0,
                               'Show statistics for specific guest or pid.',
                               curses.A_BOLD)
            self.screen.addstr(1, 0,
                               'This might limit the shown data to the trace '
                               'statistics.')
            self.screen.addstr(5, 0, msg)
            self._print_all_gnames(7)
            curses.echo()
            curses.curs_set(1)
            self.screen.addstr(3, 0, "Guest or pid [ENTER exits]: ")
            guest = self.screen.getstr().decode(ENCODING)
            curses.noecho()

            pid = 0
            if not guest or guest == '0':
                break
            if guest.isdigit():
                if not self._is_running_guest(guest):
                    msg = '"' + guest + '": Not a running process'
                    continue
                pid = int(guest)
                break
            pids = []
            try:
                pids = self.get_pid_from_gname(guest)
            except:
                msg = '"' + guest + '": Internal error while searching, ' \
                      'use pid filter instead'
                continue
            if len(pids) == 0:
                msg = '"' + guest + '": Not an active guest'
                continue
            if len(pids) > 1:
                msg = '"' + guest + '": Multiple matches found, use pid ' \
                      'filter instead'
                continue
            pid = pids[0]
            break
        curses.curs_set(0)
        self._refresh_header(pid)
        self._update_pid(pid)

    def show_stats(self):
        """Refreshes the screen and processes user input."""
        sleeptime = self._delay_initial
        self._refresh_header()
        start = 0.0  # result based on init value never appears on screen
        while True:
            self._refresh_body(time.time() - start)
            # halfdelay() expects tenths of a second
            curses.halfdelay(int(sleeptime * 10))
            start = time.time()
            sleeptime = self._delay_regular
            try:
                char = self.screen.getkey()
                if char == 'b':
                    self._display_guests = not self._display_guests
                    if self.stats.toggle_display_guests(self._display_guests):
                        self._show_msg(['Command not available with '
                                        'tracepoints enabled', 'Restart with '
                                        'debugfs only (see option \'-d\') and '
                                        'try again!'])
                        self._display_guests = not self._display_guests
                    self._refresh_header()
                if char == 'c':
                    self.stats.fields_filter = ''
                    self._refresh_header(0)
                    self._update_pid(0)
                if char == 'f':
                    curses.curs_set(1)
                    self._show_filter_selection()
                    curses.curs_set(0)
                    sleeptime = self._delay_initial
                if char == 'g' or char == 'p':
                    self._show_vm_selection_by_guest()
                    sleeptime = self._delay_initial
                if char == 'h':
                    self._show_help_interactive()
                if char == 'o':
                    self._sorting = not self._sorting
                if char == 'q':
                    break
                if char == 'r':
                    self.stats.reset()
                if char == 's':
                    curses.curs_set(1)
                    self._show_set_update_interval()
                    curses.curs_set(0)
                    sleeptime = self._delay_initial
                if char == 'x':
                    self.stats.child_events = not self.stats.child_events
            except KeyboardInterrupt:
                break
            except curses.error:
                continue


def batch(stats):
    """Prints statistics in a key, value format."""
    try:
        s = stats.get()
        time.sleep(1)
        s = stats.get()
        for key, values in sorted(s.items()):
            print('%-42s%10d%10d' % (key.split(' ')[0], values.value,
                                     values.delta))
    except KeyboardInterrupt:
        pass
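
# Example (hypothetical numbers): batch mode prints one '%-42s%10d%10d' line
# per event, i.e. the event name left-justified to 42 columns, followed by
# the cumulative count and the delta accumulated over the one-second sample.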


class StdFormat(object):
    def __init__(self, keys):
        self._banner = ''
        for key in keys:
            self._banner += key.split(' ')[0] + ' '

    def get_banner(self):
        return self._banner

    def get_statline(self, keys, s):
        res = ''
        for key in keys:
            res += ' %9d' % s[key].delta
        return res
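
# Example (hypothetical event names): StdFormat(['kvm_exit', 'kvm_entry'])
# produces the banner 'kvm_exit kvm_entry ' and statlines such as
# '        12        34', one ' %9d'-formatted delta per key.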


class CSVFormat(object):
    def __init__(self, keys):
        self._banner = 'timestamp'
        self._banner += reduce(lambda res, key: "{},{!s}".format(res,
                               key.split(' ')[0]), keys, '')

    def get_banner(self):
        return self._banner

    def get_statline(self, keys, s):
        return reduce(lambda res, key: "{},{!s}".format(res, s[key].delta),
                      keys, '')
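
# Example (hypothetical event names): CSVFormat(['kvm_exit', 'kvm_entry'])
# produces the banner 'timestamp,kvm_exit,kvm_entry'; get_statline() returns
# ',12,34' for deltas 12 and 34, which do_statline() in log() prefixes with
# the current timestamp.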


def log(stats, opts, frmt, keys):
    """Prints statistics as reiterating key block, multiple value blocks."""
    global signal_received
    line = 0
    banner_repeat = 20
    f = None

    def do_banner(opts):
        nonlocal f
        if opts.log_to_file:
            if not f:
                try:
                    f = open(opts.log_to_file, 'a')
                except (IOError, OSError):
                    sys.exit("Error: Could not open file: %s" %
                             opts.log_to_file)
            if isinstance(frmt, CSVFormat) and f.tell() != 0:
                return
        print(frmt.get_banner(), file=f or sys.stdout)

    def do_statline(opts, values):
        statline = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + \
                   frmt.get_statline(keys, values)
        print(statline, file=f or sys.stdout)

    do_banner(opts)
    banner_printed = True
    while True:
        try:
            time.sleep(opts.set_delay)
            if signal_received:
                banner_printed = True
                line = 0
                f.close()
                do_banner(opts)
                signal_received = False
            if (line % banner_repeat == 0 and not banner_printed and
                    not (opts.log_to_file and isinstance(frmt, CSVFormat))):
                do_banner(opts)
                banner_printed = True
            values = stats.get()
            if (not opts.skip_zero_records or
                    any(values[k].delta != 0 for k in keys)):
                do_statline(opts, values)
                line += 1
                banner_printed = False
        except KeyboardInterrupt:
            break

    if opts.log_to_file:
        f.close()
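
# handle_signal() only records that a SIGHUP arrived (see main(), which
# installs it when '-L' is used); log() then closes the logfile and reopens
# it via do_banner() on the next iteration, so a logrotate postrotate hook
# can send SIGHUP to switch writing back to the configured path, e.g.:
#   kvm_stat -dtc -s 10 -L /var/log/kvm_stat.csv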


def handle_signal(sig, frame):
    global signal_received

    signal_received = True

    return


def is_delay_valid(delay):
    """Verify delay is in valid value range."""
    msg = None
    if delay < MIN_DELAY:
        msg = '"' + str(delay) + '": Delay must be >=%s' % MIN_DELAY
    if delay > MAX_DELAY:
        msg = '"' + str(delay) + '": Delay must be <=%s' % MAX_DELAY
    return msg
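
# Example: with the documented limits of 0.1-25.5 secs, is_delay_valid(1.0)
# returns None, while is_delay_valid(0.01) returns the corresponding
# '"0.01": Delay must be >=...' message for the caller to display.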


def get_options():
    """Returns processed program arguments."""
    description_text = """
This script displays various statistics about VMs running under KVM.
The statistics are gathered from the KVM debugfs entries and / or the
currently available perf traces.

The monitoring takes additional cpu cycles and might affect the VM's
performance.

Requirements:
- Access to:
    %s
    %s/events/*
    /proc/pid/task
- /proc/sys/kernel/perf_event_paranoid < 1 if user has no
  CAP_SYS_ADMIN and perf events are used.
- CAP_SYS_RESOURCE if the hard limit is not high enough to allow
  the large number of files that are possibly opened.

Interactive Commands:
   b     toggle events by guests (debugfs only, honors filters)
   c     clear filter
   f     filter by regular expression
   g     filter by guest name
   h     display interactive commands reference
   o     toggle sorting order (Total vs CurAvg/s)
   p     filter by PID
   q     quit
   r     reset stats
   s     set update interval (value range: 0.1-25.5 secs)
   x     toggle reporting of stats for individual child trace events
Press any other key to refresh statistics immediately.
""" % (PATH_DEBUGFS_KVM, PATH_DEBUGFS_TRACING)

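    # Guest_to_pid resolves '-g <name>' to a pid while arguments are parsed,
    # reusing Tui.get_pid_from_gname() and exiting with a message when the
    # name is unknown or ambiguous.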
    class Guest_to_pid(argparse.Action):
        def __call__(self, parser, namespace, values, option_string=None):
            try:
                pids = Tui.get_pid_from_gname(values)
            except:
                sys.exit('Error while searching for guest "{}". Use "-p" to '
                         'specify a pid instead?'.format(values))
            if len(pids) == 0:
                sys.exit('Error: No guest by the name "{}" found'
                         .format(values))
            if len(pids) > 1:
                sys.exit('Error: Multiple processes found (pids: {}). Use "-p"'
                         ' to specify the desired pid'
                         .format(" ".join(map(str, pids))))
            namespace.pid = pids[0]

    argparser = argparse.ArgumentParser(description=description_text,
                                        formatter_class=argparse
                                        .RawTextHelpFormatter)
    argparser.add_argument('-1', '--once', '--batch',
                           action='store_true',
                           default=False,
                           help='run in batch mode for one second',
                           )
    argparser.add_argument('-c', '--csv',
                           action='store_true',
                           default=False,
                           help='log in csv format - requires option -l/-L',
                           )
    argparser.add_argument('-d', '--debugfs',
                           action='store_true',
                           default=False,
                           help='retrieve statistics from debugfs',
                           )
    argparser.add_argument('-f', '--fields',
                           default='',
                           help='''fields to display (regex)
"-f help" for a list of available events''',
                           )
    argparser.add_argument('-g', '--guest',
                           type=str,
                           help='restrict statistics to guest by name',
                           action=Guest_to_pid,
                           )
    argparser.add_argument('-i', '--debugfs-include-past',
                           action='store_true',
                           default=False,
                           help='include all available data on past events for'
                                ' debugfs',
                           )
    argparser.add_argument('-l', '--log',
                           action='store_true',
                           default=False,
                           help='run in logging mode (like vmstat)',
                           )
    argparser.add_argument('-L', '--log-to-file',
                           type=str,
                           metavar='FILE',
                           help="like '--log', but logging to a file"
                           )
    argparser.add_argument('-p', '--pid',
                           type=int,
                           default=0,
                           help='restrict statistics to pid',
                           )
    argparser.add_argument('-s', '--set-delay',
                           type=float,
                           default=DELAY_DEFAULT,
                           metavar='DELAY',
                           help='set delay between refreshes (value range: '
                                '%s-%s secs)' % (MIN_DELAY, MAX_DELAY),
                           )
    argparser.add_argument('-t', '--tracepoints',
                           action='store_true',
                           default=False,
                           help='retrieve statistics from tracepoints',
                           )
    argparser.add_argument('-z', '--skip-zero-records',
                           action='store_true',
                           default=False,
                           help='omit records with all zeros in logging mode',
                           )
    options = argparser.parse_args()
    if options.csv and not (options.log or options.log_to_file):
        sys.exit('Error: Option -c/--csv requires -l/-L')
    if options.skip_zero_records and not (options.log or options.log_to_file):
        sys.exit('Error: Option -z/--skip-zero-records requires -l/-L')
    try:
        # verify that we were passed a valid regex up front
        re.compile(options.fields)
    except re.error:
        sys.exit('Error: "' + options.fields + '" is not a valid regular '
                 'expression')

    return options


def check_access(options):
    """Exits if the current user can't access all needed directories."""
    if not os.path.exists(PATH_DEBUGFS_TRACING) and (options.tracepoints or
                                                     not options.debugfs):
        sys.stderr.write("Please enable CONFIG_TRACING in your kernel "
                         "when using the option -t (default).\n"
                         "If it is enabled, make {0} readable by the "
                         "current user.\n"
                         .format(PATH_DEBUGFS_TRACING))
        if options.tracepoints:
            sys.exit(1)

        sys.stderr.write("Falling back to debugfs statistics!\n")
        options.debugfs = True
        time.sleep(5)

    return options


def assign_globals():
    global PATH_DEBUGFS_KVM
    global PATH_DEBUGFS_TRACING

    debugfs = ''
    for line in open('/proc/mounts'):
        if line.split(' ')[0] == 'debugfs':
            debugfs = line.split(' ')[1]
            break
    if debugfs == '':
        sys.stderr.write("Please make sure that CONFIG_DEBUG_FS is enabled in "
                         "your kernel, mounted and\nreadable by the current "
                         "user:\n"
                         "('mount -t debugfs debugfs /sys/kernel/debug')\n")
        sys.exit(1)

    PATH_DEBUGFS_KVM = os.path.join(debugfs, 'kvm')
    PATH_DEBUGFS_TRACING = os.path.join(debugfs, 'tracing')

    if not os.path.exists(PATH_DEBUGFS_KVM):
        sys.stderr.write("Please make sure that CONFIG_KVM is enabled in "
                         "your kernel and that the modules are loaded.\n")
        sys.exit(1)
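
# A matching /proc/mounts entry typically looks like (illustrative):
#   debugfs /sys/kernel/debug debugfs rw,nosuid,nodev,noexec,relatime 0 0
# so assign_globals() takes the second field as the debugfs mount point.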


def main():
    assign_globals()
    options = get_options()
    options = check_access(options)

    if (options.pid > 0 and
            not os.path.isdir(os.path.join('/proc/',
                                           str(options.pid)))):
        sys.stderr.write('Did you use a (unsupported) tid instead of a pid?\n')
        sys.exit('Specified pid does not exist.')

    err = is_delay_valid(options.set_delay)
    if err is not None:
        sys.exit('Error: ' + err)

    stats = Stats(options)

    if options.fields == 'help':
        stats.fields_filter = None
        event_list = []
        for key in stats.get().keys():
            event_list.append(key.split('(', 1)[0])
        sys.stdout.write('  ' + '\n  '.join(sorted(set(event_list))) + '\n')
        sys.exit(0)

    if options.log or options.log_to_file:
        if options.log_to_file:
            signal.signal(signal.SIGHUP, handle_signal)
        keys = sorted(stats.get().keys())
        if options.csv:
            frmt = CSVFormat(keys)
        else:
            frmt = StdFormat(keys)
        log(stats, options, frmt, keys)
    elif not options.once:
        with Tui(stats, options) as tui:
            tui.show_stats()
    else:
        batch(stats)


if __name__ == "__main__":
    main()