s390: Fix misspellings in comments
Signed-off-by: Adam Buchbinder <adam.buchbinder@gmail.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
This commit is contained in:
Parent
1e133ab296
Commit
7eb792bf7c
|
@ -21,7 +21,7 @@
|
|||
#define PMU_F_ERR_LSDA 0x0200
|
||||
#define PMU_F_ERR_MASK (PMU_F_ERR_IBE|PMU_F_ERR_LSDA)
|
||||
|
||||
/* Perf defintions for PMU event attributes in sysfs */
|
||||
/* Perf definitions for PMU event attributes in sysfs */
|
||||
extern __init const struct attribute_group **cpumf_cf_event_group(void);
|
||||
extern ssize_t cpumf_events_sysfs_show(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
|
|
|
@ -31,7 +31,7 @@
|
|||
* This should be totally fair - if anything is waiting, a process that wants a
|
||||
* lock will go to the back of the queue. When the currently active lock is
|
||||
* released, if there's a writer at the front of the queue, then that and only
|
||||
* that will be woken up; if there's a bunch of consequtive readers at the
|
||||
* that will be woken up; if there's a bunch of consecutive readers at the
|
||||
* front, then they'll all be woken up, but no other readers will be.
|
||||
*/
|
||||
|
||||
|
|
|
@ -383,7 +383,7 @@ static int __hw_perf_event_init(struct perf_event *event)
|
|||
|
||||
/* Validate the counter that is assigned to this event.
|
||||
* Because the counter facility can use numerous counters at the
|
||||
* same time without constraints, it is not necessary to explicity
|
||||
* same time without constraints, it is not necessary to explicitly
|
||||
* validate event groups (event->group_leader != event).
|
||||
*/
|
||||
err = validate_event(hwc);
|
||||
|
|
|
@ -238,7 +238,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
|
|||
dump_trace(__perf_callchain_kernel, entry, NULL, regs->gprs[15]);
|
||||
}
|
||||
|
||||
/* Perf defintions for PMU event attributes in sysfs */
|
||||
/* Perf definitions for PMU event attributes in sysfs */
|
||||
ssize_t cpumf_events_sysfs_show(struct device *dev,
|
||||
struct device_attribute *attr, char *page)
|
||||
{
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
/*
|
||||
* Extends the address range given by *start and *stop to include the address
|
||||
* range starting with estart and the length len. Takes care of overflowing
|
||||
* intervals and tries to minimize the overall intervall size.
|
||||
* intervals and tries to minimize the overall interval size.
|
||||
*/
|
||||
static void extend_address_range(u64 *start, u64 *stop, u64 estart, int len)
|
||||
{
|
||||
|
@ -72,7 +72,7 @@ static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
|
|||
return;
|
||||
|
||||
/*
|
||||
* If the guest is not interrested in branching events, we can savely
|
||||
* If the guest is not interested in branching events, we can safely
|
||||
* limit them to the PER address range.
|
||||
*/
|
||||
if (!(*cr9 & PER_EVENT_BRANCH))
|
||||
|
|
Loading…
Reference in a new issue