kprobes: Use bool type for functions which return boolean values

Use the 'bool' type instead of 'int' for functions which return a
boolean value, because this makes it clear that those functions don't
return an error code.

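For illustration, a minimal before/after sketch of the pattern this patch
applies (the kprobe_gone() case, taken from the diff below):

	/* Before: 'int' suggests the function may return an error code. */
	static inline int kprobe_gone(struct kprobe *p)
	{
		return p->flags & KPROBE_FLAG_GONE;
	}

	/* After: 'bool' makes the true/false contract explicit, so callers
	 * such as trace_kprobe_has_gone() no longer need a '!!' conversion.
	 */
	static inline bool kprobe_gone(struct kprobe *p)
	{
		return p->flags & KPROBE_FLAG_GONE;
	}
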
Link: https://lkml.kernel.org/r/163163041649.489837.17311187321419747536.stgit@devnote2

Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Masami Hiramatsu 2021-09-14 23:40:16 +09:00, committed by Steven Rostedt (VMware)
Parent c42421e205
Commit 29e8077ae2
3 changed files with 18 additions and 18 deletions

include/linux/kprobes.h

@@ -104,25 +104,25 @@ struct kprobe {
 #define KPROBE_FLAG_FTRACE	8 /* probe is using ftrace */
 
 /* Has this kprobe gone ? */
-static inline int kprobe_gone(struct kprobe *p)
+static inline bool kprobe_gone(struct kprobe *p)
 {
 	return p->flags & KPROBE_FLAG_GONE;
 }
 
 /* Is this kprobe disabled ? */
-static inline int kprobe_disabled(struct kprobe *p)
+static inline bool kprobe_disabled(struct kprobe *p)
 {
 	return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
 }
 
 /* Is this kprobe really running optimized path ? */
-static inline int kprobe_optimized(struct kprobe *p)
+static inline bool kprobe_optimized(struct kprobe *p)
 {
 	return p->flags & KPROBE_FLAG_OPTIMIZED;
 }
 
 /* Is this kprobe uses ftrace ? */
-static inline int kprobe_ftrace(struct kprobe *p)
+static inline bool kprobe_ftrace(struct kprobe *p)
 {
 	return p->flags & KPROBE_FLAG_FTRACE;
 }

kernel/kprobes.c

@@ -198,8 +198,8 @@ out:
 	return slot;
 }
 
-/* Return 1 if all garbages are collected, otherwise 0. */
-static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
+/* Return true if all garbages are collected, otherwise false. */
+static bool collect_one_slot(struct kprobe_insn_page *kip, int idx)
 {
 	kip->slot_used[idx] = SLOT_CLEAN;
 	kip->nused--;
@@ -223,9 +223,9 @@ static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
 			kip->cache->free(kip->insns);
 			kfree(kip);
 		}
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 static int collect_garbage_slots(struct kprobe_insn_cache *c)
@@ -389,13 +389,13 @@ NOKPROBE_SYMBOL(get_kprobe);
 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
 
 /* Return true if 'p' is an aggregator */
-static inline int kprobe_aggrprobe(struct kprobe *p)
+static inline bool kprobe_aggrprobe(struct kprobe *p)
 {
 	return p->pre_handler == aggr_pre_handler;
 }
 
 /* Return true if 'p' is unused */
-static inline int kprobe_unused(struct kprobe *p)
+static inline bool kprobe_unused(struct kprobe *p)
 {
 	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
 	       list_empty(&p->list);
@@ -455,7 +455,7 @@ static inline int kprobe_optready(struct kprobe *p)
 }
 
 /* Return true if the kprobe is disarmed. Note: p must be on hash list */
-static inline int kprobe_disarmed(struct kprobe *p)
+static inline bool kprobe_disarmed(struct kprobe *p)
 {
 	struct optimized_kprobe *op;
@@ -469,16 +469,16 @@ static inline int kprobe_disarmed(struct kprobe *p)
 }
 
 /* Return true if the probe is queued on (un)optimizing lists */
-static int kprobe_queued(struct kprobe *p)
+static bool kprobe_queued(struct kprobe *p)
 {
 	struct optimized_kprobe *op;
 
 	if (kprobe_aggrprobe(p)) {
 		op = container_of(p, struct optimized_kprobe, kp);
 		if (!list_empty(&op->list))
-			return 1;
+			return true;
 	}
-	return 0;
+	return false;
 }
 
 /*
@@ -1678,7 +1678,7 @@ out:
 EXPORT_SYMBOL_GPL(register_kprobe);
 
 /* Check if all probes on the 'ap' are disabled. */
-static int aggr_kprobe_disabled(struct kprobe *ap)
+static bool aggr_kprobe_disabled(struct kprobe *ap)
 {
 	struct kprobe *kp;
@@ -1690,9 +1690,9 @@ static int aggr_kprobe_disabled(struct kprobe *ap)
 			 * Since there is an active probe on the list,
 			 * we can't disable this 'ap'.
 			 */
-			return 0;
+			return false;
 
-	return 1;
+	return true;
 }
 
 static struct kprobe *__disable_kprobe(struct kprobe *p)

kernel/trace/trace_kprobe.c

@@ -97,7 +97,7 @@ static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk
 
 static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
 {
-	return !!(kprobe_gone(&tk->rp.kp));
+	return kprobe_gone(&tk->rp.kp);
 }
 
 static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,