workqueue: implement several utility APIs
Implement the following utility APIs. workqueue_set_max_active() : adjust max_active of a wq workqueue_congested() : test whether a wq is congested work_cpu() : determine the last / current cpu of a work work_busy() : query whether a work is busy * Anton Blanchard fixed missing ret initialization in work_busy(). Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Anton Blanchard <anton@samba.org>
This commit is contained in:
Родитель
d320c03830
Коммит
dcd989cb73
|
@ -61,6 +61,10 @@ enum {
|
|||
WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
|
||||
WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
|
||||
WORK_STRUCT_NO_CPU = NR_CPUS << WORK_STRUCT_FLAG_BITS,
|
||||
|
||||
/* bit mask for work_busy() return values */
|
||||
WORK_BUSY_PENDING = 1 << 0,
|
||||
WORK_BUSY_RUNNING = 1 << 1,
|
||||
};
|
||||
|
||||
struct work_struct {
|
||||
|
@ -307,9 +311,14 @@ extern void init_workqueues(void);
|
|||
int execute_in_process_context(work_func_t fn, struct execute_work *);
|
||||
|
||||
extern int flush_work(struct work_struct *work);
|
||||
|
||||
extern int cancel_work_sync(struct work_struct *work);
|
||||
|
||||
extern void workqueue_set_max_active(struct workqueue_struct *wq,
|
||||
int max_active);
|
||||
extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
|
||||
extern unsigned int work_cpu(struct work_struct *work);
|
||||
extern unsigned int work_busy(struct work_struct *work);
|
||||
|
||||
/*
|
||||
* Kill off a pending schedule_delayed_work(). Note that the work callback
|
||||
* function may still be running on return from cancel_delayed_work(), unless
|
||||
|
|
|
@ -203,7 +203,7 @@ struct workqueue_struct {
|
|||
cpumask_var_t mayday_mask; /* cpus requesting rescue */
|
||||
struct worker *rescuer; /* I: rescue worker */
|
||||
|
||||
int saved_max_active; /* I: saved cwq max_active */
|
||||
int saved_max_active; /* W: saved cwq max_active */
|
||||
const char *name; /* I: workqueue name */
|
||||
#ifdef CONFIG_LOCKDEP
|
||||
struct lockdep_map lockdep_map;
|
||||
|
@ -2675,6 +2675,112 @@ void destroy_workqueue(struct workqueue_struct *wq)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(destroy_workqueue);
|
||||
|
||||
/**
|
||||
* workqueue_set_max_active - adjust max_active of a workqueue
|
||||
* @wq: target workqueue
|
||||
* @max_active: new max_active value.
|
||||
*
|
||||
* Set max_active of @wq to @max_active.
|
||||
*
|
||||
* CONTEXT:
|
||||
* Don't call from IRQ context.
|
||||
*/
|
||||
void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
|
||||
{
|
||||
unsigned int cpu;
|
||||
|
||||
max_active = wq_clamp_max_active(max_active, wq->name);
|
||||
|
||||
spin_lock(&workqueue_lock);
|
||||
|
||||
wq->saved_max_active = max_active;
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct global_cwq *gcwq = get_gcwq(cpu);
|
||||
|
||||
spin_lock_irq(&gcwq->lock);
|
||||
|
||||
if (!(wq->flags & WQ_FREEZEABLE) ||
|
||||
!(gcwq->flags & GCWQ_FREEZING))
|
||||
get_cwq(gcwq->cpu, wq)->max_active = max_active;
|
||||
|
||||
spin_unlock_irq(&gcwq->lock);
|
||||
}
|
||||
|
||||
spin_unlock(&workqueue_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(workqueue_set_max_active);
|
||||
|
||||
/**
|
||||
* workqueue_congested - test whether a workqueue is congested
|
||||
* @cpu: CPU in question
|
||||
* @wq: target workqueue
|
||||
*
|
||||
* Test whether @wq's cpu workqueue for @cpu is congested. There is
|
||||
* no synchronization around this function and the test result is
|
||||
* unreliable and only useful as advisory hints or for debugging.
|
||||
*
|
||||
* RETURNS:
|
||||
* %true if congested, %false otherwise.
|
||||
*/
|
||||
bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
|
||||
{
|
||||
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
|
||||
|
||||
return !list_empty(&cwq->delayed_works);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(workqueue_congested);
|
||||
|
||||
/**
|
||||
* work_cpu - return the last known associated cpu for @work
|
||||
* @work: the work of interest
|
||||
*
|
||||
* RETURNS:
|
||||
* CPU number if @work was ever queued. NR_CPUS otherwise.
|
||||
*/
|
||||
unsigned int work_cpu(struct work_struct *work)
|
||||
{
|
||||
struct global_cwq *gcwq = get_work_gcwq(work);
|
||||
|
||||
return gcwq ? gcwq->cpu : NR_CPUS;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(work_cpu);
|
||||
|
||||
/**
|
||||
* work_busy - test whether a work is currently pending or running
|
||||
* @work: the work to be tested
|
||||
*
|
||||
* Test whether @work is currently pending or running. There is no
|
||||
* synchronization around this function and the test result is
|
||||
* unreliable and only useful as advisory hints or for debugging.
|
||||
* Especially for reentrant wqs, the pending state might hide the
|
||||
* running state.
|
||||
*
|
||||
* RETURNS:
|
||||
* OR'd bitmask of WORK_BUSY_* bits.
|
||||
*/
|
||||
unsigned int work_busy(struct work_struct *work)
|
||||
{
|
||||
struct global_cwq *gcwq = get_work_gcwq(work);
|
||||
unsigned long flags;
|
||||
unsigned int ret = 0;
|
||||
|
||||
if (!gcwq)
|
||||
return false;
|
||||
|
||||
spin_lock_irqsave(&gcwq->lock, flags);
|
||||
|
||||
if (work_pending(work))
|
||||
ret |= WORK_BUSY_PENDING;
|
||||
if (find_worker_executing_work(gcwq, work))
|
||||
ret |= WORK_BUSY_RUNNING;
|
||||
|
||||
spin_unlock_irqrestore(&gcwq->lock, flags);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(work_busy);
|
||||
|
||||
/*
|
||||
* CPU hotplug.
|
||||
*
|
||||
|
|
Загрузка…
Ссылка в новой задаче