s390/cio: Delay scan for newly available I/O devices
The CIO layer scans for newly available I/O devices by performing a scan of available subchannels using the Store Subchannel (STSCH) instruction. Performing too many STSCH instructions in a tight loop can cause high Hypervisor overhead, which can negatively impact the performance of the virtual machine as a whole.

A subchannel scan is triggered, for example, by a hardware event that indicates that a channel path has become available. It is also triggered by the DASD device driver for each device that is set online.

This patch reduces the number of STSCH instructions being performed by delaying the start of the actual subchannel scan by 1 second. Multiple scan requests scheduled during this time are merged into a single scan loop.

The trade-off is a short delay between the time the event is processed and a newly available device becoming usable. This delay should be acceptable since it only affects devices that have not been in use before.

Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
Reviewed-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Parent: b207f5a8f9
Commit: 175746eb06
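The merging behaviour comes from the delayed-workqueue API: queue_delayed_work() does nothing and returns false if the work item is already pending, so every request that arrives within the delay window collapses into the one scan that is already scheduled. Below is a minimal, self-contained module sketch of that pattern (illustration only, not part of the patch); the names scan_func, scan_work, schedule_scan and scan_demo_* are made up for the example.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* One shared work item; all scan requests funnel into it. */
static void scan_func(struct work_struct *unused)
{
	pr_info("running one merged scan\n");
}

static DECLARE_DELAYED_WORK(scan_work, scan_func);

/* Request a scan; becomes a no-op if a scan is already pending. */
static void schedule_scan(unsigned long delay)
{
	/* queue_delayed_work() returns false when scan_work is already queued. */
	queue_delayed_work(system_wq, &scan_work, delay);
}

static int __init scan_demo_init(void)
{
	schedule_scan(1 * HZ);	/* arms the 1-second timer */
	schedule_scan(1 * HZ);	/* merged with the pending request */
	return 0;
}

static void __exit scan_demo_exit(void)
{
	cancel_delayed_work_sync(&scan_work);
}

module_init(scan_demo_init);
module_exit(scan_demo_exit);
MODULE_LICENSE("GPL");

With css_schedule_reprobe() using a 1 * HZ delay in the same way, a burst of reprobe requests, such as one per DASD device being set online, results in a single subchannel scan instead of one scan per request.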
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -269,7 +269,7 @@ static int blacklist_parse_proc_parameters(char *buf)
 	else
 		return -EINVAL;
 
-	css_schedule_reprobe();
+	css_schedule_eval_all_unreg(0);
 
 	return rc;
 }
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -575,7 +575,7 @@ static void css_slow_path_func(struct work_struct *unused)
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
-static DECLARE_WORK(slow_path_work, css_slow_path_func);
+static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
 struct workqueue_struct *cio_work_q;
 
 void css_schedule_eval(struct subchannel_id schid)
@@ -585,7 +585,7 @@ void css_schedule_eval(struct subchannel_id schid)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_sch_add(slow_subchannel_set, schid);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(cio_work_q, &slow_path_work);
+	queue_delayed_work(cio_work_q, &slow_path_work, 0);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
@@ -596,7 +596,7 @@ void css_schedule_eval_all(void)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_fill(slow_subchannel_set);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(cio_work_q, &slow_path_work);
+	queue_delayed_work(cio_work_q, &slow_path_work, 0);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
@@ -609,7 +609,7 @@ static int __unset_registered(struct device *dev, void *data)
 	return 0;
 }
 
-static void css_schedule_eval_all_unreg(void)
+void css_schedule_eval_all_unreg(unsigned long delay)
 {
 	unsigned long flags;
 	struct idset *unreg_set;
@@ -627,7 +627,7 @@ static void css_schedule_eval_all_unreg(void)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_add_set(slow_subchannel_set, unreg_set);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(cio_work_q, &slow_path_work);
+	queue_delayed_work(cio_work_q, &slow_path_work, delay);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 	idset_free(unreg_set);
 }
@@ -640,7 +640,8 @@ void css_wait_for_slow_path(void)
 /* Schedule reprobing of all unregistered subchannels. */
 void css_schedule_reprobe(void)
 {
-	css_schedule_eval_all_unreg();
+	/* Schedule with a delay to allow merging of subsequent calls. */
+	css_schedule_eval_all_unreg(1 * HZ);
 }
 EXPORT_SYMBOL_GPL(css_schedule_reprobe);
 
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -133,6 +133,7 @@ extern struct channel_subsystem *channel_subsystems[];
 /* Helper functions to build lists for the slow path. */
 void css_schedule_eval(struct subchannel_id schid);
 void css_schedule_eval_all(void);
+void css_schedule_eval_all_unreg(unsigned long delay);
 int css_complete_work(void);
 
 int sch_is_pseudo_sch(struct subchannel *);