s390/cio: remove pm support from ccw bus driver
As part of removing the broken power management support from the s390
architecture, remove the pm callbacks from the ccw bus driver. The
power-management functions have been unused since commit 394216275c
("s390: remove broken hibernate / power management support").
Signed-off-by: Vineeth Vijayan <vneethv@linux.ibm.com>
Reviewed-by: Peter Oberparleiter <oberpar@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Parent: ef2eea78a6
Commit: 8cc0dcfdc1
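For orientation only, not part of the commit: a minimal sketch of a ccw device driver built against the trimmed struct ccw_driver, showing that no pm hooks remain to be filled in. The foo_* names and the 0x3990 control-unit type are hypothetical placeholders; only the ccw_driver fields visible in the hunks below are assumed.

/*
 * Hypothetical example: a ccw driver after this change. There are no
 * .prepare/.complete/.freeze/.thaw/.restore members left to set.
 */
#include <linux/module.h>
#include <asm/ccwdev.h>

static struct ccw_device_id foo_ids[] = {
	{ CCW_DEVICE(0x3990, 0) },	/* placeholder control-unit type */
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ccw, foo_ids);

static int foo_probe(struct ccw_device *cdev)
{
	dev_info(&cdev->dev, "foo: device attached\n");
	return 0;
}

static struct ccw_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
	.ids	= foo_ids,
	.probe	= foo_probe,
	/* pm transitions are no longer routed through the ccw bus */
};

static int __init foo_init(void)
{
	return ccw_driver_register(&foo_driver);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	ccw_driver_unregister(&foo_driver);
}
module_exit(foo_exit);

MODULE_LICENSE("GPL");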
@@ -124,11 +124,6 @@ enum uc_todo {
  * @notify: notify driver of device state changes
  * @path_event: notify driver of channel path events
  * @shutdown: called at device shutdown
- * @prepare: prepare for pm state transition
- * @complete: undo work done in @prepare
- * @freeze: callback for freezing during hibernation snapshotting
- * @thaw: undo work done in @freeze
- * @restore: callback for restoring after hibernation
  * @uc_handler: callback for unit check handler
  * @driver: embedded device driver structure
  * @int_class: interruption class to use for accounting interrupts
@@ -142,11 +137,6 @@ struct ccw_driver {
 	int (*notify) (struct ccw_device *, int);
 	void (*path_event) (struct ccw_device *, int *);
 	void (*shutdown) (struct ccw_device *);
-	int (*prepare) (struct ccw_device *);
-	void (*complete) (struct ccw_device *);
-	int (*freeze)(struct ccw_device *);
-	int (*thaw) (struct ccw_device *);
-	int (*restore)(struct ccw_device *);
 	enum uc_todo (*uc_handler) (struct ccw_device *, struct irb *);
 	struct device_driver driver;
 	enum interruption_class int_class;
@@ -1109,11 +1109,6 @@ static ssize_t cmb_enable_store(struct device *dev,
 }
 DEVICE_ATTR_RW(cmb_enable);
 
-int ccw_set_cmf(struct ccw_device *cdev, int enable)
-{
-	return cmbops->set(cdev, enable ? 2 : 0);
-}
-
 /**
  * enable_cmf() - switch on the channel measurement for a specific device
  * @cdev: The ccw device to be enabled
@@ -1408,7 +1408,7 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
 	}
 	if (device_is_disconnected(cdev))
 		return IO_SCH_REPROBE;
-	if (cdev->online && !cdev->private->flags.resuming)
+	if (cdev->online)
 		return IO_SCH_VERIFY;
 	if (cdev->private->state == DEV_STATE_NOT_OPER)
 		return IO_SCH_UNREG_ATTACH;
@@ -1500,11 +1500,6 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
 		break;
 	case IO_SCH_UNREG_ATTACH:
 		spin_lock_irqsave(sch->lock, flags);
-		if (cdev->private->flags.resuming) {
-			/* Device will be handled later. */
-			rc = 0;
-			goto out_unlock;
-		}
 		sch_set_cdev(sch, NULL);
 		spin_unlock_irqrestore(sch->lock, flags);
 		/* Unregister ccw device. */
@@ -1517,7 +1512,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
 	switch (action) {
 	case IO_SCH_ORPH_UNREG:
 	case IO_SCH_UNREG:
-		if (!cdev || !cdev->private->flags.resuming)
+		if (!cdev)
 			css_sch_device_unregister(sch);
 		break;
 	case IO_SCH_ORPH_ATTACH:
@@ -1676,14 +1671,6 @@ void ccw_device_wait_idle(struct ccw_device *cdev)
 		udelay_simple(100);
 	}
 }
-
-static int ccw_device_pm_restore(struct device *dev);
-
-int ccw_device_force_console(struct ccw_device *cdev)
-{
-	return ccw_device_pm_restore(&cdev->dev);
-}
-EXPORT_SYMBOL_GPL(ccw_device_force_console);
 #endif
 
 /**
@@ -1784,235 +1771,6 @@ static void ccw_device_shutdown(struct device *dev)
 	__disable_cmf(cdev);
 }
 
-static int ccw_device_pm_prepare(struct device *dev)
-{
-	struct ccw_device *cdev = to_ccwdev(dev);
-
-	if (work_pending(&cdev->private->todo_work))
-		return -EAGAIN;
-	/* Fail while device is being set online/offline. */
-	if (atomic_read(&cdev->private->onoff))
-		return -EAGAIN;
-
-	if (cdev->online && cdev->drv && cdev->drv->prepare)
-		return cdev->drv->prepare(cdev);
-
-	return 0;
-}
-
-static void ccw_device_pm_complete(struct device *dev)
-{
-	struct ccw_device *cdev = to_ccwdev(dev);
-
-	if (cdev->online && cdev->drv && cdev->drv->complete)
-		cdev->drv->complete(cdev);
-}
-
-static int ccw_device_pm_freeze(struct device *dev)
-{
-	struct ccw_device *cdev = to_ccwdev(dev);
-	struct subchannel *sch = to_subchannel(cdev->dev.parent);
-	int ret, cm_enabled;
-
-	/* Fail suspend while device is in transistional state. */
-	if (!dev_fsm_final_state(cdev))
-		return -EAGAIN;
-	if (!cdev->online)
-		return 0;
-	if (cdev->drv && cdev->drv->freeze) {
-		ret = cdev->drv->freeze(cdev);
-		if (ret)
-			return ret;
-	}
-
-	spin_lock_irq(sch->lock);
-	cm_enabled = cdev->private->cmb != NULL;
-	spin_unlock_irq(sch->lock);
-	if (cm_enabled) {
-		/* Don't have the css write on memory. */
-		ret = ccw_set_cmf(cdev, 0);
-		if (ret)
-			return ret;
-	}
-	/* From here on, disallow device driver I/O. */
-	spin_lock_irq(sch->lock);
-	ret = cio_disable_subchannel(sch);
-	spin_unlock_irq(sch->lock);
-
-	return ret;
-}
-
-static int ccw_device_pm_thaw(struct device *dev)
-{
-	struct ccw_device *cdev = to_ccwdev(dev);
-	struct subchannel *sch = to_subchannel(cdev->dev.parent);
-	int ret, cm_enabled;
-
-	if (!cdev->online)
-		return 0;
-
-	spin_lock_irq(sch->lock);
-	/* Allow device driver I/O again. */
-	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
-	cm_enabled = cdev->private->cmb != NULL;
-	spin_unlock_irq(sch->lock);
-	if (ret)
-		return ret;
-
-	if (cm_enabled) {
-		ret = ccw_set_cmf(cdev, 1);
-		if (ret)
-			return ret;
-	}
-
-	if (cdev->drv && cdev->drv->thaw)
-		ret = cdev->drv->thaw(cdev);
-
-	return ret;
-}
-
-static void __ccw_device_pm_restore(struct ccw_device *cdev)
-{
-	struct subchannel *sch = to_subchannel(cdev->dev.parent);
-
-	spin_lock_irq(sch->lock);
-	if (cio_is_console(sch->schid)) {
-		cio_enable_subchannel(sch, (u32)(addr_t)sch);
-		goto out_unlock;
-	}
-	/*
-	 * While we were sleeping, devices may have gone or become
-	 * available again. Kick re-detection.
-	 */
-	cdev->private->flags.resuming = 1;
-	cdev->private->path_new_mask = LPM_ANYPATH;
-	css_sched_sch_todo(sch, SCH_TODO_EVAL);
-	spin_unlock_irq(sch->lock);
-	css_wait_for_slow_path();
-
-	/* cdev may have been moved to a different subchannel. */
-	sch = to_subchannel(cdev->dev.parent);
-	spin_lock_irq(sch->lock);
-	if (cdev->private->state != DEV_STATE_ONLINE &&
-	    cdev->private->state != DEV_STATE_OFFLINE)
-		goto out_unlock;
-
-	ccw_device_recognition(cdev);
-	spin_unlock_irq(sch->lock);
-	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
-		   cdev->private->state == DEV_STATE_DISCONNECTED);
-	spin_lock_irq(sch->lock);
-
-out_unlock:
-	cdev->private->flags.resuming = 0;
-	spin_unlock_irq(sch->lock);
-}
-
-static int resume_handle_boxed(struct ccw_device *cdev)
-{
-	cdev->private->state = DEV_STATE_BOXED;
-	if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
-		return 0;
-	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
-	return -ENODEV;
-}
-
-static int resume_handle_disc(struct ccw_device *cdev)
-{
-	cdev->private->state = DEV_STATE_DISCONNECTED;
-	if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
-		return 0;
-	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
-	return -ENODEV;
-}
-
-static int ccw_device_pm_restore(struct device *dev)
-{
-	struct ccw_device *cdev = to_ccwdev(dev);
-	struct subchannel *sch;
-	int ret = 0;
-
-	__ccw_device_pm_restore(cdev);
-	sch = to_subchannel(cdev->dev.parent);
-	spin_lock_irq(sch->lock);
-	if (cio_is_console(sch->schid))
-		goto out_restore;
-
-	/* check recognition results */
-	switch (cdev->private->state) {
-	case DEV_STATE_OFFLINE:
-	case DEV_STATE_ONLINE:
-		cdev->private->flags.donotify = 0;
-		break;
-	case DEV_STATE_BOXED:
-		ret = resume_handle_boxed(cdev);
-		if (ret)
-			goto out_unlock;
-		goto out_restore;
-	default:
-		ret = resume_handle_disc(cdev);
-		if (ret)
-			goto out_unlock;
-		goto out_restore;
-	}
-	/* check if the device type has changed */
-	if (!ccw_device_test_sense_data(cdev)) {
-		ccw_device_update_sense_data(cdev);
-		ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
-		ret = -ENODEV;
-		goto out_unlock;
-	}
-	if (!cdev->online)
-		goto out_unlock;
-
-	if (ccw_device_online(cdev)) {
-		ret = resume_handle_disc(cdev);
-		if (ret)
-			goto out_unlock;
-		goto out_restore;
-	}
-	spin_unlock_irq(sch->lock);
-	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
-	spin_lock_irq(sch->lock);
-
-	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
-		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
-		ret = -ENODEV;
-		goto out_unlock;
-	}
-
-	/* reenable cmf, if needed */
-	if (cdev->private->cmb) {
-		spin_unlock_irq(sch->lock);
-		ret = ccw_set_cmf(cdev, 1);
-		spin_lock_irq(sch->lock);
-		if (ret) {
-			CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
-				      "(rc=%d)\n", cdev->private->dev_id.ssid,
-				      cdev->private->dev_id.devno, ret);
-			ret = 0;
-		}
-	}
-
-out_restore:
-	spin_unlock_irq(sch->lock);
-	if (cdev->online && cdev->drv && cdev->drv->restore)
-		ret = cdev->drv->restore(cdev);
-	return ret;
-
-out_unlock:
-	spin_unlock_irq(sch->lock);
-	return ret;
-}
-
-static const struct dev_pm_ops ccw_pm_ops = {
-	.prepare = ccw_device_pm_prepare,
-	.complete = ccw_device_pm_complete,
-	.freeze = ccw_device_pm_freeze,
-	.thaw = ccw_device_pm_thaw,
-	.restore = ccw_device_pm_restore,
-};
-
 static struct bus_type ccw_bus_type = {
 	.name = "ccw",
 	.match = ccw_bus_match,
@@ -2020,7 +1778,6 @@ static struct bus_type ccw_bus_type = {
 	.probe = ccw_device_probe,
 	.remove = ccw_device_remove,
 	.shutdown = ccw_device_shutdown,
-	.pm = &ccw_pm_ops,
 };
 
 /**
@@ -143,6 +143,5 @@ void retry_set_schib(struct ccw_device *cdev);
 void cmf_retry_copy_block(struct ccw_device *);
 int cmf_reenable(struct ccw_device *);
 void cmf_reactivate(void);
-int ccw_set_cmf(struct ccw_device *cdev, int enable);
 extern struct device_attribute dev_attr_cmb_enable;
 #endif
@@ -224,12 +224,6 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
 		wake_up(&cdev->private->wait_q);
 		return;
 	}
-	if (cdev->private->flags.resuming) {
-		cdev->private->state = state;
-		cdev->private->flags.recog_done = 1;
-		wake_up(&cdev->private->wait_q);
-		return;
-	}
 	switch (state) {
 	case DEV_STATE_NOT_OPER:
 		break;
@@ -160,7 +160,6 @@ struct ccw_device_private {
 		unsigned int donotify:1;	/* call notify function */
 		unsigned int recog_done:1;	/* dev. recog. complete */
 		unsigned int fake_irb:2;	/* deliver faked irb */
-		unsigned int resuming:1;	/* recognition while resume */
 		unsigned int pgroup:1;		/* pathgroup is set up */
 		unsigned int mpath:1;		/* multipathing is set up */
 		unsigned int pgid_unknown:1;	/* unknown pgid state */