Driver core updates for 5.11-rc1
Here are the big driver core updates for 5.11-rc1. This time there was a lot of
different work happening here for some reason:

 - redo of the fwnode link logic, speeding it up greatly

 - auxiliary bus added (this is a tag that will be pulled in from other
   trees/maintainers this merge window as well, as driver subsystems have
   started to rely on it)

 - platform driver core cleanups on the way to fixing some long-standing
   API issues in future releases

 - minor fixes and tweaks

All have been in linux-next with (finally) no reported issues. Testing there
did help in shaking issues out a lot :)

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

-----BEGIN PGP SIGNATURE-----
iG0EABECAC0WIQT0tgzFv3jCIUoxPcsxR9QN2y37KQUCX9iEUQ8cZ3JlZ0Brcm9h
aC5jb20ACgkQMUfUDdst+ynBJwCgjBAtVWXquZz4m/pyjn0HoTC7tdYAnAlQIj9s
vRbPjOgH9R+YRJzFs1Kx
=X6UP
-----END PGP SIGNATURE-----

Merge tag 'driver-core-5.11-rc1' of
git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core

Pull driver core updates from Greg KH (quoting the tag message above).

* tag 'driver-core-5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core: (39 commits)
  driver core: platform: don't oops in platform_shutdown() on unbound devices
  ACPI: Use fwnode_init() to set up fwnode
  misc: pvpanic: Replace OF headers by mod_devicetable.h
  misc: pvpanic: Combine ACPI and platform drivers
  usb: host: sl811: Switch to use platform_get_mem_or_io()
  vfio: platform: Switch to use platform_get_mem_or_io()
  driver core: platform: Introduce platform_get_mem_or_io()
  dyndbg: fix use before null check
  soc: fix comment for freeing soc_dev_attr
  driver core: platform: use bus_type functions
  driver core: platform: change logic implementing platform_driver_probe
  driver core: platform: reorder functions
  driver core: make driver_probe_device() static
  driver core: Fix a couple of typos
  driver core: Reorder devices on successful probe
  driver core: Delete pointless parameter in fwnode_operations.add_links
  driver core: Refactor fw_devlink feature
  efi: Update implementation of add_links() to create fwnode links
  of: property: Update implementation of add_links() to create fwnode links
  driver core: Use device's fwnode to check if it is waiting for suppliers
  ...
This commit is contained in: commit 7240153a9b
@@ -76,7 +76,7 @@ static bool acpi_nondev_subnode_extract(const union acpi_object *desc,
 		return false;
 
 	dn->name = link->package.elements[0].string.pointer;
-	dn->fwnode.ops = &acpi_data_fwnode_ops;
+	fwnode_init(&dn->fwnode, &acpi_data_fwnode_ops);
 	dn->parent = parent;
 	INIT_LIST_HEAD(&dn->data.properties);
 	INIT_LIST_HEAD(&dn->data.subnodes);
@@ -1589,7 +1589,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
 	device->device_type = type;
 	device->handle = handle;
 	device->parent = acpi_bus_get_parent(handle);
-	device->fwnode.ops = &acpi_device_fwnode_ops;
+	fwnode_init(&device->fwnode, &acpi_device_fwnode_ops);
 	acpi_set_device_status(device, sta);
 	acpi_device_get_busid(device);
 	acpi_set_pnp_ids(handle, &device->pnp, type);
@@ -92,10 +92,15 @@ static int auxiliary_bus_remove(struct device *dev)
 
 static void auxiliary_bus_shutdown(struct device *dev)
 {
-	struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
-	struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+	struct auxiliary_driver *auxdrv = NULL;
+	struct auxiliary_device *auxdev;
 
-	if (auxdrv->shutdown)
+	if (dev->driver) {
+		auxdrv = to_auxiliary_drv(dev->driver);
+		auxdev = to_auxiliary_dev(dev);
+	}
+
+	if (auxdrv && auxdrv->shutdown)
 		auxdrv->shutdown(auxdev);
 }
 
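
For orientation, the auxiliary bus merged through this tag is consumed by other
subsystems roughly as sketched below. This is a hedged illustration against the
5.11 auxiliary bus API (struct auxiliary_driver, struct auxiliary_device_id,
module_auxiliary_driver()); the my_aux_* names and the "parent_mod.my_func"
match string are invented for the example and are not taken from this series.

	#include <linux/auxiliary_bus.h>
	#include <linux/mod_devicetable.h>
	#include <linux/module.h>

	/* Bound when an auxiliary_device whose name matches the id table is added. */
	static int my_aux_probe(struct auxiliary_device *auxdev,
				const struct auxiliary_device_id *id)
	{
		/* auxdev->dev.parent is the device of the registering (parent) driver. */
		return 0;
	}

	/* Match strings are "<module name of the parent driver>.<device name>". */
	static const struct auxiliary_device_id my_aux_id_table[] = {
		{ .name = "parent_mod.my_func" },
		{}
	};
	MODULE_DEVICE_TABLE(auxiliary, my_aux_id_table);

	static struct auxiliary_driver my_aux_driver = {
		.probe = my_aux_probe,
		.id_table = my_aux_id_table,
	};
	module_auxiliary_driver(my_aux_driver);

	MODULE_LICENSE("GPL v2");
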
@@ -133,7 +133,6 @@ extern void device_release_driver_internal(struct device *dev,
 					    struct device *parent);
 
 extern void driver_detach(struct device_driver *drv);
-extern int driver_probe_device(struct device_driver *drv, struct device *dev);
 extern void driver_deferred_probe_del(struct device *dev);
 extern void device_set_deferred_probe_reason(const struct device *dev,
 					     struct va_format *vaf);
@@ -210,7 +210,7 @@ static void class_create_release(struct class *cls)
 }
 
 /**
- * class_create - create a struct class structure
+ * __class_create - create a struct class structure
  * @owner: pointer to the module that is to "own" this struct class
  * @name: pointer to a string for the name of this class.
  * @key: the lock_class_key for this class; used by mutex lock debugging
@ -46,15 +46,108 @@ early_param("sysfs.deprecated", sysfs_deprecated_setup);
|
|||
#endif
|
||||
|
||||
/* Device links support. */
|
||||
static LIST_HEAD(wait_for_suppliers);
|
||||
static DEFINE_MUTEX(wfs_lock);
|
||||
static LIST_HEAD(deferred_sync);
|
||||
static unsigned int defer_sync_state_count = 1;
|
||||
static unsigned int defer_fw_devlink_count;
|
||||
static LIST_HEAD(deferred_fw_devlink);
|
||||
static DEFINE_MUTEX(defer_fw_devlink_lock);
|
||||
static DEFINE_MUTEX(fwnode_link_lock);
|
||||
static bool fw_devlink_is_permissive(void);
|
||||
|
||||
/**
|
||||
* fwnode_link_add - Create a link between two fwnode_handles.
|
||||
* @con: Consumer end of the link.
|
||||
* @sup: Supplier end of the link.
|
||||
*
|
||||
* Create a fwnode link between fwnode handles @con and @sup. The fwnode link
|
||||
* represents the detail that the firmware lists @sup fwnode as supplying a
|
||||
* resource to @con.
|
||||
*
|
||||
* The driver core will use the fwnode link to create a device link between the
|
||||
* two device objects corresponding to @con and @sup when they are created. The
|
||||
* driver core will automatically delete the fwnode link between @con and @sup
|
||||
* after doing that.
|
||||
*
|
||||
* Attempts to create duplicate links between the same pair of fwnode handles
|
||||
* are ignored and there is no reference counting.
|
||||
*/
|
||||
int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
|
||||
{
|
||||
struct fwnode_link *link;
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&fwnode_link_lock);
|
||||
|
||||
list_for_each_entry(link, &sup->consumers, s_hook)
|
||||
if (link->consumer == con)
|
||||
goto out;
|
||||
|
||||
link = kzalloc(sizeof(*link), GFP_KERNEL);
|
||||
if (!link) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
link->supplier = sup;
|
||||
INIT_LIST_HEAD(&link->s_hook);
|
||||
link->consumer = con;
|
||||
INIT_LIST_HEAD(&link->c_hook);
|
||||
|
||||
list_add(&link->s_hook, &sup->consumers);
|
||||
list_add(&link->c_hook, &con->suppliers);
|
||||
out:
|
||||
mutex_unlock(&fwnode_link_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle.
|
||||
* @fwnode: fwnode whose supplier links need to be deleted
|
||||
*
|
||||
* Deletes all supplier links connecting directly to @fwnode.
|
||||
*/
|
||||
static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
|
||||
{
|
||||
struct fwnode_link *link, *tmp;
|
||||
|
||||
mutex_lock(&fwnode_link_lock);
|
||||
list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
|
||||
list_del(&link->s_hook);
|
||||
list_del(&link->c_hook);
|
||||
kfree(link);
|
||||
}
|
||||
mutex_unlock(&fwnode_link_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* fwnode_links_purge_consumers - Delete all consumer links of fwnode_handle.
|
||||
* @fwnode: fwnode whose consumer links need to be deleted
|
||||
*
|
||||
* Deletes all consumer links connecting directly to @fwnode.
|
||||
*/
|
||||
static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
|
||||
{
|
||||
struct fwnode_link *link, *tmp;
|
||||
|
||||
mutex_lock(&fwnode_link_lock);
|
||||
list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
|
||||
list_del(&link->s_hook);
|
||||
list_del(&link->c_hook);
|
||||
kfree(link);
|
||||
}
|
||||
mutex_unlock(&fwnode_link_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* fwnode_links_purge - Delete all links connected to a fwnode_handle.
|
||||
* @fwnode: fwnode whose links needs to be deleted
|
||||
*
|
||||
* Deletes all links connecting directly to a fwnode.
|
||||
*/
|
||||
void fwnode_links_purge(struct fwnode_handle *fwnode)
|
||||
{
|
||||
fwnode_links_purge_suppliers(fwnode);
|
||||
fwnode_links_purge_consumers(fwnode);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SRCU
|
||||
static DEFINE_MUTEX(device_links_lock);
|
||||
DEFINE_STATIC_SRCU(device_links_srcu);
|
||||
|
@@ -468,7 +561,7 @@ postcore_initcall(devlink_class_init);
  * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
  * runtime PM framework to take the link into account. Second, if the
  * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
- * be forced into the active metastate and reference-counted upon the creation
+ * be forced into the active meta state and reference-counted upon the creation
  * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
  * ignored.
  *
@@ -491,7 +584,7 @@ postcore_initcall(devlink_class_init);
  * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and
  * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent
  * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can
- * be used to request the driver core to automaticall probe for a consmer
+ * be used to request the driver core to automatically probe for a consumer
  * driver after successfully binding a driver to the supplier device.
  *
  * The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER,
@@ -555,6 +648,17 @@ struct device_link *device_link_add(struct device *consumer,
 		goto out;
 	}
 
+	/*
+	 * SYNC_STATE_ONLY links are useless once a consumer device has probed.
+	 * So, only create it if the consumer hasn't probed yet.
+	 */
+	if (flags & DL_FLAG_SYNC_STATE_ONLY &&
+	    consumer->links.status != DL_DEV_NO_DRIVER &&
+	    consumer->links.status != DL_DEV_PROBING) {
+		link = NULL;
+		goto out;
+	}
+
 	/*
 	 * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
 	 * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both
@ -697,74 +801,6 @@ out:
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(device_link_add);
|
||||
|
||||
/**
|
||||
* device_link_wait_for_supplier - Add device to wait_for_suppliers list
|
||||
* @consumer: Consumer device
|
||||
*
|
||||
* Marks the @consumer device as waiting for suppliers to become available by
|
||||
* adding it to the wait_for_suppliers list. The consumer device will never be
|
||||
* probed until it's removed from the wait_for_suppliers list.
|
||||
*
|
||||
* The caller is responsible for adding the links to the supplier devices once
|
||||
* they are available and removing the @consumer device from the
|
||||
* wait_for_suppliers list once links to all the suppliers have been created.
|
||||
*
|
||||
* This function is NOT meant to be called from the probe function of the
|
||||
* consumer but rather from code that creates/adds the consumer device.
|
||||
*/
|
||||
static void device_link_wait_for_supplier(struct device *consumer,
|
||||
bool need_for_probe)
|
||||
{
|
||||
mutex_lock(&wfs_lock);
|
||||
list_add_tail(&consumer->links.needs_suppliers, &wait_for_suppliers);
|
||||
consumer->links.need_for_probe = need_for_probe;
|
||||
mutex_unlock(&wfs_lock);
|
||||
}
|
||||
|
||||
static void device_link_wait_for_mandatory_supplier(struct device *consumer)
|
||||
{
|
||||
device_link_wait_for_supplier(consumer, true);
|
||||
}
|
||||
|
||||
static void device_link_wait_for_optional_supplier(struct device *consumer)
|
||||
{
|
||||
device_link_wait_for_supplier(consumer, false);
|
||||
}
|
||||
|
||||
/**
|
||||
* device_link_add_missing_supplier_links - Add links from consumer devices to
|
||||
* supplier devices, leaving any
|
||||
* consumer with inactive suppliers on
|
||||
* the wait_for_suppliers list
|
||||
*
|
||||
* Loops through all consumers waiting on suppliers and tries to add all their
|
||||
* supplier links. If that succeeds, the consumer device is removed from
|
||||
* wait_for_suppliers list. Otherwise, they are left in the wait_for_suppliers
|
||||
* list. Devices left on the wait_for_suppliers list will not be probed.
|
||||
*
|
||||
* The fwnode add_links callback is expected to return 0 if it has found and
|
||||
* added all the supplier links for the consumer device. It should return an
|
||||
* error if it isn't able to do so.
|
||||
*
|
||||
* The caller of device_link_wait_for_supplier() is expected to call this once
|
||||
* it's aware of potential suppliers becoming available.
|
||||
*/
|
||||
static void device_link_add_missing_supplier_links(void)
|
||||
{
|
||||
struct device *dev, *tmp;
|
||||
|
||||
mutex_lock(&wfs_lock);
|
||||
list_for_each_entry_safe(dev, tmp, &wait_for_suppliers,
|
||||
links.needs_suppliers) {
|
||||
int ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
|
||||
if (!ret)
|
||||
list_del_init(&dev->links.needs_suppliers);
|
||||
else if (ret != -ENODEV || fw_devlink_is_permissive())
|
||||
dev->links.need_for_probe = false;
|
||||
}
|
||||
mutex_unlock(&wfs_lock);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SRCU
|
||||
static void __device_link_del(struct kref *kref)
|
||||
{
|
||||
|
@@ -890,13 +926,13 @@ int device_links_check_suppliers(struct device *dev)
 	 * Device waiting for supplier to become available is not allowed to
 	 * probe.
 	 */
-	mutex_lock(&wfs_lock);
-	if (!list_empty(&dev->links.needs_suppliers) &&
-	    dev->links.need_for_probe) {
-		mutex_unlock(&wfs_lock);
+	mutex_lock(&fwnode_link_lock);
+	if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&
+	    !fw_devlink_is_permissive()) {
+		mutex_unlock(&fwnode_link_lock);
 		return -EPROBE_DEFER;
 	}
-	mutex_unlock(&wfs_lock);
+	mutex_unlock(&fwnode_link_lock);
 
 	device_links_write_lock();
 
@@ -960,11 +996,11 @@ static void __device_links_queue_sync_state(struct device *dev,
 	 */
 	dev->state_synced = true;
 
-	if (WARN_ON(!list_empty(&dev->links.defer_hook)))
+	if (WARN_ON(!list_empty(&dev->links.defer_sync)))
 		return;
 
 	get_device(dev);
-	list_add_tail(&dev->links.defer_hook, list);
+	list_add_tail(&dev->links.defer_sync, list);
 }
 
 /**
@@ -982,8 +1018,8 @@ static void device_links_flush_sync_list(struct list_head *list,
 {
 	struct device *dev, *tmp;
 
-	list_for_each_entry_safe(dev, tmp, list, links.defer_hook) {
-		list_del_init(&dev->links.defer_hook);
+	list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
+		list_del_init(&dev->links.defer_sync);
 
 		if (dev != dont_lock_dev)
 			device_lock(dev);
@@ -1021,12 +1057,12 @@ void device_links_supplier_sync_state_resume(void)
 	if (defer_sync_state_count)
 		goto out;
 
-	list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_hook) {
+	list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
 		/*
 		 * Delete from deferred_sync list before queuing it to
-		 * sync_list because defer_hook is used for both lists.
+		 * sync_list because defer_sync is used for both lists.
 		 */
-		list_del_init(&dev->links.defer_hook);
+		list_del_init(&dev->links.defer_sync);
 		__device_links_queue_sync_state(dev, &sync_list);
 	}
 out:
@@ -1044,8 +1080,8 @@ late_initcall(sync_state_resume_initcall);
 
 static void __device_links_supplier_defer_sync(struct device *sup)
 {
-	if (list_empty(&sup->links.defer_hook) && dev_has_sync_state(sup))
-		list_add_tail(&sup->links.defer_hook, &deferred_sync);
+	if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
+		list_add_tail(&sup->links.defer_sync, &deferred_sync);
 }
 
 static void device_link_drop_managed(struct device_link *link)
@@ -1062,10 +1098,7 @@ static ssize_t waiting_for_supplier_show(struct device *dev,
 	bool val;
 
 	device_lock(dev);
-	mutex_lock(&wfs_lock);
-	val = !list_empty(&dev->links.needs_suppliers)
-	      && dev->links.need_for_probe;
-	mutex_unlock(&wfs_lock);
+	val = !list_empty(&dev->fwnode->suppliers);
 	device_unlock(dev);
 	return sysfs_emit(buf, "%u\n", val);
 }
@@ -1092,9 +1125,8 @@ void device_links_driver_bound(struct device *dev)
 	 * the device links it needs to or make new device links as it needs
 	 * them. So, it no longer needs to wait on any suppliers.
 	 */
-	mutex_lock(&wfs_lock);
-	list_del_init(&dev->links.needs_suppliers);
-	mutex_unlock(&wfs_lock);
+	if (dev->fwnode && dev->fwnode->dev == dev)
+		fwnode_links_purge_suppliers(dev->fwnode);
 	device_remove_file(dev, &dev_attr_waiting_for_supplier);
 
 	device_links_write_lock();
@@ -1275,7 +1307,7 @@ void device_links_driver_cleanup(struct device *dev)
 		WRITE_ONCE(link->status, DL_STATE_DORMANT);
 	}
 
-	list_del_init(&dev->links.defer_hook);
+	list_del_init(&dev->links.defer_sync);
 	__device_links_no_driver(dev);
 
 	device_links_write_unlock();
@@ -1385,10 +1417,6 @@ static void device_links_purge(struct device *dev)
 	if (dev->class == &devlink_class)
 		return;
 
-	mutex_lock(&wfs_lock);
-	list_del(&dev->links.needs_suppliers);
-	mutex_unlock(&wfs_lock);
-
 	/*
 	 * Delete all of the remaining links from this device to any other
 	 * devices (either consumers or suppliers).
@ -1439,139 +1467,267 @@ static bool fw_devlink_is_permissive(void)
|
|||
return fw_devlink_flags == DL_FLAG_SYNC_STATE_ONLY;
|
||||
}
|
||||
|
||||
static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode)
|
||||
{
|
||||
if (fwnode->flags & FWNODE_FLAG_LINKS_ADDED)
|
||||
return;
|
||||
|
||||
fwnode_call_int_op(fwnode, add_links);
|
||||
fwnode->flags |= FWNODE_FLAG_LINKS_ADDED;
|
||||
}
|
||||
|
||||
static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
|
||||
{
|
||||
struct fwnode_handle *child = NULL;
|
||||
|
||||
fw_devlink_parse_fwnode(fwnode);
|
||||
|
||||
while ((child = fwnode_get_next_available_child_node(fwnode, child)))
|
||||
fw_devlink_parse_fwtree(child);
|
||||
}
|
||||
|
||||
/**
|
||||
* fw_devlink_create_devlink - Create a device link from a consumer to fwnode
|
||||
* @con - Consumer device for the device link
|
||||
* @sup_handle - fwnode handle of supplier
|
||||
*
|
||||
* This function will try to create a device link between the consumer device
|
||||
* @con and the supplier device represented by @sup_handle.
|
||||
*
|
||||
* The supplier has to be provided as a fwnode because incorrect cycles in
|
||||
* fwnode links can sometimes cause the supplier device to never be created.
|
||||
* This function detects such cases and returns an error if it cannot create a
|
||||
* device link from the consumer to a missing supplier.
|
||||
*
|
||||
* Returns,
|
||||
* 0 on successfully creating a device link
|
||||
* -EINVAL if the device link cannot be created as expected
|
||||
* -EAGAIN if the device link cannot be created right now, but it may be
|
||||
* possible to do that in the future
|
||||
*/
|
||||
static int fw_devlink_create_devlink(struct device *con,
|
||||
struct fwnode_handle *sup_handle, u32 flags)
|
||||
{
|
||||
struct device *sup_dev;
|
||||
int ret = 0;
|
||||
|
||||
sup_dev = get_dev_from_fwnode(sup_handle);
|
||||
if (sup_dev) {
|
||||
/*
|
||||
* If this fails, it is due to cycles in device links. Just
|
||||
* give up on this link and treat it as invalid.
|
||||
*/
|
||||
if (!device_link_add(con, sup_dev, flags))
|
||||
ret = -EINVAL;
|
||||
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* DL_FLAG_SYNC_STATE_ONLY doesn't block probing and supports
|
||||
* cycles. So cycle detection isn't necessary and shouldn't be
|
||||
* done.
|
||||
*/
|
||||
if (flags & DL_FLAG_SYNC_STATE_ONLY)
|
||||
return -EAGAIN;
|
||||
|
||||
/*
|
||||
* If we can't find the supplier device from its fwnode, it might be
|
||||
* due to a cyclic dependency between fwnodes. Some of these cycles can
|
||||
* be broken by applying logic. Check for these types of cycles and
|
||||
* break them so that devices in the cycle probe properly.
|
||||
*
|
||||
* If the supplier's parent is dependent on the consumer, then
|
||||
* the consumer-supplier dependency is a false dependency. So,
|
||||
* treat it as an invalid link.
|
||||
*/
|
||||
sup_dev = fwnode_get_next_parent_dev(sup_handle);
|
||||
if (sup_dev && device_is_dependent(con, sup_dev)) {
|
||||
dev_dbg(con, "Not linking to %pfwP - False link\n",
|
||||
sup_handle);
|
||||
ret = -EINVAL;
|
||||
} else {
|
||||
/*
|
||||
* Can't check for cycles or no cycles. So let's try
|
||||
* again later.
|
||||
*/
|
||||
ret = -EAGAIN;
|
||||
}
|
||||
|
||||
out:
|
||||
put_device(sup_dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* __fw_devlink_link_to_consumers - Create device links to consumers of a device
|
||||
* @dev - Device that needs to be linked to its consumers
|
||||
*
|
||||
* This function looks at all the consumer fwnodes of @dev and creates device
|
||||
* links between the consumer device and @dev (supplier).
|
||||
*
|
||||
* If the consumer device has not been added yet, then this function creates a
|
||||
* SYNC_STATE_ONLY link between @dev (supplier) and the closest ancestor device
|
||||
* of the consumer fwnode. This is necessary to make sure @dev doesn't get a
|
||||
* sync_state() callback before the real consumer device gets to be added and
|
||||
* then probed.
|
||||
*
|
||||
* Once device links are created from the real consumer to @dev (supplier), the
|
||||
* fwnode links are deleted.
|
||||
*/
|
||||
static void __fw_devlink_link_to_consumers(struct device *dev)
|
||||
{
|
||||
struct fwnode_handle *fwnode = dev->fwnode;
|
||||
struct fwnode_link *link, *tmp;
|
||||
|
||||
list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
|
||||
u32 dl_flags = fw_devlink_get_flags();
|
||||
struct device *con_dev;
|
||||
bool own_link = true;
|
||||
int ret;
|
||||
|
||||
con_dev = get_dev_from_fwnode(link->consumer);
|
||||
/*
|
||||
* If consumer device is not available yet, make a "proxy"
|
||||
* SYNC_STATE_ONLY link from the consumer's parent device to
|
||||
* the supplier device. This is necessary to make sure the
|
||||
* supplier doesn't get a sync_state() callback before the real
|
||||
* consumer can create a device link to the supplier.
|
||||
*
|
||||
* This proxy link step is needed to handle the case where the
|
||||
* consumer's parent device is added before the supplier.
|
||||
*/
|
||||
if (!con_dev) {
|
||||
con_dev = fwnode_get_next_parent_dev(link->consumer);
|
||||
/*
|
||||
* However, if the consumer's parent device is also the
|
||||
* parent of the supplier, don't create a
|
||||
* consumer-supplier link from the parent to its child
|
||||
* device. Such a dependency is impossible.
|
||||
*/
|
||||
if (con_dev &&
|
||||
fwnode_is_ancestor_of(con_dev->fwnode, fwnode)) {
|
||||
put_device(con_dev);
|
||||
con_dev = NULL;
|
||||
} else {
|
||||
own_link = false;
|
||||
dl_flags = DL_FLAG_SYNC_STATE_ONLY;
|
||||
}
|
||||
}
|
||||
|
||||
if (!con_dev)
|
||||
continue;
|
||||
|
||||
ret = fw_devlink_create_devlink(con_dev, fwnode, dl_flags);
|
||||
put_device(con_dev);
|
||||
if (!own_link || ret == -EAGAIN)
|
||||
continue;
|
||||
|
||||
list_del(&link->s_hook);
|
||||
list_del(&link->c_hook);
|
||||
kfree(link);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* __fw_devlink_link_to_suppliers - Create device links to suppliers of a device
|
||||
* @dev - The consumer device that needs to be linked to its suppliers
|
||||
* @fwnode - Root of the fwnode tree that is used to create device links
|
||||
*
|
||||
* This function looks at all the supplier fwnodes of fwnode tree rooted at
|
||||
* @fwnode and creates device links between @dev (consumer) and all the
|
||||
* supplier devices of the entire fwnode tree at @fwnode.
|
||||
*
|
||||
* The function creates normal (non-SYNC_STATE_ONLY) device links between @dev
|
||||
* and the real suppliers of @dev. Once these device links are created, the
|
||||
* fwnode links are deleted. When such device links are successfully created,
|
||||
* this function is called recursively on those supplier devices. This is
|
||||
* needed to detect and break some invalid cycles in fwnode links. See
|
||||
* fw_devlink_create_devlink() for more details.
|
||||
*
|
||||
* In addition, it also looks at all the suppliers of the entire fwnode tree
|
||||
* because some of the child devices of @dev that have not been added yet
|
||||
* (because @dev hasn't probed) might already have their suppliers added to
|
||||
* driver core. So, this function creates SYNC_STATE_ONLY device links between
|
||||
* @dev (consumer) and these suppliers to make sure they don't execute their
|
||||
* sync_state() callbacks before these child devices have a chance to create
|
||||
* their device links. The fwnode links that correspond to the child devices
|
||||
* aren't delete because they are needed later to create the device links
|
||||
* between the real consumer and supplier devices.
|
||||
*/
|
||||
static void __fw_devlink_link_to_suppliers(struct device *dev,
|
||||
struct fwnode_handle *fwnode)
|
||||
{
|
||||
bool own_link = (dev->fwnode == fwnode);
|
||||
struct fwnode_link *link, *tmp;
|
||||
struct fwnode_handle *child = NULL;
|
||||
u32 dl_flags;
|
||||
|
||||
if (own_link)
|
||||
dl_flags = fw_devlink_get_flags();
|
||||
else
|
||||
dl_flags = DL_FLAG_SYNC_STATE_ONLY;
|
||||
|
||||
list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
|
||||
int ret;
|
||||
struct device *sup_dev;
|
||||
struct fwnode_handle *sup = link->supplier;
|
||||
|
||||
ret = fw_devlink_create_devlink(dev, sup, dl_flags);
|
||||
if (!own_link || ret == -EAGAIN)
|
||||
continue;
|
||||
|
||||
list_del(&link->s_hook);
|
||||
list_del(&link->c_hook);
|
||||
kfree(link);
|
||||
|
||||
/* If no device link was created, nothing more to do. */
|
||||
if (ret)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* If a device link was successfully created to a supplier, we
|
||||
* now need to try and link the supplier to all its suppliers.
|
||||
*
|
||||
* This is needed to detect and delete false dependencies in
|
||||
* fwnode links that haven't been converted to a device link
|
||||
* yet. See comments in fw_devlink_create_devlink() for more
|
||||
* details on the false dependency.
|
||||
*
|
||||
* Without deleting these false dependencies, some devices will
|
||||
* never probe because they'll keep waiting for their false
|
||||
* dependency fwnode links to be converted to device links.
|
||||
*/
|
||||
sup_dev = get_dev_from_fwnode(sup);
|
||||
__fw_devlink_link_to_suppliers(sup_dev, sup_dev->fwnode);
|
||||
put_device(sup_dev);
|
||||
}
|
||||
|
||||
/*
|
||||
* Make "proxy" SYNC_STATE_ONLY device links to represent the needs of
|
||||
* all the descendants. This proxy link step is needed to handle the
|
||||
* case where the supplier is added before the consumer's parent device
|
||||
* (@dev).
|
||||
*/
|
||||
while ((child = fwnode_get_next_available_child_node(fwnode, child)))
|
||||
__fw_devlink_link_to_suppliers(dev, child);
|
||||
}
|
||||
|
||||
static void fw_devlink_link_device(struct device *dev)
|
||||
{
|
||||
int fw_ret;
|
||||
struct fwnode_handle *fwnode = dev->fwnode;
|
||||
|
||||
if (!fw_devlink_flags)
|
||||
return;
|
||||
|
||||
mutex_lock(&defer_fw_devlink_lock);
|
||||
if (!defer_fw_devlink_count)
|
||||
device_link_add_missing_supplier_links();
|
||||
fw_devlink_parse_fwtree(fwnode);
|
||||
|
||||
/*
|
||||
* The device's fwnode not having add_links() doesn't affect if other
|
||||
* consumers can find this device as a supplier. So, this check is
|
||||
* intentionally placed after device_link_add_missing_supplier_links().
|
||||
*/
|
||||
if (!fwnode_has_op(dev->fwnode, add_links))
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* If fw_devlink is being deferred, assume all devices have mandatory
|
||||
* suppliers they need to link to later. Then, when the fw_devlink is
|
||||
* resumed, all these devices will get a chance to try and link to any
|
||||
* suppliers they have.
|
||||
*/
|
||||
if (!defer_fw_devlink_count) {
|
||||
fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
|
||||
if (fw_ret == -ENODEV && fw_devlink_is_permissive())
|
||||
fw_ret = -EAGAIN;
|
||||
} else {
|
||||
fw_ret = -ENODEV;
|
||||
/*
|
||||
* defer_hook is not used to add device to deferred_sync list
|
||||
* until device is bound. Since deferred fw devlink also blocks
|
||||
* probing, same list hook can be used for deferred_fw_devlink.
|
||||
*/
|
||||
list_add_tail(&dev->links.defer_hook, &deferred_fw_devlink);
|
||||
}
|
||||
|
||||
if (fw_ret == -ENODEV)
|
||||
device_link_wait_for_mandatory_supplier(dev);
|
||||
else if (fw_ret)
|
||||
device_link_wait_for_optional_supplier(dev);
|
||||
|
||||
out:
|
||||
mutex_unlock(&defer_fw_devlink_lock);
|
||||
mutex_lock(&fwnode_link_lock);
|
||||
__fw_devlink_link_to_consumers(dev);
|
||||
__fw_devlink_link_to_suppliers(dev, fwnode);
|
||||
mutex_unlock(&fwnode_link_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* fw_devlink_pause - Pause parsing of fwnode to create device links
|
||||
*
|
||||
* Calling this function defers any fwnode parsing to create device links until
|
||||
* fw_devlink_resume() is called. Both these functions are ref counted and the
|
||||
* caller needs to match the calls.
|
||||
*
|
||||
* While fw_devlink is paused:
|
||||
* - Any device that is added won't have its fwnode parsed to create device
|
||||
* links.
|
||||
* - The probe of the device will also be deferred during this period.
|
||||
* - Any devices that were already added, but waiting for suppliers won't be
|
||||
* able to link to newly added devices.
|
||||
*
|
||||
* Once fw_devlink_resume():
|
||||
* - All the fwnodes that was not parsed will be parsed.
|
||||
* - All the devices that were deferred probing will be reattempted if they
|
||||
* aren't waiting for any more suppliers.
|
||||
*
|
||||
* This pair of functions, is mainly meant to optimize the parsing of fwnodes
|
||||
* when a lot of devices that need to link to each other are added in a short
|
||||
* interval of time. For example, adding all the top level devices in a system.
|
||||
*
|
||||
* For example, if N devices are added and:
|
||||
* - All the consumers are added before their suppliers
|
||||
* - All the suppliers of the N devices are part of the N devices
|
||||
*
|
||||
* Then:
|
||||
*
|
||||
* - With the use of fw_devlink_pause() and fw_devlink_resume(), each device
|
||||
* will only need one parsing of its fwnode because it is guaranteed to find
|
||||
* all the supplier devices already registered and ready to link to. It won't
|
||||
* have to do another pass later to find one or more suppliers it couldn't
|
||||
* find in the first parse of the fwnode. So, we'll only need O(N) fwnode
|
||||
* parses.
|
||||
*
|
||||
* - Without the use of fw_devlink_pause() and fw_devlink_resume(), we would
|
||||
* end up doing O(N^2) parses of fwnodes because every device that's added is
|
||||
* guaranteed to trigger a parse of the fwnode of every device added before
|
||||
* it. This O(N^2) parse is made worse by the fact that when a fwnode of a
|
||||
* device is parsed, all it descendant devices might need to have their
|
||||
* fwnodes parsed too (even if the devices themselves aren't added).
|
||||
*/
|
||||
void fw_devlink_pause(void)
|
||||
{
|
||||
mutex_lock(&defer_fw_devlink_lock);
|
||||
defer_fw_devlink_count++;
|
||||
mutex_unlock(&defer_fw_devlink_lock);
|
||||
}
|
||||
|
||||
/** fw_devlink_resume - Resume parsing of fwnode to create device links
|
||||
*
|
||||
* This function is used in conjunction with fw_devlink_pause() and is ref
|
||||
* counted. See documentation for fw_devlink_pause() for more details.
|
||||
*/
|
||||
void fw_devlink_resume(void)
|
||||
{
|
||||
struct device *dev, *tmp;
|
||||
LIST_HEAD(probe_list);
|
||||
|
||||
mutex_lock(&defer_fw_devlink_lock);
|
||||
if (!defer_fw_devlink_count) {
|
||||
WARN(true, "Unmatched fw_devlink pause/resume!");
|
||||
goto out;
|
||||
}
|
||||
|
||||
defer_fw_devlink_count--;
|
||||
if (defer_fw_devlink_count)
|
||||
goto out;
|
||||
|
||||
device_link_add_missing_supplier_links();
|
||||
list_splice_tail_init(&deferred_fw_devlink, &probe_list);
|
||||
out:
|
||||
mutex_unlock(&defer_fw_devlink_lock);
|
||||
|
||||
/*
|
||||
* bus_probe_device() can cause new devices to get added and they'll
|
||||
* try to grab defer_fw_devlink_lock. So, this needs to be done outside
|
||||
* the defer_fw_devlink_lock.
|
||||
*/
|
||||
list_for_each_entry_safe(dev, tmp, &probe_list, links.defer_hook) {
|
||||
list_del_init(&dev->links.defer_hook);
|
||||
bus_probe_device(dev);
|
||||
}
|
||||
}
|
||||
/* Device links support end. */
|
||||
|
||||
int (*platform_notify)(struct device *dev) = NULL;
|
||||
|
@@ -2196,7 +2352,7 @@ static int device_add_attrs(struct device *dev)
 			goto err_remove_dev_groups;
 	}
 
-	if (fw_devlink_flags && !fw_devlink_is_permissive()) {
+	if (fw_devlink_flags && !fw_devlink_is_permissive() && dev->fwnode) {
 		error = device_create_file(dev, &dev_attr_waiting_for_supplier);
 		if (error)
 			goto err_remove_dev_online;
@@ -2427,8 +2583,7 @@ void device_initialize(struct device *dev)
 #endif
 	INIT_LIST_HEAD(&dev->links.consumers);
 	INIT_LIST_HEAD(&dev->links.suppliers);
-	INIT_LIST_HEAD(&dev->links.needs_suppliers);
-	INIT_LIST_HEAD(&dev->links.defer_hook);
+	INIT_LIST_HEAD(&dev->links.defer_sync);
 	dev->links.status = DL_DEV_NO_DRIVER;
 }
 EXPORT_SYMBOL_GPL(device_initialize);
@@ -370,6 +370,13 @@ static void driver_bound(struct device *dev)
 
 	device_pm_check_callbacks(dev);
 
+	/*
+	 * Reorder successfully probed devices to the end of the device list.
+	 * This ensures that suspend/resume order matches probe order, which
+	 * is usually what drivers rely on.
+	 */
+	device_pm_move_to_tail(dev);
+
 	/*
 	 * Make sure the device is no longer in one of the deferred lists and
 	 * kick off retrying all pending devices
@@ -717,7 +724,7 @@ EXPORT_SYMBOL_GPL(wait_for_device_probe);
  *
  * If the device has a parent, runtime-resume the parent before driver probing.
  */
-int driver_probe_device(struct device_driver *drv, struct device *dev)
+static int driver_probe_device(struct device_driver *drv, struct device *dev)
 {
 	int ret = 0;
 
@@ -149,7 +149,7 @@ void * __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid
 EXPORT_SYMBOL_GPL(__devres_alloc_node);
 #else
 /**
- * devres_alloc - Allocate device resource data
+ * devres_alloc_node - Allocate device resource data
  * @release: Release function devres will be associated with
  * @size: Allocation size
  * @gfp: Allocation flags
@@ -128,7 +128,7 @@ static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
 }
 
 /**
- * firmware_timeout_store() - set number of seconds to wait for firmware
+ * timeout_store() - set number of seconds to wait for firmware
  * @class: device class pointer
  * @attr: device attribute pointer
  * @buf: buffer to scan for timeout value
@@ -63,6 +63,21 @@ struct resource *platform_get_resource(struct platform_device *dev,
 }
 EXPORT_SYMBOL_GPL(platform_get_resource);
 
+struct resource *platform_get_mem_or_io(struct platform_device *dev,
+					unsigned int num)
+{
+	u32 i;
+
+	for (i = 0; i < dev->num_resources; i++) {
+		struct resource *r = &dev->resource[i];
+
+		if ((resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) && num-- == 0)
+			return r;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(platform_get_mem_or_io);
+
 #ifdef CONFIG_HAS_IOMEM
 /**
  * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a
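
The sl811 and vfio conversions in the shortlog switch to this new helper instead
of querying IORESOURCE_MEM and IORESOURCE_IO separately. A hedged sketch of a
caller follows; the example_probe() name and the messages are placeholders for
illustration, not code from those drivers.

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;

		/* First MEM or IO resource, whichever kind the platform provided. */
		res = platform_get_mem_or_io(pdev, 0);
		if (!res)
			return -ENODEV;

		/* Pick port I/O vs. MMIO accessors based on the resource type. */
		if (resource_type(res) == IORESOURCE_IO)
			dev_info(&pdev->dev, "using port I/O at %pR\n", res);
		else
			dev_info(&pdev->dev, "using MMIO at %pR\n", res);

		return 0;
	}
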
@ -743,62 +758,6 @@ err:
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(platform_device_register_full);
|
||||
|
||||
static int platform_drv_probe(struct device *_dev)
|
||||
{
|
||||
struct platform_driver *drv = to_platform_driver(_dev->driver);
|
||||
struct platform_device *dev = to_platform_device(_dev);
|
||||
int ret;
|
||||
|
||||
ret = of_clk_set_defaults(_dev->of_node, false);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = dev_pm_domain_attach(_dev, true);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (drv->probe) {
|
||||
ret = drv->probe(dev);
|
||||
if (ret)
|
||||
dev_pm_domain_detach(_dev, true);
|
||||
}
|
||||
|
||||
out:
|
||||
if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
|
||||
dev_warn(_dev, "probe deferral not supported\n");
|
||||
ret = -ENXIO;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int platform_drv_probe_fail(struct device *_dev)
|
||||
{
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
static int platform_drv_remove(struct device *_dev)
|
||||
{
|
||||
struct platform_driver *drv = to_platform_driver(_dev->driver);
|
||||
struct platform_device *dev = to_platform_device(_dev);
|
||||
int ret = 0;
|
||||
|
||||
if (drv->remove)
|
||||
ret = drv->remove(dev);
|
||||
dev_pm_domain_detach(_dev, true);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void platform_drv_shutdown(struct device *_dev)
|
||||
{
|
||||
struct platform_driver *drv = to_platform_driver(_dev->driver);
|
||||
struct platform_device *dev = to_platform_device(_dev);
|
||||
|
||||
if (drv->shutdown)
|
||||
drv->shutdown(dev);
|
||||
}
|
||||
|
||||
/**
|
||||
* __platform_driver_register - register a driver for platform-level devices
|
||||
* @drv: platform driver structure
|
||||
|
@@ -809,9 +768,6 @@ int __platform_driver_register(struct platform_driver *drv,
 {
 	drv->driver.owner = owner;
 	drv->driver.bus = &platform_bus_type;
-	drv->driver.probe = platform_drv_probe;
-	drv->driver.remove = platform_drv_remove;
-	drv->driver.shutdown = platform_drv_shutdown;
 
 	return driver_register(&drv->driver);
 }
@@ -827,6 +783,11 @@ void platform_driver_unregister(struct platform_driver *drv)
 }
 EXPORT_SYMBOL_GPL(platform_driver_unregister);
 
+static int platform_probe_fail(struct platform_device *pdev)
+{
+	return -ENXIO;
+}
+
 /**
  * __platform_driver_probe - register driver for non-hotpluggable device
  * @drv: platform driver structure
@@ -887,10 +848,9 @@ int __init_or_module __platform_driver_probe(struct platform_driver *drv,
 	 * new devices fail.
 	 */
 	spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
-	drv->probe = NULL;
+	drv->probe = platform_probe_fail;
 	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
 		retval = -ENODEV;
-	drv->driver.probe = platform_drv_probe_fail;
 	spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);
 
 	if (code != retval)
@ -1017,129 +977,6 @@ void platform_unregister_drivers(struct platform_driver * const *drivers,
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(platform_unregister_drivers);
|
||||
|
||||
/* modalias support enables more hands-off userspace setup:
|
||||
* (a) environment variable lets new-style hotplug events work once system is
|
||||
* fully running: "modprobe $MODALIAS"
|
||||
* (b) sysfs attribute lets new-style coldplug recover from hotplug events
|
||||
* mishandled before system is fully running: "modprobe $(cat modalias)"
|
||||
*/
|
||||
static ssize_t modalias_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
int len;
|
||||
|
||||
len = of_device_modalias(dev, buf, PAGE_SIZE);
|
||||
if (len != -ENODEV)
|
||||
return len;
|
||||
|
||||
len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
|
||||
if (len != -ENODEV)
|
||||
return len;
|
||||
|
||||
return sysfs_emit(buf, "platform:%s\n", pdev->name);
|
||||
}
|
||||
static DEVICE_ATTR_RO(modalias);
|
||||
|
||||
static ssize_t driver_override_store(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
char *driver_override, *old, *cp;
|
||||
|
||||
/* We need to keep extra room for a newline */
|
||||
if (count >= (PAGE_SIZE - 1))
|
||||
return -EINVAL;
|
||||
|
||||
driver_override = kstrndup(buf, count, GFP_KERNEL);
|
||||
if (!driver_override)
|
||||
return -ENOMEM;
|
||||
|
||||
cp = strchr(driver_override, '\n');
|
||||
if (cp)
|
||||
*cp = '\0';
|
||||
|
||||
device_lock(dev);
|
||||
old = pdev->driver_override;
|
||||
if (strlen(driver_override)) {
|
||||
pdev->driver_override = driver_override;
|
||||
} else {
|
||||
kfree(driver_override);
|
||||
pdev->driver_override = NULL;
|
||||
}
|
||||
device_unlock(dev);
|
||||
|
||||
kfree(old);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static ssize_t driver_override_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
ssize_t len;
|
||||
|
||||
device_lock(dev);
|
||||
len = sysfs_emit(buf, "%s\n", pdev->driver_override);
|
||||
device_unlock(dev);
|
||||
|
||||
return len;
|
||||
}
|
||||
static DEVICE_ATTR_RW(driver_override);
|
||||
|
||||
static ssize_t numa_node_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return sysfs_emit(buf, "%d\n", dev_to_node(dev));
|
||||
}
|
||||
static DEVICE_ATTR_RO(numa_node);
|
||||
|
||||
static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a,
|
||||
int n)
|
||||
{
|
||||
struct device *dev = container_of(kobj, typeof(*dev), kobj);
|
||||
|
||||
if (a == &dev_attr_numa_node.attr &&
|
||||
dev_to_node(dev) == NUMA_NO_NODE)
|
||||
return 0;
|
||||
|
||||
return a->mode;
|
||||
}
|
||||
|
||||
static struct attribute *platform_dev_attrs[] = {
|
||||
&dev_attr_modalias.attr,
|
||||
&dev_attr_numa_node.attr,
|
||||
&dev_attr_driver_override.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group platform_dev_group = {
|
||||
.attrs = platform_dev_attrs,
|
||||
.is_visible = platform_dev_attrs_visible,
|
||||
};
|
||||
__ATTRIBUTE_GROUPS(platform_dev);
|
||||
|
||||
static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
int rc;
|
||||
|
||||
/* Some devices have extra OF data and an OF-style MODALIAS */
|
||||
rc = of_device_uevent_modalias(dev, env);
|
||||
if (rc != -ENODEV)
|
||||
return rc;
|
||||
|
||||
rc = acpi_device_uevent_modalias(dev, env);
|
||||
if (rc != -ENODEV)
|
||||
return rc;
|
||||
|
||||
add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
|
||||
pdev->name);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct platform_device_id *platform_match_id(
|
||||
const struct platform_device_id *id,
|
||||
struct platform_device *pdev)
|
||||
|
@ -1154,44 +991,6 @@ static const struct platform_device_id *platform_match_id(
|
|||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* platform_match - bind platform device to platform driver.
|
||||
* @dev: device.
|
||||
* @drv: driver.
|
||||
*
|
||||
* Platform device IDs are assumed to be encoded like this:
|
||||
* "<name><instance>", where <name> is a short description of the type of
|
||||
* device, like "pci" or "floppy", and <instance> is the enumerated
|
||||
* instance of the device, like '0' or '42'. Driver IDs are simply
|
||||
* "<name>". So, extract the <name> from the platform_device structure,
|
||||
* and compare it against the name of the driver. Return whether they match
|
||||
* or not.
|
||||
*/
|
||||
static int platform_match(struct device *dev, struct device_driver *drv)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
struct platform_driver *pdrv = to_platform_driver(drv);
|
||||
|
||||
/* When driver_override is set, only bind to the matching driver */
|
||||
if (pdev->driver_override)
|
||||
return !strcmp(pdev->driver_override, drv->name);
|
||||
|
||||
/* Attempt an OF style match first */
|
||||
if (of_driver_match_device(dev, drv))
|
||||
return 1;
|
||||
|
||||
/* Then try ACPI style match */
|
||||
if (acpi_driver_match_device(dev, drv))
|
||||
return 1;
|
||||
|
||||
/* Then try to match against the id table */
|
||||
if (pdrv->id_table)
|
||||
return platform_match_id(pdrv->id_table, pdev) != NULL;
|
||||
|
||||
/* fall-back to driver name match */
|
||||
return (strcmp(pdev->name, drv->name) == 0);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
|
||||
static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
|
||||
|
@ -1336,6 +1135,234 @@ int platform_pm_restore(struct device *dev)
|
|||
|
||||
#endif /* CONFIG_HIBERNATE_CALLBACKS */
|
||||
|
||||
/* modalias support enables more hands-off userspace setup:
|
||||
* (a) environment variable lets new-style hotplug events work once system is
|
||||
* fully running: "modprobe $MODALIAS"
|
||||
* (b) sysfs attribute lets new-style coldplug recover from hotplug events
|
||||
* mishandled before system is fully running: "modprobe $(cat modalias)"
|
||||
*/
|
||||
static ssize_t modalias_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
int len;
|
||||
|
||||
len = of_device_modalias(dev, buf, PAGE_SIZE);
|
||||
if (len != -ENODEV)
|
||||
return len;
|
||||
|
||||
len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
|
||||
if (len != -ENODEV)
|
||||
return len;
|
||||
|
||||
return sysfs_emit(buf, "platform:%s\n", pdev->name);
|
||||
}
|
||||
static DEVICE_ATTR_RO(modalias);
|
||||
|
||||
static ssize_t numa_node_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return sysfs_emit(buf, "%d\n", dev_to_node(dev));
|
||||
}
|
||||
static DEVICE_ATTR_RO(numa_node);
|
||||
|
||||
static ssize_t driver_override_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
ssize_t len;
|
||||
|
||||
device_lock(dev);
|
||||
len = sysfs_emit(buf, "%s\n", pdev->driver_override);
|
||||
device_unlock(dev);
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
static ssize_t driver_override_store(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
char *driver_override, *old, *cp;
|
||||
|
||||
/* We need to keep extra room for a newline */
|
||||
if (count >= (PAGE_SIZE - 1))
|
||||
return -EINVAL;
|
||||
|
||||
driver_override = kstrndup(buf, count, GFP_KERNEL);
|
||||
if (!driver_override)
|
||||
return -ENOMEM;
|
||||
|
||||
cp = strchr(driver_override, '\n');
|
||||
if (cp)
|
||||
*cp = '\0';
|
||||
|
||||
device_lock(dev);
|
||||
old = pdev->driver_override;
|
||||
if (strlen(driver_override)) {
|
||||
pdev->driver_override = driver_override;
|
||||
} else {
|
||||
kfree(driver_override);
|
||||
pdev->driver_override = NULL;
|
||||
}
|
||||
device_unlock(dev);
|
||||
|
||||
kfree(old);
|
||||
|
||||
return count;
|
||||
}
|
||||
static DEVICE_ATTR_RW(driver_override);
|
||||
|
||||
static struct attribute *platform_dev_attrs[] = {
|
||||
&dev_attr_modalias.attr,
|
||||
&dev_attr_numa_node.attr,
|
||||
&dev_attr_driver_override.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a,
|
||||
int n)
|
||||
{
|
||||
struct device *dev = container_of(kobj, typeof(*dev), kobj);
|
||||
|
||||
if (a == &dev_attr_numa_node.attr &&
|
||||
dev_to_node(dev) == NUMA_NO_NODE)
|
||||
return 0;
|
||||
|
||||
return a->mode;
|
||||
}
|
||||
|
||||
static struct attribute_group platform_dev_group = {
|
||||
.attrs = platform_dev_attrs,
|
||||
.is_visible = platform_dev_attrs_visible,
|
||||
};
|
||||
__ATTRIBUTE_GROUPS(platform_dev);
|
||||
|
||||
|
||||
/**
|
||||
* platform_match - bind platform device to platform driver.
|
||||
* @dev: device.
|
||||
* @drv: driver.
|
||||
*
|
||||
* Platform device IDs are assumed to be encoded like this:
|
||||
* "<name><instance>", where <name> is a short description of the type of
|
||||
* device, like "pci" or "floppy", and <instance> is the enumerated
|
||||
* instance of the device, like '0' or '42'. Driver IDs are simply
|
||||
* "<name>". So, extract the <name> from the platform_device structure,
|
||||
* and compare it against the name of the driver. Return whether they match
|
||||
* or not.
|
||||
*/
|
||||
static int platform_match(struct device *dev, struct device_driver *drv)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
struct platform_driver *pdrv = to_platform_driver(drv);
|
||||
|
||||
/* When driver_override is set, only bind to the matching driver */
|
||||
if (pdev->driver_override)
|
||||
return !strcmp(pdev->driver_override, drv->name);
|
||||
|
||||
/* Attempt an OF style match first */
|
||||
if (of_driver_match_device(dev, drv))
|
||||
return 1;
|
||||
|
||||
/* Then try ACPI style match */
|
||||
if (acpi_driver_match_device(dev, drv))
|
||||
return 1;
|
||||
|
||||
/* Then try to match against the id table */
|
||||
if (pdrv->id_table)
|
||||
return platform_match_id(pdrv->id_table, pdev) != NULL;
|
||||
|
||||
/* fall-back to driver name match */
|
||||
return (strcmp(pdev->name, drv->name) == 0);
|
||||
}
|
||||
|
||||
static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
int rc;
|
||||
|
||||
/* Some devices have extra OF data and an OF-style MODALIAS */
|
||||
rc = of_device_uevent_modalias(dev, env);
|
||||
if (rc != -ENODEV)
|
||||
return rc;
|
||||
|
||||
rc = acpi_device_uevent_modalias(dev, env);
|
||||
if (rc != -ENODEV)
|
||||
return rc;
|
||||
|
||||
add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
|
||||
pdev->name);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int platform_probe(struct device *_dev)
|
||||
{
|
||||
struct platform_driver *drv = to_platform_driver(_dev->driver);
|
||||
struct platform_device *dev = to_platform_device(_dev);
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* A driver registered using platform_driver_probe() cannot be bound
|
||||
* again later because the probe function usually lives in __init code
|
||||
* and so is gone. For these drivers .probe is set to
|
||||
* platform_probe_fail in __platform_driver_probe(). Don't even prepare
|
||||
* clocks and PM domains for these to match the traditional behaviour.
|
||||
*/
|
||||
if (unlikely(drv->probe == platform_probe_fail))
|
||||
return -ENXIO;
|
||||
|
||||
ret = of_clk_set_defaults(_dev->of_node, false);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = dev_pm_domain_attach(_dev, true);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (drv->probe) {
|
||||
ret = drv->probe(dev);
|
||||
if (ret)
|
||||
dev_pm_domain_detach(_dev, true);
|
||||
}
|
||||
|
||||
out:
|
||||
if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
|
||||
dev_warn(_dev, "probe deferral not supported\n");
|
||||
ret = -ENXIO;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int platform_remove(struct device *_dev)
|
||||
{
|
||||
struct platform_driver *drv = to_platform_driver(_dev->driver);
|
||||
struct platform_device *dev = to_platform_device(_dev);
|
||||
int ret = 0;
|
||||
|
||||
if (drv->remove)
|
||||
ret = drv->remove(dev);
|
||||
dev_pm_domain_detach(_dev, true);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void platform_shutdown(struct device *_dev)
|
||||
{
|
||||
struct platform_device *dev = to_platform_device(_dev);
|
||||
struct platform_driver *drv;
|
||||
|
||||
if (!_dev->driver)
|
||||
return;
|
||||
|
||||
drv = to_platform_driver(_dev->driver);
|
||||
if (drv->shutdown)
|
||||
drv->shutdown(dev);
|
||||
}
|
||||
|
||||
|
||||
int platform_dma_configure(struct device *dev)
|
||||
{
|
||||
enum dev_dma_attr attr;
|
||||
|
@@ -1362,6 +1389,9 @@ struct bus_type platform_bus_type = {
 	.dev_groups = platform_dev_groups,
 	.match = platform_match,
 	.uevent = platform_uevent,
+	.probe = platform_probe,
+	.remove = platform_remove,
+	.shutdown = platform_shutdown,
 	.dma_configure = platform_dma_configure,
 	.pm = &platform_dev_pm_ops,
 };
@ -614,6 +614,31 @@ struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(fwnode_get_next_parent);
|
||||
|
||||
/**
|
||||
* fwnode_get_next_parent_dev - Find device of closest ancestor fwnode
|
||||
* @fwnode: firmware node
|
||||
*
|
||||
* Given a firmware node (@fwnode), this function finds its closest ancestor
|
||||
* firmware node that has a corresponding struct device and returns that struct
|
||||
* device.
|
||||
*
|
||||
* The caller of this function is expected to call put_device() on the returned
|
||||
* device when they are done.
|
||||
*/
|
||||
struct device *fwnode_get_next_parent_dev(struct fwnode_handle *fwnode)
|
||||
{
|
||||
struct device *dev = NULL;
|
||||
|
||||
fwnode_handle_get(fwnode);
|
||||
do {
|
||||
fwnode = fwnode_get_next_parent(fwnode);
|
||||
if (fwnode)
|
||||
dev = get_dev_from_fwnode(fwnode);
|
||||
} while (fwnode && !dev);
|
||||
fwnode_handle_put(fwnode);
|
||||
return dev;
|
||||
}
|
||||
|
||||
/**
|
||||
* fwnode_count_parents - Return the number of parents a node has
|
||||
* @fwnode: The node the parents of which are to be counted
|
||||
|
@ -660,6 +685,33 @@ struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode,
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(fwnode_get_nth_parent);
|
||||
|
||||
/**
|
||||
* fwnode_is_ancestor_of - Test if @test_ancestor is ancestor of @test_child
|
||||
* @test_ancestor: Firmware which is tested for being an ancestor
|
||||
* @test_child: Firmware which is tested for being the child
|
||||
*
|
||||
* A node is considered an ancestor of itself too.
|
||||
*
|
||||
* Returns true if @test_ancestor is an ancestor of @test_child.
|
||||
* Otherwise, returns false.
|
||||
*/
|
||||
bool fwnode_is_ancestor_of(struct fwnode_handle *test_ancestor,
|
||||
struct fwnode_handle *test_child)
|
||||
{
|
||||
if (!test_ancestor)
|
||||
return false;
|
||||
|
||||
fwnode_handle_get(test_child);
|
||||
while (test_child) {
|
||||
if (test_child == test_ancestor) {
|
||||
fwnode_handle_put(test_child);
|
||||
return true;
|
||||
}
|
||||
test_child = fwnode_get_next_parent(test_child);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* fwnode_get_next_child_node - Return the next child node handle for a node
|
||||
* @fwnode: Firmware node to find the next child node for.
|
||||
|
|
|
@@ -168,7 +168,7 @@ out1:
 }
 EXPORT_SYMBOL_GPL(soc_device_register);
 
-/* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
+/* Ensure soc_dev->attr is freed after calling soc_device_unregister. */
 void soc_device_unregister(struct soc_device *soc_dev)
 {
 	device_unregister(&soc_dev->dev);
@ -653,7 +653,7 @@ swnode_register(const struct software_node *node, struct swnode *parent,
|
|||
swnode->parent = parent;
|
||||
swnode->allocated = allocated;
|
||||
swnode->kobj.kset = swnode_kset;
|
||||
swnode->fwnode.ops = &software_node_ops;
|
||||
fwnode_init(&swnode->fwnode, &software_node_ops);
|
||||
|
||||
ida_init(&swnode->child_ids);
|
||||
INIT_LIST_HEAD(&swnode->entry);
|
||||
|
|
|

@@ -316,11 +316,9 @@ static struct device_node *find_pci_overlap_node(void)
 * resource reservation conflict on the memory window that the efifb
 * framebuffer steals from the PCIe host bridge.
 */
static int efifb_add_links(const struct fwnode_handle *fwnode,
			   struct device *dev)
static int efifb_add_links(struct fwnode_handle *fwnode)
{
	struct device_node *sup_np;
	struct device *sup_dev;

	sup_np = find_pci_overlap_node();

@@ -331,27 +329,9 @@ static int efifb_add_links(const struct fwnode_handle *fwnode,
	if (!sup_np)
		return 0;

	sup_dev = get_dev_from_fwnode(&sup_np->fwnode);
	fwnode_link_add(fwnode, of_fwnode_handle(sup_np));
	of_node_put(sup_np);

	/*
	 * Return -ENODEV if the PCI graphics controller device hasn't been
	 * registered yet. This ensures that efifb isn't allowed to probe
	 * and this function is retried again when new devices are
	 * registered.
	 */
	if (!sup_dev)
		return -ENODEV;

	/*
	 * If this fails, retrying this function at a later point won't
	 * change anything. So, don't return an error after this.
	 */
	if (!device_link_add(dev, sup_dev, fw_devlink_get_flags()))
		dev_warn(dev, "device_link_add() failed\n");

	put_device(sup_dev);

	return 0;
}

@@ -359,9 +339,7 @@ static const struct fwnode_operations efifb_fwnode_ops = {
	.add_links = efifb_add_links,
};

static struct fwnode_handle efifb_fwnode = {
	.ops = &efifb_fwnode_ops,
};
static struct fwnode_handle efifb_fwnode;

static int __init register_gop_device(void)
{

@@ -375,8 +353,10 @@ static int __init register_gop_device(void)
	if (!pd)
		return -ENOMEM;

	if (IS_ENABLED(CONFIG_PCI))
	if (IS_ENABLED(CONFIG_PCI)) {
		fwnode_init(&efifb_fwnode, &efifb_fwnode_ops);
		pd->dev.fwnode = &efifb_fwnode;
	}

	err = platform_device_add_data(pd, &screen_info, sizeof(screen_info));
	if (err)

@@ -8,14 +8,14 @@

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/types.h>

#include <uapi/misc/pvpanic.h>

static void __iomem *base;

@@ -49,101 +49,16 @@ static struct notifier_block pvpanic_panic_nb = {
	.priority = 1, /* let this be called before broken drm_fb_helper */
};

#ifdef CONFIG_ACPI
static int pvpanic_add(struct acpi_device *device);
static int pvpanic_remove(struct acpi_device *device);

static const struct acpi_device_id pvpanic_device_ids[] = {
	{ "QEMU0001", 0 },
	{ "", 0 }
};
MODULE_DEVICE_TABLE(acpi, pvpanic_device_ids);

static struct acpi_driver pvpanic_driver = {
	.name =		"pvpanic",
	.class =	"QEMU",
	.ids =		pvpanic_device_ids,
	.ops =		{
		.add =		pvpanic_add,
		.remove =	pvpanic_remove,
	},
	.owner =	THIS_MODULE,
};

static acpi_status
pvpanic_walk_resources(struct acpi_resource *res, void *context)
{
	struct resource r;

	if (acpi_dev_resource_io(res, &r)) {
#ifdef CONFIG_HAS_IOPORT_MAP
		base = ioport_map(r.start, resource_size(&r));
		return AE_OK;
#else
		return AE_ERROR;
#endif
	} else if (acpi_dev_resource_memory(res, &r)) {
		base = ioremap(r.start, resource_size(&r));
		return AE_OK;
	}

	return AE_ERROR;
}

static int pvpanic_add(struct acpi_device *device)
{
	int ret;

	ret = acpi_bus_get_status(device);
	if (ret < 0)
		return ret;

	if (!device->status.enabled || !device->status.functional)
		return -ENODEV;

	acpi_walk_resources(device->handle, METHOD_NAME__CRS,
			    pvpanic_walk_resources, NULL);

	if (!base)
		return -ENODEV;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &pvpanic_panic_nb);

	return 0;
}

static int pvpanic_remove(struct acpi_device *device)
{

	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &pvpanic_panic_nb);
	iounmap(base);

	return 0;
}

static int pvpanic_register_acpi_driver(void)
{
	return acpi_bus_register_driver(&pvpanic_driver);
}

static void pvpanic_unregister_acpi_driver(void)
{
	acpi_bus_unregister_driver(&pvpanic_driver);
}
#else
static int pvpanic_register_acpi_driver(void)
{
	return -ENODEV;
}

static void pvpanic_unregister_acpi_driver(void) {}
#endif

static int pvpanic_mmio_probe(struct platform_device *pdev)
{
	base = devm_platform_ioremap_resource(pdev, 0);
	struct device *dev = &pdev->dev;
	struct resource *res;

	res = platform_get_mem_or_io(pdev, 0);
	if (res && resource_type(res) == IORESOURCE_IO)
		base = devm_ioport_map(dev, res->start, resource_size(res));
	else
		base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

@@ -167,30 +82,19 @@ static const struct of_device_id pvpanic_mmio_match[] = {
	{}
};

static const struct acpi_device_id pvpanic_device_ids[] = {
	{ "QEMU0001", 0 },
	{ "", 0 }
};
MODULE_DEVICE_TABLE(acpi, pvpanic_device_ids);

static struct platform_driver pvpanic_mmio_driver = {
	.driver = {
		.name = "pvpanic-mmio",
		.of_match_table = pvpanic_mmio_match,
		.acpi_match_table = pvpanic_device_ids,
	},
	.probe = pvpanic_mmio_probe,
	.remove = pvpanic_mmio_remove,
};

static int __init pvpanic_mmio_init(void)
{
	if (acpi_disabled)
		return platform_driver_register(&pvpanic_mmio_driver);
	else
		return pvpanic_register_acpi_driver();
}

static void __exit pvpanic_mmio_exit(void)
{
	if (acpi_disabled)
		platform_driver_unregister(&pvpanic_mmio_driver);
	else
		pvpanic_unregister_acpi_driver();
}

module_init(pvpanic_mmio_init);
module_exit(pvpanic_mmio_exit);
module_platform_driver(pvpanic_mmio_driver);

@@ -356,6 +356,7 @@ void of_node_release(struct kobject *kobj)

	property_list_free(node->properties);
	property_list_free(node->deadprops);
	fwnode_links_purge(of_fwnode_handle(node));

	kfree(node->full_name);
	kfree(node->data);

@@ -538,9 +538,7 @@ static int __init of_platform_default_populate_init(void)
	}

	/* Populate everything else. */
	fw_devlink_pause();
	of_platform_default_populate(NULL, NULL, NULL);
	fw_devlink_resume();

	return 0;
}

@@ -1038,33 +1038,9 @@ static bool of_is_ancestor_of(struct device_node *test_ancestor,
}

/**
 * of_get_next_parent_dev - Add device link to supplier from supplier phandle
 * @np: device tree node
 *
 * Given a device tree node (@np), this function finds its closest ancestor
 * device tree node that has a corresponding struct device.
 *
 * The caller of this function is expected to call put_device() on the returned
 * device when they are done.
 */
static struct device *of_get_next_parent_dev(struct device_node *np)
{
	struct device *dev = NULL;

	of_node_get(np);
	do {
		np = of_get_next_parent(np);
		if (np)
			dev = get_dev_from_fwnode(&np->fwnode);
	} while (np && !dev);
	of_node_put(np);
	return dev;
}

/**
 * of_link_to_phandle - Add device link to supplier from supplier phandle
 * @dev: consumer device
 * @sup_np: phandle to supplier device tree node
 * of_link_to_phandle - Add fwnode link to supplier from supplier phandle
 * @con_np: consumer device tree node
 * @sup_np: supplier device tree node
 *
 * Given a phandle to a supplier device tree node (@sup_np), this function
 * finds the device that owns the supplier device tree node and creates a

@@ -1074,16 +1050,14 @@ static struct device *of_get_next_parent_dev(struct device_node *np)
 * cases, it returns an error.
 *
 * Returns:
 * - 0 if link successfully created to supplier
 * - -EAGAIN if linking to the supplier should be reattempted
 * - 0 if fwnode link successfully created to supplier
 * - -EINVAL if the supplier link is invalid and should not be created
 * - -ENODEV if there is no device that corresponds to the supplier phandle
 * - -ENODEV if struct device will never be created for supplier
 */
static int of_link_to_phandle(struct device *dev, struct device_node *sup_np,
			      u32 dl_flags)
static int of_link_to_phandle(struct device_node *con_np,
			      struct device_node *sup_np)
{
	struct device *sup_dev, *sup_par_dev;
	int ret = 0;
	struct device *sup_dev;
	struct device_node *tmp_np = sup_np;

	of_node_get(sup_np);

@@ -1106,7 +1080,8 @@ static int of_link_to_phandle(struct device *dev, struct device_node *sup_np,
	}

	if (!sup_np) {
		dev_dbg(dev, "Not linking to %pOFP - No device\n", tmp_np);
		pr_debug("Not linking %pOFP to %pOFP - No device\n",
			 con_np, tmp_np);
		return -ENODEV;
	}

@@ -1115,53 +1090,30 @@ static int of_link_to_phandle(struct device *dev, struct device_node *sup_np,
	 * descendant nodes. By definition, a child node can't be a functional
	 * dependency for the parent node.
	 */
	if (of_is_ancestor_of(dev->of_node, sup_np)) {
		dev_dbg(dev, "Not linking to %pOFP - is descendant\n", sup_np);
	if (of_is_ancestor_of(con_np, sup_np)) {
		pr_debug("Not linking %pOFP to %pOFP - is descendant\n",
			 con_np, sup_np);
		of_node_put(sup_np);
		return -EINVAL;
	}

	/*
	 * Don't create links to "early devices" that won't have struct devices
	 * created for them.
	 */
	sup_dev = get_dev_from_fwnode(&sup_np->fwnode);
	if (!sup_dev && of_node_check_flag(sup_np, OF_POPULATED)) {
		/* Early device without struct device. */
		dev_dbg(dev, "Not linking to %pOFP - No struct device\n",
			sup_np);
		pr_debug("Not linking %pOFP to %pOFP - No struct device\n",
			 con_np, sup_np);
		of_node_put(sup_np);
		return -ENODEV;
	} else if (!sup_dev) {
		/*
		 * DL_FLAG_SYNC_STATE_ONLY doesn't block probing and supports
		 * cycles. So cycle detection isn't necessary and shouldn't be
		 * done.
		 */
		if (dl_flags & DL_FLAG_SYNC_STATE_ONLY) {
			of_node_put(sup_np);
			return -EAGAIN;
		}

		sup_par_dev = of_get_next_parent_dev(sup_np);

		if (sup_par_dev && device_is_dependent(dev, sup_par_dev)) {
			/* Cyclic dependency detected, don't try to link */
			dev_dbg(dev, "Not linking to %pOFP - cycle detected\n",
				sup_np);
			ret = -EINVAL;
		} else {
			/*
			 * Can't check for cycles or no cycles. So let's try
			 * again later.
			 */
			ret = -EAGAIN;
		}

		of_node_put(sup_np);
		put_device(sup_par_dev);
		return ret;
	}
	of_node_put(sup_np);
	if (!device_link_add(dev, sup_dev, dl_flags))
		ret = -EINVAL;
	put_device(sup_dev);
	return ret;

	fwnode_link_add(of_fwnode_handle(con_np), of_fwnode_handle(sup_np));
	of_node_put(sup_np);

	return 0;
}

/**

@@ -1361,37 +1313,29 @@ static const struct supplier_bindings of_supplier_bindings[] = {
 * that list phandles to suppliers. If @prop_name isn't one, this function
 * doesn't do anything.
 *
 * If @prop_name is one, this function attempts to create device links from the
 * consumer device @dev to all the devices of the suppliers listed in
 * @prop_name.
 * If @prop_name is one, this function attempts to create fwnode links from the
 * consumer device tree node @con_np to all the suppliers device tree nodes
 * listed in @prop_name.
 *
 * Any failed attempt to create a device link will NOT result in an immediate
 * Any failed attempt to create a fwnode link will NOT result in an immediate
 * return. of_link_property() must create links to all the available supplier
 * devices even when attempts to create a link to one or more suppliers fail.
 * device tree nodes even when attempts to create a link to one or more
 * suppliers fail.
 */
static int of_link_property(struct device *dev, struct device_node *con_np,
			    const char *prop_name)
static int of_link_property(struct device_node *con_np, const char *prop_name)
{
	struct device_node *phandle;
	const struct supplier_bindings *s = of_supplier_bindings;
	unsigned int i = 0;
	bool matched = false;
	int ret = 0;
	u32 dl_flags;

	if (dev->of_node == con_np)
		dl_flags = fw_devlink_get_flags();
	else
		dl_flags = DL_FLAG_SYNC_STATE_ONLY;

	/* Do not stop at first failed link, link all available suppliers. */
	while (!matched && s->parse_prop) {
		while ((phandle = s->parse_prop(con_np, prop_name, i))) {
			matched = true;
			i++;
			if (of_link_to_phandle(dev, phandle, dl_flags)
			    == -EAGAIN)
				ret = -EAGAIN;
			of_link_to_phandle(con_np, phandle);
			of_node_put(phandle);
		}
		s++;

@@ -1399,31 +1343,18 @@ static int of_link_property(struct device *dev, struct device_node *con_np,
	return ret;
}

static int of_link_to_suppliers(struct device *dev,
				struct device_node *con_np)
static int of_fwnode_add_links(struct fwnode_handle *fwnode)
{
	struct device_node *child;
	struct property *p;
	int ret = 0;
	struct device_node *con_np = to_of_node(fwnode);

	if (!con_np)
		return -EINVAL;

	for_each_property_of_node(con_np, p)
		if (of_link_property(dev, con_np, p->name))
			ret = -ENODEV;
		of_link_property(con_np, p->name);

	for_each_available_child_of_node(con_np, child)
		if (of_link_to_suppliers(dev, child) && !ret)
			ret = -EAGAIN;

	return ret;
}

static int of_fwnode_add_links(const struct fwnode_handle *fwnode,
			       struct device *dev)
{
	if (unlikely(!is_of_node(fwnode)))
		return 0;

	return of_link_to_suppliers(dev, to_of_node(fwnode));
	return 0;
}

const struct fwnode_operations of_fwnode_ops = {

@@ -1614,12 +1614,18 @@ sl811h_probe(struct platform_device *dev)
	void __iomem *addr_reg;
	void __iomem *data_reg;
	int retval;
	u8 tmp, ioaddr = 0;
	u8 tmp, ioaddr;
	unsigned long irqflags;

	if (usb_disabled())
		return -ENODEV;

	/* the chip may be wired for either kind of addressing */
	addr = platform_get_mem_or_io(dev, 0);
	data = platform_get_mem_or_io(dev, 1);
	if (!addr || !data || resource_type(addr) != resource_type(data))
		return -ENODEV;

	/* basic sanity checks first. board-specific init logic should
	 * have initialized these three resources and probably board
	 * specific platform_data. we don't probe for IRQs, and do only

@@ -1632,16 +1638,8 @@ sl811h_probe(struct platform_device *dev)
	irq = ires->start;
	irqflags = ires->flags & IRQF_TRIGGER_MASK;

	/* the chip may be wired for either kind of addressing */
	addr = platform_get_resource(dev, IORESOURCE_MEM, 0);
	data = platform_get_resource(dev, IORESOURCE_MEM, 1);
	retval = -EBUSY;
	if (!addr || !data) {
		addr = platform_get_resource(dev, IORESOURCE_IO, 0);
		data = platform_get_resource(dev, IORESOURCE_IO, 1);
		if (!addr || !data)
			return -ENODEV;
		ioaddr = 1;
	ioaddr = resource_type(addr) == IORESOURCE_IO;
	if (ioaddr) {
		/*
		 * NOTE: 64-bit resource->start is getting truncated
		 * to avoid compiler warning, assuming that ->start

@@ -25,19 +25,8 @@ static struct resource *get_platform_resource(struct vfio_platform_device *vdev,
					      int num)
{
	struct platform_device *dev = (struct platform_device *) vdev->opaque;
	int i;

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = &dev->resource[i];

		if (resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) {
			if (!num)
				return r;

			num--;
		}
	}
	return NULL;
	return platform_get_mem_or_io(dev, num);
}

static int get_platform_irq(struct vfio_platform_device *vdev, int i)

@@ -604,8 +604,7 @@ const struct dentry_operations kernfs_dops = {
 */
struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry)
{
	if (dentry->d_sb->s_op == &kernfs_sops &&
	    !d_really_is_negative(dentry))
	if (dentry->d_sb->s_op == &kernfs_sops)
		return kernfs_dentry_node(dentry);
	return NULL;
}

@@ -1599,7 +1598,7 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
	return error;
}

/* Relationship between s_mode and the DT_xxx types */
/* Relationship between mode and the DT_xxx types */
static inline unsigned char dt_type(struct kernfs_node *kn)
{
	return (kn->mode >> 12) & 15;

@@ -55,7 +55,7 @@ static inline struct fwnode_handle *acpi_alloc_fwnode_static(void)
	if (!fwnode)
		return NULL;

	fwnode->ops = &acpi_static_fwnode_ops;
	fwnode_init(fwnode, &acpi_static_fwnode_ops);

	return fwnode;
}

@@ -351,19 +351,13 @@ enum dl_dev_state {
 * struct dev_links_info - Device data related to device links.
 * @suppliers: List of links to supplier devices.
 * @consumers: List of links to consumer devices.
 * @needs_suppliers: Hook to global list of devices waiting for suppliers.
 * @defer_hook: Hook to global list of devices that have deferred sync_state or
 *		deferred fw_devlink.
 * @need_for_probe: If needs_suppliers is on a list, this indicates if the
 *		    suppliers are needed for probe or not.
 * @defer_sync: Hook to global list of devices that have deferred sync_state.
 * @status: Driver status information.
 */
struct dev_links_info {
	struct list_head suppliers;
	struct list_head consumers;
	struct list_head needs_suppliers;
	struct list_head defer_hook;
	bool need_for_probe;
	struct list_head defer_sync;
	enum dl_dev_state status;
};

@@ -256,6 +256,20 @@ extern void class_destroy(struct class *cls);

/* This is a #define to keep the compiler from merging different
 * instances of the __key variable */

/**
 * class_create - create a struct class structure
 * @owner: pointer to the module that is to "own" this struct class
 * @name: pointer to a string for the name of this class.
 *
 * This is used to create a struct class pointer that can then be used
 * in calls to device_create().
 *
 * Returns &struct class pointer on success, or ERR_PTR() on error.
 *
 * Note, the pointer created here is to be destroyed when finished by
 * making a call to class_destroy().
 */
#define class_create(owner, name)		\
({						\
	static struct lock_class_key __key;	\

@@ -10,14 +10,32 @@
#define _LINUX_FWNODE_H_

#include <linux/types.h>
#include <linux/list.h>

struct fwnode_operations;
struct device;

/*
 * fwnode link flags
 *
 * LINKS_ADDED: The fwnode has already been parsed to add fwnode links.
 */
#define FWNODE_FLAG_LINKS_ADDED		BIT(0)

struct fwnode_handle {
	struct fwnode_handle *secondary;
	const struct fwnode_operations *ops;
	struct device *dev;
	struct list_head suppliers;
	struct list_head consumers;
	u8 flags;
};

struct fwnode_link {
	struct fwnode_handle *supplier;
	struct list_head s_hook;
	struct fwnode_handle *consumer;
	struct list_head c_hook;
};

/**

@@ -68,44 +86,8 @@ struct fwnode_reference_args {
 * endpoint node.
 * @graph_get_port_parent: Return the parent node of a port node.
 * @graph_parse_endpoint: Parse endpoint for port and endpoint id.
 * @add_links: Called after the device corresponding to the fwnode is added
 *	       using device_add(). The function is expected to create device
 *	       links to all the suppliers of the device that are available at
 *	       the time this function is called. The function must NOT stop
 *	       at the first failed device link if other unlinked supplier
 *	       devices are present in the system. This is necessary for the
 *	       driver/bus sync_state() callbacks to work correctly.
 *
 *	       For example, say Device-C depends on suppliers Device-S1 and
 *	       Device-S2 and the dependency is listed in that order in the
 *	       firmware. Say, S1 gets populated from the firmware after
 *	       late_initcall_sync(). Say S2 is populated and probed way
 *	       before that in device_initcall(). When C is populated, if this
 *	       add_links() function doesn't continue past a "failed linking to
 *	       S1" and continue linking C to S2, then S2 will get a
 *	       sync_state() callback before C is probed. This is because from
 *	       the perspective of S2, C was never a consumer when its
 *	       sync_state() evaluation is done. To avoid this, the add_links()
 *	       function has to go through all available suppliers of the
 *	       device (that corresponds to this fwnode) and link to them
 *	       before returning.
 *
 *	       If some suppliers are not yet available (indicated by an error
 *	       return value), this function will be called again when other
 *	       devices are added to allow creating device links to any newly
 *	       available suppliers.
 *
 *	       Return 0 if device links have been successfully created to all
 *	       the known suppliers of this device or if the supplier
 *	       information is not known.
 *
 *	       Return -ENODEV if the suppliers needed for probing this device
 *	       have not been registered yet (because device links can only be
 *	       created to devices registered with the driver core).
 *
 *	       Return -EAGAIN if some of the suppliers of this device have not
 *	       been registered yet, but none of those suppliers are necessary
 *	       for probing the device.
 * @add_links: Create fwnode links to all the suppliers of the fwnode. Return
 *	       zero on success, a negative error code otherwise.
 */
struct fwnode_operations {
	struct fwnode_handle *(*get)(struct fwnode_handle *fwnode);

@@ -145,8 +127,7 @@ struct fwnode_operations {
			(*graph_get_port_parent)(struct fwnode_handle *fwnode);
	int (*graph_parse_endpoint)(const struct fwnode_handle *fwnode,
				    struct fwnode_endpoint *endpoint);
	int (*add_links)(const struct fwnode_handle *fwnode,
			 struct device *dev);
	int (*add_links)(struct fwnode_handle *fwnode);
};

#define fwnode_has_op(fwnode, op)		\

@@ -170,8 +151,16 @@ struct fwnode_operations {
	} while (false)
#define get_dev_from_fwnode(fwnode)	get_device((fwnode)->dev)

static inline void fwnode_init(struct fwnode_handle *fwnode,
			       const struct fwnode_operations *ops)
{
	fwnode->ops = ops;
	INIT_LIST_HEAD(&fwnode->consumers);
	INIT_LIST_HEAD(&fwnode->suppliers);
}
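fwnode_init() is what replaces the open-coded `fwnode->ops = ...` assignments converted throughout this series (ACPI, OF, swnode, irqdomain, efifb). A sketch of how a firmware-node provider would now embed and initialize a handle; this is not part of the patch, and my_node / my_fwnode_ops are made-up names for illustration:

/* Hypothetical provider structure embedding a fwnode_handle. */
struct my_node {
	struct fwnode_handle fwnode;
	const char *name;
};

/* Assumed to be filled in elsewhere by the provider. */
static const struct fwnode_operations my_fwnode_ops;

static void my_node_setup(struct my_node *node, const char *name)
{
	node->name = name;
	/* Sets ->ops and initializes the new suppliers/consumers link lists. */
	fwnode_init(&node->fwnode, &my_fwnode_ops);
}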

extern u32 fw_devlink_get_flags(void);
void fw_devlink_pause(void);
void fw_devlink_resume(void);
int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup);
void fwnode_links_purge(struct fwnode_handle *fwnode);

#endif

@@ -116,7 +116,7 @@ struct kernfs_elem_attr {
 * kernfs node is represented by single kernfs_node. Most fields are
 * private to kernfs and shouldn't be accessed directly by kernfs users.
 *
 * As long as s_count reference is held, the kernfs_node itself is
 * As long as count reference is held, the kernfs_node itself is
 * accessible. Dereferencing elem or any other outer entity requires
 * active reference.
 */

@@ -108,7 +108,7 @@ static inline void of_node_init(struct device_node *node)
#if defined(CONFIG_OF_KOBJ)
	kobject_init(&node->kobj, &of_node_ktype);
#endif
	node->fwnode.ops = &of_fwnode_ops;
	fwnode_init(&node->fwnode, &of_fwnode_ops);
}

#if defined(CONFIG_OF_KOBJ)

@@ -1307,6 +1307,7 @@ static inline int of_get_available_child_count(const struct device_node *np)
#define _OF_DECLARE(table, name, compat, fn, fn_type)		\
	static const struct of_device_id __of_table_##name	\
		__used __section("__" #table "_of_table")	\
		__aligned(__alignof__(struct of_device_id))	\
		 = { .compatible = compat,			\
		     .data = (fn == (fn_type)NULL) ? fn : fn }
#else

@@ -52,6 +52,9 @@ extern struct device platform_bus;

extern struct resource *platform_get_resource(struct platform_device *,
					       unsigned int, unsigned int);
extern struct resource *platform_get_mem_or_io(struct platform_device *,
					       unsigned int);

extern struct device *
platform_find_device_by_driver(struct device *start,
			       const struct device_driver *drv);
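platform_get_mem_or_io() returns the nth resource of a platform device that is either MEM or IO, which is what lets the pvpanic, sl811 and vfio conversions above drop their duplicated MEM/IO lookups. A hedged probe sketch, not part of the patch (example_probe is a hypothetical driver callback), following the same pattern as the pvpanic conversion:

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* First MEM or IO resource, whichever the board wired up. */
	res = platform_get_mem_or_io(pdev, 0);
	if (!res)
		return -ENODEV;

	if (resource_type(res) == IORESOURCE_IO)
		base = devm_ioport_map(&pdev->dev, res->start, resource_size(res));
	else
		base = devm_ioremap_resource(&pdev->dev, res);
	/* devm_ioport_map() returns NULL on failure, devm_ioremap_resource() an ERR_PTR. */
	if (IS_ERR_OR_NULL(base))
		return base ? PTR_ERR(base) : -ENOMEM;

	return 0;
}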

@@ -85,9 +85,12 @@ const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_get_next_parent(
	struct fwnode_handle *fwnode);
struct device *fwnode_get_next_parent_dev(struct fwnode_handle *fwnode);
unsigned int fwnode_count_parents(const struct fwnode_handle *fwn);
struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwn,
					    unsigned int depth);
bool fwnode_is_ancestor_of(struct fwnode_handle *test_ancestor,
			   struct fwnode_handle *test_child);
struct fwnode_handle *fwnode_get_next_child_node(
	const struct fwnode_handle *fwnode, struct fwnode_handle *child);
struct fwnode_handle *fwnode_get_next_available_child_node(

@@ -100,7 +100,7 @@ struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
	fwid->type = type;
	fwid->name = n;
	fwid->pa = pa;
	fwid->fwnode.ops = &irqchip_fwnode_ops;
	fwnode_init(&fwid->fwnode, &irqchip_fwnode_ops);
	return &fwid->fwnode;
}
EXPORT_SYMBOL_GPL(__irq_domain_alloc_fwnode);

@@ -561,9 +561,14 @@ static int ddebug_exec_queries(char *query, const char *modname)
int dynamic_debug_exec_queries(const char *query, const char *modname)
{
	int rc;
	char *qry = kstrndup(query, PAGE_SIZE, GFP_KERNEL);
	char *qry; /* writable copy of query */

	if (!query)
	if (!query) {
		pr_err("non-null query/command string expected\n");
		return -EINVAL;
	}
	qry = kstrndup(query, PAGE_SIZE, GFP_KERNEL);
	if (!qry)
		return -ENOMEM;

	rc = ddebug_exec_queries(qry, modname);
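For context, dynamic_debug_exec_queries() is the exported entry point that kernel code can call with the same query syntax used for the dyndbg boot and module parameters; the null-check fixed above guards exactly this path. A hypothetical caller, not part of the patch (the function name and query string are only examples):

/* Hypothetical: enable pr_debug() for one function at runtime. */
static void example_enable_debug(void)
{
	int rc;

	/* Same syntax as the dyndbg= parameter; NULL modname means no module default. */
	rc = dynamic_debug_exec_queries("func example_probe +p", NULL);
	if (rc < 0)
		pr_warn("dyndbg query failed: %d\n", rc);
}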