// SPDX-License-Identifier: GPL-2.0
/*
 * OMAP hardware spinlock driver
 *
 * Copyright (C) 2010-2021 Texas Instruments Incorporated - https://www.ti.com
 *
 * Contact: Simon Que <sque@ti.com>
 *          Hari Kanigeri <h-kanigeri2@ti.com>
 *          Ohad Ben-Cohen <ohad@wizery.com>
 *          Suman Anna <s-anna@ti.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/hwspinlock.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include "hwspinlock_internal.h"

/* Spinlock register offsets */
#define SYSSTATUS_OFFSET		0x0014
#define LOCK_BASE_OFFSET		0x0800

#define SPINLOCK_NUMLOCKS_BIT_OFFSET	(24)

/* Possible values of SPINLOCK_LOCK_REG */
#define SPINLOCK_NOTTAKEN		(0)	/* free */
#define SPINLOCK_TAKEN			(1)	/* locked */

static int omap_hwspinlock_trylock(struct hwspinlock *lock)
{
	void __iomem *lock_addr = lock->priv;

	/* attempt to acquire the lock by reading its value */
	return (SPINLOCK_NOTTAKEN == readl(lock_addr));
}

static void omap_hwspinlock_unlock(struct hwspinlock *lock)
{
	void __iomem *lock_addr = lock->priv;

	/* release the lock by writing 0 to it */
	writel(SPINLOCK_NOTTAKEN, lock_addr);
}

/*
 * relax the OMAP interconnect while spinning on it.
 *
 * The specs recommend that the retry delay be just over half of the
 * time that a requester is expected to hold the lock.
 *
 * The number below is taken from a hardware specs example, so it is
 * somewhat arbitrary.
 */
static void omap_hwspinlock_relax(struct hwspinlock *lock)
{
	ndelay(50);
}

static const struct hwspinlock_ops omap_hwspinlock_ops = {
	.trylock = omap_hwspinlock_trylock,
	.unlock = omap_hwspinlock_unlock,
	.relax = omap_hwspinlock_relax,
};

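/*
 * A minimal usage sketch (not part of this driver): clients go through
 * the hwspinlock core rather than calling the ops above directly. The
 * lock id (0) and the 100 ms timeout below are illustrative values only.
 *
 *	struct hwspinlock *hwlock;
 *	unsigned long flags;
 *	int err;
 *
 *	hwlock = hwspin_lock_request_specific(0);
 *	if (!hwlock)
 *		return -EBUSY;
 *
 *	err = hwspin_lock_timeout_irqsave(hwlock, 100, &flags);
 *	if (!err) {
 *		... critical section shared with a remote processor ...
 *		hwspin_unlock_irqrestore(hwlock, &flags);
 *	}
 *
 *	hwspin_lock_free(hwlock);
 */
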
static int omap_hwspinlock_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct hwspinlock_device *bank;
	struct hwspinlock *hwlock;
	void __iomem *io_base;
	int num_locks, i, ret;
	/* Only a single hwspinlock block device is supported */
	int base_id = 0;

	if (!node)
		return -ENODEV;

	io_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(io_base))
		return PTR_ERR(io_base);

	/*
	 * make sure the module is enabled and clocked before reading
	 * the module SYSSTATUS register
	 */
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0)
		goto runtime_err;

	/* Determine number of locks */
	i = readl(io_base + SYSSTATUS_OFFSET);
	i >>= SPINLOCK_NUMLOCKS_BIT_OFFSET;

	/*
	 * runtime PM will make sure the clock of this module is
	 * enabled again iff at least one lock is requested
	 */
	ret = pm_runtime_put(&pdev->dev);
	if (ret < 0)
		goto runtime_err;

	/* one of the four lsb's must be set, and nothing else */
	if (hweight_long(i & 0xf) != 1 || i > 8) {
		ret = -EINVAL;
		goto runtime_err;
	}

	num_locks = i * 32; /* actual number of locks in this device */

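	/*
	 * The one-hot NUMLOCKS field accepted above is 1, 2, 4 or 8, so
	 * num_locks evaluates to 32, 64, 128 or 256; a field value of 2,
	 * for example, means this device provides 64 locks.
	 */
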
	bank = devm_kzalloc(&pdev->dev, struct_size(bank, lock, num_locks),
			    GFP_KERNEL);
	if (!bank) {
		ret = -ENOMEM;
		goto runtime_err;
	}

	platform_set_drvdata(pdev, bank);

	for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++)
		hwlock->priv = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;

	ret = hwspin_lock_register(bank, &pdev->dev, &omap_hwspinlock_ops,
				   base_id, num_locks);
	if (ret)
		goto runtime_err;

	dev_dbg(&pdev->dev, "Registered %d locks with HwSpinlock core\n",
		num_locks);

	return 0;

runtime_err:
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int omap_hwspinlock_remove(struct platform_device *pdev)
{
	struct hwspinlock_device *bank = platform_get_drvdata(pdev);
	int ret;

	ret = hwspin_lock_unregister(bank);
	if (ret) {
		dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
		return ret;
	}

	pm_runtime_disable(&pdev->dev);

	return 0;
}

static const struct of_device_id omap_hwspinlock_of_match[] = {
	{ .compatible = "ti,omap4-hwspinlock", },
	{ .compatible = "ti,am64-hwspinlock", },
	{ .compatible = "ti,am654-hwspinlock", },
	{ /* end */ },
};
MODULE_DEVICE_TABLE(of, omap_hwspinlock_of_match);

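/*
 * A representative devicetree node for this block (the unit address and
 * register window below are illustrative, not taken from this file):
 *
 *	spinlock: spinlock@4a0f6000 {
 *		compatible = "ti,omap4-hwspinlock";
 *		reg = <0x4a0f6000 0x1000>;
 *		#hwlock-cells = <1>;
 *	};
 */
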
static struct platform_driver omap_hwspinlock_driver = {
	.probe = omap_hwspinlock_probe,
	.remove = omap_hwspinlock_remove,
	.driver = {
		.name = "omap_hwspinlock",
		.of_match_table = of_match_ptr(omap_hwspinlock_of_match),
	},
};

static int __init omap_hwspinlock_init(void)
{
	return platform_driver_register(&omap_hwspinlock_driver);
}
/* board init code might need to reserve hwspinlocks for predefined purposes */
postcore_initcall(omap_hwspinlock_init);

static void __exit omap_hwspinlock_exit(void)
{
	platform_driver_unregister(&omap_hwspinlock_driver);
}
module_exit(omap_hwspinlock_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock driver for OMAP");
MODULE_AUTHOR("Simon Que <sque@ti.com>");
MODULE_AUTHOR("Hari Kanigeri <h-kanigeri2@ti.com>");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");