regmap: Updates for v3.10
In user visible terms just a couple of enhancements here, though there
was a moderate amount of refactoring required in order to support the
register cache sync performance improvements.

 - Support for block and asynchronous I/O during register cache
   syncing; this provides a use case dependent performance improvement.
 - Additional debugfs information on the memory consumption and
   register set.
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.12 (GNU/Linux)

iQIbBAABAgAGBQJRfoZRAAoJELSic+t+oim9gXAP+JhAihmIQJlhUxZkXojFhClD
SKNWuFHmFC6VGndv52HPZR7nLN6hIlT4VUqk/rEw58R/RTqGuuWGc0KnKJf7ipid
6CdutuOP6q8mgs02kGKFAWRbSl++IXJ4TwvBbiyDMBmmngFoJY+gnmtnpP+PzcAd
LA3fn54jDWzBKCSlFBEC5acYxOMPmzm2uW13mO8Gy1RJrUkXfOemEFsyP0NVNJys
N0Zslp4nUUWmEu41UujuAUGZ7xXnnNQF5R4/RdS3+p22+sCEe7/mhLU1AxalUT4c
m9h9U2UKoXqRBuFQ9kRGwM2Gufjg33DoB0ExqIDEgaD2kRdAdAo/WhTHLxTiQEfq
6YXGZYwl0QUC1KcUwUWJZIq/nECibaYDAoyooNzLQNPAbbO6gdjsTIVCaZK8U/k6
D8bWAM4eRbv6xwXEd8rKW5+2f41dnsb5O3OgbdEEBZnbQ8UizI9KDGbPB3ARV2RI
Xqn+lYZV/q/99Bb3Pn0oS6Ud/tz5BqN4w3N84H0KcvcRHXvYjkdQ6ulsterRykOa
gYWfsCKTbm2C1zBLGDPXkDablodLZmzoCs4ajeIt6zIELNzuIsI3trprpT85RtrS
cjYl61ECuypPYBIW4uzxxBk/FeiEjQ4ndgQ4MgVnUfx0NpmG2N9LlDc2r6i+UgV/
EBxvYlPsEzQYLKoiJl8=
=RG1W
-----END PGP SIGNATURE-----

Merge tag 'regmap-v3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap

Pull regmap updates from Mark Brown:
 "In user visible terms just a couple of enhancements here, though there
  was a moderate amount of refactoring required in order to support the
  register cache sync performance improvements.

  - Support for block and asynchronous I/O during register cache
    syncing; this provides a use case dependent performance improvement.

  - Additional debugfs information on the memory consumption and
    register set"

* tag 'regmap-v3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap: (23 commits)
  regmap: don't corrupt work buffer in _regmap_raw_write()
  regmap: cache: Fix format specifier in dev_dbg
  regmap: cache: Make regcache_sync_block_raw static
  regmap: cache: Write consecutive registers in a single block write
  regmap: cache: Split raw and non-raw syncs
  regmap: cache: Factor out block sync
  regmap: cache: Factor out reg_present support from rbtree cache
  regmap: cache: Use raw I/O to sync rbtrees if we can
  regmap: core: Provide regmap_can_raw_write() operation
  regmap: cache: Provide a get address of value operation
  regmap: Cut down on the average # of nodes in the rbtree cache
  regmap: core: Make raw write available to regcache
  regmap: core: Warn on invalid operation combinations
  regmap: irq: Clarify error message when we fail to request primary IRQ
  regmap: rbtree Expose total memory consumption in the rbtree debugfs entry
  regmap: debugfs: Add a registers `range' file
  regmap: debugfs: Simplify calculation of `c->max_reg'
  regmap: cache: Store caches in native register format where possible
  regmap: core: Split out in place value parsing
  regmap: cache: Use regcache_get_value() to check if we updated
  ...
Commit: 7b053842b9
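The headline change in this series is the block sync path added to regcache below: regcache_sync_block_raw() accumulates runs of consecutive cached registers that differ from their hardware defaults, and regcache_sync_block_raw_flush() pushes each run to the device as a single raw write instead of one bus transaction per register. The following stand-alone C sketch models only that batching idea with a toy register cache; toy_flush(), reg_needs_sync() and the arrays here are illustrative stand-ins, not part of the regmap API.

#include <stdio.h>
#include <stdbool.h>

#define NREGS 16

/* Toy model: hardware defaults vs. current cache contents. */
static const unsigned int defaults[NREGS];
static unsigned int cache[NREGS] = {
	[2] = 0x11, [3] = 0x12, [4] = 0x13, [9] = 0x44,
};

/* Hypothetical stand-in for one raw block write to the bus. */
static void toy_flush(int base, int count, const unsigned int *vals)
{
	printf("raw write: %d registers at %d (first value 0x%x)\n",
	       count, base, vals[0]);
}

static bool reg_needs_sync(int reg)
{
	return cache[reg] != defaults[reg];
}

int main(void)
{
	int reg, base = -1;

	for (reg = 0; reg < NREGS; reg++) {
		if (!reg_needs_sync(reg)) {
			/* Close off any run we were accumulating. */
			if (base >= 0)
				toy_flush(base, reg - base, &cache[base]);
			base = -1;
			continue;
		}
		if (base < 0)
			base = reg; /* start a new run */
	}
	if (base >= 0)
		toy_flush(base, NREGS - base, &cache[base]);

	return 0;
}

In the real code, regcache_sync_block() (see the regcache hunks further down) chooses between this raw batching path and a register-at-a-time fallback depending on regmap_can_raw_write().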
@@ -38,7 +38,8 @@ struct regmap_format {
 			     unsigned int reg, unsigned int val);
 	void (*format_reg)(void *buf, unsigned int reg, unsigned int shift);
 	void (*format_val)(void *buf, unsigned int val, unsigned int shift);
-	unsigned int (*parse_val)(void *buf);
+	unsigned int (*parse_val)(const void *buf);
+	void (*parse_inplace)(void *buf);
 };
 
 struct regmap_async {
@@ -76,6 +77,7 @@ struct regmap {
 	unsigned int debugfs_tot_len;
 
 	struct list_head debugfs_off_cache;
+	struct mutex cache_lock;
 #endif
 
 	unsigned int max_register;
@@ -125,6 +127,9 @@ struct regmap {
 	void *cache;
 	u32 cache_dirty;
 
+	unsigned long *cache_present;
+	unsigned int cache_present_nbits;
+
 	struct reg_default *patch;
 	int patch_regs;
 
@@ -187,12 +192,35 @@ int regcache_read(struct regmap *map,
 int regcache_write(struct regmap *map,
 			unsigned int reg, unsigned int value);
 int regcache_sync(struct regmap *map);
+int regcache_sync_block(struct regmap *map, void *block,
+			unsigned int block_base, unsigned int start,
+			unsigned int end);
 
-unsigned int regcache_get_val(const void *base, unsigned int idx,
-			      unsigned int word_size);
-bool regcache_set_val(void *base, unsigned int idx,
-		      unsigned int val, unsigned int word_size);
+static inline const void *regcache_get_val_addr(struct regmap *map,
+						const void *base,
+						unsigned int idx)
+{
+	return base + (map->cache_word_size * idx);
+}
+
+unsigned int regcache_get_val(struct regmap *map, const void *base,
+			      unsigned int idx);
+bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
+		      unsigned int val);
 int regcache_lookup_reg(struct regmap *map, unsigned int reg);
+int regcache_set_reg_present(struct regmap *map, unsigned int reg);
+
+static inline bool regcache_reg_present(struct regmap *map, unsigned int reg)
+{
+	if (!map->cache_present)
+		return true;
+	if (reg > map->cache_present_nbits)
+		return false;
+	return map->cache_present[BIT_WORD(reg)] & BIT_MASK(reg);
+}
+
+int _regmap_raw_write(struct regmap *map, unsigned int reg,
+		      const void *val, size_t val_len, bool async);
 
 void regmap_async_complete_cb(struct regmap_async *async, int ret);
 
@@ -260,8 +260,7 @@ static int regcache_lzo_read(struct regmap *map,
 	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
 	if (ret >= 0)
 		/* fetch the value from the cache */
-		*value = regcache_get_val(lzo_block->dst, blkpos,
-					  map->cache_word_size);
+		*value = regcache_get_val(map, lzo_block->dst, blkpos);
 
 	kfree(lzo_block->dst);
 	/* restore the pointer and length of the compressed block */
@@ -304,8 +303,7 @@ static int regcache_lzo_write(struct regmap *map,
 	}
 
 	/* write the new value to the cache */
-	if (regcache_set_val(lzo_block->dst, blkpos, value,
-			     map->cache_word_size)) {
+	if (regcache_set_val(map, lzo_block->dst, blkpos, value)) {
 		kfree(lzo_block->dst);
 		goto out;
 	}
@@ -47,18 +47,17 @@ static inline void regcache_rbtree_get_base_top_reg(
 	*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
 }
 
-static unsigned int regcache_rbtree_get_register(
-	struct regcache_rbtree_node *rbnode, unsigned int idx,
-	unsigned int word_size)
+static unsigned int regcache_rbtree_get_register(struct regmap *map,
+	struct regcache_rbtree_node *rbnode, unsigned int idx)
 {
-	return regcache_get_val(rbnode->block, idx, word_size);
+	return regcache_get_val(map, rbnode->block, idx);
 }
 
-static void regcache_rbtree_set_register(struct regcache_rbtree_node *rbnode,
-					 unsigned int idx, unsigned int val,
-					 unsigned int word_size)
+static void regcache_rbtree_set_register(struct regmap *map,
+					 struct regcache_rbtree_node *rbnode,
+					 unsigned int idx, unsigned int val)
 {
-	regcache_set_val(rbnode->block, idx, val, word_size);
+	regcache_set_val(map, rbnode->block, idx, val);
 }
 
 static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
@@ -139,15 +138,21 @@ static int rbtree_show(struct seq_file *s, void *ignored)
 	struct regcache_rbtree_node *n;
 	struct rb_node *node;
 	unsigned int base, top;
+	size_t mem_size;
 	int nodes = 0;
 	int registers = 0;
 	int this_registers, average;
 
 	map->lock(map);
 
+	mem_size = sizeof(*rbtree_ctx);
+	mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long);
+
 	for (node = rb_first(&rbtree_ctx->root); node != NULL;
 	     node = rb_next(node)) {
 		n = container_of(node, struct regcache_rbtree_node, node);
+		mem_size += sizeof(*n);
+		mem_size += (n->blklen * map->cache_word_size);
 
 		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
 		this_registers = ((top - base) / map->reg_stride) + 1;
@@ -162,8 +167,8 @@ static int rbtree_show(struct seq_file *s, void *ignored)
 	else
 		average = 0;
 
-	seq_printf(s, "%d nodes, %d registers, average %d registers\n",
-		   nodes, registers, average);
+	seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
+		   nodes, registers, average, mem_size);
 
 	map->unlock(map);
 
@@ -260,8 +265,9 @@ static int regcache_rbtree_read(struct regmap *map,
 	rbnode = regcache_rbtree_lookup(map, reg);
 	if (rbnode) {
 		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
-		*value = regcache_rbtree_get_register(rbnode, reg_tmp,
-						      map->cache_word_size);
+		if (!regcache_reg_present(map, reg))
+			return -ENOENT;
+		*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
 	} else {
 		return -ENOENT;
 	}
@@ -270,21 +276,23 @@ static int regcache_rbtree_read(struct regmap *map,
 }
 
 
-static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode,
+static int regcache_rbtree_insert_to_block(struct regmap *map,
+					   struct regcache_rbtree_node *rbnode,
 					   unsigned int pos, unsigned int reg,
-					   unsigned int value, unsigned int word_size)
+					   unsigned int value)
 {
 	u8 *blk;
 
 	blk = krealloc(rbnode->block,
-		       (rbnode->blklen + 1) * word_size, GFP_KERNEL);
+		       (rbnode->blklen + 1) * map->cache_word_size,
+		       GFP_KERNEL);
 	if (!blk)
 		return -ENOMEM;
 
 	/* insert the register value in the correct place in the rbnode block */
-	memmove(blk + (pos + 1) * word_size,
-		blk + pos * word_size,
-		(rbnode->blklen - pos) * word_size);
+	memmove(blk + (pos + 1) * map->cache_word_size,
+		blk + pos * map->cache_word_size,
+		(rbnode->blklen - pos) * map->cache_word_size);
 
 	/* update the rbnode block, its size and the base register */
 	rbnode->block = blk;
@@ -292,7 +300,7 @@ static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode,
 	if (!pos)
 		rbnode->base_reg = reg;
 
-	regcache_rbtree_set_register(rbnode, pos, value, word_size);
+	regcache_rbtree_set_register(map, rbnode, pos, value);
 	return 0;
 }
 
@@ -302,25 +310,24 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
 	struct regcache_rbtree_ctx *rbtree_ctx;
 	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
 	struct rb_node *node;
-	unsigned int val;
 	unsigned int reg_tmp;
 	unsigned int pos;
 	int i;
 	int ret;
 
 	rbtree_ctx = map->cache;
+	/* update the reg_present bitmap, make space if necessary */
+	ret = regcache_set_reg_present(map, reg);
+	if (ret < 0)
+		return ret;
+
 	/* if we can't locate it in the cached rbnode we'll have
 	 * to traverse the rbtree looking for it.
 	 */
 	rbnode = regcache_rbtree_lookup(map, reg);
 	if (rbnode) {
 		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
-		val = regcache_rbtree_get_register(rbnode, reg_tmp,
-						   map->cache_word_size);
-		if (val == value)
-			return 0;
-		regcache_rbtree_set_register(rbnode, reg_tmp, value,
-					     map->cache_word_size);
+		regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
 	} else {
 		/* look for an adjacent register to the one we are about to add */
 		for (node = rb_first(&rbtree_ctx->root); node;
@@ -337,9 +344,10 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
 					pos = i + 1;
 				else
 					pos = i;
-				ret = regcache_rbtree_insert_to_block(rbnode_tmp, pos,
-								      reg, value,
-								      map->cache_word_size);
+				ret = regcache_rbtree_insert_to_block(map,
+								      rbnode_tmp,
+								      pos, reg,
+								      value);
 				if (ret)
 					return ret;
 				rbtree_ctx->cached_rbnode = rbnode_tmp;
@@ -354,7 +362,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
 		rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
 		if (!rbnode)
 			return -ENOMEM;
-		rbnode->blklen = 1;
+		rbnode->blklen = sizeof(*rbnode);
 		rbnode->base_reg = reg;
 		rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
 					GFP_KERNEL);
@@ -362,7 +370,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
 			kfree(rbnode);
 			return -ENOMEM;
 		}
-		regcache_rbtree_set_register(rbnode, 0, value, map->cache_word_size);
+		regcache_rbtree_set_register(map, rbnode, 0, value);
 		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
 		rbtree_ctx->cached_rbnode = rbnode;
 	}
@@ -376,10 +384,8 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
 	struct regcache_rbtree_ctx *rbtree_ctx;
 	struct rb_node *node;
 	struct regcache_rbtree_node *rbnode;
-	unsigned int regtmp;
-	unsigned int val;
 	int ret;
-	int i, base, end;
+	int base, end;
 
 	rbtree_ctx = map->cache;
 	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
@@ -402,27 +408,13 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
 		else
 			end = rbnode->blklen;
 
-		for (i = base; i < end; i++) {
-			regtmp = rbnode->base_reg + (i * map->reg_stride);
-			val = regcache_rbtree_get_register(rbnode, i,
-							   map->cache_word_size);
-
-			/* Is this the hardware default? If so skip. */
-			ret = regcache_lookup_reg(map, regtmp);
-			if (ret >= 0 && val == map->reg_defaults[ret].def)
-				continue;
-
-			map->cache_bypass = 1;
-			ret = _regmap_write(map, regtmp, val);
-			map->cache_bypass = 0;
-			if (ret)
+		ret = regcache_sync_block(map, rbnode->block, rbnode->base_reg,
+					  base, end);
+		if (ret != 0)
 			return ret;
-			dev_dbg(map->dev, "Synced register %#x, value %#x\n",
-				regtmp, val);
-		}
 	}
 
-	return 0;
+	return regmap_async_complete(map);
 }
 
 struct regcache_ops regcache_rbtree_ops = {
@@ -45,7 +45,7 @@ static int regcache_hw_init(struct regmap *map)
 	tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
 	if (!tmp_buf)
 		return -EINVAL;
-	ret = regmap_bulk_read(map, 0, tmp_buf,
+	ret = regmap_raw_read(map, 0, tmp_buf,
 			      map->num_reg_defaults_raw);
 	map->cache_bypass = cache_bypass;
 	if (ret < 0) {
@@ -58,8 +58,7 @@ static int regcache_hw_init(struct regmap *map)
 
 	/* calculate the size of reg_defaults */
 	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
-		val = regcache_get_val(map->reg_defaults_raw,
-				       i, map->cache_word_size);
+		val = regcache_get_val(map, map->reg_defaults_raw, i);
 		if (regmap_volatile(map, i * map->reg_stride))
 			continue;
 		count++;
@@ -75,8 +74,7 @@ static int regcache_hw_init(struct regmap *map)
 	/* fill the reg_defaults */
 	map->num_reg_defaults = count;
 	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
-		val = regcache_get_val(map->reg_defaults_raw,
-				       i, map->cache_word_size);
+		val = regcache_get_val(map, map->reg_defaults_raw, i);
 		if (regmap_volatile(map, i * map->reg_stride))
 			continue;
 		map->reg_defaults[j].reg = i * map->reg_stride;
@@ -123,6 +121,8 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
 	map->reg_defaults_raw = config->reg_defaults_raw;
 	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
 	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
+	map->cache_present = NULL;
+	map->cache_present_nbits = 0;
 
 	map->cache = NULL;
 	map->cache_ops = cache_types[i];
@@ -181,6 +181,7 @@ void regcache_exit(struct regmap *map)
 
 	BUG_ON(!map->cache_ops);
 
+	kfree(map->cache_present);
 	kfree(map->reg_defaults);
 	if (map->cache_free)
 		kfree(map->reg_defaults_raw);
@@ -417,28 +418,68 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
 }
 EXPORT_SYMBOL_GPL(regcache_cache_bypass);
 
-bool regcache_set_val(void *base, unsigned int idx,
-		      unsigned int val, unsigned int word_size)
+int regcache_set_reg_present(struct regmap *map, unsigned int reg)
 {
-	switch (word_size) {
+	unsigned long *cache_present;
+	unsigned int cache_present_size;
+	unsigned int nregs;
+	int i;
+
+	nregs = reg + 1;
+	cache_present_size = BITS_TO_LONGS(nregs);
+	cache_present_size *= sizeof(long);
+
+	if (!map->cache_present) {
+		cache_present = kmalloc(cache_present_size, GFP_KERNEL);
+		if (!cache_present)
+			return -ENOMEM;
+		bitmap_zero(cache_present, nregs);
+		map->cache_present = cache_present;
+		map->cache_present_nbits = nregs;
+	}
+
+	if (nregs > map->cache_present_nbits) {
+		cache_present = krealloc(map->cache_present,
+					 cache_present_size, GFP_KERNEL);
+		if (!cache_present)
+			return -ENOMEM;
+		for (i = 0; i < nregs; i++)
+			if (i >= map->cache_present_nbits)
+				clear_bit(i, cache_present);
+		map->cache_present = cache_present;
+		map->cache_present_nbits = nregs;
+	}
+
+	set_bit(reg, map->cache_present);
+	return 0;
+}
+
+bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
+		      unsigned int val)
+{
+	if (regcache_get_val(map, base, idx) == val)
+		return true;
+
+	/* Use device native format if possible */
+	if (map->format.format_val) {
+		map->format.format_val(base + (map->cache_word_size * idx),
+				       val, 0);
+		return false;
+	}
+
+	switch (map->cache_word_size) {
 	case 1: {
 		u8 *cache = base;
 		if (cache[idx] == val)
 			return true;
 		cache[idx] = val;
 		break;
 	}
 	case 2: {
 		u16 *cache = base;
 		if (cache[idx] == val)
 			return true;
 		cache[idx] = val;
 		break;
 	}
 	case 4: {
 		u32 *cache = base;
 		if (cache[idx] == val)
 			return true;
 		cache[idx] = val;
 		break;
 	}
@@ -448,13 +489,18 @@ bool regcache_set_val(void *base, unsigned int idx,
 	return false;
 }
 
-unsigned int regcache_get_val(const void *base, unsigned int idx,
-			      unsigned int word_size)
+unsigned int regcache_get_val(struct regmap *map, const void *base,
+			      unsigned int idx)
 {
 	if (!base)
 		return -EINVAL;
 
-	switch (word_size) {
+	/* Use device native format if possible */
+	if (map->format.parse_val)
+		return map->format.parse_val(regcache_get_val_addr(map, base,
+								   idx));
+
+	switch (map->cache_word_size) {
 	case 1: {
 		const u8 *cache = base;
 		return cache[idx];
@@ -498,3 +544,117 @@ int regcache_lookup_reg(struct regmap *map, unsigned int reg)
 	else
 		return -ENOENT;
 }
+
+static int regcache_sync_block_single(struct regmap *map, void *block,
+				      unsigned int block_base,
+				      unsigned int start, unsigned int end)
+{
+	unsigned int i, regtmp, val;
+	int ret;
+
+	for (i = start; i < end; i++) {
+		regtmp = block_base + (i * map->reg_stride);
+
+		if (!regcache_reg_present(map, regtmp))
+			continue;
+
+		val = regcache_get_val(map, block, i);
+
+		/* Is this the hardware default? If so skip. */
+		ret = regcache_lookup_reg(map, regtmp);
+		if (ret >= 0 && val == map->reg_defaults[ret].def)
+			continue;
+
+		map->cache_bypass = 1;
+
+		ret = _regmap_write(map, regtmp, val);
+
+		map->cache_bypass = 0;
+		if (ret != 0)
+			return ret;
+		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
+			regtmp, val);
+	}
+
+	return 0;
+}
+
+static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
+					 unsigned int base, unsigned int cur)
+{
+	size_t val_bytes = map->format.val_bytes;
+	int ret, count;
+
+	if (*data == NULL)
+		return 0;
+
+	count = cur - base;
+
+	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
+		count * val_bytes, count, base, cur - 1);
+
+	map->cache_bypass = 1;
+
+	ret = _regmap_raw_write(map, base, *data, count * val_bytes,
+				false);
+
+	map->cache_bypass = 0;
+
+	*data = NULL;
+
+	return ret;
+}
+
+static int regcache_sync_block_raw(struct regmap *map, void *block,
+				   unsigned int block_base, unsigned int start,
+				   unsigned int end)
+{
+	unsigned int i, val;
+	unsigned int regtmp = 0;
+	unsigned int base = 0;
+	const void *data = NULL;
+	int ret;
+
+	for (i = start; i < end; i++) {
+		regtmp = block_base + (i * map->reg_stride);
+
+		if (!regcache_reg_present(map, regtmp)) {
+			ret = regcache_sync_block_raw_flush(map, &data,
+							    base, regtmp);
+			if (ret != 0)
+				return ret;
+			continue;
+		}
+
+		val = regcache_get_val(map, block, i);
+
+		/* Is this the hardware default? If so skip. */
+		ret = regcache_lookup_reg(map, regtmp);
+		if (ret >= 0 && val == map->reg_defaults[ret].def) {
+			ret = regcache_sync_block_raw_flush(map, &data,
+							    base, regtmp);
+			if (ret != 0)
+				return ret;
+			continue;
+		}
+
+		if (!data) {
+			data = regcache_get_val_addr(map, block, i);
+			base = regtmp;
+		}
+	}
+
+	return regcache_sync_block_raw_flush(map, &data, base, regtmp);
+}
+
+int regcache_sync_block(struct regmap *map, void *block,
+			unsigned int block_base, unsigned int start,
+			unsigned int end)
+{
+	if (regmap_can_raw_write(map))
+		return regcache_sync_block_raw(map, block, block_base,
+					       start, end);
+	else
+		return regcache_sync_block_single(map, block, block_base,
+						  start, end);
+}
@@ -88,16 +88,16 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
 	 * If we don't have a cache build one so we don't have to do a
 	 * linear scan each time.
 	 */
+	mutex_lock(&map->cache_lock);
+	i = base;
 	if (list_empty(&map->debugfs_off_cache)) {
-		for (i = base; i <= map->max_register; i += map->reg_stride) {
+		for (; i <= map->max_register; i += map->reg_stride) {
 			/* Skip unprinted registers, closing off cache entry */
 			if (!regmap_readable(map, i) ||
 			    regmap_precious(map, i)) {
 				if (c) {
 					c->max = p - 1;
-					fpos_offset = c->max - c->min;
-					reg_offset = fpos_offset / map->debugfs_tot_len;
-					c->max_reg = c->base_reg + reg_offset;
+					c->max_reg = i - map->reg_stride;
 					list_add_tail(&c->list,
 						      &map->debugfs_off_cache);
 					c = NULL;
@@ -111,6 +111,7 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
 				c = kzalloc(sizeof(*c), GFP_KERNEL);
 				if (!c) {
 					regmap_debugfs_free_dump_cache(map);
+					mutex_unlock(&map->cache_lock);
 					return base;
 				}
 				c->min = p;
@@ -124,9 +125,7 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
 		/* Close the last entry off if we didn't scan beyond it */
 		if (c) {
 			c->max = p - 1;
-			fpos_offset = c->max - c->min;
-			reg_offset = fpos_offset / map->debugfs_tot_len;
-			c->max_reg = c->base_reg + reg_offset;
+			c->max_reg = i - map->reg_stride;
 			list_add_tail(&c->list,
 				      &map->debugfs_off_cache);
 		}
@@ -145,12 +144,14 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
 			fpos_offset = from - c->min;
 			reg_offset = fpos_offset / map->debugfs_tot_len;
 			*pos = c->min + (reg_offset * map->debugfs_tot_len);
+			mutex_unlock(&map->cache_lock);
 			return c->base_reg + reg_offset;
 		}
 
 		*pos = c->max;
 		ret = c->max_reg;
 	}
+	mutex_unlock(&map->cache_lock);
 
 	return ret;
 }
@@ -311,6 +312,79 @@ static const struct file_operations regmap_range_fops = {
 	.llseek = default_llseek,
 };
 
+static ssize_t regmap_reg_ranges_read_file(struct file *file,
+					   char __user *user_buf, size_t count,
+					   loff_t *ppos)
+{
+	struct regmap *map = file->private_data;
+	struct regmap_debugfs_off_cache *c;
+	loff_t p = 0;
+	size_t buf_pos = 0;
+	char *buf;
+	char *entry;
+	int ret;
+
+	if (*ppos < 0 || !count)
+		return -EINVAL;
+
+	buf = kmalloc(count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!entry) {
+		kfree(buf);
+		return -ENOMEM;
+	}
+
+	/* While we are at it, build the register dump cache
+	 * now so the read() operation on the `registers' file
+	 * can benefit from using the cache. We do not care
+	 * about the file position information that is contained
+	 * in the cache, just about the actual register blocks */
+	regmap_calc_tot_len(map, buf, count);
+	regmap_debugfs_get_dump_start(map, 0, *ppos, &p);
+
+	/* Reset file pointer as the fixed-format of the `registers'
+	 * file is not compatible with the `range' file */
+	p = 0;
+	mutex_lock(&map->cache_lock);
+	list_for_each_entry(c, &map->debugfs_off_cache, list) {
+		snprintf(entry, PAGE_SIZE, "%x-%x",
+			 c->base_reg, c->max_reg);
+		if (p >= *ppos) {
+			if (buf_pos + 1 + strlen(entry) > count)
+				break;
+			snprintf(buf + buf_pos, count - buf_pos,
+				 "%s", entry);
+			buf_pos += strlen(entry);
+			buf[buf_pos] = '\n';
+			buf_pos++;
+		}
+		p += strlen(entry) + 1;
+	}
+	mutex_unlock(&map->cache_lock);
+
+	kfree(entry);
+	ret = buf_pos;
+
+	if (copy_to_user(user_buf, buf, buf_pos)) {
+		ret = -EFAULT;
+		goto out_buf;
+	}
+
+	*ppos += buf_pos;
+out_buf:
+	kfree(buf);
+	return ret;
+}
+
+static const struct file_operations regmap_reg_ranges_fops = {
+	.open = simple_open,
+	.read = regmap_reg_ranges_read_file,
+	.llseek = default_llseek,
+};
+
 static ssize_t regmap_access_read_file(struct file *file,
 				       char __user *user_buf, size_t count,
 				       loff_t *ppos)
@@ -385,6 +459,7 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
 	struct regmap_range_node *range_node;
 
 	INIT_LIST_HEAD(&map->debugfs_off_cache);
+	mutex_init(&map->cache_lock);
 
 	if (name) {
 		map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
@@ -403,6 +478,9 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
 	debugfs_create_file("name", 0400, map->debugfs,
 			    map, &regmap_name_fops);
 
+	debugfs_create_file("range", 0400, map->debugfs,
+			    map, &regmap_reg_ranges_fops);
+
 	if (map->max_register) {
 		debugfs_create_file("registers", 0400, map->debugfs,
 				    map, &regmap_map_fops);
@@ -435,7 +513,9 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
 void regmap_debugfs_exit(struct regmap *map)
 {
 	debugfs_remove_recursive(map->debugfs);
+	mutex_lock(&map->cache_lock);
 	regmap_debugfs_free_dump_cache(map);
+	mutex_unlock(&map->cache_lock);
 	kfree(map->debugfs_name);
 }
 
@@ -460,7 +460,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
 	ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
 				   chip->name, d);
 	if (ret != 0) {
-		dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret);
+		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
+			irq, chip->name, ret);
 		goto err_domain;
 	}
 
@@ -228,30 +228,39 @@ static void regmap_format_32_native(void *buf, unsigned int val,
 	*(u32 *)buf = val << shift;
 }
 
-static unsigned int regmap_parse_8(void *buf)
+static void regmap_parse_inplace_noop(void *buf)
 {
-	u8 *b = buf;
+}
+
+static unsigned int regmap_parse_8(const void *buf)
+{
+	const u8 *b = buf;
 
 	return b[0];
 }
 
-static unsigned int regmap_parse_16_be(void *buf)
+static unsigned int regmap_parse_16_be(const void *buf)
 {
+	const __be16 *b = buf;
+
+	return be16_to_cpu(b[0]);
+}
+
+static void regmap_parse_16_be_inplace(void *buf)
+{
 	__be16 *b = buf;
 
 	b[0] = be16_to_cpu(b[0]);
-
-	return b[0];
 }
 
-static unsigned int regmap_parse_16_native(void *buf)
+static unsigned int regmap_parse_16_native(const void *buf)
 {
 	return *(u16 *)buf;
 }
 
-static unsigned int regmap_parse_24(void *buf)
+static unsigned int regmap_parse_24(const void *buf)
 {
-	u8 *b = buf;
+	const u8 *b = buf;
 	unsigned int ret = b[2];
 	ret |= ((unsigned int)b[1]) << 8;
 	ret |= ((unsigned int)b[0]) << 16;
@@ -259,16 +268,21 @@ static unsigned int regmap_parse_24(void *buf)
 	return ret;
 }
 
-static unsigned int regmap_parse_32_be(void *buf)
+static unsigned int regmap_parse_32_be(const void *buf)
 {
+	const __be32 *b = buf;
+
+	return be32_to_cpu(b[0]);
+}
+
+static void regmap_parse_32_be_inplace(void *buf)
+{
 	__be32 *b = buf;
 
 	b[0] = be32_to_cpu(b[0]);
-
-	return b[0];
 }
 
-static unsigned int regmap_parse_32_native(void *buf)
+static unsigned int regmap_parse_32_native(const void *buf)
 {
 	return *(u32 *)buf;
 }
@@ -555,16 +569,21 @@ struct regmap *regmap_init(struct device *dev,
 		goto err_map;
 	}
 
+	if (val_endian == REGMAP_ENDIAN_NATIVE)
+		map->format.parse_inplace = regmap_parse_inplace_noop;
+
 	switch (config->val_bits) {
 	case 8:
 		map->format.format_val = regmap_format_8;
 		map->format.parse_val = regmap_parse_8;
+		map->format.parse_inplace = regmap_parse_inplace_noop;
 		break;
 	case 16:
 		switch (val_endian) {
 		case REGMAP_ENDIAN_BIG:
 			map->format.format_val = regmap_format_16_be;
 			map->format.parse_val = regmap_parse_16_be;
+			map->format.parse_inplace = regmap_parse_16_be_inplace;
 			break;
 		case REGMAP_ENDIAN_NATIVE:
 			map->format.format_val = regmap_format_16_native;
@@ -585,6 +604,7 @@ struct regmap *regmap_init(struct device *dev,
 		case REGMAP_ENDIAN_BIG:
 			map->format.format_val = regmap_format_32_be;
 			map->format.parse_val = regmap_parse_32_be;
+			map->format.parse_inplace = regmap_parse_32_be_inplace;
 			break;
 		case REGMAP_ENDIAN_NATIVE:
 			map->format.format_val = regmap_format_32_native;
@@ -917,7 +937,7 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
 	return 0;
 }
 
-static int _regmap_raw_write(struct regmap *map, unsigned int reg,
+int _regmap_raw_write(struct regmap *map, unsigned int reg,
 			     const void *val, size_t val_len, bool async)
 {
 	struct regmap_range_node *range;
@@ -930,7 +950,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
 	size_t len;
 	int i;
 
-	BUG_ON(!map->bus);
+	WARN_ON(!map->bus);
 
 	/* Check for unwritable registers before we start */
 	if (map->writeable_reg)
@@ -943,8 +963,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
 		unsigned int ival;
 		int val_bytes = map->format.val_bytes;
 		for (i = 0; i < val_len / val_bytes; i++) {
-			memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
-			ival = map->format.parse_val(map->work_buf);
+			ival = map->format.parse_val(val + (i * val_bytes));
 			ret = regcache_write(map, reg + (i * map->reg_stride),
 					     ival);
 			if (ret) {
@@ -999,6 +1018,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
 		if (!async)
 			return -ENOMEM;
 
+		trace_regmap_async_write_start(map->dev, reg, val_len);
+
 		async->work_buf = kzalloc(map->format.buf_size,
 					  GFP_KERNEL | GFP_DMA);
 		if (!async->work_buf) {
@@ -1079,6 +1100,17 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
 	return ret;
 }
 
+/**
+ * regmap_can_raw_write - Test if regmap_raw_write() is supported
+ *
+ * @map: Map to check.
+ */
+bool regmap_can_raw_write(struct regmap *map)
+{
+	return map->bus && map->format.format_val && map->format.format_reg;
+}
+EXPORT_SYMBOL_GPL(regmap_can_raw_write);
+
 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 				       unsigned int val)
 {
@@ -1086,7 +1118,7 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 	struct regmap_range_node *range;
 	struct regmap *map = context;
 
-	BUG_ON(!map->bus || !map->format.format_write);
+	WARN_ON(!map->bus || !map->format.format_write);
 
 	range = _regmap_range_lookup(map, reg);
 	if (range) {
@@ -1112,7 +1144,7 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
 {
 	struct regmap *map = context;
 
-	BUG_ON(!map->bus || !map->format.format_val);
+	WARN_ON(!map->bus || !map->format.format_val);
 
 	map->format.format_val(map->work_buf + map->format.reg_bytes
 			       + map->format.pad_bytes, val, 0);
@@ -1202,12 +1234,10 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
 {
 	int ret;
 
-	if (!map->bus)
+	if (!regmap_can_raw_write(map))
 		return -EINVAL;
 	if (val_len % map->format.val_bytes)
 		return -EINVAL;
-	if (reg % map->reg_stride)
-		return -EINVAL;
 
 	map->lock(map->lock_arg);
 
@@ -1242,7 +1272,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
 
 	if (!map->bus)
 		return -EINVAL;
-	if (!map->format.parse_val)
+	if (!map->format.parse_inplace)
 		return -EINVAL;
 	if (reg % map->reg_stride)
 		return -EINVAL;
@@ -1260,7 +1290,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
 			goto out;
 		}
 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
-			map->format.parse_val(wval + i);
+			map->format.parse_inplace(wval + i);
 	}
 	/*
 	 * Some devices does not support bulk write, for
@@ -1338,7 +1368,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 	u8 *u8 = map->work_buf;
 	int ret;
 
-	BUG_ON(!map->bus);
+	WARN_ON(!map->bus);
 
 	range = _regmap_range_lookup(map, reg);
 	if (range) {
@@ -1393,7 +1423,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
 	int ret;
 	void *context = _regmap_map_get_context(map);
 
-	BUG_ON(!map->reg_read);
+	WARN_ON(!map->reg_read);
 
 	if (!map->cache_bypass) {
 		ret = regcache_read(map, reg, val);
@@ -1521,7 +1551,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
 
 	if (!map->bus)
 		return -EINVAL;
-	if (!map->format.parse_val)
+	if (!map->format.parse_inplace)
 		return -EINVAL;
 	if (reg % map->reg_stride)
 		return -EINVAL;
@@ -1548,7 +1578,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
 		}
 
 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
-			map->format.parse_val(val + i);
+			map->format.parse_inplace(val + i);
 	} else {
 		for (i = 0; i < val_count; i++) {
 			unsigned int ival;
@@ -1642,6 +1672,8 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
 	struct regmap *map = async->map;
 	bool wake;
 
+	trace_regmap_async_io_complete(map->dev);
+
 	spin_lock(&map->async_lock);
 
 	list_del(&async->list);
@@ -1688,6 +1720,8 @@ int regmap_async_complete(struct regmap *map)
 	if (!map->bus->async_write)
 		return 0;
 
+	trace_regmap_async_complete_start(map->dev);
+
 	wait_event(map->async_waitq, regmap_async_is_done(map));
 
 	spin_lock_irqsave(&map->async_lock, flags);
@@ -1695,6 +1729,8 @@ int regmap_async_complete(struct regmap *map)
 	map->async_ret = 0;
 	spin_unlock_irqrestore(&map->async_lock, flags);
 
+	trace_regmap_async_complete_done(map->dev);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(regmap_async_complete);
@@ -389,6 +389,7 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
 			     bool *change);
 int regmap_get_val_bytes(struct regmap *map);
 int regmap_async_complete(struct regmap *map);
+bool regmap_can_raw_write(struct regmap *map);
 
 int regcache_sync(struct regmap *map);
 int regcache_sync_region(struct regmap *map, unsigned int min,
@@ -175,6 +175,54 @@ DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
 
 );
 
+DECLARE_EVENT_CLASS(regmap_async,
+
+	TP_PROTO(struct device *dev),
+
+	TP_ARGS(dev),
+
+	TP_STRUCT__entry(
+		__string( name, dev_name(dev) )
+	),
+
+	TP_fast_assign(
+		__assign_str(name, dev_name(dev));
+	),
+
+	TP_printk("%s", __get_str(name))
+);
+
+DEFINE_EVENT(regmap_block, regmap_async_write_start,
+
+	TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+	TP_ARGS(dev, reg, count)
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_io_complete,
+
+	TP_PROTO(struct device *dev),
+
+	TP_ARGS(dev)
+
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_complete_start,
+
+	TP_PROTO(struct device *dev),
+
+	TP_ARGS(dev)
+
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_complete_done,
+
+	TP_PROTO(struct device *dev),
+
+	TP_ARGS(dev)
+
+);
+
 #endif /* _TRACE_REGMAP_H */
 
 /* This part must be outside protection */