Merge remote-tracking branch 'regmap/topic/cache' into regmap-next

Mark Brown 2013-06-30 12:40:01 +01:00
commit feff98f550
7 changed files with 180 additions and 27 deletions

View File

@@ -52,6 +52,7 @@ struct regmap_async {
struct regmap {
struct mutex mutex;
spinlock_t spinlock;
unsigned long spinlock_flags;
regmap_lock lock;
regmap_unlock unlock;
void *lock_arg; /* This is passed to lock/unlock functions */
@@ -148,6 +149,7 @@ struct regcache_ops {
int (*read)(struct regmap *map, unsigned int reg, unsigned int *value);
int (*write)(struct regmap *map, unsigned int reg, unsigned int value);
int (*sync)(struct regmap *map, unsigned int min, unsigned int max);
int (*drop)(struct regmap *map, unsigned int min, unsigned int max);
};
bool regmap_writeable(struct regmap *map, unsigned int reg);
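The regcache_ops table gains an optional ->drop callback alongside ->sync; regcache_drop_region() further down only invokes it when a backend supplies one. A minimal sketch of a backend wiring it up follows; the backend name and the callback body are invented for illustration, only the struct regcache_ops layout and the callback signature come from this change.

#include <linux/regmap.h>
#include "internal.h"

static int example_cache_drop(struct regmap *map, unsigned int min,
			      unsigned int max)
{
	/* A real backend would free or invalidate its storage for every
	 * register in [min, max] here.
	 */
	return 0;
}

static struct regcache_ops example_cache_ops = {
	.type = REGCACHE_NONE,	/* placeholder type for this sketch */
	.name = "example",
	.drop = example_cache_drop,
};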

View File

@@ -304,6 +304,48 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
return 0;
}
static struct regcache_rbtree_node *
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
{
struct regcache_rbtree_node *rbnode;
const struct regmap_range *range;
int i;
rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
if (!rbnode)
return NULL;
/* If there is a read table then use it to guess at an allocation */
if (map->rd_table) {
for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
if (regmap_reg_in_range(reg,
&map->rd_table->yes_ranges[i]))
break;
}
if (i != map->rd_table->n_yes_ranges) {
range = &map->rd_table->yes_ranges[i];
rbnode->blklen = range->range_max - range->range_min
+ 1;
rbnode->base_reg = range->range_min;
}
}
if (!rbnode->blklen) {
rbnode->blklen = sizeof(*rbnode);
rbnode->base_reg = reg;
}
rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
GFP_KERNEL);
if (!rbnode->block) {
kfree(rbnode);
return NULL;
}
return rbnode;
}
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
unsigned int value)
{
@@ -354,23 +396,15 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
return 0;
}
}
/* we did not manage to find a place to insert it in an existing
* block so create a new rbnode with a single register in its block.
* This block will get populated further if any other adjacent
* registers get modified in the future.
*/
/* We did not manage to find a place to insert it in
* an existing block so create a new rbnode.
*/
rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
rbnode = regcache_rbtree_node_alloc(map, reg);
if (!rbnode)
return -ENOMEM;
rbnode->blklen = sizeof(*rbnode);
rbnode->base_reg = reg;
rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
GFP_KERNEL);
if (!rbnode->block) {
kfree(rbnode);
return -ENOMEM;
}
regcache_rbtree_set_register(map, rbnode, 0, value);
regcache_rbtree_set_register(map, rbnode,
reg - rbnode->base_reg, value);
regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
rbtree_ctx->cached_rbnode = rbnode;
}
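regcache_rbtree_node_alloc() uses the read table, when one is registered, to size the new block so a whole contiguous readable range is covered up front instead of growing the block one register at a time. A rough illustration of the kind of table that benefits (register numbers invented):

#include <linux/kernel.h>
#include <linux/regmap.h>

/* With this read table, the first cached write to any register in
 * 0x10..0x1f allocates one 16-register block spanning the whole range,
 * so later writes to neighbouring registers land in the same block
 * rather than forcing a reallocation.
 */
static const struct regmap_range example_rd_ranges[] = {
	regmap_reg_range(0x10, 0x1f),
};

static const struct regmap_access_table example_rd_table = {
	.yes_ranges	= example_rd_ranges,
	.n_yes_ranges	= ARRAY_SIZE(example_rd_ranges),
};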

View File

@@ -250,6 +250,38 @@ int regcache_write(struct regmap *map,
return 0;
}
static int regcache_default_sync(struct regmap *map, unsigned int min,
unsigned int max)
{
unsigned int reg;
for (reg = min; reg <= max; reg++) {
unsigned int val;
int ret;
if (regmap_volatile(map, reg))
continue;
ret = regcache_read(map, reg, &val);
if (ret)
return ret;
/* Is this the hardware default? If so skip. */
ret = regcache_lookup_reg(map, reg);
if (ret >= 0 && val == map->reg_defaults[ret].def)
continue;
map->cache_bypass = 1;
ret = _regmap_write(map, reg, val);
map->cache_bypass = 0;
if (ret)
return ret;
dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
}
return 0;
}
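regcache_default_sync() walks the region register by register, skipping volatile registers and registers still at their hardware default, and writes the rest back with the cache bypassed. It is the fallback used below when a cache backend provides no ->sync operation, so from a driver's point of view the usual resume-time pattern is unchanged; a sketch, with the device specifics invented:

#include <linux/device.h>
#include <linux/regmap.h>

static int example_dev_resume(struct device *dev)
{
	struct regmap *map = dev_get_regmap(dev, NULL);

	if (!map)
		return -ENODEV;

	/* Power is back: leave cache-only mode and let regcache_sync()
	 * write back every cached register that differs from its
	 * default, via the backend's ->sync op or the new default path.
	 */
	regcache_cache_only(map, false);
	return regcache_sync(map);
}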
/**
* regcache_sync: Sync the register cache with the hardware.
*
@@ -268,7 +300,7 @@ int regcache_sync(struct regmap *map)
const char *name;
unsigned int bypass;
BUG_ON(!map->cache_ops || !map->cache_ops->sync);
BUG_ON(!map->cache_ops);
map->lock(map->lock_arg);
/* Remember the initial bypass state */
@@ -297,7 +329,10 @@ int regcache_sync(struct regmap *map)
}
map->cache_bypass = 0;
ret = map->cache_ops->sync(map, 0, map->max_register);
if (map->cache_ops->sync)
ret = map->cache_ops->sync(map, 0, map->max_register);
else
ret = regcache_default_sync(map, 0, map->max_register);
if (ret == 0)
map->cache_dirty = false;
@@ -331,7 +366,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
const char *name;
unsigned int bypass;
BUG_ON(!map->cache_ops || !map->cache_ops->sync);
BUG_ON(!map->cache_ops);
map->lock(map->lock_arg);
@@ -346,7 +381,10 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
if (!map->cache_dirty)
goto out;
ret = map->cache_ops->sync(map, min, max);
if (map->cache_ops->sync)
ret = map->cache_ops->sync(map, min, max);
else
ret = regcache_default_sync(map, min, max);
out:
trace_regcache_sync(map->dev, name, "stop region");
@@ -358,6 +396,43 @@ out:
}
EXPORT_SYMBOL_GPL(regcache_sync_region);
/**
* regcache_drop_region: Discard part of the register cache
*
* @map: map to operate on
* @min: first register to discard
* @max: last register to discard
*
* Discard part of the register cache.
*
* Return a negative value on failure, 0 on success.
*/
int regcache_drop_region(struct regmap *map, unsigned int min,
unsigned int max)
{
unsigned int reg;
int ret = 0;
if (!map->cache_present && !(map->cache_ops && map->cache_ops->drop))
return -EINVAL;
map->lock(map->lock_arg);
trace_regcache_drop_region(map->dev, min, max);
if (map->cache_present)
for (reg = min; reg < max + 1; reg++)
clear_bit(reg, map->cache_present);
if (map->cache_ops && map->cache_ops->drop)
ret = map->cache_ops->drop(map, min, max);
map->unlock(map->lock_arg);
return ret;
}
EXPORT_SYMBOL_GPL(regcache_drop_region);
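A hedged usage sketch of the new API (the register range and the reset scenario are invented): a driver that knows a block of registers has just been reset in hardware can drop that region so a later regcache_sync() does not replay stale cached values.

#include <linux/regmap.h>

static int example_forget_reset_block(struct regmap *map)
{
	/* The hardware block covering 0x30..0x3f was reset, so the
	 * cached contents for that range are no longer meaningful.
	 */
	return regcache_drop_region(map, 0x30, 0x3f);
}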
/**
* regcache_cache_only: Put a register map into cache only mode
*

View File

@@ -84,6 +84,10 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
unsigned int fpos_offset;
unsigned int reg_offset;
/* Suppress the cache if we're using a subrange */
if (from)
return from;
/*
* If we don't have a cache build one so we don't have to do a
* linear scan each time.

View File

@@ -65,9 +65,8 @@ bool regmap_reg_in_ranges(unsigned int reg,
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
static bool _regmap_check_range_table(struct regmap *map,
unsigned int reg,
const struct regmap_access_table *table)
bool regmap_check_range_table(struct regmap *map, unsigned int reg,
const struct regmap_access_table *table)
{
/* Check "no ranges" first */
if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
@@ -80,6 +79,7 @@ static bool _regmap_check_range_table(struct regmap *map,
return regmap_reg_in_ranges(reg, table->yes_ranges,
table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);
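With the underscore dropped and the symbol exported (the declaration is added to include/linux/regmap.h below), code outside regmap.c can reuse an access table directly. One plausible use, sketched here with an invented table and register, is a driver callback that combines a table lookup with an extra condition:

#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct regmap_range example_volatile_ranges[] = {
	regmap_reg_range(0x00, 0x0f),
};

static const struct regmap_access_table example_volatile_table = {
	.yes_ranges	= example_volatile_ranges,
	.n_yes_ranges	= ARRAY_SIZE(example_volatile_ranges),
};

static bool example_volatile_reg(struct device *dev, unsigned int reg)
{
	struct regmap *map = dev_get_regmap(dev, NULL);

	/* One status register is always volatile regardless of the table. */
	if (reg == 0x7f)
		return true;

	return map && regmap_check_range_table(map, reg,
					       &example_volatile_table);
}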
bool regmap_writeable(struct regmap *map, unsigned int reg)
{
@@ -90,7 +90,7 @@ bool regmap_writeable(struct regmap *map, unsigned int reg)
return map->writeable_reg(map->dev, reg);
if (map->wr_table)
return _regmap_check_range_table(map, reg, map->wr_table);
return regmap_check_range_table(map, reg, map->wr_table);
return true;
}
@@ -107,7 +107,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg)
return map->readable_reg(map->dev, reg);
if (map->rd_table)
return _regmap_check_range_table(map, reg, map->rd_table);
return regmap_check_range_table(map, reg, map->rd_table);
return true;
}
@@ -121,7 +121,7 @@ bool regmap_volatile(struct regmap *map, unsigned int reg)
return map->volatile_reg(map->dev, reg);
if (map->volatile_table)
return _regmap_check_range_table(map, reg, map->volatile_table);
return regmap_check_range_table(map, reg, map->volatile_table);
return true;
}
@@ -135,7 +135,7 @@ bool regmap_precious(struct regmap *map, unsigned int reg)
return map->precious_reg(map->dev, reg);
if (map->precious_table)
return _regmap_check_range_table(map, reg, map->precious_table);
return regmap_check_range_table(map, reg, map->precious_table);
return false;
}
@@ -302,13 +302,16 @@ static void regmap_unlock_mutex(void *__map)
static void regmap_lock_spinlock(void *__map)
{
struct regmap *map = __map;
spin_lock(&map->spinlock);
unsigned long flags;
spin_lock_irqsave(&map->spinlock, flags);
map->spinlock_flags = flags;
}
static void regmap_unlock_spinlock(void *__map)
{
struct regmap *map = __map;
spin_unlock(&map->spinlock);
spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}
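Switching to spin_lock_irqsave() and stashing the flags in the map makes the spinlock-based locking safe to take from hard interrupt context, which is the point of fast_io regmaps. A sketch of a config that ends up on this locking path (field values invented):

#include <linux/regmap.h>

/* With fast_io set and no custom lock callbacks, regmap_init() installs
 * regmap_lock_spinlock()/regmap_unlock_spinlock() as map->lock/unlock,
 * so every regmap call on this map now disables interrupts around the
 * critical section.
 */
static const struct regmap_config example_fast_config = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.fast_io	= true,
	.max_register	= 0x7f,
};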
static void dev_get_regmap_release(struct device *dev, void *res)

View File

@@ -394,10 +394,15 @@ bool regmap_can_raw_write(struct regmap *map);
int regcache_sync(struct regmap *map);
int regcache_sync_region(struct regmap *map, unsigned int min,
unsigned int max);
int regcache_drop_region(struct regmap *map, unsigned int min,
unsigned int max);
void regcache_cache_only(struct regmap *map, bool enable);
void regcache_cache_bypass(struct regmap *map, bool enable);
void regcache_mark_dirty(struct regmap *map);
bool regmap_check_range_table(struct regmap *map, unsigned int reg,
const struct regmap_access_table *table);
int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
int num_regs);
@@ -562,6 +567,13 @@ static inline int regcache_sync_region(struct regmap *map, unsigned int min,
return -EINVAL;
}
static inline int regcache_drop_region(struct regmap *map, unsigned int min,
unsigned int max)
{
WARN_ONCE(1, "regmap API is disabled");
return -EINVAL;
}
static inline void regcache_cache_only(struct regmap *map, bool enable)
{
WARN_ONCE(1, "regmap API is disabled");

View File

@@ -223,6 +223,29 @@ DEFINE_EVENT(regmap_async, regmap_async_complete_done,
);
TRACE_EVENT(regcache_drop_region,
TP_PROTO(struct device *dev, unsigned int from,
unsigned int to),
TP_ARGS(dev, from, to),
TP_STRUCT__entry(
__string( name, dev_name(dev) )
__field( unsigned int, from )
__field( unsigned int, to )
),
TP_fast_assign(
__assign_str(name, dev_name(dev));
__entry->from = from;
__entry->to = to;
),
TP_printk("%s %u-%u", __get_str(name), (unsigned int)__entry->from,
(unsigned int)__entry->to)
);
#endif /* _TRACE_REGMAP_H */
/* This part must be outside protection */