Merge remote-tracking branch 'regmap/topic/bulk' into regmap-next

commit 493ea0c8a6
Author: Mark Brown
Date:   2018-03-12 09:50:40 -07:00

drivers/base/regmap/regmap.c

@@ -1438,8 +1438,8 @@ static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
 		buf[i] |= (mask >> (8 * i)) & 0xff;
 }
 
-int _regmap_raw_write(struct regmap *map, unsigned int reg,
-		      const void *val, size_t val_len)
+static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
+				  const void *val, size_t val_len)
 {
 	struct regmap_range_node *range;
 	unsigned long flags;
@@ -1490,8 +1490,9 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 		while (val_num > win_residue) {
 			dev_dbg(map->dev, "Writing window %d/%zu\n",
 				win_residue, val_len / map->format.val_bytes);
-			ret = _regmap_raw_write(map, reg, val, win_residue *
-						map->format.val_bytes);
+			ret = _regmap_raw_write_impl(map, reg, val,
+						     win_residue *
+						     map->format.val_bytes);
 			if (ret != 0)
 				return ret;
@@ -1707,11 +1708,11 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
 	map->format.format_val(map->work_buf + map->format.reg_bytes
 			       + map->format.pad_bytes, val, 0);
-	return _regmap_raw_write(map, reg,
-				 map->work_buf +
-				 map->format.reg_bytes +
-				 map->format.pad_bytes,
-				 map->format.val_bytes);
+	return _regmap_raw_write_impl(map, reg,
+				      map->work_buf +
+				      map->format.reg_bytes +
+				      map->format.pad_bytes,
+				      map->format.val_bytes);
 }
 
 static inline void *_regmap_map_get_context(struct regmap *map)
@@ -1806,6 +1807,44 @@ int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
 }
 EXPORT_SYMBOL_GPL(regmap_write_async);
 
+int _regmap_raw_write(struct regmap *map, unsigned int reg,
+		      const void *val, size_t val_len)
+{
+	size_t val_bytes = map->format.val_bytes;
+	size_t val_count = val_len / val_bytes;
+	size_t chunk_count, chunk_bytes;
+	size_t chunk_regs = val_count;
+	int ret, i;
+
+	if (!val_count)
+		return -EINVAL;
+
+	if (map->use_single_write)
+		chunk_regs = 1;
+	else if (map->max_raw_write && val_len > map->max_raw_write)
+		chunk_regs = map->max_raw_write / val_bytes;
+
+	chunk_count = val_count / chunk_regs;
+	chunk_bytes = chunk_regs * val_bytes;
+
+	/* Write as many bytes as possible with chunk_size */
+	for (i = 0; i < chunk_count; i++) {
+		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
+		if (ret)
+			return ret;
+
+		reg += regmap_get_offset(map, chunk_regs);
+		val += chunk_bytes;
+		val_len -= chunk_bytes;
+	}
+
+	/* Write remaining bytes */
+	if (val_len)
+		ret = _regmap_raw_write_impl(map, reg, val, val_len);
+
+	return ret;
+}
+
 /**
  * regmap_raw_write() - Write raw values to one or more registers
  *
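To make the new chunking arithmetic concrete, here is a minimal userspace model of the function added above. The device parameters are hypothetical (2-byte values, a 6-byte max_raw_write, register stride 1), chosen so that a 16-byte write splits into two full 6-byte chunks plus a 4-byte remainder:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	/* Hypothetical map parameters, invented for illustration */
	size_t val_bytes = 2;        /* map->format.val_bytes */
	size_t max_raw_write = 6;    /* map->max_raw_write */
	size_t val_len = 16;         /* eight 16-bit registers */
	unsigned int reg = 0x10;

	size_t val_count = val_len / val_bytes;
	size_t chunk_regs = val_count;

	if (max_raw_write && val_len > max_raw_write)
		chunk_regs = max_raw_write / val_bytes;

	size_t chunk_count = val_count / chunk_regs;
	size_t chunk_bytes = chunk_regs * val_bytes;

	/* Write as many bytes as possible in whole chunks */
	for (size_t i = 0; i < chunk_count; i++) {
		printf("raw write: reg 0x%02x, %zu bytes\n", reg, chunk_bytes);
		reg += chunk_regs;   /* regmap_get_offset() with stride 1 */
		val_len -= chunk_bytes;
	}

	/* Write the remaining bytes, if any */
	if (val_len)
		printf("raw write: reg 0x%02x, %zu bytes\n", reg, val_len);

	return 0;
}

With these numbers the model prints two 6-byte writes at 0x10 and 0x13, then a 4-byte remainder at 0x16, mirroring chunk_regs = 3, chunk_count = 2 in the kernel code.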
@@ -1831,8 +1870,6 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
 		return -EINVAL;
 	if (val_len % map->format.val_bytes)
 		return -EINVAL;
-	if (map->max_raw_write && map->max_raw_write < val_len)
-		return -E2BIG;
 
 	map->lock(map->lock_arg);
@@ -1923,23 +1960,15 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
 {
 	int ret = 0, i;
 	size_t val_bytes = map->format.val_bytes;
-	size_t total_size = val_bytes * val_count;
 
 	if (!IS_ALIGNED(reg, map->reg_stride))
 		return -EINVAL;
 
 	/*
-	 * Some devices don't support bulk write, for
-	 * them we have a series of single write operations in the first two if
-	 * blocks.
-	 *
-	 * The first if block is used for memory mapped io. It does not allow
-	 * val_bytes of 3 for example.
-	 * The second one is for busses that do not provide raw I/O.
-	 * The third one is used for busses which do not have these limitations
-	 * and can write arbitrary value lengths.
+	 * Some devices don't support bulk write, for them we have a series of
+	 * single write operations.
 	 */
-	if (!map->bus) {
+	if (!map->bus || !map->format.parse_inplace) {
 		map->lock(map->lock_arg);
 		for (i = 0; i < val_count; i++) {
 			unsigned int ival;
@@ -1972,81 +2001,17 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
 		}
 out:
 		map->unlock(map->lock_arg);
-	} else if (map->bus && !map->format.parse_inplace) {
-		const u8 *u8 = val;
-		const u16 *u16 = val;
-		const u32 *u32 = val;
-		unsigned int ival;
-
-		for (i = 0; i < val_count; i++) {
-			switch (map->format.val_bytes) {
-			case 4:
-				ival = u32[i];
-				break;
-			case 2:
-				ival = u16[i];
-				break;
-			case 1:
-				ival = u8[i];
-				break;
-			default:
-				return -EINVAL;
-			}
-
-			ret = regmap_write(map, reg + (i * map->reg_stride),
-					   ival);
-			if (ret)
-				return ret;
-		}
-	} else if (map->use_single_write ||
-		   (map->max_raw_write && map->max_raw_write < total_size)) {
-		int chunk_stride = map->reg_stride;
-		size_t chunk_size = val_bytes;
-		size_t chunk_count = val_count;
-
-		if (!map->use_single_write) {
-			chunk_size = map->max_raw_write;
-			if (chunk_size % val_bytes)
-				chunk_size -= chunk_size % val_bytes;
-			chunk_count = total_size / chunk_size;
-			chunk_stride *= chunk_size / val_bytes;
-		}
-
-		map->lock(map->lock_arg);
-		/* Write as many bytes as possible with chunk_size */
-		for (i = 0; i < chunk_count; i++) {
-			ret = _regmap_raw_write(map,
-						reg + (i * chunk_stride),
-						val + (i * chunk_size),
-						chunk_size);
-			if (ret)
-				break;
-		}
-
-		/* Write remaining bytes */
-		if (!ret && chunk_size * i < total_size) {
-			ret = _regmap_raw_write(map, reg + (i * chunk_stride),
-						val + (i * chunk_size),
-						total_size - i * chunk_size);
-		}
-		map->unlock(map->lock_arg);
 	} else {
 		void *wval;
 
-		if (!val_count)
-			return -EINVAL;
-
 		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
-		if (!wval) {
-			dev_err(map->dev, "Error in memory allocation\n");
+		if (!wval)
 			return -ENOMEM;
-		}
+
 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
 			map->format.parse_inplace(wval + i);
 
-		map->lock(map->lock_arg);
-		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
-		map->unlock(map->lock_arg);
+		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);
 
 		kfree(wval);
 	}
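With the splitting centralized in _regmap_raw_write(), a driver-side bulk write no longer needs to care about max_raw_write: regmap_bulk_write() formats the buffer in place and hands the whole transfer to regmap_raw_write(), which chunks it internally. A sketch under that assumption; the register base, helper name and coefficient count are invented:

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Hypothetical helper: EXAMPLE_COEFF_BASE and the buffer contents are
 * made up for illustration. The values are native endian, as
 * regmap_bulk_write() expects.
 */
#define EXAMPLE_COEFF_BASE 0x40

static int example_load_coeffs(struct regmap *map, const u16 *coeffs,
			       size_t count)
{
	/* One call suffices even if count * 2 exceeds the bus's
	 * max_raw_write; the core now splits the transfer inside
	 * _regmap_raw_write() instead of in regmap_bulk_write().
	 */
	return regmap_bulk_write(map, EXAMPLE_COEFF_BASE, coeffs, count);
}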
@@ -2542,18 +2507,39 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
 	    map->cache_type == REGCACHE_NONE) {
+		size_t chunk_count, chunk_bytes;
+		size_t chunk_regs = val_count;
+
 		if (!map->bus->read) {
 			ret = -ENOTSUPP;
 			goto out;
 		}
-		if (map->max_raw_read && map->max_raw_read < val_len) {
-			ret = -E2BIG;
-			goto out;
+
+		if (map->use_single_read)
+			chunk_regs = 1;
+		else if (map->max_raw_read && val_len > map->max_raw_read)
+			chunk_regs = map->max_raw_read / val_bytes;
+
+		chunk_count = val_count / chunk_regs;
+		chunk_bytes = chunk_regs * val_bytes;
+
+		/* Read bytes that fit into whole chunks */
+		for (i = 0; i < chunk_count; i++) {
+			ret = _regmap_raw_read(map, reg, val, chunk_bytes);
+			if (ret != 0)
+				goto out;
+
+			reg += regmap_get_offset(map, chunk_regs);
+			val += chunk_bytes;
+			val_len -= chunk_bytes;
 		}
 
-		/* Physical block read if there's no cache involved */
-		ret = _regmap_raw_read(map, reg, val, val_len);
+		/* Read remaining bytes */
+		if (val_len) {
+			ret = _regmap_raw_read(map, reg, val, val_len);
+			if (ret != 0)
+				goto out;
+		}
 	} else {
 		/* Otherwise go word by word for the cache; should be low
 		 * cost as we expect to hit the cache.
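The read path gets the same treatment: a raw read larger than max_raw_read is now chunked instead of rejected with -E2BIG. A hedged sketch, assuming a hypothetical device with 4-byte values and max_raw_read = 8, so the invented 12-byte read below goes out as one 8-byte chunk plus a 4-byte remainder:

#include <linux/regmap.h>

/* Hypothetical: register 0x20 and the 12-byte length are invented.
 * Before this series the same call failed with -E2BIG whenever
 * val_len exceeded map->max_raw_read.
 */
static int example_read_fifo(struct regmap *map, void *buf)
{
	return regmap_raw_read(map, 0x20, buf, 12);
}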
@@ -2653,108 +2639,60 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
 	if (!IS_ALIGNED(reg, map->reg_stride))
 		return -EINVAL;
+	if (val_count == 0)
+		return -EINVAL;
 
 	if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
-		/*
-		 * Some devices does not support bulk read, for
-		 * them we have a series of single read operations.
-		 */
-		size_t total_size = val_bytes * val_count;
-
-		if (!map->use_single_read &&
-		    (!map->max_raw_read || map->max_raw_read > total_size)) {
-			ret = regmap_raw_read(map, reg, val,
-					      val_bytes * val_count);
-			if (ret != 0)
-				return ret;
-		} else {
-			/*
-			 * Some devices do not support bulk read or do not
-			 * support large bulk reads, for them we have a series
-			 * of read operations.
-			 */
-			int chunk_stride = map->reg_stride;
-			size_t chunk_size = val_bytes;
-			size_t chunk_count = val_count;
-
-			if (!map->use_single_read) {
-				chunk_size = map->max_raw_read;
-				if (chunk_size % val_bytes)
-					chunk_size -= chunk_size % val_bytes;
-				chunk_count = total_size / chunk_size;
-				chunk_stride *= chunk_size / val_bytes;
-			}
-
-			/* Read bytes that fit into a multiple of chunk_size */
-			for (i = 0; i < chunk_count; i++) {
-				ret = regmap_raw_read(map,
-						      reg + (i * chunk_stride),
-						      val + (i * chunk_size),
-						      chunk_size);
-				if (ret != 0)
-					return ret;
-			}
-
-			/* Read remaining bytes */
-			if (chunk_size * i < total_size) {
-				ret = regmap_raw_read(map,
-						      reg + (i * chunk_stride),
-						      val + (i * chunk_size),
-						      total_size - i * chunk_size);
-				if (ret != 0)
-					return ret;
-			}
-		}
+		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
+		if (ret != 0)
+			return ret;
 
 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
 			map->format.parse_inplace(val + i);
 	} else {
+#ifdef CONFIG_64BIT
+		u64 *u64 = val;
+#endif
+		u32 *u32 = val;
+		u16 *u16 = val;
+		u8 *u8 = val;
+
+		map->lock(map->lock_arg);
+
 		for (i = 0; i < val_count; i++) {
 			unsigned int ival;
-			ret = regmap_read(map, reg + regmap_get_offset(map, i),
-					  &ival);
+
+			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
+					   &ival);
 			if (ret != 0)
-				return ret;
+				goto out;
 
-			if (map->format.format_val) {
-				map->format.format_val(val + (i * val_bytes), ival, 0);
-			} else {
-				/* Devices providing read and write
-				 * operations can use the bulk I/O
-				 * functions if they define a val_bytes,
-				 * we assume that the values are native
-				 * endian.
-				 */
-#ifdef CONFIG_64BIT
-				u64 *u64 = val;
-#endif
-				u32 *u32 = val;
-				u16 *u16 = val;
-				u8 *u8 = val;
-
-				switch (map->format.val_bytes) {
-#ifdef CONFIG_64BIT
-				case 8:
-					u64[i] = ival;
-					break;
-#endif
-				case 4:
-					u32[i] = ival;
-					break;
-				case 2:
-					u16[i] = ival;
-					break;
-				case 1:
-					u8[i] = ival;
-					break;
-				default:
-					return -EINVAL;
-				}
-			}
+			switch (map->format.val_bytes) {
+#ifdef CONFIG_64BIT
+			case 8:
+				u64[i] = ival;
+				break;
+#endif
+			case 4:
+				u32[i] = ival;
+				break;
+			case 2:
+				u16[i] = ival;
+				break;
+			case 1:
+				u8[i] = ival;
+				break;
+			default:
+				ret = -EINVAL;
+				goto out;
+			}
 		}
+
+out:
+		map->unlock(map->lock_arg);
 	}
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(regmap_bulk_read);
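In the non-raw branch above, the loop now runs under a single map->lock and scatters each register value into the caller's buffer in native endianness. A minimal userspace model of that switch on val_bytes, with invented register values:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Userspace model of the native-endian scatter in regmap_bulk_read():
 * one unsigned int per register is narrowed into the caller's buffer
 * according to val_bytes. The register contents are invented.
 */
static int scatter(void *val, size_t val_bytes, size_t i, unsigned int ival)
{
	switch (val_bytes) {
	case 4:
		((uint32_t *)val)[i] = ival;
		break;
	case 2:
		((uint16_t *)val)[i] = ival;
		break;
	case 1:
		((uint8_t *)val)[i] = ival;
		break;
	default:
		return -1;	/* mirrors the -EINVAL case */
	}
	return 0;
}

int main(void)
{
	uint16_t buf[4];
	unsigned int fake_regs[4] = { 0x1234, 0x5678, 0x9abc, 0xdef0 };

	for (size_t i = 0; i < 4; i++)
		scatter(buf, sizeof(buf[0]), i, fake_regs[i]);

	for (size_t i = 0; i < 4; i++)
		printf("buf[%zu] = 0x%04x\n", i, (unsigned int)buf[i]);

	return 0;
}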