From 9ae27a8d1f3ebff09191fb8cb1341414547293b2 Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Mon, 12 Feb 2018 18:15:46 +0000 Subject: [PATCH 01/10] regmap: Don't use format_val in regmap_bulk_read A bulk read can be implemented either through regmap_raw_read, or by reading each register individually using regmap_read. Both regmap_read and regmap_bulk_read should return values in native endian. In the individual case the current implementation calls format_val to put the data into the output array, which can cause endian issues. The regmap_read will have already converted the data into native endian, if the hosts endian differs from the device then format_val will switch the endian back again. Rather than using format_val simply use the code that is called if there is no format_val function. This code supports all cases except 24-bit but there don't appear to be any users of regmap_bulk_read for 24-bit. Additionally, it would have to be a big endian host for the old code to actually function correctly anyway. Fixes: 15b8d2c41fe5 ("regmap: Fix regmap_bulk_read in BE mode") Reported-by: David Rhodes Signed-off-by: Charles Keepax Signed-off-by: Mark Brown --- drivers/base/regmap/regmap.c | 55 +++++++++++++++--------------------- 1 file changed, 23 insertions(+), 32 deletions(-) diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index ee302ccdfbc8..4037b3782bd3 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -2709,47 +2709,38 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, for (i = 0; i < val_count * val_bytes; i += val_bytes) map->format.parse_inplace(val + i); } else { +#ifdef CONFIG_64BIT + u64 *u64 = val; +#endif + u32 *u32 = val; + u16 *u16 = val; + u8 *u8 = val; + for (i = 0; i < val_count; i++) { unsigned int ival; + ret = regmap_read(map, reg + regmap_get_offset(map, i), &ival); if (ret != 0) return ret; - if (map->format.format_val) { - map->format.format_val(val + (i * val_bytes), ival, 0); - } else { - /* Devices providing read and write - * operations can use the bulk I/O - * functions if they define a val_bytes, - * we assume that the values are native - * endian. - */ + switch (map->format.val_bytes) { #ifdef CONFIG_64BIT - u64 *u64 = val; + case 8: + u64[i] = ival; + break; #endif - u32 *u32 = val; - u16 *u16 = val; - u8 *u8 = val; - - switch (map->format.val_bytes) { -#ifdef CONFIG_64BIT - case 8: - u64[i] = ival; - break; -#endif - case 4: - u32[i] = ival; - break; - case 2: - u16[i] = ival; - break; - case 1: - u8[i] = ival; - break; - default: - return -EINVAL; - } + case 4: + u32[i] = ival; + break; + case 2: + u16[i] = ival; + break; + case 1: + u8[i] = ival; + break; + default: + return -EINVAL; } } } From 45abcc556721a6d18e4af82e20e164044f0a3e36 Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Mon, 12 Feb 2018 18:15:47 +0000 Subject: [PATCH 02/10] regmap: Use helper function for register offset As a helper function exists for calculating register offsets lets use that rather than open coding with the reg_stride. 
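For reference, a simplified sketch of the helper being switched to (the in-tree definition lives in drivers/base/regmap/internal.h and may additionally fast-path power-of-two strides, so treat this as illustrative rather than the exact implementation):

static inline unsigned int regmap_get_offset_sketch(const struct regmap *map,
						    unsigned int index)
{
	/* Equivalent to the open-coded reg + (i * map->reg_stride) */
	return index * map->reg_stride;
}

Using the helper keeps the offset calculation in one place should the stride handling ever change.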
Signed-off-by: Charles Keepax Signed-off-by: Mark Brown --- drivers/base/regmap/regmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 4037b3782bd3..f075c05859b0 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1993,7 +1993,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, return -EINVAL; } - ret = regmap_write(map, reg + (i * map->reg_stride), + ret = regmap_write(map, reg + regmap_get_offset(map, i), ival); if (ret) return ret; From 0645ba4331c2b02ba9907b1591ba722535890e9f Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Thu, 15 Feb 2018 17:52:16 +0000 Subject: [PATCH 03/10] regmap: Move the handling for max_raw_read into regmap_raw_read Currently regmap_bulk_read will split a read into chunks before calling regmap_raw_read if max_raw_read is set. It is more logical for this handling to be inside regmap_raw_read itself, as this removes the need to keep re-implementing the chunking code, which would be the same for all users of regmap_raw_read. Signed-off-by: Charles Keepax Signed-off-by: Mark Brown --- drivers/base/regmap/regmap.c | 90 ++++++++++++++---------------------- 1 file changed, 35 insertions(+), 55 deletions(-) diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index f075c05859b0..0cc7387008c9 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -2542,18 +2542,45 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass || map->cache_type == REGCACHE_NONE) { + int chunk_stride = map->reg_stride; + size_t chunk_size = val_bytes; + size_t chunk_count = val_count; + if (!map->bus->read) { ret = -ENOTSUPP; goto out; } - if (map->max_raw_read && map->max_raw_read < val_len) { - ret = -E2BIG; - goto out; + + if (!map->use_single_read) { + if (map->max_raw_read) + chunk_size = map->max_raw_read; + else + chunk_size = val_len; + if (chunk_size % val_bytes) + chunk_size -= chunk_size % val_bytes; + chunk_count = val_len / chunk_size; + chunk_stride *= chunk_size / val_bytes; } - /* Physical block read if there's no cache involved */ - ret = _regmap_raw_read(map, reg, val, val_len); + /* Read bytes that fit into a multiple of chunk_size */ + for (i = 0; i < chunk_count; i++) { + ret = _regmap_raw_read(map, + reg + (i * chunk_stride), + val + (i * chunk_size), + chunk_size); + if (ret != 0) + return ret; + } + /* Read remaining bytes */ + if (chunk_size * i < val_len) { + ret = _regmap_raw_read(map, + reg + (i * chunk_stride), + val + (i * chunk_size), + val_len - i * chunk_size); + if (ret != 0) + return ret; + } } else { /* Otherwise go word by word for the cache; should be low * cost as we expect to hit the cache. @@ -2655,56 +2682,9 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, return -EINVAL; if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) { - /* - * Some devices does not support bulk read, for - * them we have a series of single read operations. - */ - size_t total_size = val_bytes * val_count; - - if (!map->use_single_read && - (!map->max_raw_read || map->max_raw_read > total_size)) { - ret = regmap_raw_read(map, reg, val, - val_bytes * val_count); - if (ret != 0) - return ret; - } else { - /* - * Some devices do not support bulk read or do not - * support large bulk reads, for them we have a series - * of read operations. 
- */ - int chunk_stride = map->reg_stride; - size_t chunk_size = val_bytes; - size_t chunk_count = val_count; - - if (!map->use_single_read) { - chunk_size = map->max_raw_read; - if (chunk_size % val_bytes) - chunk_size -= chunk_size % val_bytes; - chunk_count = total_size / chunk_size; - chunk_stride *= chunk_size / val_bytes; - } - - /* Read bytes that fit into a multiple of chunk_size */ - for (i = 0; i < chunk_count; i++) { - ret = regmap_raw_read(map, - reg + (i * chunk_stride), - val + (i * chunk_size), - chunk_size); - if (ret != 0) - return ret; - } - - /* Read remaining bytes */ - if (chunk_size * i < total_size) { - ret = regmap_raw_read(map, - reg + (i * chunk_stride), - val + (i * chunk_size), - total_size - i * chunk_size); - if (ret != 0) - return ret; - } - } + ret = regmap_raw_read(map, reg, val, val_bytes * val_count); + if (ret != 0) + return ret; for (i = 0; i < val_count * val_bytes; i += val_bytes) map->format.parse_inplace(val + i); From 1b079ca2c2e9a4652051bc4b62a5ef83d59d86bb Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Thu, 15 Feb 2018 17:52:17 +0000 Subject: [PATCH 04/10] regmap: Tidy up regmap_raw_read chunking code Raw reads may need to be split into small chunks if max_raw_read is set. Tidy up the code implementing this, the new code is slightly clearer, slightly shorter and slightly more efficient. Signed-off-by: Charles Keepax Signed-off-by: Mark Brown --- drivers/base/regmap/regmap.c | 44 ++++++++++++++++-------------------- 1 file changed, 19 insertions(+), 25 deletions(-) diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 0cc7387008c9..ff30a9157de5 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -2542,44 +2542,38 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass || map->cache_type == REGCACHE_NONE) { - int chunk_stride = map->reg_stride; - size_t chunk_size = val_bytes; - size_t chunk_count = val_count; + size_t chunk_count, chunk_bytes; + size_t chunk_regs = val_count; if (!map->bus->read) { ret = -ENOTSUPP; goto out; } - if (!map->use_single_read) { - if (map->max_raw_read) - chunk_size = map->max_raw_read; - else - chunk_size = val_len; - if (chunk_size % val_bytes) - chunk_size -= chunk_size % val_bytes; - chunk_count = val_len / chunk_size; - chunk_stride *= chunk_size / val_bytes; - } + if (map->use_single_read) + chunk_regs = 1; + else if (map->max_raw_read && val_len > map->max_raw_read) + chunk_regs = map->max_raw_read / val_bytes; - /* Read bytes that fit into a multiple of chunk_size */ + chunk_count = val_count / chunk_regs; + chunk_bytes = chunk_regs * val_bytes; + + /* Read bytes that fit into whole chunks */ for (i = 0; i < chunk_count; i++) { - ret = _regmap_raw_read(map, - reg + (i * chunk_stride), - val + (i * chunk_size), - chunk_size); + ret = _regmap_raw_read(map, reg, val, chunk_bytes); if (ret != 0) - return ret; + goto out; + + reg += regmap_get_offset(map, chunk_regs); + val += chunk_bytes; + val_len -= chunk_bytes; } /* Read remaining bytes */ - if (chunk_size * i < val_len) { - ret = _regmap_raw_read(map, - reg + (i * chunk_stride), - val + (i * chunk_size), - val_len - i * chunk_size); + if (val_len) { + ret = _regmap_raw_read(map, reg, val, val_len); if (ret != 0) - return ret; + goto out; } } else { /* Otherwise go word by word for the cache; should be low From 186ba2eec275a5e4ee09d4b6a77c619e46fab9fd Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Thu, 15 Feb 2018 
17:52:18 +0000 Subject: [PATCH 05/10] regmap: Use _regmap_read in regmap_bulk_read Bulk reads may potentially read a lot of registers and regmap_read will take and release the regmap lock for each register. Avoid bouncing the lock so frequently by holding the lock locally and calling _regmap_read instead. This also has the nice side-effect that all the reads will be done atomically so no other threads can sneak a write in during the regmap_bulk_read. Signed-off-by: Charles Keepax Signed-off-by: Mark Brown --- drivers/base/regmap/regmap.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index ff30a9157de5..258a40e2a1d3 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -2674,6 +2674,8 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, if (!IS_ALIGNED(reg, map->reg_stride)) return -EINVAL; + if (val_count == 0) + return -EINVAL; if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) { ret = regmap_raw_read(map, reg, val, val_bytes * val_count); @@ -2690,13 +2692,15 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, u16 *u16 = val; u8 *u8 = val; + map->lock(map->lock_arg); + for (i = 0; i < val_count; i++) { unsigned int ival; - ret = regmap_read(map, reg + regmap_get_offset(map, i), - &ival); + ret = _regmap_read(map, reg + regmap_get_offset(map, i), + &ival); if (ret != 0) - return ret; + goto out; switch (map->format.val_bytes) { #ifdef CONFIG_64BIT @@ -2714,12 +2718,16 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, u8[i] = ival; break; default: - return -EINVAL; + ret = -EINVAL; + goto out; } } + +out: + map->unlock(map->lock_arg); } - return 0; + return ret; } EXPORT_SYMBOL_GPL(regmap_bulk_read); From 0812d8ffa9955251ba0077488d4408d8987ec091 Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Thu, 22 Feb 2018 12:59:10 +0000 Subject: [PATCH 06/10] regmap: Format data for raw write in regmap_bulk_write In the case were the bulk transaction is split up into smaller chunks data is passed directly to regmap_raw_write. However regmap_bulk_write uses data in host endian and regmap_raw_write expects data in device endian. As such if the host and device differ in endian the wrong data will be written to the device. Correct this issue using a similar approach to the single raw write case below it, duplicate the data into a new buffer and use parse_inplace to format the data correctly. 
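For context, a hypothetical driver-side sketch of the same endian contract (the function, buffer and sizes below are made up, not taken from the patch): values handed to the bulk API are in host endian, while the raw API expects bytes already in device order, so anything routed from the former into the latter has to be formatted first, which is what the kmemdup()/parse_inplace() hunk below does inside regmap itself.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/regmap.h>
#include <asm/byteorder.h>

/*
 * Hypothetical example: a driver pushing host-endian u16 values through
 * the raw interface of a big-endian, 16-bit-register device must convert
 * them itself; regmap_bulk_write() would do this conversion for it.
 */
static int example_write_coeffs(struct regmap *map, unsigned int reg,
				const u16 *coeffs, size_t n)
{
	__be16 raw[8];		/* assumed device: big endian, 2-byte registers */
	size_t i;

	if (n > ARRAY_SIZE(raw))
		return -EINVAL;

	for (i = 0; i < n; i++)
		raw[i] = cpu_to_be16(coeffs[i]);	/* host -> device order */

	/* Raw path: the buffer must already be in device byte order */
	return regmap_raw_write(map, reg, raw, n * sizeof(*raw));
}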
Fixes: adaac459759d ("regmap: Introduce max_raw_read/write for regmap_bulk_read/write") Signed-off-by: Charles Keepax Signed-off-by: Mark Brown --- drivers/base/regmap/regmap.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index f5d653663626..8fe6e08fa41e 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -2003,6 +2003,17 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, int chunk_stride = map->reg_stride; size_t chunk_size = val_bytes; size_t chunk_count = val_count; + void *wval; + + if (!val_count) + return -EINVAL; + + wval = kmemdup(val, val_count * val_bytes, map->alloc_flags); + if (!wval) + return -ENOMEM; + + for (i = 0; i < val_count * val_bytes; i += val_bytes) + map->format.parse_inplace(wval + i); if (!map->use_single_write) { chunk_size = map->max_raw_write; @@ -2017,7 +2028,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, for (i = 0; i < chunk_count; i++) { ret = _regmap_raw_write(map, reg + (i * chunk_stride), - val + (i * chunk_size), + wval + (i * chunk_size), chunk_size); if (ret) break; @@ -2026,10 +2037,12 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, /* Write remaining bytes */ if (!ret && chunk_size * i < total_size) { ret = _regmap_raw_write(map, reg + (i * chunk_stride), - val + (i * chunk_size), + wval + (i * chunk_size), total_size - i * chunk_size); } map->unlock(map->lock_arg); + + kfree(wval); } else { void *wval; From b4ecfec5ee3f282a4ac0876de332876fec9b488c Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Thu, 22 Feb 2018 12:59:11 +0000 Subject: [PATCH 07/10] regmap: Remove unnecessary printk for failed allocation Signed-off-by: Charles Keepax Signed-off-by: Mark Brown --- drivers/base/regmap/regmap.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 8fe6e08fa41e..707b0450ad72 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -2050,10 +2050,9 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, return -EINVAL; wval = kmemdup(val, val_count * val_bytes, map->alloc_flags); - if (!wval) { - dev_err(map->dev, "Error in memory allocation\n"); + if (!wval) return -ENOMEM; - } + for (i = 0; i < val_count * val_bytes; i += val_bytes) map->format.parse_inplace(wval + i); From 7ef2c6b8689a084954cffbd102ee49c2fb72cbd4 Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Thu, 22 Feb 2018 12:59:12 +0000 Subject: [PATCH 08/10] regmap: Move the handling for max_raw_write into regmap_raw_write Currently regmap_bulk_write will split a write into chunks before calling regmap_raw_write if max_raw_write is set. It is more logical for this handling to be inside regmap_raw_write itself, as this removes the need to keep re-implementing the chunking code, which would be the same for all users of regmap_raw_write. 
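To illustrate the duplication this removes, a rough before/after from a caller's point of view (hypothetical helper with illustrative parameters; a real driver would obtain the limits from regmap rather than passing them in):

#include <linux/kernel.h>
#include <linux/regmap.h>

/*
 * Hypothetical sketch: before this change regmap_raw_write() returned
 * -E2BIG for transfers longer than max_raw_write, so every caller had to
 * carry a chunking loop like this one (max_raw is assumed to be a
 * multiple of the register size for brevity).
 */
static int example_chunked_write_old(struct regmap *map, unsigned int reg,
				     const u8 *buf, size_t len, size_t max_raw,
				     unsigned int reg_stride, size_t val_bytes)
{
	int ret;

	while (len) {
		size_t n = min(len, max_raw);

		ret = regmap_raw_write(map, reg, buf, n);
		if (ret)
			return ret;

		reg += (n / val_bytes) * reg_stride;
		buf += n;
		len -= n;
	}

	return 0;
}

/*
 * After this change the same transfer is a single call and the splitting
 * happens inside regmap:
 *
 *	ret = regmap_raw_write(map, reg, buf, len);
 */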
Signed-off-by: Charles Keepax Signed-off-by: Mark Brown --- drivers/base/regmap/regmap.c | 117 ++++++++++++++++------------------- 1 file changed, 54 insertions(+), 63 deletions(-) diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 707b0450ad72..e82ea77849fb 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1438,8 +1438,8 @@ static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes, buf[i] |= (mask >> (8 * i)) & 0xff; } -int _regmap_raw_write(struct regmap *map, unsigned int reg, - const void *val, size_t val_len) +static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, + const void *val, size_t val_len) { struct regmap_range_node *range; unsigned long flags; @@ -1490,8 +1490,9 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, while (val_num > win_residue) { dev_dbg(map->dev, "Writing window %d/%zu\n", win_residue, val_len / map->format.val_bytes); - ret = _regmap_raw_write(map, reg, val, win_residue * - map->format.val_bytes); + ret = _regmap_raw_write_impl(map, reg, val, + win_residue * + map->format.val_bytes); if (ret != 0) return ret; @@ -1707,11 +1708,11 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg, map->format.format_val(map->work_buf + map->format.reg_bytes + map->format.pad_bytes, val, 0); - return _regmap_raw_write(map, reg, - map->work_buf + - map->format.reg_bytes + - map->format.pad_bytes, - map->format.val_bytes); + return _regmap_raw_write_impl(map, reg, + map->work_buf + + map->format.reg_bytes + + map->format.pad_bytes, + map->format.val_bytes); } static inline void *_regmap_map_get_context(struct regmap *map) @@ -1806,6 +1807,49 @@ int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val) } EXPORT_SYMBOL_GPL(regmap_write_async); +int _regmap_raw_write(struct regmap *map, unsigned int reg, + const void *val, size_t val_len) +{ + size_t val_bytes = map->format.val_bytes; + size_t val_count = val_len / val_bytes; + int chunk_stride = map->reg_stride; + size_t chunk_size = val_bytes; + size_t chunk_count = val_count; + int ret, i; + + if (!val_count) + return -EINVAL; + + if (!map->use_single_write) { + if (map->max_raw_write) + chunk_size = map->max_raw_write; + else + chunk_size = val_len; + if (chunk_size % val_bytes) + chunk_size -= chunk_size % val_bytes; + chunk_count = val_len / chunk_size; + chunk_stride *= chunk_size / val_bytes; + } + + /* Write as many bytes as possible with chunk_size */ + for (i = 0; i < chunk_count; i++) { + ret = _regmap_raw_write_impl(map, + reg + (i * chunk_stride), + val + (i * chunk_size), + chunk_size); + if (ret) + return ret; + } + + /* Write remaining bytes */ + if (!ret && chunk_size * i < val_len) + ret = _regmap_raw_write_impl(map, reg + (i * chunk_stride), + val + (i * chunk_size), + val_len - i * chunk_size); + + return ret; +} + /** * regmap_raw_write() - Write raw values to one or more registers * @@ -1831,8 +1875,6 @@ int regmap_raw_write(struct regmap *map, unsigned int reg, return -EINVAL; if (val_len % map->format.val_bytes) return -EINVAL; - if (map->max_raw_write && map->max_raw_write < val_len) - return -E2BIG; map->lock(map->lock_arg); @@ -1923,7 +1965,6 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, { int ret = 0, i; size_t val_bytes = map->format.val_bytes; - size_t total_size = val_bytes * val_count; if (!IS_ALIGNED(reg, map->reg_stride)) return -EINVAL; @@ -1998,57 +2039,9 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, 
const void *val, if (ret) return ret; } - } else if (map->use_single_write || - (map->max_raw_write && map->max_raw_write < total_size)) { - int chunk_stride = map->reg_stride; - size_t chunk_size = val_bytes; - size_t chunk_count = val_count; - void *wval; - - if (!val_count) - return -EINVAL; - - wval = kmemdup(val, val_count * val_bytes, map->alloc_flags); - if (!wval) - return -ENOMEM; - - for (i = 0; i < val_count * val_bytes; i += val_bytes) - map->format.parse_inplace(wval + i); - - if (!map->use_single_write) { - chunk_size = map->max_raw_write; - if (chunk_size % val_bytes) - chunk_size -= chunk_size % val_bytes; - chunk_count = total_size / chunk_size; - chunk_stride *= chunk_size / val_bytes; - } - - map->lock(map->lock_arg); - /* Write as many bytes as possible with chunk_size */ - for (i = 0; i < chunk_count; i++) { - ret = _regmap_raw_write(map, - reg + (i * chunk_stride), - wval + (i * chunk_size), - chunk_size); - if (ret) - break; - } - - /* Write remaining bytes */ - if (!ret && chunk_size * i < total_size) { - ret = _regmap_raw_write(map, reg + (i * chunk_stride), - wval + (i * chunk_size), - total_size - i * chunk_size); - } - map->unlock(map->lock_arg); - - kfree(wval); } else { void *wval; - if (!val_count) - return -EINVAL; - wval = kmemdup(val, val_count * val_bytes, map->alloc_flags); if (!wval) return -ENOMEM; @@ -2056,9 +2049,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, for (i = 0; i < val_count * val_bytes; i += val_bytes) map->format.parse_inplace(wval + i); - map->lock(map->lock_arg); - ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count); - map->unlock(map->lock_arg); + ret = regmap_raw_write(map, reg, wval, val_bytes * val_count); kfree(wval); } From 364e378b8d1679f91a29a9537a881bba17931cfb Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Thu, 22 Feb 2018 12:59:13 +0000 Subject: [PATCH 09/10] regmap: Tidy up regmap_raw_write chunking code Raw writes may need to be split into small chunks if max_raw_write is set. Tidy up the code implementing this, the new code is slightly clearer, slightly shorter and slightly more efficient. 
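As a worked example of the arithmetic in the tidied loop below, a standalone snippet (the numbers are made up for illustration):

#include <stdio.h>

/*
 * Illustrative numbers only: a map with 2-byte registers, 100 registers
 * to transfer, and a bus limited to 16 raw bytes per access.
 */
int main(void)
{
	size_t val_bytes = 2, val_count = 100, max_raw_write = 16;
	size_t val_len = val_bytes * val_count;			/* 200 bytes        */
	size_t chunk_regs = max_raw_write / val_bytes;		/* 8 regs per chunk */
	size_t chunk_count = val_count / chunk_regs;		/* 12 whole chunks  */
	size_t chunk_bytes = chunk_regs * val_bytes;		/* 16 bytes each    */
	size_t tail = val_len - chunk_count * chunk_bytes;	/* 8 trailing bytes */

	printf("%zu chunks of %zu bytes + %zu trailing bytes (%zu total)\n",
	       chunk_count, chunk_bytes, tail, val_len);
	return 0;
}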
Signed-off-by: Charles Keepax Signed-off-by: Mark Brown --- drivers/base/regmap/regmap.c | 37 ++++++++++++++++-------------------- 1 file changed, 16 insertions(+), 21 deletions(-) diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index e82ea77849fb..bfdd66dd3766 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1812,40 +1812,35 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, { size_t val_bytes = map->format.val_bytes; size_t val_count = val_len / val_bytes; - int chunk_stride = map->reg_stride; - size_t chunk_size = val_bytes; - size_t chunk_count = val_count; + size_t chunk_count, chunk_bytes; + size_t chunk_regs = val_count; int ret, i; if (!val_count) return -EINVAL; - if (!map->use_single_write) { - if (map->max_raw_write) - chunk_size = map->max_raw_write; - else - chunk_size = val_len; - if (chunk_size % val_bytes) - chunk_size -= chunk_size % val_bytes; - chunk_count = val_len / chunk_size; - chunk_stride *= chunk_size / val_bytes; - } + if (map->use_single_write) + chunk_regs = 1; + else if (map->max_raw_write && val_len > map->max_raw_write) + chunk_regs = map->max_raw_write / val_bytes; + + chunk_count = val_count / chunk_regs; + chunk_bytes = chunk_regs * val_bytes; /* Write as many bytes as possible with chunk_size */ for (i = 0; i < chunk_count; i++) { - ret = _regmap_raw_write_impl(map, - reg + (i * chunk_stride), - val + (i * chunk_size), - chunk_size); + ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes); if (ret) return ret; + + reg += regmap_get_offset(map, chunk_regs); + val += chunk_bytes; + val_len -= chunk_bytes; } /* Write remaining bytes */ - if (!ret && chunk_size * i < val_len) - ret = _regmap_raw_write_impl(map, reg + (i * chunk_stride), - val + (i * chunk_size), - val_len - i * chunk_size); + if (val_len) + ret = _regmap_raw_write_impl(map, reg, val, val_len); return ret; } From fb44f3cec35c6e71865012fa281ba6d4ff50a99a Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Thu, 22 Feb 2018 12:59:14 +0000 Subject: [PATCH 10/10] regmap: Merge redundant handling in regmap_bulk_write The handling for the first two cases in regmap_bulk_write is essentially identical. The first case is just a better implementation of the second, supporting 8 byte registers and doing the locking manually to avoid bouncing the lock for each register. Drop some redundant code by removing the second of these cases and allowing both situations to be handled by the same code. Signed-off-by: Charles Keepax Signed-off-by: Mark Brown --- drivers/base/regmap/regmap.c | 39 +++--------------------------------- 1 file changed, 3 insertions(+), 36 deletions(-) diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index bfdd66dd3766..fafee9251d65 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1965,17 +1965,10 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, return -EINVAL; /* - * Some devices don't support bulk write, for - * them we have a series of single write operations in the first two if - * blocks. - * - * The first if block is used for memory mapped io. It does not allow - * val_bytes of 3 for example. - * The second one is for busses that do not provide raw I/O. - * The third one is used for busses which do not have these limitations - * and can write arbitrary value lengths. + * Some devices don't support bulk write, for them we have a series of + * single write operations. 
*/ - if (!map->bus) { + if (!map->bus || !map->format.parse_inplace) { map->lock(map->lock_arg); for (i = 0; i < val_count; i++) { unsigned int ival; @@ -2008,32 +2001,6 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, } out: map->unlock(map->lock_arg); - } else if (map->bus && !map->format.parse_inplace) { - const u8 *u8 = val; - const u16 *u16 = val; - const u32 *u32 = val; - unsigned int ival; - - for (i = 0; i < val_count; i++) { - switch (map->format.val_bytes) { - case 4: - ival = u32[i]; - break; - case 2: - ival = u16[i]; - break; - case 1: - ival = u8[i]; - break; - default: - return -EINVAL; - } - - ret = regmap_write(map, reg + regmap_get_offset(map, i), - ival); - if (ret) - return ret; - } } else { void *wval;
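
Taken together, the series does not change the caller-facing contract of the bulk APIs: regmap_bulk_read() and regmap_bulk_write() still take arrays of host-endian values, one element of val_bytes per register, with chunking and device formatting handled internally. A hypothetical driver-side sketch (register address, count and names are illustrative only):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/regmap.h>
#include <linux/string.h>

#define EXAMPLE_COEFF_BASE	0x40	/* made-up register address */
#define EXAMPLE_NUM_COEFFS	8	/* made-up register count   */

static int example_update_coeffs(struct regmap *map, const u16 *coeffs)
{
	u16 readback[EXAMPLE_NUM_COEFFS];
	int ret;

	/* Values come back in host endian, one array element per register */
	ret = regmap_bulk_read(map, EXAMPLE_COEFF_BASE, readback,
			       EXAMPLE_NUM_COEFFS);
	if (ret)
		return ret;

	if (!memcmp(readback, coeffs, sizeof(readback)))
		return 0;	/* already up to date */

	/* Host-endian in; regmap does the device formatting and chunking */
	return regmap_bulk_write(map, EXAMPLE_COEFF_BASE, coeffs,
				 EXAMPLE_NUM_COEFFS);
}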