regmap: Move the handling for max_raw_read into regmap_raw_read

Currently regmap_bulk_read will split a read into chunks before
calling regmap_raw_read if max_raw_read is set. It is more logical for
this handling to be inside regmap_raw_read itself, as this removes the
need to keep re-implementing the chunking code, which would be the
same for all users of regmap_raw_read.
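
For illustration, the chunk arithmetic being moved can be sketched outside the
kernel. The following is a minimal userspace mock-up, not the regmap
implementation: struct fake_map and do_raw_read() are hypothetical stand-ins
for the regmap fields and the _regmap_raw_read() call used in the patch below.

	#include <stddef.h>
	#include <stdio.h>

	struct fake_map {
		size_t max_raw_read;	/* bus transfer limit in bytes, 0 = no limit */
		size_t val_bytes;	/* bytes per register value */
		int reg_stride;		/* register address stride */
	};

	/* Stand-in for a single bus transfer (_regmap_raw_read() in the kernel). */
	static int do_raw_read(unsigned int reg, size_t len)
	{
		printf("read %zu bytes starting at register 0x%x\n", len, reg);
		return 0;
	}

	static int chunked_raw_read(struct fake_map *map, unsigned int reg,
				    size_t val_len)
	{
		size_t chunk_size = map->max_raw_read ? map->max_raw_read : val_len;
		int chunk_stride = map->reg_stride;
		size_t i, chunk_count;
		int ret = 0;

		/* Round the chunk down to a whole number of register values. */
		if (chunk_size % map->val_bytes)
			chunk_size -= chunk_size % map->val_bytes;

		chunk_count = val_len / chunk_size;
		chunk_stride *= chunk_size / map->val_bytes;

		/* Full chunks first... */
		for (i = 0; i < chunk_count; i++) {
			ret = do_raw_read(reg + (i * chunk_stride), chunk_size);
			if (ret != 0)
				return ret;
		}

		/* ...then whatever is left over. */
		if (chunk_size * i < val_len)
			ret = do_raw_read(reg + (i * chunk_stride),
					  val_len - i * chunk_size);

		return ret;
	}

	int main(void)
	{
		/* A 50-byte read with a 16-byte limit: three full chunks plus 2 bytes. */
		struct fake_map map = { .max_raw_read = 16, .val_bytes = 2, .reg_stride = 1 };

		return chunked_raw_read(&map, 0x10, 50);
	}

In the kernel the same computation now lives in regmap_raw_read(), so
regmap_bulk_read() and any other raw-read user gets the splitting for free.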

Signed-off-by: Charles Keepax <ckeepax@opensource.cirrus.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
Charles Keepax 2018-02-15 17:52:16 +00:00 committed by Mark Brown
parent 45abcc5567
commit 0645ba4331
1 changed file with 35 additions and 55 deletions


@@ -2542,18 +2542,45 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
 	    map->cache_type == REGCACHE_NONE) {
+		int chunk_stride = map->reg_stride;
+		size_t chunk_size = val_bytes;
+		size_t chunk_count = val_count;
+
 		if (!map->bus->read) {
 			ret = -ENOTSUPP;
 			goto out;
 		}
-		if (map->max_raw_read && map->max_raw_read < val_len) {
-			ret = -E2BIG;
-			goto out;
+
+		if (!map->use_single_read) {
+			if (map->max_raw_read)
+				chunk_size = map->max_raw_read;
+			else
+				chunk_size = val_len;
+			if (chunk_size % val_bytes)
+				chunk_size -= chunk_size % val_bytes;
+			chunk_count = val_len / chunk_size;
+			chunk_stride *= chunk_size / val_bytes;
 		}
 
-		/* Physical block read if there's no cache involved */
-		ret = _regmap_raw_read(map, reg, val, val_len);
+		/* Read bytes that fit into a multiple of chunk_size */
+		for (i = 0; i < chunk_count; i++) {
+			ret = _regmap_raw_read(map,
+					       reg + (i * chunk_stride),
+					       val + (i * chunk_size),
+					       chunk_size);
+			if (ret != 0)
+				return ret;
+		}
+
+		/* Read remaining bytes */
+		if (chunk_size * i < val_len) {
+			ret = _regmap_raw_read(map,
+					       reg + (i * chunk_stride),
+					       val + (i * chunk_size),
+					       val_len - i * chunk_size);
+			if (ret != 0)
+				return ret;
+		}
 	} else {
 		/* Otherwise go word by word for the cache; should be low
 		 * cost as we expect to hit the cache.
@@ -2655,56 +2682,9 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
 		return -EINVAL;
 
 	if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
-		/*
-		 * Some devices does not support bulk read, for
-		 * them we have a series of single read operations.
-		 */
-		size_t total_size = val_bytes * val_count;
-
-		if (!map->use_single_read &&
-		    (!map->max_raw_read || map->max_raw_read > total_size)) {
-			ret = regmap_raw_read(map, reg, val,
-					      val_bytes * val_count);
-			if (ret != 0)
-				return ret;
-		} else {
-			/*
-			 * Some devices do not support bulk read or do not
-			 * support large bulk reads, for them we have a series
-			 * of read operations.
-			 */
-			int chunk_stride = map->reg_stride;
-			size_t chunk_size = val_bytes;
-			size_t chunk_count = val_count;
-
-			if (!map->use_single_read) {
-				chunk_size = map->max_raw_read;
-				if (chunk_size % val_bytes)
-					chunk_size -= chunk_size % val_bytes;
-				chunk_count = total_size / chunk_size;
-				chunk_stride *= chunk_size / val_bytes;
-			}
-
-			/* Read bytes that fit into a multiple of chunk_size */
-			for (i = 0; i < chunk_count; i++) {
-				ret = regmap_raw_read(map,
-						      reg + (i * chunk_stride),
-						      val + (i * chunk_size),
-						      chunk_size);
-				if (ret != 0)
-					return ret;
-			}
-
-			/* Read remaining bytes */
-			if (chunk_size * i < total_size) {
-				ret = regmap_raw_read(map,
-						      reg + (i * chunk_stride),
-						      val + (i * chunk_size),
-						      total_size - i * chunk_size);
-				if (ret != 0)
-					return ret;
-			}
-		}
+		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
+		if (ret != 0)
+			return ret;
 
 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
 			map->format.parse_inplace(val + i);
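
As a hedged usage sketch (hypothetical driver code, not part of this patch): a
caller that previously had to stay below max_raw_read itself can now hand the
whole buffer to regmap_raw_read() and let the core split it into bus-sized
transfers where needed. example_read_fifo() and fifo_reg are illustrative
names only.

	#include <linux/regmap.h>

	static int example_read_fifo(struct regmap *map, unsigned int fifo_reg)
	{
		u8 buf[32];

		/* The regmap core splits this into max_raw_read-sized
		 * bus transfers where the map requires it. */
		return regmap_raw_read(map, fifo_reg, buf, sizeof(buf));
	}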