blob: 42d8404bc8ccac40fdcf1f9705456f51446dcf0c [file] [log] [blame]
Mark Brownb83a3132011-05-11 19:59:58 +02001/*
2 * Register map access API
3 *
4 * Copyright 2011 Wolfson Microelectronics plc
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
Stephen Warrenf5d6eba2012-03-09 13:17:28 -070013#include <linux/device.h>
Mark Brownb83a3132011-05-11 19:59:58 +020014#include <linux/slab.h>
Paul Gortmaker19694b52012-02-28 19:28:02 -050015#include <linux/export.h>
Mark Brownb83a3132011-05-11 19:59:58 +020016#include <linux/mutex.h>
17#include <linux/err.h>
Xiubo Lid647c192014-07-15 12:23:02 +080018#include <linux/of.h>
Krystian Garbaciak6863ca62012-06-15 11:23:56 +010019#include <linux/rbtree.h>
Stephen Warren30b2a552013-02-02 22:50:14 -070020#include <linux/sched.h>
Nariman Poushin2de9d602015-07-16 16:36:22 +010021#include <linux/delay.h>
Xiubo Lica747be2016-01-04 18:00:33 +080022#include <linux/log2.h>
Baolin Wang8698b932017-11-01 10:11:55 +080023#include <linux/hwspinlock.h>
Mark Brownb83a3132011-05-11 19:59:58 +020024
Mark Brownfb2736b2011-07-24 21:30:55 +010025#define CREATE_TRACE_POINTS
Steven Rostedtf58078d2015-03-19 17:50:47 -040026#include "trace.h"
Mark Brownfb2736b2011-07-24 21:30:55 +010027
Mark Brown93de9122011-07-20 22:35:37 +010028#include "internal.h"
Mark Brownb83a3132011-05-11 19:59:58 +020029
/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

#ifdef LOG_DEVICE
/*
 * Only log register I/O for the device whose name matches LOG_DEVICE.
 * Maps without an attached device never log.
 */
static inline bool regmap_should_log(struct regmap *map)
{
	return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
/* Logging is compiled out when LOG_DEVICE is not defined. */
static inline bool regmap_should_log(struct regmap *map) { return false; }
#endif

46
47
/* Forward declarations for the bus/update plumbing defined later in the file */
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);
Andrey Smirnovad278402013-01-12 12:54:12 -080062
Davide Ciminaghi76aad392012-11-20 15:20:30 +010063bool regmap_reg_in_ranges(unsigned int reg,
64 const struct regmap_range *ranges,
65 unsigned int nranges)
66{
67 const struct regmap_range *r;
68 int i;
69
70 for (i = 0, r = ranges; i < nranges; i++, r++)
71 if (regmap_reg_in_range(reg, r))
72 return true;
73 return false;
74}
75EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
76
Mark Brown154881e2013-05-08 13:55:23 +010077bool regmap_check_range_table(struct regmap *map, unsigned int reg,
78 const struct regmap_access_table *table)
Davide Ciminaghi76aad392012-11-20 15:20:30 +010079{
80 /* Check "no ranges" first */
81 if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
82 return false;
83
84 /* In case zero "yes ranges" are supplied, any reg is OK */
85 if (!table->n_yes_ranges)
86 return true;
87
88 return regmap_reg_in_ranges(reg, table->yes_ranges,
89 table->n_yes_ranges);
90}
Mark Brown154881e2013-05-08 13:55:23 +010091EXPORT_SYMBOL_GPL(regmap_check_range_table);
Davide Ciminaghi76aad392012-11-20 15:20:30 +010092
/*
 * Decide whether @reg may be written through @map.  Policy sources are
 * consulted in priority order: the max_register bound, then the
 * per-register writeable_reg() callback, then the wr_table.  With no
 * policy configured every register is writeable.
 */
bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	/* NOTE(review): max_register == 0 disables this bound check */
	if (map->max_register && reg > map->max_register)
		return false;

	/* A writeable_reg() callback overrides any wr_table */
	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}
106
/*
 * Test whether @reg currently has a value in the register cache.
 * Returns false when caching is disabled, the register is out of
 * bounds, or a locked regcache_read() of the register fails.
 */
bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	/* A successful cache read means a cached value exists */
	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}
129
/*
 * Decide whether @reg may be read through @map.  Maps without a read
 * path, or using write-only formatted register encodings, are never
 * readable.  Otherwise the same priority order as regmap_writeable()
 * applies: bound check, readable_reg() callback, then rd_table.
 */
bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	/* NOTE(review): max_register == 0 disables this bound check */
	if (map->max_register && reg > map->max_register)
		return false;

	/* Formatted-write-only devices (e.g. 4+12 bit) cannot be read */
	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}
149
150bool regmap_volatile(struct regmap *map, unsigned int reg)
151{
Mark Brown5844a8b2014-08-26 12:12:17 +0100152 if (!map->format.format_write && !regmap_readable(map, reg))
Mark Brown8de2f082011-08-10 17:14:41 +0900153 return false;
154
155 if (map->volatile_reg)
156 return map->volatile_reg(map->dev, reg);
157
Davide Ciminaghi76aad392012-11-20 15:20:30 +0100158 if (map->volatile_table)
Mark Brown154881e2013-05-08 13:55:23 +0100159 return regmap_check_range_table(map, reg, map->volatile_table);
Davide Ciminaghi76aad392012-11-20 15:20:30 +0100160
Mark Brownb92be6f2013-06-03 17:24:08 +0100161 if (map->cache_ops)
162 return false;
163 else
164 return true;
Mark Brown8de2f082011-08-10 17:14:41 +0900165}
166
/*
 * Decide whether @reg is precious, i.e. a read has side effects (such
 * as clearing interrupt status) and must not be issued speculatively.
 * Unreadable registers cannot be precious; otherwise the
 * precious_reg() callback and precious_table are consulted.  The
 * default is not precious.
 */
bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}
180
Ben Whittencdf6b112018-10-19 10:33:50 +0100181bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
182{
183 if (map->writeable_noinc_reg)
184 return map->writeable_noinc_reg(map->dev, reg);
185
186 if (map->wr_noinc_table)
187 return regmap_check_range_table(map, reg, map->wr_noinc_table);
188
189 return true;
190}
191
Crestez Dan Leonard74fe7b52018-08-07 17:52:17 +0300192bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
193{
194 if (map->readable_noinc_reg)
195 return map->readable_noinc_reg(map->dev, reg);
196
197 if (map->rd_noinc_table)
198 return regmap_check_range_table(map, reg, map->rd_noinc_table);
199
200 return true;
201}
202
/*
 * Test whether @num consecutive registers starting at @reg are all
 * volatile.  Used to decide if a bulk access may bypass the cache as a
 * whole; a single non-volatile register in the span makes it false.
 */
static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
			return false;

	return true;
}
214
/*
 * Formatters that pack a (register, value) pair with sub-byte field
 * widths into map->work_buf.  The naming convention is
 * regmap_format_<reg bits>_<val bits>_write.
 */

/* 2-bit register, 6-bit value packed into a single byte */
static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

/* 4-bit register, 12-bit value packed into one big-endian 16-bit word */
static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

/* 7-bit register, 9-bit value packed into one big-endian 16-bit word */
static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

/* 10-bit register, 14-bit value spread big-endian across three bytes */
static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}
246
/*
 * Single-value formatters: write one value of the given width into
 * @buf, pre-shifted left by @shift bits (used for pad bits that are
 * not a whole number of bytes).
 */
static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	__be16 *b = buf;

	b[0] = cpu_to_be16(val << shift);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	__le16 *b = buf;

	b[0] = cpu_to_le16(val << shift);
}

/* Native (CPU) endianness: plain store, no byte swapping */
static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u16 *)buf = val << shift;
}

/* 24-bit values have no integer type; stored big-endian byte by byte */
static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	__be32 *b = buf;

	b[0] = cpu_to_be32(val << shift);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	__le32 *b = buf;

	b[0] = cpu_to_le32(val << shift);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u32 *)buf = val << shift;
}
304
#ifdef CONFIG_64BIT
/*
 * 64-bit formatters, only built on 64-bit kernels.  @val is widened to
 * u64 before shifting so a shift >= 32 cannot overflow the 32-bit
 * unsigned int argument.
 */
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	__be64 *b = buf;

	b[0] = cpu_to_be64((u64)val << shift);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	__le64 *b = buf;

	b[0] = cpu_to_le64((u64)val << shift);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u64 *)buf = (u64)val << shift;
}
#endif
326
/*
 * Value parsers, the inverse of the formatters above.  The plain
 * variants read a value of the given width/endianness out of @buf; the
 * _inplace variants convert @buf's contents to CPU order in place for
 * raw/bulk reads.
 */

/* 8-bit data needs no byte swapping, so in-place conversion is a no-op */
static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	const __be16 *b = buf;

	return be16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	const __le16 *b = buf;

	return le16_to_cpu(b[0]);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	__be16 *b = buf;

	b[0] = be16_to_cpu(b[0]);
}

static void regmap_parse_16_le_inplace(void *buf)
{
	__le16 *b = buf;

	b[0] = le16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	return *(u16 *)buf;
}

/* 24-bit values are always stored big-endian, byte by byte */
static unsigned int regmap_parse_24(const void *buf)
{
	const u8 *b = buf;
	unsigned int ret = b[2];
	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	const __be32 *b = buf;

	return be32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	const __le32 *b = buf;

	return le32_to_cpu(b[0]);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	__be32 *b = buf;

	b[0] = be32_to_cpu(b[0]);
}

static void regmap_parse_32_le_inplace(void *buf)
{
	__le32 *b = buf;

	b[0] = le32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	return *(u32 *)buf;
}
413
#ifdef CONFIG_64BIT
/*
 * 64-bit parsers, only built on 64-bit kernels.
 *
 * NOTE(review): the non-inplace parsers return unsigned int, so a
 * 64-bit register value is truncated to 32 bits here — presumably
 * acceptable because the regmap value API is unsigned int wide; confirm
 * against the callers before relying on upper bits.
 */
static unsigned int regmap_parse_64_be(const void *buf)
{
	const __be64 *b = buf;

	return be64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	const __le64 *b = buf;

	return le64_to_cpu(b[0]);
}

static void regmap_parse_64_be_inplace(void *buf)
{
	__be64 *b = buf;

	b[0] = be64_to_cpu(b[0]);
}

static void regmap_parse_64_le_inplace(void *buf)
{
	__le64 *b = buf;

	b[0] = le64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	return *(u64 *)buf;
}
#endif
448
/*
 * map->lock()/map->unlock() implementations backed by the hwspinlock
 * framework, used when the regmap is shared with other processors.
 * UINT_MAX as the timeout effectively means "wait forever".
 *
 * NOTE(review): the hwspin_lock_timeout*() return values are ignored
 * here, so a timeout or error proceeds without the lock — confirm this
 * is intentional.
 */
static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}

/* Saves the local interrupt state into map->spinlock_flags */
static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
}

static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}

static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}

static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}
491
/* No-op locking, used when config->disable_locking is set */
static void regmap_lock_unlock_none(void *__map)
{

}

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

/*
 * Spinlock variants used for fast_io maps; the saved IRQ flags live in
 * the map since lock and unlock happen in separate calls.  The
 * __acquires/__releases annotations are for sparse lock checking.
 */
static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}
525
/* devres release callback for the dev_get_regmap() resource */
static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}
534
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100535static bool _regmap_range_add(struct regmap *map,
536 struct regmap_range_node *data)
537{
538 struct rb_root *root = &map->range_tree;
539 struct rb_node **new = &(root->rb_node), *parent = NULL;
540
541 while (*new) {
542 struct regmap_range_node *this =
Geliang Tang671a9112016-12-19 22:40:25 +0800543 rb_entry(*new, struct regmap_range_node, node);
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100544
545 parent = *new;
546 if (data->range_max < this->range_min)
547 new = &((*new)->rb_left);
548 else if (data->range_min > this->range_max)
549 new = &((*new)->rb_right);
550 else
551 return false;
552 }
553
554 rb_link_node(&data->node, parent, new);
555 rb_insert_color(&data->node, root);
556
557 return true;
558}
559
560static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
561 unsigned int reg)
562{
563 struct rb_node *node = map->range_tree.rb_node;
564
565 while (node) {
566 struct regmap_range_node *this =
Geliang Tang671a9112016-12-19 22:40:25 +0800567 rb_entry(node, struct regmap_range_node, node);
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100568
569 if (reg < this->range_min)
570 node = node->rb_left;
571 else if (reg > this->range_max)
572 node = node->rb_right;
573 else
574 return this;
575 }
576
577 return NULL;
578}
579
/*
 * Tear down the range rb-tree: unlink and free every range node, then
 * free the scratch buffer used for page selector writes.  The next
 * pointer is fetched before rb_erase() invalidates the current node.
 */
static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}
595
/**
 * regmap_attach_dev() - attach a regmap to a struct device
 *
 * @dev: device to attach @map to
 * @map: regmap created with a NULL device
 * @config: regmap config, used here only for the debugfs name
 *
 * Registers debugfs entries and a devres resource so the map can later
 * be retrieved with dev_get_regmap().  Returns 0 on success or -ENOMEM
 * if the devres allocation fails (debugfs is rolled back in that case).
 */
int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;

	map->dev = dev;

	regmap_debugfs_init(map, config->name);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);
617
Geert Uytterhoevencf673fb2014-08-27 16:36:03 +0200618static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
619 const struct regmap_config *config)
Xiubo Lid647c192014-07-15 12:23:02 +0800620{
Geert Uytterhoevencf673fb2014-08-27 16:36:03 +0200621 enum regmap_endian endian;
Xiubo Lid647c192014-07-15 12:23:02 +0800622
Stephen Warren45e1a272014-08-19 10:49:07 -0600623 /* Retrieve the endianness specification from the regmap config */
Geert Uytterhoevencf673fb2014-08-27 16:36:03 +0200624 endian = config->reg_format_endian;
Xiubo Lid647c192014-07-15 12:23:02 +0800625
Stephen Warren45e1a272014-08-19 10:49:07 -0600626 /* If the regmap config specified a non-default value, use that */
Geert Uytterhoevencf673fb2014-08-27 16:36:03 +0200627 if (endian != REGMAP_ENDIAN_DEFAULT)
628 return endian;
Stephen Warren45e1a272014-08-19 10:49:07 -0600629
630 /* Retrieve the endianness specification from the bus config */
Geert Uytterhoevencf673fb2014-08-27 16:36:03 +0200631 if (bus && bus->reg_format_endian_default)
632 endian = bus->reg_format_endian_default;
Xiubo Lid647c192014-07-15 12:23:02 +0800633
Stephen Warren45e1a272014-08-19 10:49:07 -0600634 /* If the bus specified a non-default value, use that */
Geert Uytterhoevencf673fb2014-08-27 16:36:03 +0200635 if (endian != REGMAP_ENDIAN_DEFAULT)
636 return endian;
Stephen Warren45e1a272014-08-19 10:49:07 -0600637
638 /* Use this if no other value was found */
Geert Uytterhoevencf673fb2014-08-27 16:36:03 +0200639 return REGMAP_ENDIAN_BIG;
640}
Stephen Warren45e1a272014-08-19 10:49:07 -0600641
/*
 * Resolve the value (data) endianness for a map.  Precedence order:
 * regmap config, then the device's DT node ("big-endian",
 * "little-endian" or "native-endian" properties), then the bus
 * default, with big endian as the final fallback.
 */
enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct device_node *np;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the dev and dev->of_node exist try to get endianness from DT */
	if (dev && dev->of_node) {
		np = dev->of_node;

		/* Parse the device's DT node for an endianness specification */
		if (of_property_read_bool(np, "big-endian"))
			endian = REGMAP_ENDIAN_BIG;
		else if (of_property_read_bool(np, "little-endian"))
			endian = REGMAP_ENDIAN_LITTLE;
		else if (of_property_read_bool(np, "native-endian"))
			endian = REGMAP_ENDIAN_NATIVE;

		/* If the endianness was specified in DT, use that */
		if (endian != REGMAP_ENDIAN_DEFAULT)
			return endian;
	}

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
Xiubo Lid647c192014-07-15 12:23:02 +0800685
Nicolas Boichat3cfe7a72015-07-08 14:30:18 +0800686struct regmap *__regmap_init(struct device *dev,
687 const struct regmap_bus *bus,
688 void *bus_context,
689 const struct regmap_config *config,
690 struct lock_class_key *lock_key,
691 const char *lock_name)
Mark Brownb83a3132011-05-11 19:59:58 +0200692{
Michal Simek6cfec042014-02-10 16:22:33 +0100693 struct regmap *map;
Mark Brownb83a3132011-05-11 19:59:58 +0200694 int ret = -EINVAL;
Stephen Warren141eba22012-05-24 10:47:26 -0600695 enum regmap_endian reg_endian, val_endian;
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100696 int i, j;
Mark Brownb83a3132011-05-11 19:59:58 +0200697
Andrey Smirnovd2a58842013-01-27 10:49:05 -0800698 if (!config)
Lars-Peter Clausenabbb18f2011-11-14 10:40:15 +0100699 goto err;
Mark Brownb83a3132011-05-11 19:59:58 +0200700
701 map = kzalloc(sizeof(*map), GFP_KERNEL);
702 if (map == NULL) {
703 ret = -ENOMEM;
704 goto err;
705 }
706
Bartosz Golaszewski8253bb32017-12-13 17:25:31 +0100707 if (config->name) {
708 map->name = kstrdup_const(config->name, GFP_KERNEL);
709 if (!map->name) {
710 ret = -ENOMEM;
711 goto err_map;
712 }
713 }
714
Bartosz Golaszewskic9b41fc2017-12-06 15:26:21 +0100715 if (config->disable_locking) {
Bartosz Golaszewski81e30b12017-12-13 10:28:10 +0100716 map->lock = map->unlock = regmap_lock_unlock_none;
Mark Brown72465732017-12-12 16:56:43 +0000717 regmap_debugfs_disable(map);
Bartosz Golaszewskic9b41fc2017-12-06 15:26:21 +0100718 } else if (config->lock && config->unlock) {
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +0200719 map->lock = config->lock;
720 map->unlock = config->unlock;
721 map->lock_arg = config->lock_arg;
Baolin Wanga4887812017-12-25 14:37:09 +0800722 } else if (config->use_hwlock) {
Baolin Wang8698b932017-11-01 10:11:55 +0800723 map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
724 if (!map->hwlock) {
725 ret = -ENXIO;
Bartosz Golaszewski8253bb32017-12-13 17:25:31 +0100726 goto err_name;
Baolin Wang8698b932017-11-01 10:11:55 +0800727 }
728
729 switch (config->hwlock_mode) {
730 case HWLOCK_IRQSTATE:
731 map->lock = regmap_lock_hwlock_irqsave;
732 map->unlock = regmap_unlock_hwlock_irqrestore;
733 break;
734 case HWLOCK_IRQ:
735 map->lock = regmap_lock_hwlock_irq;
736 map->unlock = regmap_unlock_hwlock_irq;
737 break;
738 default:
739 map->lock = regmap_lock_hwlock;
740 map->unlock = regmap_unlock_hwlock;
741 break;
742 }
743
744 map->lock_arg = map;
Stephen Warrenbacdbe02012-04-04 15:48:28 -0600745 } else {
Andrey Smirnovd2a58842013-01-27 10:49:05 -0800746 if ((bus && bus->fast_io) ||
747 config->fast_io) {
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +0200748 spin_lock_init(&map->spinlock);
749 map->lock = regmap_lock_spinlock;
750 map->unlock = regmap_unlock_spinlock;
Nicolas Boichat3cfe7a72015-07-08 14:30:18 +0800751 lockdep_set_class_and_name(&map->spinlock,
752 lock_key, lock_name);
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +0200753 } else {
754 mutex_init(&map->mutex);
755 map->lock = regmap_lock_mutex;
756 map->unlock = regmap_unlock_mutex;
Nicolas Boichat3cfe7a72015-07-08 14:30:18 +0800757 lockdep_set_class_and_name(&map->mutex,
758 lock_key, lock_name);
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +0200759 }
760 map->lock_arg = map;
Stephen Warrenbacdbe02012-04-04 15:48:28 -0600761 }
Stephen Boydb4a21fc2015-09-11 16:37:05 -0700762
763 /*
764 * When we write in fast-paths with regmap_bulk_write() don't allocate
765 * scratch buffers with sleeping allocations.
766 */
767 if ((bus && bus->fast_io) || config->fast_io)
768 map->alloc_flags = GFP_ATOMIC;
769 else
770 map->alloc_flags = GFP_KERNEL;
771
Wolfram Sangc212acc2012-01-28 02:16:41 +0100772 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
Mark Brown82159ba2012-01-18 10:52:25 +0000773 map->format.pad_bytes = config->pad_bits / 8;
Wolfram Sangc212acc2012-01-28 02:16:41 +0100774 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
Fabio Estevam5494a982012-05-31 21:10:30 -0300775 map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
776 config->val_bits + config->pad_bits, 8);
Marc Reillyd939fb92012-03-16 12:11:43 +1100777 map->reg_shift = config->pad_bits % 8;
Stephen Warrenf01ee602012-04-09 13:40:24 -0600778 if (config->reg_stride)
779 map->reg_stride = config->reg_stride;
780 else
781 map->reg_stride = 1;
Xiubo Lica747be2016-01-04 18:00:33 +0800782 if (is_power_of_2(map->reg_stride))
783 map->reg_stride_order = ilog2(map->reg_stride);
784 else
785 map->reg_stride_order = -1;
David Frey1c96a2f2018-09-01 09:50:41 -0700786 map->use_single_read = config->use_single_read || !bus || !bus->read;
787 map->use_single_write = config->use_single_write || !bus || !bus->write;
Markus Pargmann9c9f7f62015-08-21 10:26:43 +0200788 map->can_multi_write = config->can_multi_write && bus && bus->write;
Sergey SENOZHATSKY17649c92015-08-31 18:54:58 +0900789 if (bus) {
790 map->max_raw_read = bus->max_raw_read;
791 map->max_raw_write = bus->max_raw_write;
792 }
Mark Brownb83a3132011-05-11 19:59:58 +0200793 map->dev = dev;
794 map->bus = bus;
Stephen Warren0135bbc2012-04-04 15:48:30 -0600795 map->bus_context = bus_context;
Mark Brown2e2ae662011-07-20 22:33:39 +0100796 map->max_register = config->max_register;
Davide Ciminaghi76aad392012-11-20 15:20:30 +0100797 map->wr_table = config->wr_table;
798 map->rd_table = config->rd_table;
799 map->volatile_table = config->volatile_table;
800 map->precious_table = config->precious_table;
Ben Whittencdf6b112018-10-19 10:33:50 +0100801 map->wr_noinc_table = config->wr_noinc_table;
Crestez Dan Leonard74fe7b52018-08-07 17:52:17 +0300802 map->rd_noinc_table = config->rd_noinc_table;
Mark Brown2e2ae662011-07-20 22:33:39 +0100803 map->writeable_reg = config->writeable_reg;
804 map->readable_reg = config->readable_reg;
805 map->volatile_reg = config->volatile_reg;
Mark Brown2efe1642011-08-08 15:41:46 +0900806 map->precious_reg = config->precious_reg;
Ben Whittencdf6b112018-10-19 10:33:50 +0100807 map->writeable_noinc_reg = config->writeable_noinc_reg;
Crestez Dan Leonard74fe7b52018-08-07 17:52:17 +0300808 map->readable_noinc_reg = config->readable_noinc_reg;
Dimitris Papastamos5d1729e2011-09-19 14:34:05 +0100809 map->cache_type = config->cache_type;
Mark Brownb83a3132011-05-11 19:59:58 +0200810
Mark Brown0d509f22013-01-27 22:07:38 +0800811 spin_lock_init(&map->async_lock);
812 INIT_LIST_HEAD(&map->async_list);
Mark Brown7e09a972013-10-07 23:00:24 +0100813 INIT_LIST_HEAD(&map->async_free);
Mark Brown0d509f22013-01-27 22:07:38 +0800814 init_waitqueue_head(&map->async_waitq);
815
Andrew F. Davis9bf485c2018-01-07 17:19:09 -0600816 if (config->read_flag_mask ||
817 config->write_flag_mask ||
818 config->zero_flag_mask) {
Lars-Peter Clausen6f306442011-09-05 20:46:32 +0200819 map->read_flag_mask = config->read_flag_mask;
820 map->write_flag_mask = config->write_flag_mask;
Andrey Smirnovd2a58842013-01-27 10:49:05 -0800821 } else if (bus) {
Lars-Peter Clausen6f306442011-09-05 20:46:32 +0200822 map->read_flag_mask = bus->read_flag_mask;
823 }
824
Andrey Smirnovd2a58842013-01-27 10:49:05 -0800825 if (!bus) {
826 map->reg_read = config->reg_read;
827 map->reg_write = config->reg_write;
828
829 map->defer_caching = false;
830 goto skip_format_initialization;
Boris BREZILLON3ac17032014-04-17 11:40:11 +0200831 } else if (!bus->read || !bus->write) {
832 map->reg_read = _regmap_bus_reg_read;
833 map->reg_write = _regmap_bus_reg_write;
834
835 map->defer_caching = false;
836 goto skip_format_initialization;
Andrey Smirnovd2a58842013-01-27 10:49:05 -0800837 } else {
838 map->reg_read = _regmap_bus_read;
Jon Ringle77792b12015-10-01 12:38:07 -0400839 map->reg_update_bits = bus->reg_update_bits;
Andrey Smirnovd2a58842013-01-27 10:49:05 -0800840 }
Andrey Smirnovad278402013-01-12 12:54:12 -0800841
Geert Uytterhoevencf673fb2014-08-27 16:36:03 +0200842 reg_endian = regmap_get_reg_endian(bus, config);
843 val_endian = regmap_get_val_endian(dev, bus, config);
Stephen Warren141eba22012-05-24 10:47:26 -0600844
Marc Reillyd939fb92012-03-16 12:11:43 +1100845 switch (config->reg_bits + map->reg_shift) {
Wolfram Sang9aa50752012-01-27 16:10:22 +0100846 case 2:
847 switch (config->val_bits) {
848 case 6:
849 map->format.format_write = regmap_format_2_6_write;
850 break;
851 default:
Baolin Wang8698b932017-11-01 10:11:55 +0800852 goto err_hwlock;
Wolfram Sang9aa50752012-01-27 16:10:22 +0100853 }
854 break;
855
Mark Brownb83a3132011-05-11 19:59:58 +0200856 case 4:
857 switch (config->val_bits) {
858 case 12:
859 map->format.format_write = regmap_format_4_12_write;
860 break;
861 default:
Baolin Wang8698b932017-11-01 10:11:55 +0800862 goto err_hwlock;
Mark Brownb83a3132011-05-11 19:59:58 +0200863 }
864 break;
865
866 case 7:
867 switch (config->val_bits) {
868 case 9:
869 map->format.format_write = regmap_format_7_9_write;
870 break;
871 default:
Baolin Wang8698b932017-11-01 10:11:55 +0800872 goto err_hwlock;
Mark Brownb83a3132011-05-11 19:59:58 +0200873 }
874 break;
875
Lars-Peter Clausen7e5ec632011-11-16 16:28:21 +0100876 case 10:
877 switch (config->val_bits) {
878 case 14:
879 map->format.format_write = regmap_format_10_14_write;
880 break;
881 default:
Baolin Wang8698b932017-11-01 10:11:55 +0800882 goto err_hwlock;
Lars-Peter Clausen7e5ec632011-11-16 16:28:21 +0100883 }
884 break;
885
Mark Brownb83a3132011-05-11 19:59:58 +0200886 case 8:
887 map->format.format_reg = regmap_format_8;
888 break;
889
890 case 16:
Stephen Warren141eba22012-05-24 10:47:26 -0600891 switch (reg_endian) {
892 case REGMAP_ENDIAN_BIG:
893 map->format.format_reg = regmap_format_16_be;
894 break;
Tony Lindgren55562442016-09-15 13:56:11 -0700895 case REGMAP_ENDIAN_LITTLE:
896 map->format.format_reg = regmap_format_16_le;
897 break;
Stephen Warren141eba22012-05-24 10:47:26 -0600898 case REGMAP_ENDIAN_NATIVE:
899 map->format.format_reg = regmap_format_16_native;
900 break;
901 default:
Baolin Wang8698b932017-11-01 10:11:55 +0800902 goto err_hwlock;
Stephen Warren141eba22012-05-24 10:47:26 -0600903 }
Mark Brownb83a3132011-05-11 19:59:58 +0200904 break;
905
Lars-Peter Clausen237019e2013-01-10 17:06:10 +0100906 case 24:
907 if (reg_endian != REGMAP_ENDIAN_BIG)
Baolin Wang8698b932017-11-01 10:11:55 +0800908 goto err_hwlock;
Lars-Peter Clausen237019e2013-01-10 17:06:10 +0100909 map->format.format_reg = regmap_format_24;
910 break;
911
Mark Brown7d5e5252012-02-17 15:58:25 -0800912 case 32:
Stephen Warren141eba22012-05-24 10:47:26 -0600913 switch (reg_endian) {
914 case REGMAP_ENDIAN_BIG:
915 map->format.format_reg = regmap_format_32_be;
916 break;
Tony Lindgren55562442016-09-15 13:56:11 -0700917 case REGMAP_ENDIAN_LITTLE:
918 map->format.format_reg = regmap_format_32_le;
919 break;
Stephen Warren141eba22012-05-24 10:47:26 -0600920 case REGMAP_ENDIAN_NATIVE:
921 map->format.format_reg = regmap_format_32_native;
922 break;
923 default:
Baolin Wang8698b932017-11-01 10:11:55 +0800924 goto err_hwlock;
Stephen Warren141eba22012-05-24 10:47:26 -0600925 }
Mark Brown7d5e5252012-02-17 15:58:25 -0800926 break;
927
Xiubo Liafcc00b2015-12-03 17:31:52 +0800928#ifdef CONFIG_64BIT
929 case 64:
930 switch (reg_endian) {
931 case REGMAP_ENDIAN_BIG:
932 map->format.format_reg = regmap_format_64_be;
933 break;
Tony Lindgren55562442016-09-15 13:56:11 -0700934 case REGMAP_ENDIAN_LITTLE:
935 map->format.format_reg = regmap_format_64_le;
936 break;
Xiubo Liafcc00b2015-12-03 17:31:52 +0800937 case REGMAP_ENDIAN_NATIVE:
938 map->format.format_reg = regmap_format_64_native;
939 break;
940 default:
Baolin Wang8698b932017-11-01 10:11:55 +0800941 goto err_hwlock;
Xiubo Liafcc00b2015-12-03 17:31:52 +0800942 }
943 break;
944#endif
945
Mark Brownb83a3132011-05-11 19:59:58 +0200946 default:
Baolin Wang8698b932017-11-01 10:11:55 +0800947 goto err_hwlock;
Mark Brownb83a3132011-05-11 19:59:58 +0200948 }
949
Mark Brown8a819ff2013-03-04 09:04:51 +0800950 if (val_endian == REGMAP_ENDIAN_NATIVE)
951 map->format.parse_inplace = regmap_parse_inplace_noop;
952
Mark Brownb83a3132011-05-11 19:59:58 +0200953 switch (config->val_bits) {
954 case 8:
955 map->format.format_val = regmap_format_8;
956 map->format.parse_val = regmap_parse_8;
Mark Brown8a819ff2013-03-04 09:04:51 +0800957 map->format.parse_inplace = regmap_parse_inplace_noop;
Mark Brownb83a3132011-05-11 19:59:58 +0200958 break;
959 case 16:
Stephen Warren141eba22012-05-24 10:47:26 -0600960 switch (val_endian) {
961 case REGMAP_ENDIAN_BIG:
962 map->format.format_val = regmap_format_16_be;
963 map->format.parse_val = regmap_parse_16_be;
Mark Brown8a819ff2013-03-04 09:04:51 +0800964 map->format.parse_inplace = regmap_parse_16_be_inplace;
Stephen Warren141eba22012-05-24 10:47:26 -0600965 break;
Xiubo Li4aa8c062014-04-02 18:09:07 +0800966 case REGMAP_ENDIAN_LITTLE:
967 map->format.format_val = regmap_format_16_le;
968 map->format.parse_val = regmap_parse_16_le;
969 map->format.parse_inplace = regmap_parse_16_le_inplace;
970 break;
Stephen Warren141eba22012-05-24 10:47:26 -0600971 case REGMAP_ENDIAN_NATIVE:
972 map->format.format_val = regmap_format_16_native;
973 map->format.parse_val = regmap_parse_16_native;
974 break;
975 default:
Baolin Wang8698b932017-11-01 10:11:55 +0800976 goto err_hwlock;
Stephen Warren141eba22012-05-24 10:47:26 -0600977 }
Mark Brownb83a3132011-05-11 19:59:58 +0200978 break;
Marc Reillyea279fc2012-03-16 12:11:42 +1100979 case 24:
Stephen Warren141eba22012-05-24 10:47:26 -0600980 if (val_endian != REGMAP_ENDIAN_BIG)
Baolin Wang8698b932017-11-01 10:11:55 +0800981 goto err_hwlock;
Marc Reillyea279fc2012-03-16 12:11:42 +1100982 map->format.format_val = regmap_format_24;
983 map->format.parse_val = regmap_parse_24;
984 break;
Mark Brown7d5e5252012-02-17 15:58:25 -0800985 case 32:
Stephen Warren141eba22012-05-24 10:47:26 -0600986 switch (val_endian) {
987 case REGMAP_ENDIAN_BIG:
988 map->format.format_val = regmap_format_32_be;
989 map->format.parse_val = regmap_parse_32_be;
Mark Brown8a819ff2013-03-04 09:04:51 +0800990 map->format.parse_inplace = regmap_parse_32_be_inplace;
Stephen Warren141eba22012-05-24 10:47:26 -0600991 break;
Xiubo Li4aa8c062014-04-02 18:09:07 +0800992 case REGMAP_ENDIAN_LITTLE:
993 map->format.format_val = regmap_format_32_le;
994 map->format.parse_val = regmap_parse_32_le;
995 map->format.parse_inplace = regmap_parse_32_le_inplace;
996 break;
Stephen Warren141eba22012-05-24 10:47:26 -0600997 case REGMAP_ENDIAN_NATIVE:
998 map->format.format_val = regmap_format_32_native;
999 map->format.parse_val = regmap_parse_32_native;
1000 break;
1001 default:
Baolin Wang8698b932017-11-01 10:11:55 +08001002 goto err_hwlock;
Stephen Warren141eba22012-05-24 10:47:26 -06001003 }
Mark Brown7d5e5252012-02-17 15:58:25 -08001004 break;
Xiubo Liafcc00b2015-12-03 17:31:52 +08001005#ifdef CONFIG_64BIT
Dan Carpenter782035e2015-12-12 15:59:43 +03001006 case 64:
Xiubo Liafcc00b2015-12-03 17:31:52 +08001007 switch (val_endian) {
1008 case REGMAP_ENDIAN_BIG:
1009 map->format.format_val = regmap_format_64_be;
1010 map->format.parse_val = regmap_parse_64_be;
1011 map->format.parse_inplace = regmap_parse_64_be_inplace;
1012 break;
1013 case REGMAP_ENDIAN_LITTLE:
1014 map->format.format_val = regmap_format_64_le;
1015 map->format.parse_val = regmap_parse_64_le;
1016 map->format.parse_inplace = regmap_parse_64_le_inplace;
1017 break;
1018 case REGMAP_ENDIAN_NATIVE:
1019 map->format.format_val = regmap_format_64_native;
1020 map->format.parse_val = regmap_parse_64_native;
1021 break;
1022 default:
Baolin Wang8698b932017-11-01 10:11:55 +08001023 goto err_hwlock;
Xiubo Liafcc00b2015-12-03 17:31:52 +08001024 }
1025 break;
1026#endif
Mark Brownb83a3132011-05-11 19:59:58 +02001027 }
1028
Stephen Warren141eba22012-05-24 10:47:26 -06001029 if (map->format.format_write) {
1030 if ((reg_endian != REGMAP_ENDIAN_BIG) ||
1031 (val_endian != REGMAP_ENDIAN_BIG))
Baolin Wang8698b932017-11-01 10:11:55 +08001032 goto err_hwlock;
Markus Pargmann67921a12015-08-21 10:26:42 +02001033 map->use_single_write = true;
Stephen Warren141eba22012-05-24 10:47:26 -06001034 }
Mark Brown7a647612012-04-30 23:26:32 +01001035
Mark Brownb83a3132011-05-11 19:59:58 +02001036 if (!map->format.format_write &&
1037 !(map->format.format_reg && map->format.format_val))
Baolin Wang8698b932017-11-01 10:11:55 +08001038 goto err_hwlock;
Mark Brownb83a3132011-05-11 19:59:58 +02001039
Mark Brown82159ba2012-01-18 10:52:25 +00001040 map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
Mark Brownb83a3132011-05-11 19:59:58 +02001041 if (map->work_buf == NULL) {
1042 ret = -ENOMEM;
Baolin Wang8698b932017-11-01 10:11:55 +08001043 goto err_hwlock;
Mark Brownb83a3132011-05-11 19:59:58 +02001044 }
1045
Andrey Smirnovd2a58842013-01-27 10:49:05 -08001046 if (map->format.format_write) {
1047 map->defer_caching = false;
Andrey Smirnov07c320d2013-01-12 12:54:13 -08001048 map->reg_write = _regmap_bus_formatted_write;
Andrey Smirnovd2a58842013-01-27 10:49:05 -08001049 } else if (map->format.format_val) {
1050 map->defer_caching = true;
Andrey Smirnov07c320d2013-01-12 12:54:13 -08001051 map->reg_write = _regmap_bus_raw_write;
Andrey Smirnovd2a58842013-01-27 10:49:05 -08001052 }
1053
1054skip_format_initialization:
Andrey Smirnov07c320d2013-01-12 12:54:13 -08001055
Krystian Garbaciak6863ca62012-06-15 11:23:56 +01001056 map->range_tree = RB_ROOT;
Mark Browne3549cd2012-10-02 20:17:15 +01001057 for (i = 0; i < config->num_ranges; i++) {
Krystian Garbaciak6863ca62012-06-15 11:23:56 +01001058 const struct regmap_range_cfg *range_cfg = &config->ranges[i];
1059 struct regmap_range_node *new;
1060
1061 /* Sanity check */
Mark Brown061adc02012-10-03 12:17:51 +01001062 if (range_cfg->range_max < range_cfg->range_min) {
1063 dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
1064 range_cfg->range_max, range_cfg->range_min);
Krystian Garbaciak6863ca62012-06-15 11:23:56 +01001065 goto err_range;
Mark Brown061adc02012-10-03 12:17:51 +01001066 }
1067
1068 if (range_cfg->range_max > map->max_register) {
1069 dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
1070 range_cfg->range_max, map->max_register);
1071 goto err_range;
1072 }
1073
1074 if (range_cfg->selector_reg > map->max_register) {
1075 dev_err(map->dev,
1076 "Invalid range %d: selector out of map\n", i);
1077 goto err_range;
1078 }
1079
1080 if (range_cfg->window_len == 0) {
1081 dev_err(map->dev, "Invalid range %d: window_len 0\n",
1082 i);
1083 goto err_range;
1084 }
Krystian Garbaciak6863ca62012-06-15 11:23:56 +01001085
1086 /* Make sure, that this register range has no selector
1087 or data window within its boundary */
Mark Browne3549cd2012-10-02 20:17:15 +01001088 for (j = 0; j < config->num_ranges; j++) {
Krystian Garbaciak6863ca62012-06-15 11:23:56 +01001089 unsigned sel_reg = config->ranges[j].selector_reg;
1090 unsigned win_min = config->ranges[j].window_start;
1091 unsigned win_max = win_min +
1092 config->ranges[j].window_len - 1;
1093
Philipp Zabelf161d222013-07-23 12:16:02 +02001094 /* Allow data window inside its own virtual range */
1095 if (j == i)
1096 continue;
1097
Krystian Garbaciak6863ca62012-06-15 11:23:56 +01001098 if (range_cfg->range_min <= sel_reg &&
1099 sel_reg <= range_cfg->range_max) {
Mark Brown061adc02012-10-03 12:17:51 +01001100 dev_err(map->dev,
1101 "Range %d: selector for %d in window\n",
1102 i, j);
Krystian Garbaciak6863ca62012-06-15 11:23:56 +01001103 goto err_range;
1104 }
1105
1106 if (!(win_max < range_cfg->range_min ||
1107 win_min > range_cfg->range_max)) {
Mark Brown061adc02012-10-03 12:17:51 +01001108 dev_err(map->dev,
1109 "Range %d: window for %d in window\n",
1110 i, j);
Krystian Garbaciak6863ca62012-06-15 11:23:56 +01001111 goto err_range;
1112 }
1113 }
1114
1115 new = kzalloc(sizeof(*new), GFP_KERNEL);
1116 if (new == NULL) {
1117 ret = -ENOMEM;
1118 goto err_range;
1119 }
1120
Mark Brown4b020b32012-10-03 13:13:16 +01001121 new->map = map;
Mark Brownd058bb42012-10-03 12:40:47 +01001122 new->name = range_cfg->name;
Krystian Garbaciak6863ca62012-06-15 11:23:56 +01001123 new->range_min = range_cfg->range_min;
1124 new->range_max = range_cfg->range_max;
1125 new->selector_reg = range_cfg->selector_reg;
1126 new->selector_mask = range_cfg->selector_mask;
1127 new->selector_shift = range_cfg->selector_shift;
1128 new->window_start = range_cfg->window_start;
1129 new->window_len = range_cfg->window_len;
1130
Nenghua Cao53e87f82014-02-21 16:05:45 +08001131 if (!_regmap_range_add(map, new)) {
Mark Brown061adc02012-10-03 12:17:51 +01001132 dev_err(map->dev, "Failed to add range %d\n", i);
Krystian Garbaciak6863ca62012-06-15 11:23:56 +01001133 kfree(new);
1134 goto err_range;
1135 }
1136
1137 if (map->selector_work_buf == NULL) {
1138 map->selector_work_buf =
1139 kzalloc(map->format.buf_size, GFP_KERNEL);
1140 if (map->selector_work_buf == NULL) {
1141 ret = -ENOMEM;
1142 goto err_range;
1143 }
1144 }
1145 }
Mark Brown052d2cd2011-11-21 19:05:13 +00001146
Lars-Peter Clausene5e3b8a2011-11-16 16:28:16 +01001147 ret = regcache_init(map, config);
Mark Brown0ff3e622012-10-04 17:39:13 +01001148 if (ret != 0)
Krystian Garbaciak6863ca62012-06-15 11:23:56 +01001149 goto err_range;
1150
Daeseok Youna7a037c2014-04-01 19:46:43 +09001151 if (dev) {
Michal Simek6cfec042014-02-10 16:22:33 +01001152 ret = regmap_attach_dev(dev, map, config);
1153 if (ret != 0)
1154 goto err_regcache;
David Lechner9b947a12018-02-19 15:43:02 -06001155 } else {
1156 regmap_debugfs_init(map, config->name);
Daeseok Youna7a037c2014-04-01 19:46:43 +09001157 }
Mark Brown72b39f62012-05-08 17:44:40 +01001158
Mark Brownb83a3132011-05-11 19:59:58 +02001159 return map;
1160
Michal Simek6cfec042014-02-10 16:22:33 +01001161err_regcache:
Mark Brown72b39f62012-05-08 17:44:40 +01001162 regcache_exit(map);
Krystian Garbaciak6863ca62012-06-15 11:23:56 +01001163err_range:
1164 regmap_range_exit(map);
Lars-Peter Clausen58072cb2011-11-10 18:15:15 +01001165 kfree(map->work_buf);
Baolin Wang8698b932017-11-01 10:11:55 +08001166err_hwlock:
Baolin Wanga1a68fc2017-11-20 15:27:28 +08001167 if (map->hwlock)
Mark Brown267f3e42017-11-03 19:50:20 +00001168 hwspin_lock_free(map->hwlock);
Bartosz Golaszewski8253bb32017-12-13 17:25:31 +01001169err_name:
1170 kfree_const(map->name);
Mark Brownb83a3132011-05-11 19:59:58 +02001171err_map:
1172 kfree(map);
1173err:
1174 return ERR_PTR(ret);
1175}
Nicolas Boichat3cfe7a72015-07-08 14:30:18 +08001176EXPORT_SYMBOL_GPL(__regmap_init);
Mark Brownb83a3132011-05-11 19:59:58 +02001177
/* devres release callback: tear down the regmap owned by this devres entry. */
static void devm_regmap_release(struct device *dev, void *res)
{
	struct regmap **map = res;

	regmap_exit(*map);
}
1182
Nicolas Boichat3cfe7a72015-07-08 14:30:18 +08001183struct regmap *__devm_regmap_init(struct device *dev,
1184 const struct regmap_bus *bus,
1185 void *bus_context,
1186 const struct regmap_config *config,
1187 struct lock_class_key *lock_key,
1188 const char *lock_name)
Mark Brownc0eb4672012-01-30 19:56:52 +00001189{
1190 struct regmap **ptr, *regmap;
1191
1192 ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
1193 if (!ptr)
1194 return ERR_PTR(-ENOMEM);
1195
Nicolas Boichat3cfe7a72015-07-08 14:30:18 +08001196 regmap = __regmap_init(dev, bus, bus_context, config,
1197 lock_key, lock_name);
Mark Brownc0eb4672012-01-30 19:56:52 +00001198 if (!IS_ERR(regmap)) {
1199 *ptr = regmap;
1200 devres_add(dev, ptr);
1201 } else {
1202 devres_free(ptr);
1203 }
1204
1205 return regmap;
1206}
Nicolas Boichat3cfe7a72015-07-08 14:30:18 +08001207EXPORT_SYMBOL_GPL(__devm_regmap_init);
Mark Brownc0eb4672012-01-30 19:56:52 +00001208
Srinivas Kandagatla67252282013-06-11 13:18:15 +01001209static void regmap_field_init(struct regmap_field *rm_field,
1210 struct regmap *regmap, struct reg_field reg_field)
1211{
Srinivas Kandagatla67252282013-06-11 13:18:15 +01001212 rm_field->regmap = regmap;
1213 rm_field->reg = reg_field.reg;
1214 rm_field->shift = reg_field.lsb;
Maxime Coquelin921cc292015-06-16 13:53:19 +02001215 rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
Kuninori Morimotoa0102372013-09-01 20:30:50 -07001216 rm_field->id_size = reg_field.id_size;
1217 rm_field->id_offset = reg_field.id_offset;
Srinivas Kandagatla67252282013-06-11 13:18:15 +01001218}
1219
1220/**
Charles Keepax2cf8e2d2017-01-12 11:17:39 +00001221 * devm_regmap_field_alloc() - Allocate and initialise a register field.
Srinivas Kandagatla67252282013-06-11 13:18:15 +01001222 *
1223 * @dev: Device that will be interacted with
1224 * @regmap: regmap bank in which this register field is located.
1225 * @reg_field: Register field with in the bank.
1226 *
1227 * The return value will be an ERR_PTR() on error or a valid pointer
1228 * to a struct regmap_field. The regmap_field will be automatically freed
1229 * by the device management code.
1230 */
1231struct regmap_field *devm_regmap_field_alloc(struct device *dev,
1232 struct regmap *regmap, struct reg_field reg_field)
1233{
1234 struct regmap_field *rm_field = devm_kzalloc(dev,
1235 sizeof(*rm_field), GFP_KERNEL);
1236 if (!rm_field)
1237 return ERR_PTR(-ENOMEM);
1238
1239 regmap_field_init(rm_field, regmap, reg_field);
1240
1241 return rm_field;
1242
1243}
1244EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
1245
/**
 * devm_regmap_field_free() - Free a register field allocated using
 *                            devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Release a field obtained from devm_regmap_field_alloc() ahead of the
 * device-managed teardown. Most drivers never need to call this, since
 * devm memory is reclaimed automatically over the device-driver
 * life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);
1263
1264/**
Charles Keepax2cf8e2d2017-01-12 11:17:39 +00001265 * regmap_field_alloc() - Allocate and initialise a register field.
Srinivas Kandagatla67252282013-06-11 13:18:15 +01001266 *
1267 * @regmap: regmap bank in which this register field is located.
1268 * @reg_field: Register field with in the bank.
1269 *
1270 * The return value will be an ERR_PTR() on error or a valid pointer
1271 * to a struct regmap_field. The regmap_field should be freed by the
1272 * user once its finished working with it using regmap_field_free().
1273 */
1274struct regmap_field *regmap_field_alloc(struct regmap *regmap,
1275 struct reg_field reg_field)
1276{
1277 struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
1278
1279 if (!rm_field)
1280 return ERR_PTR(-ENOMEM);
1281
1282 regmap_field_init(rm_field, regmap, reg_field);
1283
1284 return rm_field;
1285}
1286EXPORT_SYMBOL_GPL(regmap_field_alloc);
1287
/**
 * regmap_field_free() - Free register field allocated using
 *                       regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 *
 * Only for fields obtained from regmap_field_alloc(); device-managed
 * fields are released automatically instead.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);
1299
/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration. Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache. This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	/* Drop the old cache and debugfs entries before switching config. */
	regcache_exit(map);
	regmap_debugfs_exit(map);

	/*
	 * Refresh only the access-description fields relevant to caching;
	 * bus/format setup done at __regmap_init() time is left untouched.
	 * NOTE(review): map->name and map->hwlock are also untouched here —
	 * presumably intentional, confirm if config->name could differ.
	 */
	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	regmap_debugfs_init(map, config->name);

	/* Start from a clean state: cache active, hardware reachable. */
	map->cache_bypass = false;
	map->cache_only = false;

	/* Build the new cache from @config; returns 0 or a negative errno. */
	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);
Mark Brownbf315172011-12-03 17:06:20 +00001336
/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 *
 * Tears down everything set up at init time: register cache, debugfs
 * entries, virtual-range bookkeeping, the bus context, scratch buffers,
 * the pool of free async descriptors, the optional hardware spinlock,
 * and finally the map itself.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	/* Give bus drivers with a per-map context a chance to release it. */
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	/*
	 * Drain the free-list of async descriptors; each entry owns its
	 * own work buffer. NOTE(review): map->async_list (in-flight ops)
	 * is not drained here — presumably callers must have completed
	 * all async I/O before regmap_exit(); confirm with callers.
	 */
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	/* Name may have been copied with kstrdup_const() at init time. */
	kfree_const(map->name);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
1366
Mark Brown72b39f62012-05-08 17:44:40 +01001367static int dev_get_regmap_match(struct device *dev, void *res, void *data)
1368{
1369 struct regmap **r = res;
1370 if (!r || !*r) {
1371 WARN_ON(!r || !*r);
1372 return 0;
1373 }
1374
1375 /* If the user didn't specify a name match any */
1376 if (data)
1377 return (*r)->name == data;
1378 else
1379 return 1;
1380}
1381
1382/**
Charles Keepax2cf8e2d2017-01-12 11:17:39 +00001383 * dev_get_regmap() - Obtain the regmap (if any) for a device
Mark Brown72b39f62012-05-08 17:44:40 +01001384 *
1385 * @dev: Device to retrieve the map for
1386 * @name: Optional name for the register map, usually NULL.
1387 *
1388 * Returns the regmap for the device if one is present, or NULL. If
1389 * name is specified then it must match the name specified when
1390 * registering the device, if it is NULL then the first regmap found
1391 * will be used. Devices with multiple register maps are very rare,
1392 * generic code should normally not need to specify a name.
1393 */
1394struct regmap *dev_get_regmap(struct device *dev, const char *name)
1395{
1396 struct regmap **r = devres_find(dev, dev_get_regmap_release,
1397 dev_get_regmap_match, (void *)name);
1398
1399 if (!r)
1400 return NULL;
1401 return *r;
1402}
1403EXPORT_SYMBOL_GPL(dev_get_regmap);
1404
Tuomas Tynkkynen8d7d3972014-07-21 18:38:47 +03001405/**
Charles Keepax2cf8e2d2017-01-12 11:17:39 +00001406 * regmap_get_device() - Obtain the device from a regmap
Tuomas Tynkkynen8d7d3972014-07-21 18:38:47 +03001407 *
1408 * @map: Register map to operate on.
1409 *
1410 * Returns the underlying device that the regmap has been created for.
1411 */
1412struct device *regmap_get_device(struct regmap *map)
1413{
1414 return map->dev;
1415}
Mark Brownfa2fbe42014-07-25 18:30:31 +01001416EXPORT_SYMBOL_GPL(regmap_get_device);
Tuomas Tynkkynen8d7d3972014-07-21 18:38:47 +03001417
/*
 * Translate a virtual register number into its paged data-window
 * equivalent, programming the page selector register if required.
 *
 * @map: Map the access is made on.
 * @reg: In: virtual register number; out: the register inside the
 *	 range's data window that the caller should actually access.
 * @range: Virtual range that *reg belongs to.
 * @val_num: Number of registers in the access (>1 for bulk).
 *
 * Returns 0 on success, -EINVAL if a bulk access would cross a range or
 * page boundary, or the error from writing the selector register.
 * NOTE(review): map->work_buf is temporarily swapped below, so this
 * appears to rely on the map lock being held by the caller — confirm.
 */
static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;	/* offset of *reg within its page */
	unsigned int win_page;		/* index of the page holding *reg */
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have selector register inside data window.
	   In that case, selector register is located on every page and
	   it needs no page switching, when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		/* Program the page number into the selector field. */
		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	/* Redirect the caller's access into the data window. */
	*reg = range->window_start + win_offset;

	return 0;
}
1465
Tony Lindgrenf50e38c2016-09-15 13:56:10 -07001466static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
1467 unsigned long mask)
1468{
1469 u8 *buf;
1470 int i;
1471
1472 if (!mask || !map->work_buf)
1473 return;
1474
1475 buf = map->work_buf;
1476
1477 for (i = 0; i < max_bytes; i++)
1478 buf[i] |= (mask >> (8 * i)) & 0xff;
1479}
1480
Charles Keepax7ef2c6b2018-02-22 12:59:12 +00001481static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
1482 const void *val, size_t val_len)
Mark Brownb83a3132011-05-11 19:59:58 +02001483{
Mark Brown98bc7df2012-10-04 17:31:11 +01001484 struct regmap_range_node *range;
Mark Brown0d509f22013-01-27 22:07:38 +08001485 unsigned long flags;
Mark Brown0d509f22013-01-27 22:07:38 +08001486 void *work_val = map->work_buf + map->format.reg_bytes +
1487 map->format.pad_bytes;
Mark Brownb83a3132011-05-11 19:59:58 +02001488 void *buf;
1489 int ret = -ENOTSUPP;
1490 size_t len;
Mark Brown73304782011-07-24 11:46:20 +01001491 int i;
1492
Mark Brownf1b5c5c2013-03-13 19:18:13 +00001493 WARN_ON(!map->bus);
Andrey Smirnovd2a58842013-01-27 10:49:05 -08001494
Mark Brown73304782011-07-24 11:46:20 +01001495 /* Check for unwritable registers before we start */
Han Nandor8b9f9d42019-04-02 08:01:22 +00001496 for (i = 0; i < val_len / map->format.val_bytes; i++)
1497 if (!regmap_writeable(map,
1498 reg + regmap_get_offset(map, i)))
1499 return -EINVAL;
Mark Brownb83a3132011-05-11 19:59:58 +02001500
Laxman Dewanganc9157192012-02-10 21:30:27 +05301501 if (!map->cache_bypass && map->format.parse_val) {
1502 unsigned int ival;
1503 int val_bytes = map->format.val_bytes;
1504 for (i = 0; i < val_len / val_bytes; i++) {
Stephen Warren5a08d152013-03-20 17:02:02 -06001505 ival = map->format.parse_val(val + (i * val_bytes));
Xiubo Lica747be2016-01-04 18:00:33 +08001506 ret = regcache_write(map,
1507 reg + regmap_get_offset(map, i),
Stephen Warrenf01ee602012-04-09 13:40:24 -06001508 ival);
Laxman Dewanganc9157192012-02-10 21:30:27 +05301509 if (ret) {
1510 dev_err(map->dev,
Mark Brown6d04b8a2012-10-26 19:05:32 +01001511 "Error in caching of register: %x ret: %d\n",
Laxman Dewanganc9157192012-02-10 21:30:27 +05301512 reg + i, ret);
1513 return ret;
1514 }
1515 }
1516 if (map->cache_only) {
1517 map->cache_dirty = true;
1518 return 0;
1519 }
1520 }
1521
Mark Brown98bc7df2012-10-04 17:31:11 +01001522 range = _regmap_range_lookup(map, reg);
1523 if (range) {
Mark Brown8a2ceac2012-10-04 18:20:18 +01001524 int val_num = val_len / map->format.val_bytes;
1525 int win_offset = (reg - range->range_min) % range->window_len;
1526 int win_residue = range->window_len - win_offset;
1527
1528 /* If the write goes beyond the end of the window split it */
1529 while (val_num > win_residue) {
Fabio Estevam1a61cfe2012-10-25 14:07:18 -02001530 dev_dbg(map->dev, "Writing window %d/%zu\n",
Mark Brown8a2ceac2012-10-04 18:20:18 +01001531 win_residue, val_len / map->format.val_bytes);
Charles Keepax7ef2c6b2018-02-22 12:59:12 +00001532 ret = _regmap_raw_write_impl(map, reg, val,
1533 win_residue *
1534 map->format.val_bytes);
Mark Brown8a2ceac2012-10-04 18:20:18 +01001535 if (ret != 0)
1536 return ret;
1537
1538 reg += win_residue;
1539 val_num -= win_residue;
1540 val += win_residue * map->format.val_bytes;
1541 val_len -= win_residue * map->format.val_bytes;
1542
1543 win_offset = (reg - range->range_min) %
1544 range->window_len;
1545 win_residue = range->window_len - win_offset;
1546 }
1547
1548 ret = _regmap_select_page(map, &reg, range, val_num);
Mark Brown0ff3e622012-10-04 17:39:13 +01001549 if (ret != 0)
Mark Brown98bc7df2012-10-04 17:31:11 +01001550 return ret;
1551 }
Krystian Garbaciak6863ca62012-06-15 11:23:56 +01001552
Marc Reillyd939fb92012-03-16 12:11:43 +11001553 map->format.format_reg(map->work_buf, reg, map->reg_shift);
Tony Lindgrenf50e38c2016-09-15 13:56:10 -07001554 regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
1555 map->write_flag_mask);
Lars-Peter Clausen6f306442011-09-05 20:46:32 +02001556
Mark Brown651e0132013-10-08 18:37:36 +01001557 /*
1558 * Essentially all I/O mechanisms will be faster with a single
1559 * buffer to write. Since register syncs often generate raw
1560 * writes of single registers optimise that case.
1561 */
1562 if (val != work_val && val_len == map->format.val_bytes) {
1563 memcpy(work_val, val, map->format.val_bytes);
1564 val = work_val;
1565 }
1566
Mark Brown0a819802013-10-09 12:28:52 +01001567 if (map->async && map->bus->async_write) {
Mark Brown7e09a972013-10-07 23:00:24 +01001568 struct regmap_async *async;
Mark Brown0d509f22013-01-27 22:07:38 +08001569
Philipp Zabelc6b570d2015-03-09 12:20:13 +01001570 trace_regmap_async_write_start(map, reg, val_len);
Mark Brownfe7d4cc2013-02-21 19:05:48 +00001571
Mark Brown7e09a972013-10-07 23:00:24 +01001572 spin_lock_irqsave(&map->async_lock, flags);
1573 async = list_first_entry_or_null(&map->async_free,
1574 struct regmap_async,
1575 list);
1576 if (async)
1577 list_del(&async->list);
1578 spin_unlock_irqrestore(&map->async_lock, flags);
1579
1580 if (!async) {
1581 async = map->bus->async_alloc();
1582 if (!async)
1583 return -ENOMEM;
1584
1585 async->work_buf = kzalloc(map->format.buf_size,
1586 GFP_KERNEL | GFP_DMA);
1587 if (!async->work_buf) {
1588 kfree(async);
1589 return -ENOMEM;
1590 }
Mark Brown0d509f22013-01-27 22:07:38 +08001591 }
1592
Mark Brown0d509f22013-01-27 22:07:38 +08001593 async->map = map;
1594
1595 /* If the caller supplied the value we can use it safely. */
1596 memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
1597 map->format.reg_bytes + map->format.val_bytes);
Mark Brown0d509f22013-01-27 22:07:38 +08001598
1599 spin_lock_irqsave(&map->async_lock, flags);
1600 list_add_tail(&async->list, &map->async_list);
1601 spin_unlock_irqrestore(&map->async_lock, flags);
1602
Mark Brown04c50cc2013-10-10 22:38:29 +01001603 if (val != work_val)
1604 ret = map->bus->async_write(map->bus_context,
1605 async->work_buf,
1606 map->format.reg_bytes +
1607 map->format.pad_bytes,
1608 val, val_len, async);
1609 else
1610 ret = map->bus->async_write(map->bus_context,
1611 async->work_buf,
1612 map->format.reg_bytes +
1613 map->format.pad_bytes +
1614 val_len, NULL, 0, async);
Mark Brown0d509f22013-01-27 22:07:38 +08001615
1616 if (ret != 0) {
1617 dev_err(map->dev, "Failed to schedule write: %d\n",
1618 ret);
1619
1620 spin_lock_irqsave(&map->async_lock, flags);
Mark Brown7e09a972013-10-07 23:00:24 +01001621 list_move(&async->list, &map->async_free);
Mark Brown0d509f22013-01-27 22:07:38 +08001622 spin_unlock_irqrestore(&map->async_lock, flags);
Mark Brown0d509f22013-01-27 22:07:38 +08001623 }
Mark Brownf951b652013-03-27 13:08:44 +00001624
1625 return ret;
Mark Brown0d509f22013-01-27 22:07:38 +08001626 }
1627
Philipp Zabelc6b570d2015-03-09 12:20:13 +01001628 trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
Mark Brownfb2736b2011-07-24 21:30:55 +01001629
Mark Brown2547e202011-07-20 21:47:22 +01001630 /* If we're doing a single register write we can probably just
1631 * send the work_buf directly, otherwise try to do a gather
1632 * write.
1633 */
Mark Brown0d509f22013-01-27 22:07:38 +08001634 if (val == work_val)
Stephen Warren0135bbc2012-04-04 15:48:30 -06001635 ret = map->bus->write(map->bus_context, map->work_buf,
Mark Brown82159ba2012-01-18 10:52:25 +00001636 map->format.reg_bytes +
1637 map->format.pad_bytes +
1638 val_len);
Mark Brown2547e202011-07-20 21:47:22 +01001639 else if (map->bus->gather_write)
Stephen Warren0135bbc2012-04-04 15:48:30 -06001640 ret = map->bus->gather_write(map->bus_context, map->work_buf,
Mark Brown82159ba2012-01-18 10:52:25 +00001641 map->format.reg_bytes +
1642 map->format.pad_bytes,
Mark Brownb83a3132011-05-11 19:59:58 +02001643 val, val_len);
1644
Mark Brown2547e202011-07-20 21:47:22 +01001645 /* If that didn't work fall back on linearising by hand. */
Mark Brownb83a3132011-05-11 19:59:58 +02001646 if (ret == -ENOTSUPP) {
Mark Brown82159ba2012-01-18 10:52:25 +00001647 len = map->format.reg_bytes + map->format.pad_bytes + val_len;
1648 buf = kzalloc(len, GFP_KERNEL);
Mark Brownb83a3132011-05-11 19:59:58 +02001649 if (!buf)
1650 return -ENOMEM;
1651
1652 memcpy(buf, map->work_buf, map->format.reg_bytes);
Mark Brown82159ba2012-01-18 10:52:25 +00001653 memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
1654 val, val_len);
Stephen Warren0135bbc2012-04-04 15:48:30 -06001655 ret = map->bus->write(map->bus_context, buf, len);
Mark Brownb83a3132011-05-11 19:59:58 +02001656
1657 kfree(buf);
Elaine Zhang815806e2016-08-18 17:01:55 +08001658 } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
Nikita Yushchenkof0aa1ce2016-09-22 12:02:25 +03001659 /* regcache_drop_region() takes lock that we already have,
1660 * thus call map->cache_ops->drop() directly
1661 */
1662 if (map->cache_ops && map->cache_ops->drop)
1663 map->cache_ops->drop(map, reg, reg + 1);
Mark Brownb83a3132011-05-11 19:59:58 +02001664 }
1665
Philipp Zabelc6b570d2015-03-09 12:20:13 +01001666 trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
Mark Brownfb2736b2011-07-24 21:30:55 +01001667
Mark Brownb83a3132011-05-11 19:59:58 +02001668 return ret;
1669}
1670
Mark Brown221ad7f2013-03-26 21:24:20 +00001671/**
1672 * regmap_can_raw_write - Test if regmap_raw_write() is supported
1673 *
1674 * @map: Map to check.
1675 */
1676bool regmap_can_raw_write(struct regmap *map)
1677{
Markus Pargmann07ea4002015-08-12 12:12:33 +02001678 return map->bus && map->bus->write && map->format.format_val &&
1679 map->format.format_reg;
Mark Brown221ad7f2013-03-26 21:24:20 +00001680}
1681EXPORT_SYMBOL_GPL(regmap_can_raw_write);
1682
/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 *
 * Return: Maximum number of bytes a single raw read may transfer; a
 * value of 0 indicates no limit (mirroring how max_raw_write is
 * interpreted by the write path).
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
1693
/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 *
 * Return: Maximum number of bytes a single raw write may transfer; a
 * value of 0 indicates no limit (see _regmap_raw_write()).
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
1704
Andrey Smirnov07c320d2013-01-12 12:54:13 -08001705static int _regmap_bus_formatted_write(void *context, unsigned int reg,
1706 unsigned int val)
1707{
1708 int ret;
1709 struct regmap_range_node *range;
1710 struct regmap *map = context;
1711
Mark Brownf1b5c5c2013-03-13 19:18:13 +00001712 WARN_ON(!map->bus || !map->format.format_write);
Andrey Smirnov07c320d2013-01-12 12:54:13 -08001713
1714 range = _regmap_range_lookup(map, reg);
1715 if (range) {
1716 ret = _regmap_select_page(map, &reg, range, 1);
1717 if (ret != 0)
1718 return ret;
1719 }
1720
1721 map->format.format_write(map, reg, val);
1722
Philipp Zabelc6b570d2015-03-09 12:20:13 +01001723 trace_regmap_hw_write_start(map, reg, 1);
Andrey Smirnov07c320d2013-01-12 12:54:13 -08001724
1725 ret = map->bus->write(map->bus_context, map->work_buf,
1726 map->format.buf_size);
1727
Philipp Zabelc6b570d2015-03-09 12:20:13 +01001728 trace_regmap_hw_write_done(map, reg, 1);
Andrey Smirnov07c320d2013-01-12 12:54:13 -08001729
1730 return ret;
1731}
1732
/* reg_write() implementation for buses that do register-level access. */
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}
1740
Andrey Smirnov07c320d2013-01-12 12:54:13 -08001741static int _regmap_bus_raw_write(void *context, unsigned int reg,
1742 unsigned int val)
1743{
1744 struct regmap *map = context;
1745
Mark Brownf1b5c5c2013-03-13 19:18:13 +00001746 WARN_ON(!map->bus || !map->format.format_val);
Andrey Smirnov07c320d2013-01-12 12:54:13 -08001747
1748 map->format.format_val(map->work_buf + map->format.reg_bytes
1749 + map->format.pad_bytes, val, 0);
Charles Keepax7ef2c6b2018-02-22 12:59:12 +00001750 return _regmap_raw_write_impl(map, reg,
1751 map->work_buf +
1752 map->format.reg_bytes +
1753 map->format.pad_bytes,
1754 map->format.val_bytes);
Andrey Smirnov07c320d2013-01-12 12:54:13 -08001755}
1756
Andrey Smirnovd2a58842013-01-27 10:49:05 -08001757static inline void *_regmap_map_get_context(struct regmap *map)
1758{
1759 return (map->bus) ? map : map->bus_context;
1760}
1761
/*
 * Write a single register; callers hold the map's lock (see
 * regmap_write()/regmap_write_async()).
 *
 * The cache is updated before the hardware is touched so that a failed
 * cache update aborts the whole operation; in cache-only mode the
 * hardware write is skipped entirely and the cache marked dirty.
 */
int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	/* Reject registers the map declares non-writeable. */
	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			/* Hardware untouched; remember a sync is needed. */
			map->cache_dirty = true;
			return 0;
		}
	}

	/* Optional debug logging, compiled in via LOG_DEVICE. */
	if (regmap_should_log(map))
		dev_info(map->dev, "%x <= %x\n", reg, val);

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}
1788
1789/**
Charles Keepax2cf8e2d2017-01-12 11:17:39 +00001790 * regmap_write() - Write a value to a single register
Mark Brownb83a3132011-05-11 19:59:58 +02001791 *
1792 * @map: Register map to write to
1793 * @reg: Register to write to
1794 * @val: Value to be written
1795 *
1796 * A value of zero will be returned on success, a negative errno will
1797 * be returned in error cases.
1798 */
1799int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
1800{
1801 int ret;
1802
Xiubo Lifcac0232015-12-16 17:45:32 +08001803 if (!IS_ALIGNED(reg, map->reg_stride))
Stephen Warrenf01ee602012-04-09 13:40:24 -06001804 return -EINVAL;
1805
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +02001806 map->lock(map->lock_arg);
Mark Brownb83a3132011-05-11 19:59:58 +02001807
1808 ret = _regmap_write(map, reg, val);
1809
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +02001810 map->unlock(map->lock_arg);
Mark Brownb83a3132011-05-11 19:59:58 +02001811
1812 return ret;
1813}
1814EXPORT_SYMBOL_GPL(regmap_write);
1815
1816/**
Charles Keepax2cf8e2d2017-01-12 11:17:39 +00001817 * regmap_write_async() - Write a value to a single register asynchronously
Mark Brown915f4412013-10-09 13:30:10 +01001818 *
1819 * @map: Register map to write to
1820 * @reg: Register to write to
1821 * @val: Value to be written
1822 *
1823 * A value of zero will be returned on success, a negative errno will
1824 * be returned in error cases.
1825 */
1826int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
1827{
1828 int ret;
1829
Xiubo Lifcac0232015-12-16 17:45:32 +08001830 if (!IS_ALIGNED(reg, map->reg_stride))
Mark Brown915f4412013-10-09 13:30:10 +01001831 return -EINVAL;
1832
1833 map->lock(map->lock_arg);
1834
1835 map->async = true;
1836
1837 ret = _regmap_write(map, reg, val);
1838
1839 map->async = false;
1840
1841 map->unlock(map->lock_arg);
1842
1843 return ret;
1844}
1845EXPORT_SYMBOL_GPL(regmap_write_async);
1846
Charles Keepax7ef2c6b2018-02-22 12:59:12 +00001847int _regmap_raw_write(struct regmap *map, unsigned int reg,
1848 const void *val, size_t val_len)
1849{
1850 size_t val_bytes = map->format.val_bytes;
1851 size_t val_count = val_len / val_bytes;
Charles Keepax364e3782018-02-22 12:59:13 +00001852 size_t chunk_count, chunk_bytes;
1853 size_t chunk_regs = val_count;
Charles Keepax7ef2c6b2018-02-22 12:59:12 +00001854 int ret, i;
1855
1856 if (!val_count)
1857 return -EINVAL;
1858
Charles Keepax364e3782018-02-22 12:59:13 +00001859 if (map->use_single_write)
1860 chunk_regs = 1;
1861 else if (map->max_raw_write && val_len > map->max_raw_write)
1862 chunk_regs = map->max_raw_write / val_bytes;
1863
1864 chunk_count = val_count / chunk_regs;
1865 chunk_bytes = chunk_regs * val_bytes;
Charles Keepax7ef2c6b2018-02-22 12:59:12 +00001866
1867 /* Write as many bytes as possible with chunk_size */
1868 for (i = 0; i < chunk_count; i++) {
Charles Keepax364e3782018-02-22 12:59:13 +00001869 ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
Charles Keepax7ef2c6b2018-02-22 12:59:12 +00001870 if (ret)
1871 return ret;
Charles Keepax364e3782018-02-22 12:59:13 +00001872
1873 reg += regmap_get_offset(map, chunk_regs);
1874 val += chunk_bytes;
1875 val_len -= chunk_bytes;
Charles Keepax7ef2c6b2018-02-22 12:59:12 +00001876 }
1877
1878 /* Write remaining bytes */
Charles Keepax364e3782018-02-22 12:59:13 +00001879 if (val_len)
1880 ret = _regmap_raw_write_impl(map, reg, val, val_len);
Charles Keepax7ef2c6b2018-02-22 12:59:12 +00001881
1882 return ret;
1883}
1884
Mark Brown915f4412013-10-09 13:30:10 +01001885/**
Charles Keepax2cf8e2d2017-01-12 11:17:39 +00001886 * regmap_raw_write() - Write raw values to one or more registers
Mark Brownb83a3132011-05-11 19:59:58 +02001887 *
1888 * @map: Register map to write to
1889 * @reg: Initial register to write to
1890 * @val: Block of data to be written, laid out for direct transmission to the
1891 * device
1892 * @val_len: Length of data pointed to by val.
1893 *
1894 * This function is intended to be used for things like firmware
1895 * download where a large block of data needs to be transferred to the
1896 * device. No formatting will be done on the data provided.
1897 *
1898 * A value of zero will be returned on success, a negative errno will
1899 * be returned in error cases.
1900 */
1901int regmap_raw_write(struct regmap *map, unsigned int reg,
1902 const void *val, size_t val_len)
1903{
1904 int ret;
1905
Mark Brown221ad7f2013-03-26 21:24:20 +00001906 if (!regmap_can_raw_write(map))
Andrey Smirnovd2a58842013-01-27 10:49:05 -08001907 return -EINVAL;
Stephen Warren851960b2012-04-06 15:16:03 -06001908 if (val_len % map->format.val_bytes)
1909 return -EINVAL;
1910
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +02001911 map->lock(map->lock_arg);
Mark Brownb83a3132011-05-11 19:59:58 +02001912
Mark Brown0a819802013-10-09 12:28:52 +01001913 ret = _regmap_raw_write(map, reg, val, val_len);
Mark Brownb83a3132011-05-11 19:59:58 +02001914
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +02001915 map->unlock(map->lock_arg);
Mark Brownb83a3132011-05-11 19:59:58 +02001916
1917 return ret;
1918}
1919EXPORT_SYMBOL_GPL(regmap_raw_write);
1920
/**
 * regmap_noinc_write(): Write data to a register without incrementing the
 *			register number
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Pointer to the data to be written
 * @val_len: Length of the data pointed to by val, in bytes.
 *
 * The regmap API usually assumes that bulk bus write operations will write a
 * range of registers. Some devices have certain registers for which a write
 * operation can write to an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple writes as required to write val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	size_t write_len;
	int ret;

	/* Raw access needs a bus with a block write operation. */
	if (!map->bus)
		return -EINVAL;
	if (!map->bus->write)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	/* The FIFO register must be volatile and marked noinc-writeable. */
	if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Chop the transfer into pieces no larger than the bus limit,
	 * always writing the same target register.
	 */
	while (val_len) {
		if (map->max_raw_write && map->max_raw_write < val_len)
			write_len = map->max_raw_write;
		else
			write_len = val_len;
		ret = _regmap_raw_write(map, reg, val, write_len);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + write_len;
		val_len -= write_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_write);
1983
1984/**
Charles Keepax2cf8e2d2017-01-12 11:17:39 +00001985 * regmap_field_update_bits_base() - Perform a read/modify/write cycle a
1986 * register field.
Kuninori Morimotofdf20022013-09-01 20:24:50 -07001987 *
1988 * @field: Register field to write to
1989 * @mask: Bitmask to change
1990 * @val: Value to be written
Kuninori Morimoto28972ea2016-02-15 05:23:55 +00001991 * @change: Boolean indicating if a write was done
1992 * @async: Boolean indicating asynchronously
1993 * @force: Boolean indicating use force update
Kuninori Morimotofdf20022013-09-01 20:24:50 -07001994 *
Charles Keepax2cf8e2d2017-01-12 11:17:39 +00001995 * Perform a read/modify/write cycle on the register field with change,
1996 * async, force option.
1997 *
Kuninori Morimotofdf20022013-09-01 20:24:50 -07001998 * A value of zero will be returned on success, a negative errno will
1999 * be returned in error cases.
2000 */
Kuninori Morimoto28972ea2016-02-15 05:23:55 +00002001int regmap_field_update_bits_base(struct regmap_field *field,
2002 unsigned int mask, unsigned int val,
2003 bool *change, bool async, bool force)
Kuninori Morimotofdf20022013-09-01 20:24:50 -07002004{
2005 mask = (mask << field->shift) & field->mask;
2006
Kuninori Morimoto28972ea2016-02-15 05:23:55 +00002007 return regmap_update_bits_base(field->regmap, field->reg,
2008 mask, val << field->shift,
2009 change, async, force);
Kuninori Morimotofdf20022013-09-01 20:24:50 -07002010}
Kuninori Morimoto28972ea2016-02-15 05:23:55 +00002011EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
Kuninori Morimotofdf20022013-09-01 20:24:50 -07002012
Kuninori Morimotoa0102372013-09-01 20:30:50 -07002013/**
Charles Keepax2cf8e2d2017-01-12 11:17:39 +00002014 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle a
2015 * register field with port ID
Kuninori Morimotoa0102372013-09-01 20:30:50 -07002016 *
2017 * @field: Register field to write to
2018 * @id: port ID
2019 * @mask: Bitmask to change
2020 * @val: Value to be written
Kuninori Morimotoe126ede2016-02-15 05:24:51 +00002021 * @change: Boolean indicating if a write was done
2022 * @async: Boolean indicating asynchronously
2023 * @force: Boolean indicating use force update
Kuninori Morimotoa0102372013-09-01 20:30:50 -07002024 *
2025 * A value of zero will be returned on success, a negative errno will
2026 * be returned in error cases.
2027 */
Kuninori Morimotoe126ede2016-02-15 05:24:51 +00002028int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
2029 unsigned int mask, unsigned int val,
2030 bool *change, bool async, bool force)
Kuninori Morimotoa0102372013-09-01 20:30:50 -07002031{
2032 if (id >= field->id_size)
2033 return -EINVAL;
2034
2035 mask = (mask << field->shift) & field->mask;
2036
Kuninori Morimotoe126ede2016-02-15 05:24:51 +00002037 return regmap_update_bits_base(field->regmap,
2038 field->reg + (field->id_offset * id),
2039 mask, val << field->shift,
2040 change, async, force);
Kuninori Morimotoa0102372013-09-01 20:30:50 -07002041}
Kuninori Morimotoe126ede2016-02-15 05:24:51 +00002042EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
Kuninori Morimotoa0102372013-09-01 20:30:50 -07002043
/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in single transfer or multiple transfer.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write, for them we have a series of
	 * single write operations.
	 */
	if (!map->bus || !map->format.parse_inplace) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			/* Pull the next value out of the caller's buffer. */
			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		/* Work on a copy so the caller's buffer isn't reformatted. */
		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval)
			return -ENOMEM;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
2121
Anthony Oleche33fabd2013-10-11 15:31:11 +01002122/*
Opensource [Anthony Olech]e894c3f2014-03-04 13:54:02 +00002123 * _regmap_raw_multi_reg_write()
2124 *
2125 * the (register,newvalue) pairs in regs have not been formatted, but
2126 * they are all in the same page and have been changed to being page
Xiubo Lib486afb2015-08-12 15:02:19 +08002127 * relative. The page register has been written if that was necessary.
Opensource [Anthony Olech]e894c3f2014-03-04 13:54:02 +00002128 */
2129static int _regmap_raw_multi_reg_write(struct regmap *map,
Nariman Poushin8019ff62015-07-16 16:36:21 +01002130 const struct reg_sequence *regs,
Opensource [Anthony Olech]e894c3f2014-03-04 13:54:02 +00002131 size_t num_regs)
Charles Keepax1d5b40b2014-02-25 13:45:50 +00002132{
Opensource [Anthony Olech]e894c3f2014-03-04 13:54:02 +00002133 int ret;
2134 void *buf;
2135 int i;
2136 u8 *u8;
2137 size_t val_bytes = map->format.val_bytes;
2138 size_t reg_bytes = map->format.reg_bytes;
2139 size_t pad_bytes = map->format.pad_bytes;
2140 size_t pair_size = reg_bytes + pad_bytes + val_bytes;
2141 size_t len = pair_size * num_regs;
2142
Xiubo Lif5727cd2014-04-30 17:31:08 +08002143 if (!len)
2144 return -EINVAL;
2145
Opensource [Anthony Olech]e894c3f2014-03-04 13:54:02 +00002146 buf = kzalloc(len, GFP_KERNEL);
2147 if (!buf)
2148 return -ENOMEM;
2149
2150 /* We have to linearise by hand. */
2151
2152 u8 = buf;
Charles Keepax1d5b40b2014-02-25 13:45:50 +00002153
2154 for (i = 0; i < num_regs; i++) {
Markus Pargmann2f9b6602015-08-12 12:12:28 +02002155 unsigned int reg = regs[i].reg;
2156 unsigned int val = regs[i].def;
Philipp Zabelc6b570d2015-03-09 12:20:13 +01002157 trace_regmap_hw_write_start(map, reg, 1);
Opensource [Anthony Olech]e894c3f2014-03-04 13:54:02 +00002158 map->format.format_reg(u8, reg, map->reg_shift);
2159 u8 += reg_bytes + pad_bytes;
2160 map->format.format_val(u8, val, 0);
2161 u8 += val_bytes;
2162 }
2163 u8 = buf;
2164 *u8 |= map->write_flag_mask;
2165
2166 ret = map->bus->write(map->bus_context, buf, len);
2167
2168 kfree(buf);
2169
2170 for (i = 0; i < num_regs; i++) {
2171 int reg = regs[i].reg;
Philipp Zabelc6b570d2015-03-09 12:20:13 +01002172 trace_regmap_hw_write_done(map, reg, 1);
Opensource [Anthony Olech]e894c3f2014-03-04 13:54:02 +00002173 }
2174 return ret;
2175}
2176
2177static unsigned int _regmap_register_page(struct regmap *map,
2178 unsigned int reg,
2179 struct regmap_range_node *range)
2180{
2181 unsigned int win_page = (reg - range->range_min) / range->window_len;
2182
2183 return win_page;
2184}
2185
/*
 * Write a set of register/value pairs that may span window pages and/or
 * carry per-write delays, preserving the caller's write order.  The
 * map's lock is held by the caller; regs may be modified (the page
 * selection rewrites base[n].reg), so callers pass a copy.
 */
static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * the set of registers are not necessarily in order, but
	 * since the order of write must be preserved this algorithm
	 * chops the set each time the page changes. This also applies
	 * if there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			/* The first register establishes the current page. */
			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

				/* For situations where the first write requires
				 * a delay we need to make sure we don't call
				 * raw_multi_reg_write with n=0
				 * This can't occur with page breaks as we
				 * never write on the first iteration
				 */
				if (regs[i].delay_us && i == 0)
					n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);

			base += n;
			n = 0;

			if (page_change) {
				/* Select the page of the next batch's first
				 * register before continuing.
				 */
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}
2262
/*
 * Core of the multi-register write paths: dispatches between
 * register-at-a-time writes, the paged/delayed path and a single raw
 * block write, honouring the cache. Callers hold the map's lock.
 */
static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	/* Fall back on one write per register where needed. */
	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	/* Validate every register before writing any of them. */
	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			/* Cache updated; hardware writes wait for a sync. */
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			/* The paged path rewrites registers in place, so
			 * give it a scratch copy of the sequence.
			 */
			size_t len = sizeof(struct reg_sequence)*num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							   GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}
2337
/**
 * regmap_multi_reg_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device where the set of register, value
 * pairs are supplied in any order, possibly not all in a single range.
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed. However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
2371
Charles Keepax2cf8e2d2017-01-12 11:17:39 +00002372/**
2373 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
2374 * device but not the cache
Mark Brown0d509f22013-01-27 22:07:38 +08002375 *
2376 * @map: Register map to write to
2377 * @regs: Array of structures containing register,value to be written
2378 * @num_regs: Number of registers to write
2379 *
Charles Keepax2cf8e2d2017-01-12 11:17:39 +00002380 * Write multiple registers to the device but not the cache where the set
2381 * of register are supplied in any order.
2382 *
Mark Brown0d509f22013-01-27 22:07:38 +08002383 * This function is intended to be used for writing a large block of data
2384 * atomically to the device in single transfer for those I2C client devices
2385 * that implement this alternative block write mode.
2386 *
2387 * A value of zero will be returned on success, a negative errno will
2388 * be returned in error cases.
2389 */
Charles Keepax1d5b40b2014-02-25 13:45:50 +00002390int regmap_multi_reg_write_bypassed(struct regmap *map,
Nariman Poushin8019ff62015-07-16 16:36:21 +01002391 const struct reg_sequence *regs,
Charles Keepax1d5b40b2014-02-25 13:45:50 +00002392 int num_regs)
Mark Brown0d509f22013-01-27 22:07:38 +08002393{
Charles Keepax1d5b40b2014-02-25 13:45:50 +00002394 int ret;
2395 bool bypass;
Mark Brown0d509f22013-01-27 22:07:38 +08002396
2397 map->lock(map->lock_arg);
2398
Charles Keepax1d5b40b2014-02-25 13:45:50 +00002399 bypass = map->cache_bypass;
2400 map->cache_bypass = true;
2401
2402 ret = _regmap_multi_reg_write(map, regs, num_regs);
2403
2404 map->cache_bypass = bypass;
2405
Mark Brown0a819802013-10-09 12:28:52 +01002406 map->unlock(map->lock_arg);
2407
2408 return ret;
2409}
Charles Keepax1d5b40b2014-02-25 13:45:50 +00002410EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
Mark Brown0d509f22013-01-27 22:07:38 +08002411
/**
 * regmap_raw_write_async() - Write raw values to one or more registers
 *                            asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device.  Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	/* Raw data must be a whole number of register values */
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	/* Flag async mode for the duration of the raw write */
	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);
2457
/* Perform a raw block read from the device into val; the map's lock is
 * expected to be held by the caller (see regmap_raw_read()).  val_len
 * must be a whole number of register values.
 */
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	/* Raw reads require a bus with a block read operation */
	if (!map->bus || !map->bus->read)
		return -EINVAL;

	/* If the register is inside a windowed range, select its page */
	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	/* Format the (possibly page-adjusted) register address into work_buf */
	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}
2490
/* reg_read() implementation used when the underlying bus provides a
 * native single-register read operation.
 */
static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;

	return map->bus->reg_read(map->bus_context, reg, val);
}
2498
/* reg_read() implementation for block-oriented buses: do a raw read of a
 * single value into the scratch area of work_buf and parse it into *val.
 */
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;
	/* Scratch space in work_buf, after the formatted register and pad */
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
	if (ret == 0)
		*val = map->format.parse_val(work_val);

	return ret;
}
2516
/* Read a single register value, preferring the cache over the hardware.
 * The map's lock is expected to be held by the caller.
 */
static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	/* Try to satisfy the read from the cache first */
	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	/* Cache-only maps must never touch the hardware */
	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x => %x\n", reg, *val);

		trace_regmap_reg_read(map, reg, *val);

		/* Keep the cache coherent with the value just read */
		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}
2548
2549/**
Charles Keepax2cf8e2d2017-01-12 11:17:39 +00002550 * regmap_read() - Read a value from a single register
Mark Brownb83a3132011-05-11 19:59:58 +02002551 *
Gerhard Sittig00933802013-11-11 10:42:36 +01002552 * @map: Register map to read from
Mark Brownb83a3132011-05-11 19:59:58 +02002553 * @reg: Register to be read from
2554 * @val: Pointer to store read value
2555 *
2556 * A value of zero will be returned on success, a negative errno will
2557 * be returned in error cases.
2558 */
2559int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
2560{
2561 int ret;
2562
Xiubo Lifcac0232015-12-16 17:45:32 +08002563 if (!IS_ALIGNED(reg, map->reg_stride))
Stephen Warrenf01ee602012-04-09 13:40:24 -06002564 return -EINVAL;
2565
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +02002566 map->lock(map->lock_arg);
Mark Brownb83a3132011-05-11 19:59:58 +02002567
2568 ret = _regmap_read(map, reg, val);
2569
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +02002570 map->unlock(map->lock_arg);
Mark Brownb83a3132011-05-11 19:59:58 +02002571
2572 return ret;
2573}
2574EXPORT_SYMBOL_GPL(regmap_read);
2575
/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	/* The request must be a whole, non-zero number of stride-aligned values */
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		size_t chunk_count, chunk_bytes;
		size_t chunk_regs = val_count;

		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}

		/* Honour single-read devices and the bus's raw read limit */
		if (map->use_single_read)
			chunk_regs = 1;
		else if (map->max_raw_read && val_len > map->max_raw_read)
			chunk_regs = map->max_raw_read / val_bytes;

		chunk_count = val_count / chunk_regs;
		chunk_bytes = chunk_regs * val_bytes;

		/* Read bytes that fit into whole chunks */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_read(map, reg, val, chunk_bytes);
			if (ret != 0)
				goto out;

			reg += regmap_get_offset(map, chunk_regs);
			val += chunk_bytes;
			val_len -= chunk_bytes;
		}

		/* Read remaining bytes */
		if (val_len) {
			ret = _regmap_raw_read(map, reg, val, val_len);
			if (ret != 0)
				goto out;
		}
	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

 out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);
2661
/**
 * regmap_noinc_read() - Read data from a register without incrementing the
 *			register number
 *
 * @map: Register map to read from
 * @reg: Register to read from
 * @val: Pointer to data buffer
 * @val_len: Length of output buffer in bytes.
 *
 * The regmap API usually assumes that bulk bus read operations will read a
 * range of registers. Some devices have certain registers for which a read
 * operation will read from an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple reads as required to read val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_read(struct regmap *map, unsigned int reg,
		      void *val, size_t val_len)
{
	size_t read_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->read)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	/* FIFO-style reads only make sense on volatile, noinc-readable regs */
	if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Split into reads no larger than the bus's raw read limit */
	while (val_len) {
		if (map->max_raw_read && map->max_raw_read < val_len)
			read_len = map->max_raw_read;
		else
			read_len = val_len;
		ret = _regmap_raw_read(map, reg, val, read_len);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + read_len;
		val_len -= read_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_read);
2724
/**
 * regmap_field_read() - Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;
	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	/* Mask and shift the field out of the raw register value */
	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);
2749
/**
 * regmap_fields_read() - Read a value from a single register field with port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	/* Each port's copy of the field is id_offset registers apart */
	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	/* Mask and shift the field out of the raw register value */
	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);
2782
/**
 * regmap_bulk_read() - Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
		/* Fast path: one raw read, then parse each value in place */
		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
		if (ret != 0)
			return ret;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
		/* Word-by-word path: read each register and store it in the
		 * caller's buffer at the device's native value width.
		 */
#ifdef CONFIG_64BIT
		u64 *u64 = val;
#endif
		u32 *u32 = val;
		u16 *u16 = val;
		u8 *u8 = val;

		map->lock(map->lock_arg);

		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &ival);
			if (ret != 0)
				goto out;

			switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
			case 8:
				u64[i] = ival;
				break;
#endif
			case 4:
				u32[i] = ival;
				break;
			case 2:
				u16[i] = ival;
				break;
			case 1:
				u8[i] = ival;
				break;
			default:
				ret = -EINVAL;
				goto out;
			}
		}

out:
		map->unlock(map->lock_arg);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
2859
/* Read/modify/write helper; the map's lock is expected to be held by the
 * caller.  If @change is non-NULL it is set to true when a write was
 * performed.  When @force_write is set the register is written even if
 * the value is unchanged.
 */
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	/* Volatile registers with a hardware update op go straight to it */
	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		/* Skip the write when nothing changed, unless forced */
		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}
Mark Brown018690d2011-11-29 20:10:36 +00002891
2892/**
Charles Keepax2cf8e2d2017-01-12 11:17:39 +00002893 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
Mark Brown018690d2011-11-29 20:10:36 +00002894 *
2895 * @map: Register map to update
2896 * @reg: Register to update
2897 * @mask: Bitmask to change
2898 * @val: New value for bitmask
Kuninori Morimoto91d31b92016-02-15 05:22:18 +00002899 * @change: Boolean indicating if a write was done
2900 * @async: Boolean indicating asynchronously
2901 * @force: Boolean indicating use force update
Mark Brown018690d2011-11-29 20:10:36 +00002902 *
Charles Keepax2cf8e2d2017-01-12 11:17:39 +00002903 * Perform a read/modify/write cycle on a register map with change, async, force
2904 * options.
2905 *
2906 * If async is true:
2907 *
2908 * With most buses the read must be done synchronously so this is most useful
2909 * for devices with a cache which do not need to interact with the hardware to
2910 * determine the current register value.
Mark Brown915f4412013-10-09 13:30:10 +01002911 *
2912 * Returns zero for success, a negative number on error.
2913 */
Kuninori Morimoto91d31b92016-02-15 05:22:18 +00002914int regmap_update_bits_base(struct regmap *map, unsigned int reg,
2915 unsigned int mask, unsigned int val,
2916 bool *change, bool async, bool force)
Mark Brown915f4412013-10-09 13:30:10 +01002917{
Mark Brown915f4412013-10-09 13:30:10 +01002918 int ret;
2919
2920 map->lock(map->lock_arg);
2921
Kuninori Morimoto91d31b92016-02-15 05:22:18 +00002922 map->async = async;
Mark Brown915f4412013-10-09 13:30:10 +01002923
Kuninori Morimoto91d31b92016-02-15 05:22:18 +00002924 ret = _regmap_update_bits(map, reg, mask, val, change, force);
Mark Brown915f4412013-10-09 13:30:10 +01002925
2926 map->async = false;
2927
2928 map->unlock(map->lock_arg);
2929
2930 return ret;
2931}
Kuninori Morimoto91d31b92016-02-15 05:22:18 +00002932EXPORT_SYMBOL_GPL(regmap_update_bits_base);
Mark Brown915f4412013-10-09 13:30:10 +01002933
/* Completion callback invoked when an asynchronous bus transfer finishes;
 * recycles the async descriptor and wakes any waiter once the list drains.
 */
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	/* Move the descriptor back to the free list for reuse */
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	/* Record the error for regmap_async_complete() to report */
	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
Mark Brown0d509f22013-01-27 22:07:38 +08002954
2955static int regmap_async_is_done(struct regmap *map)
2956{
2957 unsigned long flags;
2958 int ret;
2959
2960 spin_lock_irqsave(&map->async_lock, flags);
2961 ret = list_empty(&map->async_list);
2962 spin_unlock_irqrestore(&map->async_lock, flags);
2963
2964 return ret;
2965}
2966
/**
 * regmap_async_complete - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	/* Collect and reset the error recorded by the completion callback */
	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);
Mark Brown0d509f22013-01-27 22:07:38 +08002998
/**
 * regmap_register_patch - Register and apply register updates to be applied
 *                         on device initialistion
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply
 * corrections to be applied to the device defaults on startup, such
 * as the updates some vendors provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
		      num_regs))
		return 0;

	/* Append the new entries to any previously registered patch */
	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	/* Apply the patch to the hardware only, not the cache */
	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	/* Wait for any asynchronous writes issued above to finish */
	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
3057
Charles Keepax2cf8e2d2017-01-12 11:17:39 +00003058/**
3059 * regmap_get_val_bytes() - Report the size of a register value
3060 *
3061 * @map: Register map to operate on.
Mark Browna6539c32012-02-17 14:20:14 -08003062 *
3063 * Report the size of a register value, mainly intended to for use by
3064 * generic infrastructure built on top of regmap.
3065 */
3066int regmap_get_val_bytes(struct regmap *map)
3067{
3068 if (map->format.format_write)
3069 return -EINVAL;
3070
3071 return map->format.val_bytes;
3072}
3073EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
3074
Srinivas Kandagatla668abc72015-05-21 17:42:43 +01003075/**
Charles Keepax2cf8e2d2017-01-12 11:17:39 +00003076 * regmap_get_max_register() - Report the max register value
3077 *
3078 * @map: Register map to operate on.
Srinivas Kandagatla668abc72015-05-21 17:42:43 +01003079 *
3080 * Report the max register value, mainly intended to for use by
3081 * generic infrastructure built on top of regmap.
3082 */
3083int regmap_get_max_register(struct regmap *map)
3084{
3085 return map->max_register ? map->max_register : -EINVAL;
3086}
3087EXPORT_SYMBOL_GPL(regmap_get_max_register);
3088
/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended to for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
3102
/**
 * regmap_parse_val - Parse a raw, device-formatted value buffer
 *
 * @map: Register map providing the value format
 * @buf: Buffer holding one value in the device's raw format
 * @val: Pointer to store the parsed value
 *
 * A value of zero will be returned on success, -EINVAL if the map has
 * no parse_val() operation.
 */
int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);
3114
/* Initialise regmap debugfs support early in boot (postcore level) */
static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);