blob: 927eb5f6003f054834502656d006d6b4257f0068 [file] [log] [blame]
Bartosz Golaszewskib1c1db92018-09-21 06:40:20 -07001// SPDX-License-Identifier: GPL-2.0
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +01002/*
3 * nvmem framework core.
4 *
5 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
6 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +01007 */
8
9#include <linux/device.h>
10#include <linux/export.h>
11#include <linux/fs.h>
12#include <linux/idr.h>
13#include <linux/init.h>
Bartosz Golaszewskic1de7f42018-09-21 06:40:08 -070014#include <linux/kref.h>
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +010015#include <linux/module.h>
16#include <linux/nvmem-consumer.h>
17#include <linux/nvmem-provider.h>
Khouloud Touil2a127da2020-01-07 10:29:19 +010018#include <linux/gpio/consumer.h>
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +010019#include <linux/of.h>
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +010020#include <linux/slab.h>
Srinivas Kandagatla84400302020-03-25 13:19:51 +000021
/* Internal representation of one registered nvmem provider. */
struct nvmem_device {
	struct module *owner;		/* module backing reg_read/reg_write */
	struct device dev;		/* lives on nvmem_bus_type */
	int stride;			/* required access alignment, bytes */
	int word_size;			/* minimum access granularity, bytes */
	int id;				/* instance id from nvmem_ida */
	struct kref refcnt;		/* dropped via nvmem_device_release() */
	size_t size;			/* total device size, bytes */
	bool read_only;
	bool root_only;			/* restrict sysfs access to root */
	int flags;			/* FLAG_* bits, see below */
	enum nvmem_type type;		/* shown by the sysfs "type" attr */
	struct bin_attribute eeprom;	/* legacy compat file (FLAG_COMPAT) */
	struct device *base_dev;	/* device carrying the compat file */
	struct list_head cells;		/* cells registered on this device */
	nvmem_reg_read_t reg_read;
	nvmem_reg_write_t reg_write;
	struct gpio_desc *wp_gpio;	/* optional write-protect line */
	void *priv;			/* provider private data */
};

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

/* The legacy "eeprom" compat file has been created for this device. */
#define FLAG_COMPAT BIT(0)
Andrew Lunnb6c217a2016-02-26 20:59:19 +010046
/* One named sub-range ("cell") of an nvmem device. */
struct nvmem_cell {
	const char *name;
	int offset;			/* byte offset within the device */
	int bytes;			/* cell size in bytes */
	int bit_offset;			/* first valid bit within byte 0 */
	int nbits;			/* number of valid bits, 0 = whole bytes */
	struct device_node *np;		/* DT node, set only for OF-described cells */
	struct nvmem_device *nvmem;	/* owning device */
	struct list_head node;		/* entry in nvmem->cells */
};
57
/* Serializes device lookup and per-device cell list manipulation. */
static DEFINE_MUTEX(nvmem_mutex);
/* Allocates unique instance ids for nvmem devices. */
static DEFINE_IDA(nvmem_ida);

/* Protects the list of board-file supplied cell tables. */
static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

/* Protects the cell lookup list (entries used elsewhere in this file). */
static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

/* Notifier chain fed with NVMEM_ADD/NVMEM_REMOVE/NVMEM_CELL_* events. */
static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
68
Michael Auchterb96fc542020-05-11 15:50:41 +010069static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
70 void *val, size_t bytes)
71{
72 if (nvmem->reg_read)
73 return nvmem->reg_read(nvmem->priv, offset, val, bytes);
74
75 return -EINVAL;
76}
77
78static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
79 void *val, size_t bytes)
80{
81 int ret;
82
83 if (nvmem->reg_write) {
84 gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
85 ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
86 gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
87 return ret;
88 }
89
90 return -EINVAL;
91}
92
#ifdef CONFIG_NVMEM_SYSFS
/* Human-readable names for enum nvmem_type, indexed by the enum value. */
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* Dedicated lockdep class for the compat "eeprom" binary attribute. */
static struct lock_class_key eeprom_lock_key;
#endif
104
105static ssize_t type_show(struct device *dev,
106 struct device_attribute *attr, char *buf)
107{
108 struct nvmem_device *nvmem = to_nvmem_device(dev);
109
110 return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
111}
112
113static DEVICE_ATTR_RO(type);
114
/* Plain (non-binary) device attributes; currently only "type". */
static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};
119
120static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
121 struct bin_attribute *attr, char *buf,
122 loff_t pos, size_t count)
123{
124 struct device *dev;
125 struct nvmem_device *nvmem;
126 int rc;
127
128 if (attr->private)
129 dev = attr->private;
130 else
131 dev = container_of(kobj, struct device, kobj);
132 nvmem = to_nvmem_device(dev);
133
134 /* Stop the user from reading */
135 if (pos >= nvmem->size)
136 return 0;
137
138 if (count < nvmem->word_size)
139 return -EINVAL;
140
141 if (pos + count > nvmem->size)
142 count = nvmem->size - pos;
143
144 count = round_down(count, nvmem->word_size);
145
146 if (!nvmem->reg_read)
147 return -EPERM;
148
Michael Auchterb96fc542020-05-11 15:50:41 +0100149 rc = nvmem_reg_read(nvmem, pos, buf, count);
Srinivas Kandagatla84400302020-03-25 13:19:51 +0000150
151 if (rc)
152 return rc;
153
154 return count;
155}
156
157static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
158 struct bin_attribute *attr, char *buf,
159 loff_t pos, size_t count)
160{
161 struct device *dev;
162 struct nvmem_device *nvmem;
163 int rc;
164
165 if (attr->private)
166 dev = attr->private;
167 else
168 dev = container_of(kobj, struct device, kobj);
169 nvmem = to_nvmem_device(dev);
170
171 /* Stop the user from writing */
172 if (pos >= nvmem->size)
173 return -EFBIG;
174
175 if (count < nvmem->word_size)
176 return -EINVAL;
177
178 if (pos + count > nvmem->size)
179 count = nvmem->size - pos;
180
181 count = round_down(count, nvmem->word_size);
182
183 if (!nvmem->reg_write)
184 return -EPERM;
185
Michael Auchterb96fc542020-05-11 15:50:41 +0100186 rc = nvmem_reg_write(nvmem, pos, buf, count);
Srinivas Kandagatla84400302020-03-25 13:19:51 +0000187
188 if (rc)
189 return rc;
190
191 return count;
192}
193
Srinivas Kandagatla2a4542e2020-04-17 13:13:06 +0100194static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
Srinivas Kandagatla84400302020-03-25 13:19:51 +0000195{
Srinivas Kandagatla84400302020-03-25 13:19:51 +0000196 umode_t mode = 0400;
197
198 if (!nvmem->root_only)
199 mode |= 0044;
200
201 if (!nvmem->read_only)
202 mode |= 0200;
203
204 if (!nvmem->reg_write)
205 mode &= ~0200;
206
207 if (!nvmem->reg_read)
208 mode &= ~0444;
209
210 return mode;
211}
212
Srinivas Kandagatla2a4542e2020-04-17 13:13:06 +0100213static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
214 struct bin_attribute *attr, int i)
215{
216 struct device *dev = container_of(kobj, struct device, kobj);
217 struct nvmem_device *nvmem = to_nvmem_device(dev);
218
219 return nvmem_bin_attr_get_umode(nvmem);
220}
221
/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr = {
		.name = "nvmem",
		/* effective mode comes from nvmem_bin_attr_is_visible() */
		.mode = 0644,
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

/* Group exposing the "nvmem" binary file and the "type" attribute. */
static const struct attribute_group nvmem_bin_group = {
	.bin_attrs = nvmem_bin_attributes,
	.attrs = nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};
247
/* Template for the legacy "eeprom" file; mode is filled in at setup time. */
static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr = {
		.name = "eeprom",
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};
255
/*
 * nvmem_setup_compat() - Create an additional binary entry in
 * drivers sys directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	/* The compat file hangs off the caller-supplied base device. */
	if (!config->base_dev)
		return -EINVAL;

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* Give the attribute its own lockdep class (eeprom_lock_key). */
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	/* bin_attr_nvmem_read/write use ->private to find the device. */
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	/* Remembered so teardown paths remove the file again. */
	nvmem->flags |= FLAG_COMPAT;

	return 0;
}
292
293static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
294 const struct nvmem_config *config)
295{
296 if (config->compat)
297 device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
298}
299
#else /* CONFIG_NVMEM_SYSFS */

/* Without sysfs support the compat "eeprom" file cannot be provided. */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */
Andy Shevchenkoa8b44d52018-11-30 11:53:24 +0000313
/* Device-core release callback: frees everything nvmem_register() set up. */
static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}
322
static const struct device_type nvmem_provider_type = {
	.release = nvmem_release,
};

/* All nvmem devices live on this bus so consumers can look them up. */
static struct bus_type nvmem_bus_type = {
	.name = "nvmem",
};
330
/* Unregister one cell: notify consumers, unlink it and free its memory. */
static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	/* np is only set for OF-described cells; of_node_put(NULL) is safe. */
	of_node_put(cell->np);
	/* name may come from kstrdup_const() or kasprintf(); both paths ok. */
	kfree_const(cell->name);
	kfree(cell);
}
341
/*
 * Drop every cell of @nvmem.  The _safe iterator is required because
 * nvmem_cell_drop() unlinks and frees the entry we are standing on.
 */
static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}
349
/* Register an initialized cell with its device and notify consumers. */
static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}
357
358static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
359 const struct nvmem_cell_info *info,
360 struct nvmem_cell *cell)
361{
362 cell->nvmem = nvmem;
363 cell->offset = info->offset;
364 cell->bytes = info->bytes;
Bitan Biswas16bb7ab2020-01-09 10:40:17 +0000365 cell->name = kstrdup_const(info->name, GFP_KERNEL);
366 if (!cell->name)
367 return -ENOMEM;
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100368
369 cell->bit_offset = info->bit_offset;
370 cell->nbits = info->nbits;
371
372 if (cell->nbits)
373 cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
374 BITS_PER_BYTE);
375
376 if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
377 dev_err(&nvmem->dev,
378 "cell %s unaligned to nvmem stride %d\n",
379 cell->name, nvmem->stride);
380 return -EINVAL;
381 }
382
383 return 0;
384}
385
/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
		    const struct nvmem_cell_info *info,
		    int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	/* Temporary pointer array so already-added cells can be unwound. */
	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	/* i is the failed index; drop every cell registered before it. */
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}
434
/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	/* Callbacks see NVMEM_ADD/NVMEM_REMOVE/NVMEM_CELL_ADD/NVMEM_CELL_REMOVE. */
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);
447
/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
460
Bartosz Golaszewskib985f4c2018-09-21 06:40:15 -0700461static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
462{
463 const struct nvmem_cell_info *info;
464 struct nvmem_cell_table *table;
465 struct nvmem_cell *cell;
466 int rval = 0, i;
467
468 mutex_lock(&nvmem_cell_mutex);
469 list_for_each_entry(table, &nvmem_cell_tables, node) {
470 if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
471 for (i = 0; i < table->ncells; i++) {
472 info = &table->cells[i];
473
474 cell = kzalloc(sizeof(*cell), GFP_KERNEL);
475 if (!cell) {
476 rval = -ENOMEM;
477 goto out;
478 }
479
480 rval = nvmem_cell_info_to_nvmem_cell(nvmem,
481 info,
482 cell);
483 if (rval) {
484 kfree(cell);
485 goto out;
486 }
487
488 nvmem_cell_add(cell);
489 }
490 }
491 }
492
493out:
494 mutex_unlock(&nvmem_cell_mutex);
495 return rval;
496}
497
Bartosz Golaszewskie888d442018-09-21 06:40:16 -0700498static struct nvmem_cell *
Bartosz Golaszewski506157b2018-09-21 06:40:17 -0700499nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
500{
Alban Bedel1c832672019-01-28 15:55:02 +0000501 struct nvmem_cell *iter, *cell = NULL;
Bartosz Golaszewski506157b2018-09-21 06:40:17 -0700502
503 mutex_lock(&nvmem_mutex);
Alban Bedel1c832672019-01-28 15:55:02 +0000504 list_for_each_entry(iter, &nvmem->cells, node) {
505 if (strcmp(cell_id, iter->name) == 0) {
506 cell = iter;
Bartosz Golaszewski506157b2018-09-21 06:40:17 -0700507 break;
Alban Bedel1c832672019-01-28 15:55:02 +0000508 }
Bartosz Golaszewski506157b2018-09-21 06:40:17 -0700509 }
510 mutex_unlock(&nvmem_mutex);
511
512 return cell;
513}
514
Bartosz Golaszewskie888d442018-09-21 06:40:16 -0700515static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
516{
517 struct device_node *parent, *child;
518 struct device *dev = &nvmem->dev;
519 struct nvmem_cell *cell;
520 const __be32 *addr;
521 int len;
522
523 parent = dev->of_node;
524
525 for_each_child_of_node(parent, child) {
526 addr = of_get_property(child, "reg", &len);
527 if (!addr || (len < 2 * sizeof(u32))) {
528 dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
529 return -EINVAL;
530 }
531
532 cell = kzalloc(sizeof(*cell), GFP_KERNEL);
533 if (!cell)
534 return -ENOMEM;
535
536 cell->nvmem = nvmem;
Srinivas Kandagatla0749aa22018-11-06 15:41:41 +0000537 cell->np = of_node_get(child);
Bartosz Golaszewskie888d442018-09-21 06:40:16 -0700538 cell->offset = be32_to_cpup(addr++);
539 cell->bytes = be32_to_cpup(addr);
Rob Herringbadcdff2018-10-03 18:47:04 +0100540 cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
Bartosz Golaszewskie888d442018-09-21 06:40:16 -0700541
542 addr = of_get_property(child, "bits", &len);
543 if (addr && len == (2 * sizeof(u32))) {
544 cell->bit_offset = be32_to_cpup(addr++);
545 cell->nbits = be32_to_cpup(addr);
546 }
547
548 if (cell->nbits)
549 cell->bytes = DIV_ROUND_UP(
550 cell->nbits + cell->bit_offset,
551 BITS_PER_BYTE);
552
553 if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
554 dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
555 cell->name, nvmem->stride);
556 /* Cells already added will be freed later. */
Bitan Biswas16bb7ab2020-01-09 10:40:17 +0000557 kfree_const(cell->name);
Bartosz Golaszewskie888d442018-09-21 06:40:16 -0700558 kfree(cell);
559 return -EINVAL;
560 }
561
562 nvmem_cell_add(cell);
563 }
564
565 return 0;
566}
567
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100568/**
569 * nvmem_register() - Register a nvmem device for given nvmem_config.
570 * Also creates an binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
571 *
572 * @config: nvmem device configuration with which nvmem device is created.
573 *
574 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
575 * on success.
576 */
577
578struct nvmem_device *nvmem_register(const struct nvmem_config *config)
579{
580 struct nvmem_device *nvmem;
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100581 int rval;
582
583 if (!config->dev)
584 return ERR_PTR(-EINVAL);
585
Srinivas Kandagatla061a3202020-03-10 13:22:51 +0000586 if (!config->reg_read && !config->reg_write)
587 return ERR_PTR(-EINVAL);
588
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100589 nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
590 if (!nvmem)
591 return ERR_PTR(-ENOMEM);
592
593 rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
594 if (rval < 0) {
595 kfree(nvmem);
596 return ERR_PTR(rval);
597 }
Bartosz Golaszewski31c6ff52020-03-10 13:22:48 +0000598
Khouloud Touil2a127da2020-01-07 10:29:19 +0100599 if (config->wp_gpio)
600 nvmem->wp_gpio = config->wp_gpio;
601 else
602 nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
603 GPIOD_OUT_HIGH);
Bartosz Golaszewskif7d8d7d2020-03-10 13:22:49 +0000604 if (IS_ERR(nvmem->wp_gpio)) {
605 ida_simple_remove(&nvmem_ida, nvmem->id);
606 rval = PTR_ERR(nvmem->wp_gpio);
607 kfree(nvmem);
608 return ERR_PTR(rval);
609 }
Khouloud Touil2a127da2020-01-07 10:29:19 +0100610
Bartosz Golaszewskic1de7f42018-09-21 06:40:08 -0700611 kref_init(&nvmem->refcnt);
Bartosz Golaszewskic7235ee2018-09-21 06:40:14 -0700612 INIT_LIST_HEAD(&nvmem->cells);
Bartosz Golaszewskic1de7f42018-09-21 06:40:08 -0700613
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100614 nvmem->id = rval;
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100615 nvmem->owner = config->owner;
Masahiro Yamada17eb18d2017-10-21 01:57:42 +0900616 if (!nvmem->owner && config->dev->driver)
617 nvmem->owner = config->dev->driver->owner;
Heiner Kallweit99897ef2017-12-15 14:06:05 +0000618 nvmem->stride = config->stride ?: 1;
619 nvmem->word_size = config->word_size ?: 1;
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +0100620 nvmem->size = config->size;
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100621 nvmem->dev.type = &nvmem_provider_type;
622 nvmem->dev.bus = &nvmem_bus_type;
623 nvmem->dev.parent = config->dev;
Srinivas Kandagatlae6de1792020-03-25 12:21:15 +0000624 nvmem->root_only = config->root_only;
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +0100625 nvmem->priv = config->priv;
Alexandre Belloni16688452018-11-30 11:53:20 +0000626 nvmem->type = config->type;
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +0100627 nvmem->reg_read = config->reg_read;
628 nvmem->reg_write = config->reg_write;
Bartosz Golaszewski517f14d2018-11-30 11:53:25 +0000629 if (!config->no_of_node)
630 nvmem->dev.of_node = config->dev->of_node;
Andrey Smirnovfd0f4902018-03-09 14:46:56 +0000631
632 if (config->id == -1 && config->name) {
633 dev_set_name(&nvmem->dev, "%s", config->name);
634 } else {
635 dev_set_name(&nvmem->dev, "%s%d",
636 config->name ? : "nvmem",
637 config->name ? config->id : nvmem->id);
638 }
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100639
Alban Bedel1716cfe2019-01-28 15:55:00 +0000640 nvmem->read_only = device_property_present(config->dev, "read-only") ||
641 config->read_only || !nvmem->reg_write;
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100642
Srinivas Kandagatla84400302020-03-25 13:19:51 +0000643#ifdef CONFIG_NVMEM_SYSFS
644 nvmem->dev.groups = nvmem_dev_groups;
645#endif
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100646
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100647 dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
648
Srinivas Kandagatlaf60442d2020-03-24 17:15:58 +0000649 rval = device_register(&nvmem->dev);
Andrew Lunnb6c217a2016-02-26 20:59:19 +0100650 if (rval)
Johan Hovold3360acd2017-06-09 10:59:07 +0100651 goto err_put_device;
Andrew Lunnb6c217a2016-02-26 20:59:19 +0100652
653 if (config->compat) {
Srinivas Kandagatlaae0c2d72019-04-16 10:59:24 +0100654 rval = nvmem_sysfs_setup_compat(nvmem, config);
Andrew Lunnb6c217a2016-02-26 20:59:19 +0100655 if (rval)
Johan Hovold3360acd2017-06-09 10:59:07 +0100656 goto err_device_del;
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100657 }
658
Bartosz Golaszewskifa72d842018-09-21 06:40:07 -0700659 if (config->cells) {
660 rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
661 if (rval)
662 goto err_teardown_compat;
663 }
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100664
Bartosz Golaszewskib985f4c2018-09-21 06:40:15 -0700665 rval = nvmem_add_cells_from_table(nvmem);
666 if (rval)
667 goto err_remove_cells;
668
Bartosz Golaszewskie888d442018-09-21 06:40:16 -0700669 rval = nvmem_add_cells_from_of(nvmem);
670 if (rval)
671 goto err_remove_cells;
672
Bartosz Golaszewskif4853e12019-02-15 11:42:59 +0100673 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
Bartosz Golaszewskibee11382018-09-21 06:40:19 -0700674
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100675 return nvmem;
Johan Hovold3360acd2017-06-09 10:59:07 +0100676
Bartosz Golaszewskib985f4c2018-09-21 06:40:15 -0700677err_remove_cells:
678 nvmem_device_remove_all_cells(nvmem);
Bartosz Golaszewskifa72d842018-09-21 06:40:07 -0700679err_teardown_compat:
680 if (config->compat)
Srinivas Kandagatlaae0c2d72019-04-16 10:59:24 +0100681 nvmem_sysfs_remove_compat(nvmem, config);
Johan Hovold3360acd2017-06-09 10:59:07 +0100682err_device_del:
683 device_del(&nvmem->dev);
684err_put_device:
685 put_device(&nvmem->dev);
686
Andrew Lunnb6c217a2016-02-26 20:59:19 +0100687 return ERR_PTR(rval);
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100688}
689EXPORT_SYMBOL_GPL(nvmem_register);
690
/* Final kref release: tears the device down in reverse setup order. */
static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	/* Remove the compat "eeprom" file if setup_compat() created one. */
	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	/* The struct itself is freed by nvmem_release() via the last put. */
	device_unregister(&nvmem->dev);
}
705
/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	/* Teardown happens in nvmem_device_release() on the last put. */
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);
716
/* devres destructor: unregisters the managed nvmem device. */
static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}
721
722/**
723 * devm_nvmem_register() - Register a managed nvmem device for given
724 * nvmem_config.
725 * Also creates an binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
726 *
Srinivas Kandagatlab378c772018-05-11 12:07:02 +0100727 * @dev: Device that uses the nvmem device.
Andrey Smirnovf1f50ec2018-03-09 14:46:57 +0000728 * @config: nvmem device configuration with which nvmem device is created.
729 *
730 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
731 * on success.
732 */
733struct nvmem_device *devm_nvmem_register(struct device *dev,
734 const struct nvmem_config *config)
735{
736 struct nvmem_device **ptr, *nvmem;
737
738 ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
739 if (!ptr)
740 return ERR_PTR(-ENOMEM);
741
742 nvmem = nvmem_register(config);
743
744 if (!IS_ERR(nvmem)) {
745 *ptr = nvmem;
746 devres_add(dev, ptr);
747 } else {
748 devres_free(ptr);
749 }
750
751 return nvmem;
752}
753EXPORT_SYMBOL_GPL(devm_nvmem_register);
754
/* devres match callback: true when the devres node holds @data. */
static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **this = res;

	return *this == data;
}
761
/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be a negative value on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	/* Runs devm_nvmem_release() and removes the devres node. */
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);
776
/*
 * Look up a registered nvmem device on the nvmem bus and pin it.
 * @match is a bus_find_device() predicate, @data its argument (a
 * device_node, a name, or caller-defined).  On success the device
 * carries a device ref, a module ref and a kref; release all three
 * with __nvmem_device_put().
 */
static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	/* Not found: the provider may simply not have probed yet. */
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		/* Drop the reference taken by bus_find_device(). */
		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}
804
/* Release the three references taken by __nvmem_device_get(). */
static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
811
Masahiro Yamadae701c672017-09-11 11:00:14 +0200812#if IS_ENABLED(CONFIG_OF)
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +0100813/**
814 * of_nvmem_device_get() - Get nvmem device from a given id
815 *
Vivek Gautam29143262017-01-22 23:02:39 +0000816 * @np: Device tree node that uses the nvmem device.
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +0100817 * @id: nvmem name from nvmem-names property.
818 *
819 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
820 * on success.
821 */
822struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
823{
824
825 struct device_node *nvmem_np;
Alban Bedeld4e7fef2019-01-28 15:55:03 +0000826 int index = 0;
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +0100827
Alban Bedeld4e7fef2019-01-28 15:55:03 +0000828 if (id)
829 index = of_property_match_string(np, "nvmem-names", id);
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +0100830
831 nvmem_np = of_parse_phandle(np, "nvmem", index);
832 if (!nvmem_np)
Alban Bedeld4e7fef2019-01-28 15:55:03 +0000833 return ERR_PTR(-ENOENT);
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +0100834
Thomas Bogendoerfer8c2a2b82019-10-03 11:52:29 +0200835 return __nvmem_device_get(nvmem_np, device_match_of_node);
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +0100836}
837EXPORT_SYMBOL_GPL(of_nvmem_device_get);
838#endif
839
840/**
841 * nvmem_device_get() - Get nvmem device from a given id
842 *
Vivek Gautam29143262017-01-22 23:02:39 +0000843 * @dev: Device that uses the nvmem device.
844 * @dev_name: name of the requested nvmem device.
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +0100845 *
846 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
847 * on success.
848 */
849struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
850{
851 if (dev->of_node) { /* try dt first */
852 struct nvmem_device *nvmem;
853
854 nvmem = of_nvmem_device_get(dev->of_node, dev_name);
855
856 if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
857 return nvmem;
858
859 }
860
Thomas Bogendoerfer8c2a2b82019-10-03 11:52:29 +0200861 return __nvmem_device_get((void *)dev_name, device_match_name);
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +0100862}
863EXPORT_SYMBOL_GPL(nvmem_device_get);
864
/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	/* Thin public wrapper; references must be dropped via nvmem_device_put(). */
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);
880
/* devres match callback: true when the devres node holds @data. */
static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	/* A NULL entry would indicate devres corruption; warn loudly. */
	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}
890
/* devres destructor: drops the reference held by the devres entry. */
static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}
895
/**
 * devm_nvmem_device_put() - put already got nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	/*
	 * devres_release() removes the matching devres entry and invokes
	 * devm_nvmem_device_release(), which puts the device.  A non-zero
	 * return means no matching entry was found.
	 */
	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);
913
/**
 * nvmem_device_put() - put already got nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);
924
925/**
926 * devm_nvmem_device_get() - Get nvmem cell of device form a given id
927 *
Vivek Gautam29143262017-01-22 23:02:39 +0000928 * @dev: Device that requests the nvmem device.
929 * @id: name id for the requested nvmem device.
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +0100930 *
931 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell
932 * on success. The nvmem_cell will be freed by the automatically once the
933 * device is freed.
934 */
935struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
936{
937 struct nvmem_device **ptr, *nvmem;
938
939 ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
940 if (!ptr)
941 return ERR_PTR(-ENOMEM);
942
943 nvmem = nvmem_device_get(dev, id);
944 if (!IS_ERR(nvmem)) {
945 *ptr = nvmem;
946 devres_add(dev, ptr);
947 } else {
948 devres_free(ptr);
949 }
950
951 return nvmem;
952}
953EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
954
Bartosz Golaszewski506157b2018-09-21 06:40:17 -0700955static struct nvmem_cell *
956nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
Srinivas Kandagatla69aba792015-07-27 12:13:34 +0100957{
Bartosz Golaszewski506157b2018-09-21 06:40:17 -0700958 struct nvmem_cell *cell = ERR_PTR(-ENOENT);
959 struct nvmem_cell_lookup *lookup;
Srinivas Kandagatla69aba792015-07-27 12:13:34 +0100960 struct nvmem_device *nvmem;
Bartosz Golaszewski506157b2018-09-21 06:40:17 -0700961 const char *dev_id;
Srinivas Kandagatla69aba792015-07-27 12:13:34 +0100962
Bartosz Golaszewski506157b2018-09-21 06:40:17 -0700963 if (!dev)
964 return ERR_PTR(-EINVAL);
Srinivas Kandagatla69aba792015-07-27 12:13:34 +0100965
Bartosz Golaszewski506157b2018-09-21 06:40:17 -0700966 dev_id = dev_name(dev);
967
968 mutex_lock(&nvmem_lookup_mutex);
969
970 list_for_each_entry(lookup, &nvmem_lookup_list, node) {
971 if ((strcmp(lookup->dev_id, dev_id) == 0) &&
972 (strcmp(lookup->con_id, con_id) == 0)) {
973 /* This is the right entry. */
Thomas Bogendoerfer8c2a2b82019-10-03 11:52:29 +0200974 nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
975 device_match_name);
Bartosz Golaszewskicccb3b12018-10-03 09:31:11 +0200976 if (IS_ERR(nvmem)) {
Bartosz Golaszewski506157b2018-09-21 06:40:17 -0700977 /* Provider may not be registered yet. */
Bartosz Golaszewskicccb3b12018-10-03 09:31:11 +0200978 cell = ERR_CAST(nvmem);
Alban Bedel9bfd8192019-01-28 15:55:06 +0000979 break;
Bartosz Golaszewski506157b2018-09-21 06:40:17 -0700980 }
981
982 cell = nvmem_find_cell_by_name(nvmem,
983 lookup->cell_name);
984 if (!cell) {
985 __nvmem_device_put(nvmem);
Bartosz Golaszewskicccb3b12018-10-03 09:31:11 +0200986 cell = ERR_PTR(-ENOENT);
Bartosz Golaszewski506157b2018-09-21 06:40:17 -0700987 }
Alban Bedel9bfd8192019-01-28 15:55:06 +0000988 break;
Bartosz Golaszewski506157b2018-09-21 06:40:17 -0700989 }
990 }
991
Bartosz Golaszewski506157b2018-09-21 06:40:17 -0700992 mutex_unlock(&nvmem_lookup_mutex);
Srinivas Kandagatla69aba792015-07-27 12:13:34 +0100993 return cell;
994}
995
Masahiro Yamadae701c672017-09-11 11:00:14 +0200996#if IS_ENABLED(CONFIG_OF)
Arnd Bergmann3c53e232018-10-02 23:11:12 +0200997static struct nvmem_cell *
Srinivas Kandagatla0749aa22018-11-06 15:41:41 +0000998nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
Arnd Bergmann3c53e232018-10-02 23:11:12 +0200999{
Alban Bedel1c832672019-01-28 15:55:02 +00001000 struct nvmem_cell *iter, *cell = NULL;
Arnd Bergmann3c53e232018-10-02 23:11:12 +02001001
1002 mutex_lock(&nvmem_mutex);
Alban Bedel1c832672019-01-28 15:55:02 +00001003 list_for_each_entry(iter, &nvmem->cells, node) {
1004 if (np == iter->np) {
1005 cell = iter;
Arnd Bergmann3c53e232018-10-02 23:11:12 +02001006 break;
Alban Bedel1c832672019-01-28 15:55:02 +00001007 }
Arnd Bergmann3c53e232018-10-02 23:11:12 +02001008 }
1009 mutex_unlock(&nvmem_mutex);
1010
1011 return cell;
1012}
1013
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001014/**
1015 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
1016 *
Vivek Gautam29143262017-01-22 23:02:39 +00001017 * @np: Device tree node that uses the nvmem cell.
Bartosz Golaszewski165589f2018-09-21 06:40:21 -07001018 * @id: nvmem cell name from nvmem-cell-names property, or NULL
1019 * for the cell at index 0 (the lone cell with no accompanying
1020 * nvmem-cell-names property).
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001021 *
1022 * Return: Will be an ERR_PTR() on error or a valid pointer
1023 * to a struct nvmem_cell. The nvmem_cell will be freed by the
1024 * nvmem_cell_put().
1025 */
Bartosz Golaszewski165589f2018-09-21 06:40:21 -07001026struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001027{
1028 struct device_node *cell_np, *nvmem_np;
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001029 struct nvmem_device *nvmem;
Bartosz Golaszewskie888d442018-09-21 06:40:16 -07001030 struct nvmem_cell *cell;
Vivek Gautamfd0c4782017-01-22 23:02:40 +00001031 int index = 0;
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001032
Vivek Gautamfd0c4782017-01-22 23:02:40 +00001033 /* if cell name exists, find index to the name */
Bartosz Golaszewski165589f2018-09-21 06:40:21 -07001034 if (id)
1035 index = of_property_match_string(np, "nvmem-cell-names", id);
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001036
1037 cell_np = of_parse_phandle(np, "nvmem-cells", index);
1038 if (!cell_np)
Alban Bedel5087cc12019-01-28 15:55:01 +00001039 return ERR_PTR(-ENOENT);
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001040
1041 nvmem_np = of_get_next_parent(cell_np);
1042 if (!nvmem_np)
1043 return ERR_PTR(-EINVAL);
1044
Thomas Bogendoerfer8c2a2b82019-10-03 11:52:29 +02001045 nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
Masahiro Yamadaaad8d092017-09-11 11:00:12 +02001046 of_node_put(nvmem_np);
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001047 if (IS_ERR(nvmem))
1048 return ERR_CAST(nvmem);
1049
Srinivas Kandagatla0749aa22018-11-06 15:41:41 +00001050 cell = nvmem_find_cell_by_node(nvmem, cell_np);
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001051 if (!cell) {
Bartosz Golaszewskie888d442018-09-21 06:40:16 -07001052 __nvmem_device_put(nvmem);
1053 return ERR_PTR(-ENOENT);
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001054 }
1055
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001056 return cell;
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001057}
1058EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
1059#endif
1060
1061/**
1062 * nvmem_cell_get() - Get nvmem cell of device form a given cell name
1063 *
Vivek Gautam29143262017-01-22 23:02:39 +00001064 * @dev: Device that requests the nvmem cell.
Bartosz Golaszewski165589f2018-09-21 06:40:21 -07001065 * @id: nvmem cell name to get (this corresponds with the name from the
1066 * nvmem-cell-names property for DT systems and with the con_id from
1067 * the lookup entry for non-DT systems).
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001068 *
1069 * Return: Will be an ERR_PTR() on error or a valid pointer
1070 * to a struct nvmem_cell. The nvmem_cell will be freed by the
1071 * nvmem_cell_put().
1072 */
Bartosz Golaszewski165589f2018-09-21 06:40:21 -07001073struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001074{
1075 struct nvmem_cell *cell;
1076
1077 if (dev->of_node) { /* try dt first */
Bartosz Golaszewski165589f2018-09-21 06:40:21 -07001078 cell = of_nvmem_cell_get(dev->of_node, id);
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001079 if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
1080 return cell;
1081 }
1082
Bartosz Golaszewski165589f2018-09-21 06:40:21 -07001083 /* NULL cell id only allowed for device tree; invalid otherwise */
1084 if (!id)
Douglas Anderson87ed1402018-06-18 18:30:43 +01001085 return ERR_PTR(-EINVAL);
1086
Bartosz Golaszewski165589f2018-09-21 06:40:21 -07001087 return nvmem_cell_get_from_lookup(dev, id);
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001088}
1089EXPORT_SYMBOL_GPL(nvmem_cell_get);
1090
/* devres destructor: puts the cell held by the devres entry. */
static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}
1095
1096/**
1097 * devm_nvmem_cell_get() - Get nvmem cell of device form a given id
1098 *
Vivek Gautam29143262017-01-22 23:02:39 +00001099 * @dev: Device that requests the nvmem cell.
1100 * @id: nvmem cell name id to get.
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001101 *
1102 * Return: Will be an ERR_PTR() on error or a valid pointer
1103 * to a struct nvmem_cell. The nvmem_cell will be freed by the
1104 * automatically once the device is freed.
1105 */
1106struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
1107{
1108 struct nvmem_cell **ptr, *cell;
1109
1110 ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
1111 if (!ptr)
1112 return ERR_PTR(-ENOMEM);
1113
1114 cell = nvmem_cell_get(dev, id);
1115 if (!IS_ERR(cell)) {
1116 *ptr = cell;
1117 devres_add(dev, ptr);
1118 } else {
1119 devres_free(ptr);
1120 }
1121
1122 return cell;
1123}
1124EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
1125
/* devres match callback: true when the devres entry wraps @data. */
static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}
1135
/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	/*
	 * devres_release() removes the matching entry and invokes
	 * devm_nvmem_cell_release(), which puts the cell.  A non-zero
	 * return means no matching entry was found.
	 */
	ret = devres_release(dev, devm_nvmem_cell_release,
				devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);
1153
1154/**
1155 * nvmem_cell_put() - Release previously allocated nvmem cell.
1156 *
Vivek Gautam29143262017-01-22 23:02:39 +00001157 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001158 */
1159void nvmem_cell_put(struct nvmem_cell *cell)
1160{
1161 struct nvmem_device *nvmem = cell->nvmem;
1162
1163 __nvmem_device_put(nvmem);
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001164}
1165EXPORT_SYMBOL_GPL(nvmem_cell_put);
1166
Masahiro Yamadaf7c04f12017-09-11 11:00:13 +02001167static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001168{
1169 u8 *p, *b;
Jorge Ramirez-Ortiz2fe518fe2019-04-13 11:32:58 +01001170 int i, extra, bit_offset = cell->bit_offset;
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001171
1172 p = b = buf;
1173 if (bit_offset) {
1174 /* First shift */
1175 *b++ >>= bit_offset;
1176
1177 /* setup rest of the bytes if any */
1178 for (i = 1; i < cell->bytes; i++) {
1179 /* Get bits from next byte and shift them towards msb */
1180 *p |= *b << (BITS_PER_BYTE - bit_offset);
1181
1182 p = b;
1183 *b++ >>= bit_offset;
1184 }
Jorge Ramirez-Ortiz2fe518fe2019-04-13 11:32:58 +01001185 } else {
1186 /* point to the msb */
1187 p += cell->bytes - 1;
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001188 }
Jorge Ramirez-Ortiz2fe518fe2019-04-13 11:32:58 +01001189
1190 /* result fits in less bytes */
1191 extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
1192 while (--extra >= 0)
1193 *p-- = 0;
1194
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001195 /* clear msb bits if any leftover in the last byte */
1196 *p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0);
1197}
1198
1199static int __nvmem_cell_read(struct nvmem_device *nvmem,
1200 struct nvmem_cell *cell,
1201 void *buf, size_t *len)
1202{
1203 int rc;
1204
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +01001205 rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001206
Arnd Bergmann287980e2016-05-27 23:23:25 +02001207 if (rc)
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001208 return rc;
1209
1210 /* shift bits in-place */
Axel Lincbf854a2015-09-30 13:35:15 +01001211 if (cell->bit_offset || cell->nbits)
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001212 nvmem_shift_read_buffer_in_place(cell, buf);
1213
Vivek Gautam3b4a6872017-01-22 23:02:38 +00001214 if (len)
1215 *len = cell->bytes;
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001216
1217 return 0;
1218}
1219
1220/**
1221 * nvmem_cell_read() - Read a given nvmem cell
1222 *
1223 * @cell: nvmem cell to be read.
Vivek Gautam3b4a6872017-01-22 23:02:38 +00001224 * @len: pointer to length of cell which will be populated on successful read;
1225 * can be NULL.
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001226 *
Brian Norrisb577faf2017-01-04 16:18:11 +00001227 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
1228 * buffer should be freed by the consumer with a kfree().
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001229 */
1230void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
1231{
1232 struct nvmem_device *nvmem = cell->nvmem;
1233 u8 *buf;
1234 int rc;
1235
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +01001236 if (!nvmem)
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001237 return ERR_PTR(-EINVAL);
1238
1239 buf = kzalloc(cell->bytes, GFP_KERNEL);
1240 if (!buf)
1241 return ERR_PTR(-ENOMEM);
1242
1243 rc = __nvmem_cell_read(nvmem, cell, buf, len);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001244 if (rc) {
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001245 kfree(buf);
1246 return ERR_PTR(rc);
1247 }
1248
1249 return buf;
1250}
1251EXPORT_SYMBOL_GPL(nvmem_cell_read);
1252
/*
 * Build the byte image to write for a cell that is not byte-aligned:
 * shift the caller's value left by cell->bit_offset and merge in the
 * surrounding bits read back from the device, so the whole-byte write
 * does not clobber neighbouring data.  Returns a kzalloc'ed buffer of
 * cell->bytes bytes owned by the caller, or an ERR_PTR.
 */
static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		/* Remember the pre-shift byte: its high bits carry over. */
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/*
			 * Get last byte bits and shift them towards lsb.
			 * NOTE(review): carrying bit_offset bits from the
			 * previous byte would normally shift by
			 * (BITS_PER_BYTE - bit_offset); the extra "- 1"
			 * here looks suspicious -- confirm multi-byte
			 * cells with a bit offset round-trip correctly.
			 */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;

	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}
1305
/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	/*
	 * Byte-aligned cells require @len to match the cell size exactly.
	 * NOTE(review): @len is not validated when the cell has a bit
	 * offset -- confirm all callers pass a buffer covering the cell.
	 */
	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	/* Non byte-aligned cells are merged with device data first. */
	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
1342
Yangtao Li6bb317c2020-03-10 13:22:45 +00001343static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
1344 void *val, size_t count)
Fabrice Gasnier0a9b2d12019-04-13 11:32:57 +01001345{
1346 struct nvmem_cell *cell;
1347 void *buf;
1348 size_t len;
1349
1350 cell = nvmem_cell_get(dev, cell_id);
1351 if (IS_ERR(cell))
1352 return PTR_ERR(cell);
1353
1354 buf = nvmem_cell_read(cell, &len);
1355 if (IS_ERR(buf)) {
1356 nvmem_cell_put(cell);
1357 return PTR_ERR(buf);
1358 }
Yangtao Li6bb317c2020-03-10 13:22:45 +00001359 if (len != count) {
Fabrice Gasnier0a9b2d12019-04-13 11:32:57 +01001360 kfree(buf);
1361 nvmem_cell_put(cell);
1362 return -EINVAL;
1363 }
Yangtao Li6bb317c2020-03-10 13:22:45 +00001364 memcpy(val, buf, count);
Fabrice Gasnier0a9b2d12019-04-13 11:32:57 +01001365 kfree(buf);
1366 nvmem_cell_put(cell);
1367
1368 return 0;
1369}
Yangtao Li6bb317c2020-03-10 13:22:45 +00001370
/**
 * nvmem_cell_read_u16() - Read a cell value as an u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	/* The cell must be exactly sizeof(u16) bytes, else -EINVAL. */
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
1385
/**
 * nvmem_cell_read_u32() - Read a cell value as an u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	/* The cell must be exactly sizeof(u32) bytes, else -EINVAL. */
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
1400
/**
 * nvmem_cell_read_u64() - Read a cell value as an u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	/* The cell must be exactly sizeof(u64) bytes, else -EINVAL. */
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
1415
1416/**
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001417 * nvmem_device_cell_read() - Read a given nvmem device and cell
1418 *
1419 * @nvmem: nvmem device to read from.
1420 * @info: nvmem cell info to be read.
1421 * @buf: buffer pointer which will be populated on successful read.
1422 *
1423 * Return: length of successful bytes read on success and negative
1424 * error code on error.
1425 */
1426ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
1427 struct nvmem_cell_info *info, void *buf)
1428{
1429 struct nvmem_cell cell;
1430 int rc;
1431 ssize_t len;
1432
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +01001433 if (!nvmem)
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001434 return -EINVAL;
1435
1436 rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001437 if (rc)
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001438 return rc;
1439
1440 rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001441 if (rc)
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001442 return rc;
1443
1444 return len;
1445}
1446EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
1447
1448/**
1449 * nvmem_device_cell_write() - Write cell to a given nvmem device
1450 *
1451 * @nvmem: nvmem device to be written to.
Vivek Gautam29143262017-01-22 23:02:39 +00001452 * @info: nvmem cell info to be written.
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001453 * @buf: buffer to be written to cell.
1454 *
1455 * Return: length of bytes written or negative error code on failure.
Bartosz Golaszewski48f63a22018-09-21 06:40:23 -07001456 */
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001457int nvmem_device_cell_write(struct nvmem_device *nvmem,
1458 struct nvmem_cell_info *info, void *buf)
1459{
1460 struct nvmem_cell cell;
1461 int rc;
1462
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +01001463 if (!nvmem)
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001464 return -EINVAL;
1465
1466 rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001467 if (rc)
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001468 return rc;
1469
1470 return nvmem_cell_write(&cell, buf, cell.bytes);
1471}
1472EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
1473
1474/**
1475 * nvmem_device_read() - Read from a given nvmem device
1476 *
1477 * @nvmem: nvmem device to read from.
1478 * @offset: offset in nvmem device.
1479 * @bytes: number of bytes to read.
1480 * @buf: buffer pointer which will be populated on successful read.
1481 *
1482 * Return: length of successful bytes read on success and negative
1483 * error code on error.
1484 */
1485int nvmem_device_read(struct nvmem_device *nvmem,
1486 unsigned int offset,
1487 size_t bytes, void *buf)
1488{
1489 int rc;
1490
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +01001491 if (!nvmem)
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001492 return -EINVAL;
1493
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +01001494 rc = nvmem_reg_read(nvmem, offset, buf, bytes);
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001495
Arnd Bergmann287980e2016-05-27 23:23:25 +02001496 if (rc)
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001497 return rc;
1498
1499 return bytes;
1500}
1501EXPORT_SYMBOL_GPL(nvmem_device_read);
1502
1503/**
1504 * nvmem_device_write() - Write cell to a given nvmem device
1505 *
1506 * @nvmem: nvmem device to be written to.
1507 * @offset: offset in nvmem device.
1508 * @bytes: number of bytes to write.
1509 * @buf: buffer to be written.
1510 *
1511 * Return: length of bytes written or negative error code on failure.
Bartosz Golaszewski48f63a22018-09-21 06:40:23 -07001512 */
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001513int nvmem_device_write(struct nvmem_device *nvmem,
1514 unsigned int offset,
1515 size_t bytes, void *buf)
1516{
1517 int rc;
1518
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +01001519 if (!nvmem)
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001520 return -EINVAL;
1521
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +01001522 rc = nvmem_reg_write(nvmem, offset, buf, bytes);
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001523
Arnd Bergmann287980e2016-05-27 23:23:25 +02001524 if (rc)
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001525 return rc;
1526
1527
1528 return bytes;
1529}
1530EXPORT_SYMBOL_GPL(nvmem_device_write);
1531
/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	/* The table is linked in place, not copied: it must stay alive
	 * until nvmem_del_cell_table() removes it. */
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
1544
/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
1557
1558/**
Bartosz Golaszewski506157b2018-09-21 06:40:17 -07001559 * nvmem_add_cell_lookups() - register a list of cell lookup entries
1560 *
1561 * @entries: array of cell lookup entries
1562 * @nentries: number of cell lookup entries in the array
1563 */
1564void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
1565{
1566 int i;
1567
1568 mutex_lock(&nvmem_lookup_mutex);
1569 for (i = 0; i < nentries; i++)
1570 list_add_tail(&entries[i].node, &nvmem_lookup_list);
1571 mutex_unlock(&nvmem_lookup_mutex);
1572}
1573EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
1574
1575/**
1576 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
1577 * entries
1578 *
1579 * @entries: array of cell lookup entries
1580 * @nentries: number of cell lookup entries in the array
1581 */
1582void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
1583{
1584 int i;
1585
1586 mutex_lock(&nvmem_lookup_mutex);
1587 for (i = 0; i < nentries; i++)
1588 list_del(&entries[i].node);
1589 mutex_unlock(&nvmem_lookup_mutex);
1590}
1591EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
1592
/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	/* The returned string is owned by the embedded struct device. */
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);
1605
/* Subsystem init: register the nvmem bus type. */
static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}
1610
/* Subsystem teardown: unregister the nvmem bus type. */
static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}
1615
1616subsys_initcall(nvmem_init);
1617module_exit(nvmem_exit);
1618
1619MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
1620MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
1621MODULE_DESCRIPTION("nvmem Driver Core");
1622MODULE_LICENSE("GPL v2");