blob: 95bed31391cd1c50057c7e4ac8f6017c551557d2 [file] [log] [blame]
Bartosz Golaszewskib1c1db92018-09-21 06:40:20 -07001// SPDX-License-Identifier: GPL-2.0
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +01002/*
3 * nvmem framework core.
4 *
5 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
6 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +01007 */
8
9#include <linux/device.h>
10#include <linux/export.h>
11#include <linux/fs.h>
12#include <linux/idr.h>
13#include <linux/init.h>
Bartosz Golaszewskic1de7f42018-09-21 06:40:08 -070014#include <linux/kref.h>
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +010015#include <linux/module.h>
16#include <linux/nvmem-consumer.h>
17#include <linux/nvmem-provider.h>
Khouloud Touil2a127da2020-01-07 10:29:19 +010018#include <linux/gpio/consumer.h>
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +010019#include <linux/of.h>
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +010020#include <linux/slab.h>
Srinivas Kandagatla84400302020-03-25 13:19:51 +000021
/* Internal representation of a single NVMEM provider. */
struct nvmem_device {
	struct module *owner;		/* module providing reg_read/reg_write */
	struct device dev;		/* embedded device on nvmem_bus_type */
	int stride;			/* required access alignment, in bytes */
	int word_size;			/* minimum access granularity, in bytes */
	int id;				/* id from nvmem_ida, used in default name */
	struct kref refcnt;		/* dropped via nvmem_unregister()/device put */
	size_t size;			/* total device size, in bytes */
	bool read_only;			/* no writes via sysfs */
	bool root_only;			/* restrict sysfs access to root */
	int flags;			/* FLAG_* bits, see below */
	enum nvmem_type type;		/* exposed through the sysfs "type" attr */
	struct bin_attribute eeprom;	/* per-device compat "eeprom" file */
	struct device *base_dev;	/* device the compat file is created on */
	struct list_head cells;		/* nvmem_cell list, guarded by nvmem_mutex */
	nvmem_reg_read_t reg_read;	/* provider callbacks; either may be NULL */
	nvmem_reg_write_t reg_write;
	struct gpio_desc *wp_gpio;	/* optional write-protect gpio; driven low
					 * only for the duration of a write */
	void *priv;			/* provider context passed to callbacks */
};

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

/* Set in ->flags once the compat "eeprom" sysfs file has been created. */
#define FLAG_COMPAT		BIT(0)
Andrew Lunnb6c217a2016-02-26 20:59:19 +010046
/* A named sub-range ("cell") of an nvmem device. */
struct nvmem_cell {
	const char		*name;		/* kstrdup_const'd; free with kfree_const() */
	int			offset;		/* byte offset within the device */
	int			bytes;		/* size in bytes (derived from nbits if set) */
	int			bit_offset;	/* first valid bit inside the first byte */
	int			nbits;		/* bit width; 0 means whole bytes */
	struct device_node	*np;		/* originating OF node (holds a reference) */
	struct nvmem_device	*nvmem;		/* owning device */
	struct list_head	node;		/* entry in nvmem->cells (nvmem_mutex) */
};
57
/* Guards every nvmem_device's ->cells list and device lookup. */
static DEFINE_MUTEX(nvmem_mutex);
/* Allocator backing nvmem_device->id. */
static DEFINE_IDA(nvmem_ida);

/* Registry of provider-supplied cell definition tables. */
static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

/* Registry of consumer cell lookup entries (used by lookup code elsewhere). */
static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

/* Notified on NVMEM_ADD/REMOVE and NVMEM_CELL_ADD/REMOVE events. */
static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
Michael Auchterb96fc542020-05-11 15:50:41 +010069static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
70 void *val, size_t bytes)
71{
72 if (nvmem->reg_read)
73 return nvmem->reg_read(nvmem->priv, offset, val, bytes);
74
75 return -EINVAL;
76}
77
78static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
79 void *val, size_t bytes)
80{
81 int ret;
82
83 if (nvmem->reg_write) {
84 gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
85 ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
86 gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
87 return ret;
88 }
89
90 return -EINVAL;
91}
92
#ifdef CONFIG_NVMEM_SYSFS
/* Human-readable names for enum nvmem_type, indexed by the enum value. */
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* Separate lockdep class for the per-device compat "eeprom" attribute. */
static struct lock_class_key eeprom_lock_key;
#endif

/* sysfs "type" attribute: report the device type as a string. */
static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

/* Plain (non-binary) attributes attached to every nvmem device. */
static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};
119
/*
 * Read handler shared by the "nvmem" binary file and the compat
 * "eeprom" file.  For the compat file attr->private carries the nvmem
 * struct device, because the kobject then belongs to the provider's
 * base_dev rather than to the nvmem device itself.
 *
 * Accesses must be stride-aligned and at least one word long; the
 * request is clamped to the device size and rounded down to whole
 * words.  Returns bytes read, 0 at/past EOF, or a negative errno.
 */
static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}
159
/*
 * Write handler shared by the "nvmem" binary file and the compat
 * "eeprom" file; see bin_attr_nvmem_read() for the attr->private
 * device resolution.  Same alignment/clamping rules as the read path;
 * writing at or past the end yields -EFBIG.  Returns bytes written or
 * a negative errno.
 */
static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}
199
/*
 * Compute the sysfs file mode for the nvmem binary attribute:
 * owner-read by default, world-readable unless root_only, writable
 * unless read_only.  Read/write permission bits are then stripped
 * entirely when the provider lacks the corresponding callback.
 */
static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}
218
Srinivas Kandagatla2a4542e2020-04-17 13:13:06 +0100219static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
220 struct bin_attribute *attr, int i)
221{
222 struct device *dev = container_of(kobj, struct device, kobj);
223 struct nvmem_device *nvmem = to_nvmem_device(dev);
224
225 return nvmem_bin_attr_get_umode(nvmem);
226}
227
/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr = {
		.name = "nvmem",
		/* placeholder; effective mode comes from is_bin_visible */
		.mode = 0644,
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

/* Binary + plain attributes, with per-device mode via is_bin_visible. */
static const struct attribute_group nvmem_bin_group = {
	.bin_attrs = nvmem_bin_attributes,
	.attrs = nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
};

/* Installed as dev.groups in nvmem_register() when sysfs is enabled. */
static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};
253
/*
 * Template for the legacy "eeprom" file.  Mode, size, private pointer
 * and lockdep key are filled in per device by nvmem_sysfs_setup_compat().
 */
static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr = {
		.name = "eeprom",
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};
261
/*
 * nvmem_setup_compat() - Create an additional binary entry in
 * drivers sys directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 *
 * No-op unless config->compat is set; requires config->base_dev to
 * host the file.  Returns 0 on success or a negative errno.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	/* Copy the template, then tailor it to this device. */
	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	/* Lets the shared read/write handlers find the nvmem device. */
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}
298
299static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
300 const struct nvmem_config *config)
301{
302 if (config->compat)
303 device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
304}
305
#else /* CONFIG_NVMEM_SYSFS */

/* Without sysfs support the compat "eeprom" file cannot be provided. */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */
Andy Shevchenkoa8b44d52018-11-30 11:53:24 +0000319
/*
 * Device ->release callback: runs once the last reference to the
 * embedded struct device is gone.  Frees the id, the (optional)
 * write-protect gpio and the nvmem_device itself.
 */
static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}
328
/* Wires nvmem_release() to the embedded struct device. */
static const struct device_type nvmem_provider_type = {
	.release = nvmem_release,
};

/* All nvmem devices hang off this virtual bus (/sys/bus/nvmem). */
static struct bus_type nvmem_bus_type = {
	.name = "nvmem",
};
336
/*
 * Unpublish and free a cell: notify consumers first, unlink under
 * nvmem_mutex, then release the OF node reference, the name and the
 * cell itself.
 */
static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}
347
/* Drop every cell still registered on @nvmem (device teardown path). */
static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	/* _safe variant: nvmem_cell_drop() unlinks and frees each entry. */
	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}
355
/* Publish @cell on its owning device's list, then notify consumers. */
static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}
363
364static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
365 const struct nvmem_cell_info *info,
366 struct nvmem_cell *cell)
367{
368 cell->nvmem = nvmem;
369 cell->offset = info->offset;
370 cell->bytes = info->bytes;
Bitan Biswas16bb7ab2020-01-09 10:40:17 +0000371 cell->name = kstrdup_const(info->name, GFP_KERNEL);
372 if (!cell->name)
373 return -ENOMEM;
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100374
375 cell->bit_offset = info->bit_offset;
376 cell->nbits = info->nbits;
377
378 if (cell->nbits)
379 cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
380 BITS_PER_BYTE);
381
382 if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
383 dev_err(&nvmem->dev,
384 "cell %s unaligned to nvmem stride %d\n",
385 cell->name, nvmem->stride);
386 return -EINVAL;
387 }
388
389 return 0;
390}
391
/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
		    const struct nvmem_cell_info *info,
		    int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	/* Temporary pointer array, kept only so we can unwind on error. */
	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	/* Drop only the cells already published; slot i itself was freed. */
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}
440
/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Events are NVMEM_ADD/NVMEM_REMOVE and NVMEM_CELL_ADD/NVMEM_CELL_REMOVE.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
466
Bartosz Golaszewskib985f4c2018-09-21 06:40:15 -0700467static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
468{
469 const struct nvmem_cell_info *info;
470 struct nvmem_cell_table *table;
471 struct nvmem_cell *cell;
472 int rval = 0, i;
473
474 mutex_lock(&nvmem_cell_mutex);
475 list_for_each_entry(table, &nvmem_cell_tables, node) {
476 if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
477 for (i = 0; i < table->ncells; i++) {
478 info = &table->cells[i];
479
480 cell = kzalloc(sizeof(*cell), GFP_KERNEL);
481 if (!cell) {
482 rval = -ENOMEM;
483 goto out;
484 }
485
486 rval = nvmem_cell_info_to_nvmem_cell(nvmem,
487 info,
488 cell);
489 if (rval) {
490 kfree(cell);
491 goto out;
492 }
493
494 nvmem_cell_add(cell);
495 }
496 }
497 }
498
499out:
500 mutex_unlock(&nvmem_cell_mutex);
501 return rval;
502}
503
Bartosz Golaszewskie888d442018-09-21 06:40:16 -0700504static struct nvmem_cell *
Bartosz Golaszewski506157b2018-09-21 06:40:17 -0700505nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
506{
Alban Bedel1c832672019-01-28 15:55:02 +0000507 struct nvmem_cell *iter, *cell = NULL;
Bartosz Golaszewski506157b2018-09-21 06:40:17 -0700508
509 mutex_lock(&nvmem_mutex);
Alban Bedel1c832672019-01-28 15:55:02 +0000510 list_for_each_entry(iter, &nvmem->cells, node) {
511 if (strcmp(cell_id, iter->name) == 0) {
512 cell = iter;
Bartosz Golaszewski506157b2018-09-21 06:40:17 -0700513 break;
Alban Bedel1c832672019-01-28 15:55:02 +0000514 }
Bartosz Golaszewski506157b2018-09-21 06:40:17 -0700515 }
516 mutex_unlock(&nvmem_mutex);
517
518 return cell;
519}
520
Bartosz Golaszewskie888d442018-09-21 06:40:16 -0700521static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
522{
523 struct device_node *parent, *child;
524 struct device *dev = &nvmem->dev;
525 struct nvmem_cell *cell;
526 const __be32 *addr;
527 int len;
528
529 parent = dev->of_node;
530
531 for_each_child_of_node(parent, child) {
532 addr = of_get_property(child, "reg", &len);
533 if (!addr || (len < 2 * sizeof(u32))) {
534 dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
535 return -EINVAL;
536 }
537
538 cell = kzalloc(sizeof(*cell), GFP_KERNEL);
539 if (!cell)
540 return -ENOMEM;
541
542 cell->nvmem = nvmem;
Srinivas Kandagatla0749aa22018-11-06 15:41:41 +0000543 cell->np = of_node_get(child);
Bartosz Golaszewskie888d442018-09-21 06:40:16 -0700544 cell->offset = be32_to_cpup(addr++);
545 cell->bytes = be32_to_cpup(addr);
Rob Herringbadcdff2018-10-03 18:47:04 +0100546 cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
Bartosz Golaszewskie888d442018-09-21 06:40:16 -0700547
548 addr = of_get_property(child, "bits", &len);
549 if (addr && len == (2 * sizeof(u32))) {
550 cell->bit_offset = be32_to_cpup(addr++);
551 cell->nbits = be32_to_cpup(addr);
552 }
553
554 if (cell->nbits)
555 cell->bytes = DIV_ROUND_UP(
556 cell->nbits + cell->bit_offset,
557 BITS_PER_BYTE);
558
559 if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
560 dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
561 cell->name, nvmem->stride);
562 /* Cells already added will be freed later. */
Bitan Biswas16bb7ab2020-01-09 10:40:17 +0000563 kfree_const(cell->name);
Bartosz Golaszewskie888d442018-09-21 06:40:16 -0700564 kfree(cell);
565 return -EINVAL;
566 }
567
568 nvmem_cell_add(cell);
569 }
570
571 return 0;
572}
573
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100574/**
575 * nvmem_register() - Register a nvmem device for given nvmem_config.
Andreas Färber3a758072020-07-22 11:06:56 +0100576 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100577 *
578 * @config: nvmem device configuration with which nvmem device is created.
579 *
580 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
581 * on success.
582 */
583
584struct nvmem_device *nvmem_register(const struct nvmem_config *config)
585{
586 struct nvmem_device *nvmem;
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100587 int rval;
588
589 if (!config->dev)
590 return ERR_PTR(-EINVAL);
591
Srinivas Kandagatla061a3202020-03-10 13:22:51 +0000592 if (!config->reg_read && !config->reg_write)
593 return ERR_PTR(-EINVAL);
594
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100595 nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
596 if (!nvmem)
597 return ERR_PTR(-ENOMEM);
598
599 rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
600 if (rval < 0) {
601 kfree(nvmem);
602 return ERR_PTR(rval);
603 }
Bartosz Golaszewski31c6ff52020-03-10 13:22:48 +0000604
Khouloud Touil2a127da2020-01-07 10:29:19 +0100605 if (config->wp_gpio)
606 nvmem->wp_gpio = config->wp_gpio;
607 else
608 nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
609 GPIOD_OUT_HIGH);
Bartosz Golaszewskif7d8d7d2020-03-10 13:22:49 +0000610 if (IS_ERR(nvmem->wp_gpio)) {
611 ida_simple_remove(&nvmem_ida, nvmem->id);
612 rval = PTR_ERR(nvmem->wp_gpio);
613 kfree(nvmem);
614 return ERR_PTR(rval);
615 }
Khouloud Touil2a127da2020-01-07 10:29:19 +0100616
Bartosz Golaszewskic1de7f42018-09-21 06:40:08 -0700617 kref_init(&nvmem->refcnt);
Bartosz Golaszewskic7235ee2018-09-21 06:40:14 -0700618 INIT_LIST_HEAD(&nvmem->cells);
Bartosz Golaszewskic1de7f42018-09-21 06:40:08 -0700619
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100620 nvmem->id = rval;
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100621 nvmem->owner = config->owner;
Masahiro Yamada17eb18d2017-10-21 01:57:42 +0900622 if (!nvmem->owner && config->dev->driver)
623 nvmem->owner = config->dev->driver->owner;
Heiner Kallweit99897ef2017-12-15 14:06:05 +0000624 nvmem->stride = config->stride ?: 1;
625 nvmem->word_size = config->word_size ?: 1;
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +0100626 nvmem->size = config->size;
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100627 nvmem->dev.type = &nvmem_provider_type;
628 nvmem->dev.bus = &nvmem_bus_type;
629 nvmem->dev.parent = config->dev;
Srinivas Kandagatlae6de1792020-03-25 12:21:15 +0000630 nvmem->root_only = config->root_only;
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +0100631 nvmem->priv = config->priv;
Alexandre Belloni16688452018-11-30 11:53:20 +0000632 nvmem->type = config->type;
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +0100633 nvmem->reg_read = config->reg_read;
634 nvmem->reg_write = config->reg_write;
Bartosz Golaszewski517f14d2018-11-30 11:53:25 +0000635 if (!config->no_of_node)
636 nvmem->dev.of_node = config->dev->of_node;
Andrey Smirnovfd0f4902018-03-09 14:46:56 +0000637
638 if (config->id == -1 && config->name) {
639 dev_set_name(&nvmem->dev, "%s", config->name);
640 } else {
641 dev_set_name(&nvmem->dev, "%s%d",
642 config->name ? : "nvmem",
643 config->name ? config->id : nvmem->id);
644 }
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100645
Alban Bedel1716cfe2019-01-28 15:55:00 +0000646 nvmem->read_only = device_property_present(config->dev, "read-only") ||
647 config->read_only || !nvmem->reg_write;
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100648
Srinivas Kandagatla84400302020-03-25 13:19:51 +0000649#ifdef CONFIG_NVMEM_SYSFS
650 nvmem->dev.groups = nvmem_dev_groups;
651#endif
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100652
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100653 dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
654
Srinivas Kandagatlaf60442d2020-03-24 17:15:58 +0000655 rval = device_register(&nvmem->dev);
Andrew Lunnb6c217a2016-02-26 20:59:19 +0100656 if (rval)
Johan Hovold3360acd2017-06-09 10:59:07 +0100657 goto err_put_device;
Andrew Lunnb6c217a2016-02-26 20:59:19 +0100658
659 if (config->compat) {
Srinivas Kandagatlaae0c2d72019-04-16 10:59:24 +0100660 rval = nvmem_sysfs_setup_compat(nvmem, config);
Andrew Lunnb6c217a2016-02-26 20:59:19 +0100661 if (rval)
Johan Hovold3360acd2017-06-09 10:59:07 +0100662 goto err_device_del;
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100663 }
664
Bartosz Golaszewskifa72d842018-09-21 06:40:07 -0700665 if (config->cells) {
666 rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
667 if (rval)
668 goto err_teardown_compat;
669 }
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100670
Bartosz Golaszewskib985f4c2018-09-21 06:40:15 -0700671 rval = nvmem_add_cells_from_table(nvmem);
672 if (rval)
673 goto err_remove_cells;
674
Bartosz Golaszewskie888d442018-09-21 06:40:16 -0700675 rval = nvmem_add_cells_from_of(nvmem);
676 if (rval)
677 goto err_remove_cells;
678
Bartosz Golaszewskif4853e12019-02-15 11:42:59 +0100679 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
Bartosz Golaszewskibee11382018-09-21 06:40:19 -0700680
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100681 return nvmem;
Johan Hovold3360acd2017-06-09 10:59:07 +0100682
Bartosz Golaszewskib985f4c2018-09-21 06:40:15 -0700683err_remove_cells:
684 nvmem_device_remove_all_cells(nvmem);
Bartosz Golaszewskifa72d842018-09-21 06:40:07 -0700685err_teardown_compat:
686 if (config->compat)
Srinivas Kandagatlaae0c2d72019-04-16 10:59:24 +0100687 nvmem_sysfs_remove_compat(nvmem, config);
Johan Hovold3360acd2017-06-09 10:59:07 +0100688err_device_del:
689 device_del(&nvmem->dev);
690err_put_device:
691 put_device(&nvmem->dev);
692
Andrew Lunnb6c217a2016-02-26 20:59:19 +0100693 return ERR_PTR(rval);
Srinivas Kandagatlaeace75c2015-07-27 12:13:19 +0100694}
695EXPORT_SYMBOL_GPL(nvmem_register);
696
/*
 * kref release callback: notify consumers, remove the compat "eeprom"
 * file if one was created, drop all cells and unregister the device.
 * The final kfree happens in nvmem_release() once the device refcount
 * hits zero.
 */
static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_unregister(&nvmem->dev);
}
711
/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	/* Teardown is deferred until consumers drop their references too. */
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);
722
/* devres destructor: unregister the managed nvmem device. */
static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}
727
728/**
729 * devm_nvmem_register() - Register a managed nvmem device for given
730 * nvmem_config.
Andreas Färber3a758072020-07-22 11:06:56 +0100731 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
Andrey Smirnovf1f50ec2018-03-09 14:46:57 +0000732 *
Srinivas Kandagatlab378c772018-05-11 12:07:02 +0100733 * @dev: Device that uses the nvmem device.
Andrey Smirnovf1f50ec2018-03-09 14:46:57 +0000734 * @config: nvmem device configuration with which nvmem device is created.
735 *
736 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
737 * on success.
738 */
739struct nvmem_device *devm_nvmem_register(struct device *dev,
740 const struct nvmem_config *config)
741{
742 struct nvmem_device **ptr, *nvmem;
743
744 ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
745 if (!ptr)
746 return ERR_PTR(-ENOMEM);
747
748 nvmem = nvmem_register(config);
749
750 if (!IS_ERR(nvmem)) {
751 *ptr = nvmem;
752 devres_add(dev, ptr);
753 } else {
754 devres_free(ptr);
755 }
756
757 return nvmem;
758}
759EXPORT_SYMBOL_GPL(devm_nvmem_register);
760
/* devres match: true when the resource wraps the nvmem device @data. */
static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}
767
/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	/* devres_release() invokes devm_nvmem_release() -> nvmem_unregister() */
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);
782
/*
 * Core lookup: find an nvmem device on the bus via @match and take the
 * references a consumer needs (device ref from bus_find_device(), a
 * module ref and a kref).  Undone by __nvmem_device_put().
 *
 * Returns ERR_PTR(-EPROBE_DEFER) when no device matches yet, so
 * consumers can retry once the provider probes.
 */
static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		/* Drop the reference taken by bus_find_device(). */
		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}
810
/* Undo __nvmem_device_get(): drop device, module and kref references. */
static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
817
Masahiro Yamadae701c672017-09-11 11:00:14 +0200818#if IS_ENABLED(CONFIG_OF)
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +0100819/**
820 * of_nvmem_device_get() - Get nvmem device from a given id
821 *
Vivek Gautam29143262017-01-22 23:02:39 +0000822 * @np: Device tree node that uses the nvmem device.
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +0100823 * @id: nvmem name from nvmem-names property.
824 *
825 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
826 * on success.
827 */
828struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
829{
830
831 struct device_node *nvmem_np;
Alban Bedeld4e7fef2019-01-28 15:55:03 +0000832 int index = 0;
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +0100833
Alban Bedeld4e7fef2019-01-28 15:55:03 +0000834 if (id)
835 index = of_property_match_string(np, "nvmem-names", id);
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +0100836
837 nvmem_np = of_parse_phandle(np, "nvmem", index);
838 if (!nvmem_np)
Alban Bedeld4e7fef2019-01-28 15:55:03 +0000839 return ERR_PTR(-ENOENT);
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +0100840
Thomas Bogendoerfer8c2a2b82019-10-03 11:52:29 +0200841 return __nvmem_device_get(nvmem_np, device_match_of_node);
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +0100842}
843EXPORT_SYMBOL_GPL(of_nvmem_device_get);
844#endif
845
/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		/* Success and probe-defer both end the lookup here. */
		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;

	}

	/* Fall back to a lookup by device name on the nvmem bus. */
	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
870
/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	/* Thin public wrapper around the internal bus lookup helper. */
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);
886
/* devres match callback: true when the tracked nvmem device equals @data. */
static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **this = res;

	if (WARN_ON(!this || !*this))
		return 0;

	return *this == data;
}
896
/* devres release callback: drop the reference held on the nvmem device. */
static void devm_nvmem_device_release(struct device *dev, void *res)
{
	struct nvmem_device **nvmem = res;

	nvmem_device_put(*nvmem);
}
901
902/**
903 * devm_nvmem_device_put() - put alredy got nvmem device
904 *
Vivek Gautam29143262017-01-22 23:02:39 +0000905 * @dev: Device that uses the nvmem device.
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +0100906 * @nvmem: pointer to nvmem device allocated by devm_nvmem_cell_get(),
907 * that needs to be released.
908 */
909void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
910{
911 int ret;
912
913 ret = devres_release(dev, devm_nvmem_device_release,
914 devm_nvmem_device_match, nvmem);
915
916 WARN_ON(ret);
917}
918EXPORT_SYMBOL_GPL(devm_nvmem_device_put);
919
/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	/* Public wrapper: drops the reference taken on get. */
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);
930
931/**
932 * devm_nvmem_device_get() - Get nvmem cell of device form a given id
933 *
Vivek Gautam29143262017-01-22 23:02:39 +0000934 * @dev: Device that requests the nvmem device.
935 * @id: name id for the requested nvmem device.
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +0100936 *
937 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell
938 * on success. The nvmem_cell will be freed by the automatically once the
939 * device is freed.
940 */
941struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
942{
943 struct nvmem_device **ptr, *nvmem;
944
945 ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
946 if (!ptr)
947 return ERR_PTR(-ENOMEM);
948
949 nvmem = nvmem_device_get(dev, id);
950 if (!IS_ERR(nvmem)) {
951 *ptr = nvmem;
952 devres_add(dev, ptr);
953 } else {
954 devres_free(ptr);
955 }
956
957 return nvmem;
958}
959EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
960
/*
 * Resolve a (device name, connection id) pair to a cell through the
 * statically registered nvmem lookup table (see nvmem_add_cell_lookups()).
 * Returns ERR_PTR(-ENOENT) when no table entry or no such cell exists.
 */
static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell = nvmem_find_cell_by_name(nvmem,
						       lookup->cell_name);
			if (!cell) {
				/* Entry matched but the cell is absent:
				 * drop the device reference again.
				 */
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			/* First matching entry wins. */
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}
1001
Masahiro Yamadae701c672017-09-11 11:00:14 +02001002#if IS_ENABLED(CONFIG_OF)
Arnd Bergmann3c53e232018-10-02 23:11:12 +02001003static struct nvmem_cell *
Srinivas Kandagatla0749aa22018-11-06 15:41:41 +00001004nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
Arnd Bergmann3c53e232018-10-02 23:11:12 +02001005{
Alban Bedel1c832672019-01-28 15:55:02 +00001006 struct nvmem_cell *iter, *cell = NULL;
Arnd Bergmann3c53e232018-10-02 23:11:12 +02001007
1008 mutex_lock(&nvmem_mutex);
Alban Bedel1c832672019-01-28 15:55:02 +00001009 list_for_each_entry(iter, &nvmem->cells, node) {
1010 if (np == iter->np) {
1011 cell = iter;
Arnd Bergmann3c53e232018-10-02 23:11:12 +02001012 break;
Alban Bedel1c832672019-01-28 15:55:02 +00001013 }
Arnd Bergmann3c53e232018-10-02 23:11:12 +02001014 }
1015 mutex_unlock(&nvmem_mutex);
1016
1017 return cell;
1018}
1019
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001020/**
1021 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
1022 *
Vivek Gautam29143262017-01-22 23:02:39 +00001023 * @np: Device tree node that uses the nvmem cell.
Bartosz Golaszewski165589f2018-09-21 06:40:21 -07001024 * @id: nvmem cell name from nvmem-cell-names property, or NULL
1025 * for the cell at index 0 (the lone cell with no accompanying
1026 * nvmem-cell-names property).
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001027 *
1028 * Return: Will be an ERR_PTR() on error or a valid pointer
1029 * to a struct nvmem_cell. The nvmem_cell will be freed by the
1030 * nvmem_cell_put().
1031 */
Bartosz Golaszewski165589f2018-09-21 06:40:21 -07001032struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001033{
1034 struct device_node *cell_np, *nvmem_np;
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001035 struct nvmem_device *nvmem;
Bartosz Golaszewskie888d442018-09-21 06:40:16 -07001036 struct nvmem_cell *cell;
Vivek Gautamfd0c4782017-01-22 23:02:40 +00001037 int index = 0;
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001038
Vivek Gautamfd0c4782017-01-22 23:02:40 +00001039 /* if cell name exists, find index to the name */
Bartosz Golaszewski165589f2018-09-21 06:40:21 -07001040 if (id)
1041 index = of_property_match_string(np, "nvmem-cell-names", id);
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001042
1043 cell_np = of_parse_phandle(np, "nvmem-cells", index);
1044 if (!cell_np)
Alban Bedel5087cc12019-01-28 15:55:01 +00001045 return ERR_PTR(-ENOENT);
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001046
1047 nvmem_np = of_get_next_parent(cell_np);
1048 if (!nvmem_np)
1049 return ERR_PTR(-EINVAL);
1050
Thomas Bogendoerfer8c2a2b82019-10-03 11:52:29 +02001051 nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
Masahiro Yamadaaad8d092017-09-11 11:00:12 +02001052 of_node_put(nvmem_np);
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001053 if (IS_ERR(nvmem))
1054 return ERR_CAST(nvmem);
1055
Srinivas Kandagatla0749aa22018-11-06 15:41:41 +00001056 cell = nvmem_find_cell_by_node(nvmem, cell_np);
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001057 if (!cell) {
Bartosz Golaszewskie888d442018-09-21 06:40:16 -07001058 __nvmem_device_put(nvmem);
1059 return ERR_PTR(-ENOENT);
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001060 }
1061
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001062 return cell;
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001063}
1064EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
1065#endif
1066
1067/**
1068 * nvmem_cell_get() - Get nvmem cell of device form a given cell name
1069 *
Vivek Gautam29143262017-01-22 23:02:39 +00001070 * @dev: Device that requests the nvmem cell.
Bartosz Golaszewski165589f2018-09-21 06:40:21 -07001071 * @id: nvmem cell name to get (this corresponds with the name from the
1072 * nvmem-cell-names property for DT systems and with the con_id from
1073 * the lookup entry for non-DT systems).
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001074 *
1075 * Return: Will be an ERR_PTR() on error or a valid pointer
1076 * to a struct nvmem_cell. The nvmem_cell will be freed by the
1077 * nvmem_cell_put().
1078 */
Bartosz Golaszewski165589f2018-09-21 06:40:21 -07001079struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001080{
1081 struct nvmem_cell *cell;
1082
1083 if (dev->of_node) { /* try dt first */
Bartosz Golaszewski165589f2018-09-21 06:40:21 -07001084 cell = of_nvmem_cell_get(dev->of_node, id);
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001085 if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
1086 return cell;
1087 }
1088
Bartosz Golaszewski165589f2018-09-21 06:40:21 -07001089 /* NULL cell id only allowed for device tree; invalid otherwise */
1090 if (!id)
Douglas Anderson87ed1402018-06-18 18:30:43 +01001091 return ERR_PTR(-EINVAL);
1092
Bartosz Golaszewski165589f2018-09-21 06:40:21 -07001093 return nvmem_cell_get_from_lookup(dev, id);
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001094}
1095EXPORT_SYMBOL_GPL(nvmem_cell_get);
1096
/* devres release callback: drop the reference held on the nvmem cell. */
static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	struct nvmem_cell **cell = res;

	nvmem_cell_put(*cell);
}
1101
1102/**
1103 * devm_nvmem_cell_get() - Get nvmem cell of device form a given id
1104 *
Vivek Gautam29143262017-01-22 23:02:39 +00001105 * @dev: Device that requests the nvmem cell.
1106 * @id: nvmem cell name id to get.
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001107 *
1108 * Return: Will be an ERR_PTR() on error or a valid pointer
1109 * to a struct nvmem_cell. The nvmem_cell will be freed by the
1110 * automatically once the device is freed.
1111 */
1112struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
1113{
1114 struct nvmem_cell **ptr, *cell;
1115
1116 ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
1117 if (!ptr)
1118 return ERR_PTR(-ENOMEM);
1119
1120 cell = nvmem_cell_get(dev, id);
1121 if (!IS_ERR(cell)) {
1122 *ptr = cell;
1123 devres_add(dev, ptr);
1124 } else {
1125 devres_free(ptr);
1126 }
1127
1128 return cell;
1129}
1130EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
1131
/* devres match callback: true when the tracked nvmem cell equals @data. */
static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **this = res;

	if (WARN_ON(!this || !*this))
		return 0;

	return *this == data;
}
1141
1142/**
1143 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
1144 * from devm_nvmem_cell_get.
1145 *
Vivek Gautam29143262017-01-22 23:02:39 +00001146 * @dev: Device that requests the nvmem cell.
1147 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001148 */
1149void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
1150{
1151 int ret;
1152
1153 ret = devres_release(dev, devm_nvmem_cell_release,
1154 devm_nvmem_cell_match, cell);
1155
1156 WARN_ON(ret);
1157}
1158EXPORT_SYMBOL(devm_nvmem_cell_put);
1159
1160/**
1161 * nvmem_cell_put() - Release previously allocated nvmem cell.
1162 *
Vivek Gautam29143262017-01-22 23:02:39 +00001163 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001164 */
1165void nvmem_cell_put(struct nvmem_cell *cell)
1166{
1167 struct nvmem_device *nvmem = cell->nvmem;
1168
1169 __nvmem_device_put(nvmem);
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001170}
1171EXPORT_SYMBOL_GPL(nvmem_cell_put);
1172
Masahiro Yamadaf7c04f12017-09-11 11:00:13 +02001173static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001174{
1175 u8 *p, *b;
Jorge Ramirez-Ortiz2fe518fe2019-04-13 11:32:58 +01001176 int i, extra, bit_offset = cell->bit_offset;
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001177
1178 p = b = buf;
1179 if (bit_offset) {
1180 /* First shift */
1181 *b++ >>= bit_offset;
1182
1183 /* setup rest of the bytes if any */
1184 for (i = 1; i < cell->bytes; i++) {
1185 /* Get bits from next byte and shift them towards msb */
1186 *p |= *b << (BITS_PER_BYTE - bit_offset);
1187
1188 p = b;
1189 *b++ >>= bit_offset;
1190 }
Jorge Ramirez-Ortiz2fe518fe2019-04-13 11:32:58 +01001191 } else {
1192 /* point to the msb */
1193 p += cell->bytes - 1;
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001194 }
Jorge Ramirez-Ortiz2fe518fe2019-04-13 11:32:58 +01001195
1196 /* result fits in less bytes */
1197 extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
1198 while (--extra >= 0)
1199 *p-- = 0;
1200
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001201 /* clear msb bits if any leftover in the last byte */
1202 *p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0);
1203}
1204
1205static int __nvmem_cell_read(struct nvmem_device *nvmem,
1206 struct nvmem_cell *cell,
1207 void *buf, size_t *len)
1208{
1209 int rc;
1210
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +01001211 rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001212
Arnd Bergmann287980e2016-05-27 23:23:25 +02001213 if (rc)
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001214 return rc;
1215
1216 /* shift bits in-place */
Axel Lincbf854a2015-09-30 13:35:15 +01001217 if (cell->bit_offset || cell->nbits)
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001218 nvmem_shift_read_buffer_in_place(cell, buf);
1219
Vivek Gautam3b4a6872017-01-22 23:02:38 +00001220 if (len)
1221 *len = cell->bytes;
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001222
1223 return 0;
1224}
1225
1226/**
1227 * nvmem_cell_read() - Read a given nvmem cell
1228 *
1229 * @cell: nvmem cell to be read.
Vivek Gautam3b4a6872017-01-22 23:02:38 +00001230 * @len: pointer to length of cell which will be populated on successful read;
1231 * can be NULL.
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001232 *
Brian Norrisb577faf2017-01-04 16:18:11 +00001233 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
1234 * buffer should be freed by the consumer with a kfree().
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001235 */
1236void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
1237{
1238 struct nvmem_device *nvmem = cell->nvmem;
1239 u8 *buf;
1240 int rc;
1241
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +01001242 if (!nvmem)
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001243 return ERR_PTR(-EINVAL);
1244
1245 buf = kzalloc(cell->bytes, GFP_KERNEL);
1246 if (!buf)
1247 return ERR_PTR(-ENOMEM);
1248
1249 rc = __nvmem_cell_read(nvmem, cell, buf, len);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001250 if (rc) {
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001251 kfree(buf);
1252 return ERR_PTR(rc);
1253 }
1254
1255 return buf;
1256}
1257EXPORT_SYMBOL_GPL(nvmem_cell_read);
1258
/*
 * Build a write image for a cell that does not start and/or end on a byte
 * boundary: shift the caller's value up by cell->bit_offset and merge in
 * the surrounding bits read back from the device, so the partial first
 * and last bytes are preserved.  Returns a kzalloc'ed buffer of
 * cell->bytes (caller frees) or an ERR_PTR().
 */
static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		/* pbyte carries the pre-shift value of the previous byte. */
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/*
		 * setup the last byte with msb bits from nvmem
		 *
		 * NOTE(review): when bit_offset == 0, p still points at
		 * buf[0] here rather than the last byte the read below
		 * targets — confirm whether multi-byte cells with a bare
		 * "nbits" (no bit offset) merge into the right byte.
		 */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;

	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}
1311
1312/**
1313 * nvmem_cell_write() - Write to a given nvmem cell
1314 *
1315 * @cell: nvmem cell to be written.
1316 * @buf: Buffer to be written.
1317 * @len: length of buffer to be written to nvmem cell.
1318 *
1319 * Return: length of bytes written or negative on failure.
1320 */
1321int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
1322{
1323 struct nvmem_device *nvmem = cell->nvmem;
1324 int rc;
1325
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +01001326 if (!nvmem || nvmem->read_only ||
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001327 (cell->bit_offset == 0 && len != cell->bytes))
1328 return -EINVAL;
1329
1330 if (cell->bit_offset || cell->nbits) {
1331 buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
1332 if (IS_ERR(buf))
1333 return PTR_ERR(buf);
1334 }
1335
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +01001336 rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001337
1338 /* free the tmp buffer */
Axel Linace22172015-09-30 13:36:10 +01001339 if (cell->bit_offset || cell->nbits)
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001340 kfree(buf);
1341
Arnd Bergmann287980e2016-05-27 23:23:25 +02001342 if (rc)
Srinivas Kandagatla69aba792015-07-27 12:13:34 +01001343 return rc;
1344
1345 return len;
1346}
1347EXPORT_SYMBOL_GPL(nvmem_cell_write);
1348
Yangtao Li6bb317c2020-03-10 13:22:45 +00001349static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
1350 void *val, size_t count)
Fabrice Gasnier0a9b2d12019-04-13 11:32:57 +01001351{
1352 struct nvmem_cell *cell;
1353 void *buf;
1354 size_t len;
1355
1356 cell = nvmem_cell_get(dev, cell_id);
1357 if (IS_ERR(cell))
1358 return PTR_ERR(cell);
1359
1360 buf = nvmem_cell_read(cell, &len);
1361 if (IS_ERR(buf)) {
1362 nvmem_cell_put(cell);
1363 return PTR_ERR(buf);
1364 }
Yangtao Li6bb317c2020-03-10 13:22:45 +00001365 if (len != count) {
Fabrice Gasnier0a9b2d12019-04-13 11:32:57 +01001366 kfree(buf);
1367 nvmem_cell_put(cell);
1368 return -EINVAL;
1369 }
Yangtao Li6bb317c2020-03-10 13:22:45 +00001370 memcpy(val, buf, count);
Fabrice Gasnier0a9b2d12019-04-13 11:32:57 +01001371 kfree(buf);
1372 nvmem_cell_put(cell);
1373
1374 return 0;
1375}
Yangtao Li6bb317c2020-03-10 13:22:45 +00001376
/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	/* The exact-size check is done in nvmem_cell_read_common(). */
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
1391
/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	/* The exact-size check is done in nvmem_cell_read_common(). */
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
1406
/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	/* The exact-size check is done in nvmem_cell_read_common(). */
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
1421
1422/**
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001423 * nvmem_device_cell_read() - Read a given nvmem device and cell
1424 *
1425 * @nvmem: nvmem device to read from.
1426 * @info: nvmem cell info to be read.
1427 * @buf: buffer pointer which will be populated on successful read.
1428 *
1429 * Return: length of successful bytes read on success and negative
1430 * error code on error.
1431 */
1432ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
1433 struct nvmem_cell_info *info, void *buf)
1434{
1435 struct nvmem_cell cell;
1436 int rc;
1437 ssize_t len;
1438
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +01001439 if (!nvmem)
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001440 return -EINVAL;
1441
1442 rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001443 if (rc)
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001444 return rc;
1445
1446 rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001447 if (rc)
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001448 return rc;
1449
1450 return len;
1451}
1452EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
1453
1454/**
1455 * nvmem_device_cell_write() - Write cell to a given nvmem device
1456 *
1457 * @nvmem: nvmem device to be written to.
Vivek Gautam29143262017-01-22 23:02:39 +00001458 * @info: nvmem cell info to be written.
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001459 * @buf: buffer to be written to cell.
1460 *
1461 * Return: length of bytes written or negative error code on failure.
Bartosz Golaszewski48f63a22018-09-21 06:40:23 -07001462 */
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001463int nvmem_device_cell_write(struct nvmem_device *nvmem,
1464 struct nvmem_cell_info *info, void *buf)
1465{
1466 struct nvmem_cell cell;
1467 int rc;
1468
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +01001469 if (!nvmem)
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001470 return -EINVAL;
1471
1472 rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001473 if (rc)
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001474 return rc;
1475
1476 return nvmem_cell_write(&cell, buf, cell.bytes);
1477}
1478EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
1479
1480/**
1481 * nvmem_device_read() - Read from a given nvmem device
1482 *
1483 * @nvmem: nvmem device to read from.
1484 * @offset: offset in nvmem device.
1485 * @bytes: number of bytes to read.
1486 * @buf: buffer pointer which will be populated on successful read.
1487 *
1488 * Return: length of successful bytes read on success and negative
1489 * error code on error.
1490 */
1491int nvmem_device_read(struct nvmem_device *nvmem,
1492 unsigned int offset,
1493 size_t bytes, void *buf)
1494{
1495 int rc;
1496
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +01001497 if (!nvmem)
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001498 return -EINVAL;
1499
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +01001500 rc = nvmem_reg_read(nvmem, offset, buf, bytes);
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001501
Arnd Bergmann287980e2016-05-27 23:23:25 +02001502 if (rc)
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001503 return rc;
1504
1505 return bytes;
1506}
1507EXPORT_SYMBOL_GPL(nvmem_device_read);
1508
1509/**
1510 * nvmem_device_write() - Write cell to a given nvmem device
1511 *
1512 * @nvmem: nvmem device to be written to.
1513 * @offset: offset in nvmem device.
1514 * @bytes: number of bytes to write.
1515 * @buf: buffer to be written.
1516 *
1517 * Return: length of bytes written or negative error code on failure.
Bartosz Golaszewski48f63a22018-09-21 06:40:23 -07001518 */
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001519int nvmem_device_write(struct nvmem_device *nvmem,
1520 unsigned int offset,
1521 size_t bytes, void *buf)
1522{
1523 int rc;
1524
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +01001525 if (!nvmem)
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001526 return -EINVAL;
1527
Srinivas Kandagatla795ddd12016-04-24 20:28:05 +01001528 rc = nvmem_reg_write(nvmem, offset, buf, bytes);
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001529
Arnd Bergmann287980e2016-05-27 23:23:25 +02001530 if (rc)
Srinivas Kandagatlae2a54022015-07-27 12:13:45 +01001531 return rc;
1532
1533
1534 return bytes;
1535}
1536EXPORT_SYMBOL_GPL(nvmem_device_write);
1537
/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	/* nvmem_cell_tables is protected by nvmem_cell_mutex. */
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
1550
/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	/* nvmem_cell_tables is protected by nvmem_cell_mutex. */
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
1563
1564/**
Bartosz Golaszewski506157b2018-09-21 06:40:17 -07001565 * nvmem_add_cell_lookups() - register a list of cell lookup entries
1566 *
1567 * @entries: array of cell lookup entries
1568 * @nentries: number of cell lookup entries in the array
1569 */
1570void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
1571{
1572 int i;
1573
1574 mutex_lock(&nvmem_lookup_mutex);
1575 for (i = 0; i < nentries; i++)
1576 list_add_tail(&entries[i].node, &nvmem_lookup_list);
1577 mutex_unlock(&nvmem_lookup_mutex);
1578}
1579EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
1580
1581/**
1582 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
1583 * entries
1584 *
1585 * @entries: array of cell lookup entries
1586 * @nentries: number of cell lookup entries in the array
1587 */
1588void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
1589{
1590 int i;
1591
1592 mutex_lock(&nvmem_lookup_mutex);
1593 for (i = 0; i < nentries; i++)
1594 list_del(&entries[i].node);
1595 mutex_unlock(&nvmem_lookup_mutex);
1596}
1597EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
1598
/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	/* The returned string is owned by the embedded struct device. */
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);
1611
/* Register the nvmem bus; hooked in via subsys_initcall() below. */
static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

/* Unregister the nvmem bus on module unload. */
static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);
1624
1625MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
1626MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
1627MODULE_DESCRIPTION("nvmem Driver Core");
1628MODULE_LICENSE("GPL v2");