/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	const char		*name;
	struct module		*owner;
	struct device		dev;
	int			stride;
	int			word_size;
	int			id;
	int			users;
	size_t			size;
	bool			read_only;
	int			flags;
	struct bin_attribute	eeprom;
	struct device		*base_dev;
	nvmem_reg_read_t	reg_read;
	nvmem_reg_write_t	reg_write;
	void *priv;
};

#define FLAG_COMPAT		BIT(0)

struct nvmem_cell {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static LIST_HEAD(nvmem_cells);
static DEFINE_MUTEX(nvmem_cells_mutex);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (nvmem->reg_write)
		return nvmem->reg_write(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr,
				   char *buf, loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr,
				    char *buf, loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IWUSR | S_IRUGO,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_group = {
	.bin_attrs	= nvmem_bin_rw_attributes,
};

static const struct attribute_group *nvmem_rw_dev_groups[] = {
	&nvmem_bin_rw_group,
	NULL,
};

/* read only permission */
static struct bin_attribute bin_attr_ro_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IRUGO,
	},
	.read	= bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_attributes[] = {
	&bin_attr_ro_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_group = {
	.bin_attrs	= nvmem_bin_ro_attributes,
};

static const struct attribute_group *nvmem_ro_dev_groups[] = {
	&nvmem_bin_ro_group,
	NULL,
};

/* default read/write permissions, root only */
static struct bin_attribute bin_attr_rw_root_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IWUSR | S_IRUSR,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
	&bin_attr_rw_root_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_root_group = {
	.bin_attrs	= nvmem_bin_rw_root_attributes,
};

static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
	&nvmem_bin_rw_root_group,
	NULL,
};

/* read only permission, root only */
static struct bin_attribute bin_attr_ro_root_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IRUSR,
	},
	.read	= bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
	&bin_attr_ro_root_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_root_group = {
	.bin_attrs	= nvmem_bin_ro_root_attributes,
};

static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
	&nvmem_bin_ro_root_group,
	NULL,
};

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static int of_nvmem_match(struct device *dev, void *nvmem_np)
{
	return dev->of_node == nvmem_np;
}

static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
{
	struct device *d;

	if (!nvmem_np)
		return NULL;

	d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

static struct nvmem_cell *nvmem_find_cell(const char *cell_id)
{
	struct nvmem_cell *p;

	mutex_lock(&nvmem_cells_mutex);

	list_for_each_entry(p, &nvmem_cells, node)
		if (!strcmp(p->name, cell_id)) {
			mutex_unlock(&nvmem_cells_mutex);
			return p;
		}

	mutex_unlock(&nvmem_cells_mutex);

	return NULL;
}

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_cells_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_cells_mutex);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell;
	struct list_head *p, *n;

	list_for_each_safe(p, n, &nvmem_cells) {
		cell = list_entry(p, struct nvmem_cell, node);
		if (cell->nvmem == nvmem)
			nvmem_cell_drop(cell);
	}
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_cells_mutex);
	list_add_tail(&cell->node, &nvmem_cells);
	mutex_unlock(&nvmem_cells_mutex);
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
					 const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
int nvmem_add_cells(struct nvmem_device *nvmem,
		    const struct nvmem_cell_info *info,
		    int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}
EXPORT_SYMBOL_GPL(nvmem_add_cells);
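
/*
 * Example (illustrative sketch, not part of the framework): a provider
 * could describe fixed cells with an nvmem_cell_info array and register
 * them after nvmem_register(). The "mac-address" name and layout below
 * are hypothetical.
 *
 *	static const struct nvmem_cell_info foo_cells[] = {
 *		{
 *			.name	= "mac-address",
 *			.offset	= 0x40,
 *			.bytes	= 6,
 *		},
 *	};
 *
 *	err = nvmem_add_cells(nvmem, foo_cells, ARRAY_SIZE(foo_cells));
 */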

/*
 * nvmem_setup_compat() - Create an additional binary entry in the
 * driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_setup_compat(struct nvmem_device *nvmem,
			      const struct nvmem_config *config)
{
	int rval;

	if (!config->base_dev)
		return -EINVAL;

	if (nvmem->read_only)
		nvmem->eeprom = bin_attr_ro_root_nvmem;
	else
		nvmem->eeprom = bin_attr_rw_root_nvmem;
	nvmem->eeprom.attr.name = "eeprom";
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;
	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->priv = config->priv;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->dev.of_node = config->dev->of_node;

	if (config->id == -1 && config->name) {
		dev_set_name(&nvmem->dev, "%s", config->name);
	} else {
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") |
			   config->read_only;

	if (config->root_only)
		nvmem->dev.groups = nvmem->read_only ?
			nvmem_ro_root_dev_groups :
			nvmem_rw_root_dev_groups;
	else
		nvmem->dev.groups = nvmem->read_only ?
			nvmem_ro_dev_groups :
			nvmem_rw_dev_groups;

	device_initialize(&nvmem->dev);

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells)
		nvmem_add_cells(nvmem, config->cells, config->ncells);

	return nvmem;

err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
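
/*
 * Example (illustrative sketch, not part of the framework): a minimal
 * provider fills in an nvmem_config and registers it from probe(). The
 * foo_* names and sizes are hypothetical; reg_read() receives the priv
 * pointer passed in the config.
 *
 *	static int foo_reg_read(void *priv, unsigned int offset,
 *				void *val, size_t bytes)
 *	{
 *		struct foo *foo = priv;
 *
 *		memcpy(val, foo->shadow + offset, bytes);
 *		return 0;
 *	}
 *
 *	struct nvmem_config config = {
 *		.dev		= dev,
 *		.name		= "foo-nvmem",
 *		.id		= -1,
 *		.owner		= THIS_MODULE,
 *		.size		= 256,
 *		.word_size	= 1,
 *		.stride		= 1,
 *		.reg_read	= foo_reg_read,
 *		.priv		= foo,
 *	};
 *
 *	nvmem = nvmem_register(&config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */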

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int nvmem_unregister(struct nvmem_device *nvmem)
{
	mutex_lock(&nvmem_mutex);
	if (nvmem->users) {
		mutex_unlock(&nvmem_mutex);
		return -EBUSY;
	}
	mutex_unlock(&nvmem_mutex);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_del(&nvmem->dev);
	put_device(&nvmem->dev);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	WARN_ON(nvmem_unregister(*(struct nvmem_device **)res));
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(struct device_node *np,
					       struct nvmem_cell **cellp,
					       const char *cell_id)
{
	struct nvmem_device *nvmem = NULL;

	mutex_lock(&nvmem_mutex);

	if (np) {
		nvmem = of_nvmem_find(np);
		if (!nvmem) {
			mutex_unlock(&nvmem_mutex);
			return ERR_PTR(-EPROBE_DEFER);
		}
	} else {
		struct nvmem_cell *cell = nvmem_find_cell(cell_id);

		if (cell) {
			nvmem = cell->nvmem;
			*cellp = cell;
		}

		if (!nvmem) {
			mutex_unlock(&nvmem_mutex);
			return ERR_PTR(-ENOENT);
		}
	}

	nvmem->users++;
	mutex_unlock(&nvmem_mutex);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem->name);

		mutex_lock(&nvmem_mutex);
		nvmem->users--;
		mutex_unlock(&nvmem_mutex);

		return ERR_PTR(-EINVAL);
	}

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	module_put(nvmem->owner);
	mutex_lock(&nvmem_mutex);
	nvmem->users--;
	mutex_unlock(&nvmem_mutex);
}

static struct nvmem_device *nvmem_find(const char *name)
{
	struct device *d;

	d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	int index;

	index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	return __nvmem_device_get(nvmem_np, NULL, NULL);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return nvmem_find(dev_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
{
	struct nvmem_cell *cell = NULL;
	struct nvmem_device *nvmem;

	nvmem = __nvmem_device_get(NULL, &cell, cell_id);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	return cell;
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @name: nvmem cell name from nvmem-cell-names property, or NULL
 *	  for the cell at index 0 (the lone cell with no accompanying
 *	  nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be released by
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
				     const char *name)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_cell *cell;
	struct nvmem_device *nvmem;
	const __be32 *addr;
	int rval, len;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (name)
		index = of_property_match_string(np, "nvmem-cell-names", name);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-EINVAL);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, NULL, NULL);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	addr = of_get_property(cell_np, "reg", &len);
	if (!addr || (len < 2 * sizeof(u32))) {
		dev_err(&nvmem->dev, "nvmem: invalid reg on %pOF\n",
			cell_np);
		rval = -EINVAL;
		goto err_mem;
	}

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell) {
		rval = -ENOMEM;
		goto err_mem;
	}

	cell->nvmem = nvmem;
	cell->offset = be32_to_cpup(addr++);
	cell->bytes = be32_to_cpup(addr);
	cell->name = cell_np->name;

	addr = of_get_property(cell_np, "bits", &len);
	if (addr && len == (2 * sizeof(u32))) {
		cell->bit_offset = be32_to_cpup(addr++);
		cell->nbits = be32_to_cpup(addr);
	}

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		rval = -EINVAL;
		goto err_sanity;
	}

	nvmem_cell_add(cell);

	return cell;

err_sanity:
	kfree(cell);

err_mem:
	__nvmem_device_put(nvmem);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: nvmem cell name to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be released by
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, cell_id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell_id only allowed for device tree; invalid otherwise */
	if (!cell_id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_list(cell_id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be released
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
	nvmem_cell_drop(cell);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);
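
/*
 * Worked example (illustrative): for a cell with bit_offset = 2 and
 * nbits = 10, __nvmem_cell_read() fetches 2 raw bytes. The helper below
 * shifts every byte right by bit_offset, pulls the displaced low bits of
 * each following byte into the preceding byte's msbs, then masks off all
 * but the 10 % 8 = 2 valid bits of the last byte, leaving the value
 * right-aligned in buf.
 */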
static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}

		/* result fits in less bytes */
		if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
			*p-- = 0;
	}
	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
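
/*
 * Example (illustrative sketch): a typical consumer pairs
 * nvmem_cell_get()/nvmem_cell_read()/nvmem_cell_put(). The "calibration"
 * cell name is hypothetical and would come from the consumer's
 * nvmem-cell-names property or a cell registered via nvmem_add_cells().
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *data;
 *
 *	cell = nvmem_cell_get(dev, "calibration");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	data = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *
 *	// ... use len bytes at data, then:
 *	kfree(data);
 */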

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/*
			 * Recover the bit_offset msbs displaced from the
			 * previous byte by the left shift and move them
			 * to this byte's lsbs.
			 */
			pbits = pbyte >> (BITS_PER_BYTE - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));

	kfree(buf);
	nvmem_cell_put(cell);
	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
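
/*
 * Example (illustrative sketch): raw device-level access for consumers
 * that need more than a single cell. The device name "foo-nvmem" and the
 * offset are hypothetical.
 *
 *	struct nvmem_device *nvmem;
 *	u8 buf[16];
 *	int ret;
 *
 *	nvmem = nvmem_device_get(dev, "foo-nvmem");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *
 *	ret = nvmem_device_read(nvmem, 0x10, sizeof(buf), buf);
 *	nvmem_device_put(nvmem);
 *	if (ret < 0)
 *		return ret;
 */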

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");