omap2.c
/*
 * OneNAND driver for OMAP2 / OMAP3
 *
 * Copyright © 2005-2006 Nokia Corporation
 *
 * Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
 * IRQ and DMA support written by Timo Teras
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/of_device.h>
#include <linux/omap-gpmc.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/gpio/consumer.h>

#include <asm/mach/flash.h>

#define DRIVER_NAME "omap2-onenand"

#define ONENAND_BUFRAM_SIZE	(1024 * 5)

struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;
	unsigned long phys_base;
	struct gpio_desc *int_gpiod;
	struct mtd_info mtd;
	struct onenand_chip onenand;
	struct completion irq_done;
	struct completion dma_done;
	struct dma_chan *dma_chan;
};
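
/*
 * Completion callbacks: the dmaengine callback signals the end of a
 * BufferRAM DMA transfer, while the GPIO interrupt handler signals that
 * the OneNAND INT pin has been asserted.
 */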
static void omap2_onenand_dma_complete_func(void *completion)
{
	complete(completion);
}

static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
	struct omap2_onenand *c = dev_id;

	complete(&c->irq_done);

	return IRQ_HANDLED;
}

static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
	return readw(c->onenand.base + reg);
}

static inline void write_reg(struct omap2_onenand *c, unsigned short value,
			     int reg)
{
	writew(value, c->onenand.base + reg);
}
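
/*
 * Program the OneNAND SYS_CFG1 register: RDY/INT signal configuration,
 * burst read latency, burst length (0 selects continuous burst) and the
 * synchronous read/write enables. The HF/VHF bits are set for latencies
 * above 5 and 7 cycles respectively.
 */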
static int omap2_onenand_set_cfg(struct omap2_onenand *c,
				 bool sr, bool sw,
				 int latency, int burst_len)
{
	unsigned short reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;

	reg |= latency << ONENAND_SYS_CFG1_BRL_SHIFT;

	switch (burst_len) {
	case 0:		/* continuous */
		break;
	case 4:
		reg |= ONENAND_SYS_CFG1_BL_4;
		break;
	case 8:
		reg |= ONENAND_SYS_CFG1_BL_8;
		break;
	case 16:
		reg |= ONENAND_SYS_CFG1_BL_16;
		break;
	case 32:
		reg |= ONENAND_SYS_CFG1_BL_32;
		break;
	default:
		return -EINVAL;
	}

	if (latency > 5)
		reg |= ONENAND_SYS_CFG1_HF;
	if (latency > 7)
		reg |= ONENAND_SYS_CFG1_VHF;
	if (sr)
		reg |= ONENAND_SYS_CFG1_SYNC_READ;
	if (sw)
		reg |= ONENAND_SYS_CFG1_SYNC_WRITE;

	write_reg(c, reg, ONENAND_REG_SYS_CFG1);

	return 0;
}
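
/*
 * Decode bits 7:4 of the OneNAND version ID into the maximum synchronous
 * clock frequency in MHz, or -EINVAL for an unknown version.
 */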
static int omap2_onenand_get_freq(int ver)
{
	switch ((ver >> 4) & 0xf) {
	case 0:
		return 40;
	case 1:
		return 54;
	case 2:
		return 66;
	case 3:
		return 83;
	case 4:
		return 104;
	}

	return -EINVAL;
}

static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
	       "intr 0x%04x\n", msg, state, ctrl, intr);
}
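
/*
 * Wait for the current OneNAND command to complete. Reset and erase
 * prepare/verify states are handled with a short polling loop. Other
 * non-read states enable the interrupt pin and sleep on the GPIO IRQ
 * completion, re-arming the 20 ms timeout up to two more times while the
 * controller still reports an ongoing operation. Reads poll the interrupt
 * register with the interrupt pin disabled. Finally, ECC status is
 * checked for reads and controller errors are reported.
 */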
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	unsigned int intr = 0;
	unsigned int ctrl, ctrl_mask;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
	    state == FL_VERIFYING_ERASE) {
		int i = 21;
		unsigned int intr_flags = ONENAND_INT_MASTER;

		switch (state) {
		case FL_RESETING:
			intr_flags |= ONENAND_INT_RESET;
			break;
		case FL_PREPARING_ERASE:
			intr_flags |= ONENAND_INT_ERASE;
			break;
		case FL_VERIFYING_ERASE:
			i = 101;
			break;
		}

		while (--i) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if ((intr & intr_flags) == intr_flags)
			return 0;
		/* Continue in wait for interrupt branch */
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			/* Add a delay to let GPIO settle */
			syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		reinit_completion(&c->irq_done);
		result = gpiod_get_value(c->int_gpiod);
		if (result < 0) {
			ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			wait_err("gpio error", state, ctrl, intr);
			return result;
		} else if (result == 0) {
			int retry_cnt = 0;
retry:
			if (!wait_for_completion_io_timeout(&c->irq_done,
						msecs_to_jiffies(20))) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO &&
				    !this->ongoing) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
			       "Device is write protected!!!\n");
		return -EIO;
	}

	ctrl_mask = 0xFE9F;
	if (this->ongoing)
		ctrl_mask &= ~0x8000;

	if (ctrl & ctrl_mask)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}
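
/*
 * Return the extra offset to apply when the chip's second BufferRAM is
 * currently selected: one page for DATARAM accesses, one OOB area for
 * SPARERAM accesses.
 */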
static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;

	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			return this->writesize;
		if (area == ONENAND_SPARERAM)
			return mtd->oobsize;
	}

	return 0;
}
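
/*
 * Run a single dmaengine memcpy between BufferRAM and memory and wait up
 * to 20 ms for the completion callback; the channel is terminated and
 * -ETIMEDOUT returned if the transfer does not finish in time.
 */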
static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c,
					     dma_addr_t src, dma_addr_t dst,
					     size_t count)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, 0);
	if (!tx) {
		dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	reinit_completion(&c->dma_done);

	tx->callback = omap2_onenand_dma_complete_func;
	tx->callback_param = &c->dma_done;

	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(&c->pdev->dev, "Failed to do DMA tx_submit\n");
		return -EIO;
	}

	dma_async_issue_pending(c->dma_chan);

	if (!wait_for_completion_io_timeout(&c->dma_done,
					    msecs_to_jiffies(20))) {
		dmaengine_terminate_sync(c->dma_chan);
		return -ETIMEDOUT;
	}

	return 0;
}
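
/*
 * Copy data from BufferRAM into the caller's buffer. DMA is used only for
 * transfers of at least 384 bytes that are 32-bit aligned, do not come
 * from interrupt or panic context, and (for vmalloc buffers) fit within a
 * single page; anything else, and any unaligned tail, is copied by the
 * CPU. A DMA mapping failure or timeout falls back to memcpy as well.
 */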
static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	void *buf = (void *)buffer;
	size_t xtra;
	int ret;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt() || oops_in_progress)
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	xtra = count & 3;
	if (xtra) {
		count -= xtra;
		memcpy(buf + count, this->base + bram_offset + count, xtra);
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		goto out_copy;
	}

	ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	if (ret) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(buf, this->base + bram_offset, count);
	return 0;
}
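
/*
 * Copy data from the caller's buffer into BufferRAM, mirroring the DMA
 * versus memcpy policy of the read path above.
 */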
static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	void *buf = (void *)buffer;
	int ret;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt() || oops_in_progress)
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	if (ret) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}

static void omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/* With certain content in the buffer RAM, the OMAP boot ROM code
	 * can recognize the flash chip incorrectly. Zero it out before
	 * soft reset.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}
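
/*
 * Probe: map the GPMC chip-select window, optionally hook up the INT GPIO
 * interrupt and a dmaengine memcpy channel, scan the OneNAND chip, and if
 * the chip's version ID maps to a known synchronous frequency, program
 * the GPMC timings and the chip's SYS_CFG1 register before registering
 * the MTD device.
 */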
static int omap2_onenand_probe(struct platform_device *pdev)
{
	u32 val;
	dma_cap_mask_t mask;
	int freq, latency, r;
	struct resource *res;
	struct omap2_onenand *c;
	struct gpmc_onenand_info info;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "error getting memory resource\n");
		return -EINVAL;
	}

	r = of_property_read_u32(np, "reg", &val);
	if (r) {
		dev_err(dev, "reg not found in DT\n");
		return r;
	}

	c = devm_kzalloc(dev, sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->gpmc_cs = val;
	c->phys_base = res->start;

	c->onenand.base = devm_ioremap_resource(dev, res);
	if (IS_ERR(c->onenand.base))
		return PTR_ERR(c->onenand.base);

	c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN);
	if (IS_ERR(c->int_gpiod)) {
		r = PTR_ERR(c->int_gpiod);
		/* Just try again if this happens */
		if (r != -EPROBE_DEFER)
			dev_err(dev, "error getting gpio: %d\n", r);
		return r;
	}

	if (c->int_gpiod) {
		r = devm_request_irq(dev, gpiod_to_irq(c->int_gpiod),
				     omap2_onenand_interrupt,
				     IRQF_TRIGGER_RISING, "onenand", c);
		if (r)
			return r;

		c->onenand.wait = omap2_onenand_wait;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	c->dma_chan = dma_request_channel(mask, NULL, NULL);
	if (c->dma_chan) {
		c->onenand.read_bufferram = omap2_onenand_read_bufferram;
		c->onenand.write_bufferram = omap2_onenand_write_bufferram;
	}

	c->pdev = pdev;
	c->mtd.priv = &c->onenand;
	c->mtd.dev.parent = dev;
	mtd_set_of_node(&c->mtd, dev->of_node);

	dev_info(dev, "initializing on CS%d (0x%08lx), va %p, %s mode\n",
		 c->gpmc_cs, c->phys_base, c->onenand.base,
		 c->dma_chan ? "DMA" : "PIO");

	if ((r = onenand_scan(&c->mtd, 1)) < 0)
		goto err_release_dma;

	freq = omap2_onenand_get_freq(c->onenand.version_id);
	if (freq > 0) {
		switch (freq) {
		case 104:
			latency = 7;
			break;
		case 83:
			latency = 6;
			break;
		case 66:
			latency = 5;
			break;
		case 56:
			latency = 4;
			break;
		default:	/* 40 MHz or lower */
			latency = 3;
			break;
		}

		r = gpmc_omap_onenand_set_timings(dev, c->gpmc_cs,
						  freq, latency, &info);
		if (r)
			goto err_release_onenand;

		r = omap2_onenand_set_cfg(c, info.sync_read, info.sync_write,
					  latency, info.burst_len);
		if (r)
			goto err_release_onenand;

		if (info.sync_read || info.sync_write)
			dev_info(dev, "optimized timings for %d MHz\n", freq);
	}

	r = mtd_device_register(&c->mtd, NULL, 0);
	if (r)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_dma:
	if (c->dma_chan)
		dma_release_channel(c->dma_chan);

	return r;
}

static int omap2_onenand_remove(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	onenand_release(&c->mtd);
	if (c->dma_chan)
		dma_release_channel(c->dma_chan);
	omap2_onenand_shutdown(pdev);

	return 0;
}

static const struct of_device_id omap2_onenand_id_table[] = {
	{ .compatible = "ti,omap2-onenand", },
	{},
};
MODULE_DEVICE_TABLE(of, omap2_onenand_id_table);

static struct platform_driver omap2_onenand_driver = {
	.probe = omap2_onenand_probe,
	.remove = omap2_onenand_remove,
	.shutdown = omap2_onenand_shutdown,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = omap2_onenand_id_table,
	},
};

module_platform_driver(omap2_onenand_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");