  1. /*
  2. * Atmel ADC driver for SAMA5D2 devices and compatible.
  3. *
  4. * Copyright (C) 2015 Atmel,
  5. * 2015 Ludovic Desroches <ludovic.desroches@atmel.com>
  6. *
  7. * This software is licensed under the terms of the GNU General Public
  8. * License version 2, as published by the Free Software Foundation, and
  9. * may be copied, distributed, and modified under those terms.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. */
  16. #include <linux/bitops.h>
  17. #include <linux/clk.h>
  18. #include <linux/dma-mapping.h>
  19. #include <linux/dmaengine.h>
  20. #include <linux/interrupt.h>
  21. #include <linux/io.h>
  22. #include <linux/module.h>
  23. #include <linux/of_device.h>
  24. #include <linux/platform_device.h>
  25. #include <linux/sched.h>
  26. #include <linux/wait.h>
  27. #include <linux/iio/iio.h>
  28. #include <linux/iio/sysfs.h>
  29. #include <linux/iio/buffer.h>
  30. #include <linux/iio/trigger.h>
  31. #include <linux/iio/trigger_consumer.h>
  32. #include <linux/iio/triggered_buffer.h>
  33. #include <linux/pinctrl/consumer.h>
  34. #include <linux/regulator/consumer.h>
  35. /* Control Register */
  36. #define AT91_SAMA5D2_CR 0x00
  37. /* Software Reset */
  38. #define AT91_SAMA5D2_CR_SWRST BIT(0)
  39. /* Start Conversion */
  40. #define AT91_SAMA5D2_CR_START BIT(1)
  41. /* Touchscreen Calibration */
  42. #define AT91_SAMA5D2_CR_TSCALIB BIT(2)
  43. /* Comparison Restart */
  44. #define AT91_SAMA5D2_CR_CMPRST BIT(4)
  45. /* Mode Register */
  46. #define AT91_SAMA5D2_MR 0x04
  47. /* Trigger Selection */
  48. #define AT91_SAMA5D2_MR_TRGSEL(v) ((v) << 1)
  49. /* ADTRG */
  50. #define AT91_SAMA5D2_MR_TRGSEL_TRIG0 0
  51. /* TIOA0 */
  52. #define AT91_SAMA5D2_MR_TRGSEL_TRIG1 1
  53. /* TIOA1 */
  54. #define AT91_SAMA5D2_MR_TRGSEL_TRIG2 2
  55. /* TIOA2 */
  56. #define AT91_SAMA5D2_MR_TRGSEL_TRIG3 3
  57. /* PWM event line 0 */
  58. #define AT91_SAMA5D2_MR_TRGSEL_TRIG4 4
  59. /* PWM event line 1 */
  60. #define AT91_SAMA5D2_MR_TRGSEL_TRIG5 5
  61. /* TIOA3 */
  62. #define AT91_SAMA5D2_MR_TRGSEL_TRIG6 6
  63. /* RTCOUT0 */
  64. #define AT91_SAMA5D2_MR_TRGSEL_TRIG7 7
  65. /* Sleep Mode */
  66. #define AT91_SAMA5D2_MR_SLEEP BIT(5)
  67. /* Fast Wake Up */
  68. #define AT91_SAMA5D2_MR_FWUP BIT(6)
  69. /* Prescaler Rate Selection */
  70. #define AT91_SAMA5D2_MR_PRESCAL(v) ((v) << AT91_SAMA5D2_MR_PRESCAL_OFFSET)
  71. #define AT91_SAMA5D2_MR_PRESCAL_OFFSET 8
  72. #define AT91_SAMA5D2_MR_PRESCAL_MAX 0xff
  73. #define AT91_SAMA5D2_MR_PRESCAL_MASK GENMASK(15, 8)
  74. /* Startup Time */
  75. #define AT91_SAMA5D2_MR_STARTUP(v) ((v) << 16)
  76. #define AT91_SAMA5D2_MR_STARTUP_MASK GENMASK(19, 16)
  77. /* Analog Change */
  78. #define AT91_SAMA5D2_MR_ANACH BIT(23)
  79. /* Tracking Time */
  80. #define AT91_SAMA5D2_MR_TRACKTIM(v) ((v) << 24)
  81. #define AT91_SAMA5D2_MR_TRACKTIM_MAX 0xff
  82. /* Transfer Time */
  83. #define AT91_SAMA5D2_MR_TRANSFER(v) ((v) << 28)
  84. #define AT91_SAMA5D2_MR_TRANSFER_MAX 0x3
  85. /* Use Sequence Enable */
  86. #define AT91_SAMA5D2_MR_USEQ BIT(31)
  87. /* Channel Sequence Register 1 */
  88. #define AT91_SAMA5D2_SEQR1 0x08
  89. /* Channel Sequence Register 2 */
  90. #define AT91_SAMA5D2_SEQR2 0x0c
  91. /* Channel Enable Register */
  92. #define AT91_SAMA5D2_CHER 0x10
  93. /* Channel Disable Register */
  94. #define AT91_SAMA5D2_CHDR 0x14
  95. /* Channel Status Register */
  96. #define AT91_SAMA5D2_CHSR 0x18
  97. /* Last Converted Data Register */
  98. #define AT91_SAMA5D2_LCDR 0x20
  99. /* Interrupt Enable Register */
  100. #define AT91_SAMA5D2_IER 0x24
  101. /* Interrupt Enable Register - general overrun error */
  102. #define AT91_SAMA5D2_IER_GOVRE BIT(25)
  103. /* Interrupt Disable Register */
  104. #define AT91_SAMA5D2_IDR 0x28
  105. /* Interrupt Mask Register */
  106. #define AT91_SAMA5D2_IMR 0x2c
  107. /* Interrupt Status Register */
  108. #define AT91_SAMA5D2_ISR 0x30
  109. /* Last Channel Trigger Mode Register */
  110. #define AT91_SAMA5D2_LCTMR 0x34
  111. /* Last Channel Compare Window Register */
  112. #define AT91_SAMA5D2_LCCWR 0x38
  113. /* Overrun Status Register */
  114. #define AT91_SAMA5D2_OVER 0x3c
  115. /* Extended Mode Register */
  116. #define AT91_SAMA5D2_EMR 0x40
  117. /* Compare Window Register */
  118. #define AT91_SAMA5D2_CWR 0x44
  119. /* Channel Gain Register */
  120. #define AT91_SAMA5D2_CGR 0x48
  121. /* Channel Offset Register */
  122. #define AT91_SAMA5D2_COR 0x4c
  123. #define AT91_SAMA5D2_COR_DIFF_OFFSET 16
  124. /* Channel Data Register 0 */
  125. #define AT91_SAMA5D2_CDR0 0x50
  126. /* Analog Control Register */
  127. #define AT91_SAMA5D2_ACR 0x94
  128. /* Touchscreen Mode Register */
  129. #define AT91_SAMA5D2_TSMR 0xb0
  130. /* Touchscreen X Position Register */
  131. #define AT91_SAMA5D2_XPOSR 0xb4
  132. /* Touchscreen Y Position Register */
  133. #define AT91_SAMA5D2_YPOSR 0xb8
  134. /* Touchscreen Pressure Register */
  135. #define AT91_SAMA5D2_PRESSR 0xbc
  136. /* Trigger Register */
  137. #define AT91_SAMA5D2_TRGR 0xc0
  138. /* Mask for TRGMOD field of TRGR register */
  139. #define AT91_SAMA5D2_TRGR_TRGMOD_MASK GENMASK(2, 0)
  140. /* No trigger, only software trigger can start conversions */
  141. #define AT91_SAMA5D2_TRGR_TRGMOD_NO_TRIGGER 0
  142. /* Trigger Mode external trigger rising edge */
  143. #define AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_RISE 1
  144. /* Trigger Mode external trigger falling edge */
  145. #define AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_FALL 2
  146. /* Trigger Mode external trigger any edge */
  147. #define AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_ANY 3
  148. /* Correction Select Register */
  149. #define AT91_SAMA5D2_COSR 0xd0
  150. /* Correction Value Register */
  151. #define AT91_SAMA5D2_CVR 0xd4
  152. /* Channel Error Correction Register */
  153. #define AT91_SAMA5D2_CECR 0xd8
  154. /* Write Protection Mode Register */
  155. #define AT91_SAMA5D2_WPMR 0xe4
  156. /* Write Protection Status Register */
  157. #define AT91_SAMA5D2_WPSR 0xe8
  158. /* Version Register */
  159. #define AT91_SAMA5D2_VERSION 0xfc
  160. #define AT91_SAMA5D2_HW_TRIG_CNT 3
  161. #define AT91_SAMA5D2_SINGLE_CHAN_CNT 12
  162. #define AT91_SAMA5D2_DIFF_CHAN_CNT 6
  163. /*
  164. * Maximum number of bytes to hold conversion from all channels
  165. * without the timestamp.
  166. */
  167. #define AT91_BUFFER_MAX_CONVERSION_BYTES ((AT91_SAMA5D2_SINGLE_CHAN_CNT + \
  168. AT91_SAMA5D2_DIFF_CHAN_CNT) * 2)
  169. /* This total must also include the timestamp */
  170. #define AT91_BUFFER_MAX_BYTES (AT91_BUFFER_MAX_CONVERSION_BYTES + 8)
  171. #define AT91_BUFFER_MAX_HWORDS (AT91_BUFFER_MAX_BYTES / 2)
  172. #define AT91_HWFIFO_MAX_SIZE_STR "128"
  173. #define AT91_HWFIFO_MAX_SIZE 128
  174. #define AT91_SAMA5D2_CHAN_SINGLE(num, addr) \
  175. { \
  176. .type = IIO_VOLTAGE, \
  177. .channel = num, \
  178. .address = addr, \
  179. .scan_index = num, \
  180. .scan_type = { \
  181. .sign = 'u', \
  182. .realbits = 12, \
  183. .storagebits = 16, \
  184. }, \
  185. .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
  186. .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
  187. .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
  188. .datasheet_name = "CH"#num, \
  189. .indexed = 1, \
  190. }
  191. #define AT91_SAMA5D2_CHAN_DIFF(num, num2, addr) \
  192. { \
  193. .type = IIO_VOLTAGE, \
  194. .differential = 1, \
  195. .channel = num, \
  196. .channel2 = num2, \
  197. .address = addr, \
  198. .scan_index = num + AT91_SAMA5D2_SINGLE_CHAN_CNT, \
  199. .scan_type = { \
  200. .sign = 's', \
  201. .realbits = 12, \
  202. .storagebits = 16, \
  203. }, \
  204. .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
  205. .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
  206. .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
  207. .datasheet_name = "CH"#num"-CH"#num2, \
  208. .indexed = 1, \
  209. }
  210. #define at91_adc_readl(st, reg) readl_relaxed(st->base + reg)
  211. #define at91_adc_writel(st, reg, val) writel_relaxed(val, st->base + reg)
/**
 * struct at91_adc_soc_info - per-SoC ADC timing and rate limits
 * @startup_time:	minimum startup time; used as microseconds in
 *			at91_adc_startup_time() (ticks = us * kHz / 1000)
 * @min_sample_rate:	lowest accepted sampling frequency, in Hz
 * @max_sample_rate:	highest accepted sampling frequency, in Hz
 */
struct at91_adc_soc_info {
	unsigned			startup_time;
	unsigned			min_sample_rate;
	unsigned			max_sample_rate;
};
/**
 * struct at91_adc_trigger - description of one supported trigger source
 * @name:		suffix used when building the IIO trigger name
 * @trgmod_value:	TRGMOD field value programmed into the TRGR register
 * @edge_type:		matching irq edge type (IRQ_TYPE_EDGE_* / IRQ_TYPE_NONE)
 * @hw_trig:		true when the trigger is driven by a hardware line
 */
struct at91_adc_trigger {
	char				*name;
	unsigned int			trgmod_value;
	unsigned int			edge_type;
	bool				hw_trig;
};
/**
 * struct at91_adc_dma - at91-sama5d2 dma information struct
 * @dma_chan:		the dma channel acquired
 * @rx_buf:		dma coherent allocated area
 * @rx_dma_buf:		dma handle (bus address) of the buffer
 * @phys_addr:		physical address of the ADC base register
 * @buf_idx:		index inside the dma buffer where reading was last done
 * @rx_buf_sz:		size of buffer used by DMA operation
 * @watermark:		number of conversions to copy before DMA triggers irq
 * @dma_ts:		hold the start timestamp of dma operation
 */
struct at91_adc_dma {
	struct dma_chan			*dma_chan;
	u8				*rx_buf;
	dma_addr_t			rx_dma_buf;
	phys_addr_t			phys_addr;
	int				buf_idx;
	int				rx_buf_sz;
	int				watermark;
	s64				dma_ts;
};
/**
 * struct at91_adc_state - driver private state
 * @base:		mapped ADC register base
 * @irq:		ADC interrupt line
 * @per_clk:		peripheral clock feeding the ADC
 * @reg:		supply regulator ("vddana", per usual binding - verify)
 * @vref:		reference voltage regulator
 * @vref_uv:		cached reference voltage, in microvolts
 * @trig:		registered IIO trigger
 * @selected_trig:	entry of at91_adc_trigger_list chosen at probe time
 * @chan:		channel used for the current single conversion
 * @conversion_done:	set by the irq handler when @conversion_value is valid
 * @conversion_value:	raw value latched by the irq handler
 * @soc_info:		per-SoC timing/rate limits
 * @wq_data_available:	wait queue for single-conversion completion
 * @dma_st:		DMA bookkeeping
 * @buffer:		staging area for non-DMA triggered captures
 * @lock:		serializes 'single conversion' requests through sysfs
 */
struct at91_adc_state {
	void __iomem			*base;
	int				irq;
	struct clk			*per_clk;
	struct regulator		*reg;
	struct regulator		*vref;
	int				vref_uv;
	struct iio_trigger		*trig;
	const struct at91_adc_trigger	*selected_trig;
	const struct iio_chan_spec	*chan;
	bool				conversion_done;
	u32				conversion_value;
	struct at91_adc_soc_info	soc_info;
	wait_queue_head_t		wq_data_available;
	struct at91_adc_dma		dma_st;
	u16				buffer[AT91_BUFFER_MAX_HWORDS];
	/*
	 * lock to prevent concurrent 'single conversion' requests through
	 * sysfs.
	 */
	struct mutex			lock;
};
/*
 * Trigger sources supported by the controller: three hardware edge modes on
 * the external trigger pin, plus a pure software mode (no TRGMOD trigger).
 */
static const struct at91_adc_trigger at91_adc_trigger_list[] = {
	{
		.name = "external_rising",
		.trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_RISE,
		.edge_type = IRQ_TYPE_EDGE_RISING,
		.hw_trig = true,
	},
	{
		.name = "external_falling",
		.trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_FALL,
		.edge_type = IRQ_TYPE_EDGE_FALLING,
		.hw_trig = true,
	},
	{
		.name = "external_any",
		.trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_ANY,
		.edge_type = IRQ_TYPE_EDGE_BOTH,
		.hw_trig = true,
	},
	{
		.name = "software",
		.trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_NO_TRIGGER,
		.edge_type = IRQ_TYPE_NONE,
		.hw_trig = false,
	},
};
/*
 * Channel map: 12 single-ended channels (CDR0..CDR11), 6 differential
 * pairs sharing the even channel's data register, and a soft timestamp.
 * Differential scan indexes start after the single-ended ones.
 */
static const struct iio_chan_spec at91_adc_channels[] = {
	AT91_SAMA5D2_CHAN_SINGLE(0, 0x50),
	AT91_SAMA5D2_CHAN_SINGLE(1, 0x54),
	AT91_SAMA5D2_CHAN_SINGLE(2, 0x58),
	AT91_SAMA5D2_CHAN_SINGLE(3, 0x5c),
	AT91_SAMA5D2_CHAN_SINGLE(4, 0x60),
	AT91_SAMA5D2_CHAN_SINGLE(5, 0x64),
	AT91_SAMA5D2_CHAN_SINGLE(6, 0x68),
	AT91_SAMA5D2_CHAN_SINGLE(7, 0x6c),
	AT91_SAMA5D2_CHAN_SINGLE(8, 0x70),
	AT91_SAMA5D2_CHAN_SINGLE(9, 0x74),
	AT91_SAMA5D2_CHAN_SINGLE(10, 0x78),
	AT91_SAMA5D2_CHAN_SINGLE(11, 0x7c),
	AT91_SAMA5D2_CHAN_DIFF(0, 1, 0x50),
	AT91_SAMA5D2_CHAN_DIFF(2, 3, 0x58),
	AT91_SAMA5D2_CHAN_DIFF(4, 5, 0x60),
	AT91_SAMA5D2_CHAN_DIFF(6, 7, 0x68),
	AT91_SAMA5D2_CHAN_DIFF(8, 9, 0x70),
	AT91_SAMA5D2_CHAN_DIFF(10, 11, 0x78),
	IIO_CHAN_SOFT_TIMESTAMP(AT91_SAMA5D2_SINGLE_CHAN_CNT
				+ AT91_SAMA5D2_DIFF_CHAN_CNT + 1),
};
  314. static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
  315. {
  316. struct iio_dev *indio = iio_trigger_get_drvdata(trig);
  317. struct at91_adc_state *st = iio_priv(indio);
  318. u32 status = at91_adc_readl(st, AT91_SAMA5D2_TRGR);
  319. u8 bit;
  320. /* clear TRGMOD */
  321. status &= ~AT91_SAMA5D2_TRGR_TRGMOD_MASK;
  322. if (state)
  323. status |= st->selected_trig->trgmod_value;
  324. /* set/unset hw trigger */
  325. at91_adc_writel(st, AT91_SAMA5D2_TRGR, status);
  326. for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) {
  327. struct iio_chan_spec const *chan = indio->channels + bit;
  328. if (state) {
  329. at91_adc_writel(st, AT91_SAMA5D2_CHER,
  330. BIT(chan->channel));
  331. /* enable irq only if not using DMA */
  332. if (!st->dma_st.dma_chan) {
  333. at91_adc_writel(st, AT91_SAMA5D2_IER,
  334. BIT(chan->channel));
  335. }
  336. } else {
  337. /* disable irq only if not using DMA */
  338. if (!st->dma_st.dma_chan) {
  339. at91_adc_writel(st, AT91_SAMA5D2_IDR,
  340. BIT(chan->channel));
  341. }
  342. at91_adc_writel(st, AT91_SAMA5D2_CHDR,
  343. BIT(chan->channel));
  344. }
  345. }
  346. return 0;
  347. }
  348. static int at91_adc_reenable_trigger(struct iio_trigger *trig)
  349. {
  350. struct iio_dev *indio = iio_trigger_get_drvdata(trig);
  351. struct at91_adc_state *st = iio_priv(indio);
  352. /* if we are using DMA, we must not reenable irq after each trigger */
  353. if (st->dma_st.dma_chan)
  354. return 0;
  355. enable_irq(st->irq);
  356. /* Needed to ACK the DRDY interruption */
  357. at91_adc_readl(st, AT91_SAMA5D2_LCDR);
  358. return 0;
  359. }
  360. static const struct iio_trigger_ops at91_adc_trigger_ops = {
  361. .set_trigger_state = &at91_adc_configure_trigger,
  362. .try_reenable = &at91_adc_reenable_trigger,
  363. .validate_device = iio_trigger_validate_own_device,
  364. };
  365. static int at91_adc_dma_size_done(struct at91_adc_state *st)
  366. {
  367. struct dma_tx_state state;
  368. enum dma_status status;
  369. int i, size;
  370. status = dmaengine_tx_status(st->dma_st.dma_chan,
  371. st->dma_st.dma_chan->cookie,
  372. &state);
  373. if (status != DMA_IN_PROGRESS)
  374. return 0;
  375. /* Transferred length is size in bytes from end of buffer */
  376. i = st->dma_st.rx_buf_sz - state.residue;
  377. /* Return available bytes */
  378. if (i >= st->dma_st.buf_idx)
  379. size = i - st->dma_st.buf_idx;
  380. else
  381. size = st->dma_st.rx_buf_sz + i - st->dma_st.buf_idx;
  382. return size;
  383. }
  384. static void at91_dma_buffer_done(void *data)
  385. {
  386. struct iio_dev *indio_dev = data;
  387. iio_trigger_poll_chained(indio_dev->trig);
  388. }
/*
 * Prepare and launch the cyclic DMA transfer that reads conversions from
 * LCDR into the coherent rx buffer. Returns 0 on success or when DMA is
 * not in use, negative errno on failure.
 */
static int at91_adc_dma_start(struct iio_dev *indio_dev)
{
	struct at91_adc_state *st = iio_priv(indio_dev);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int ret;
	u8 bit;

	if (!st->dma_st.dma_chan)
		return 0;

	/* we start a new DMA, so set buffer index to start */
	st->dma_st.buf_idx = 0;

	/*
	 * compute buffer size w.r.t. watermark and enabled channels.
	 * scan_bytes is aligned so we need an exact size for DMA
	 */
	st->dma_st.rx_buf_sz = 0;
	for_each_set_bit(bit, indio_dev->active_scan_mask,
			 indio_dev->num_channels) {
		struct iio_chan_spec const *chan = indio_dev->channels + bit;

		st->dma_st.rx_buf_sz += chan->scan_type.storagebits / 8;
	}
	st->dma_st.rx_buf_sz *= st->dma_st.watermark;

	/*
	 * Prepare a DMA cyclic transaction; the period is half the buffer,
	 * so the completion callback fires twice per buffer wrap.
	 */
	desc = dmaengine_prep_dma_cyclic(st->dma_st.dma_chan,
					 st->dma_st.rx_dma_buf,
					 st->dma_st.rx_buf_sz,
					 st->dma_st.rx_buf_sz / 2,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(&indio_dev->dev, "cannot prepare DMA cyclic\n");
		return -EBUSY;
	}

	desc->callback = at91_dma_buffer_done;
	desc->callback_param = indio_dev;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(&indio_dev->dev, "cannot submit DMA cyclic\n");
		dmaengine_terminate_async(st->dma_st.dma_chan);
		return ret;
	}

	/* enable general overrun error signaling */
	at91_adc_writel(st, AT91_SAMA5D2_IER, AT91_SAMA5D2_IER_GOVRE);
	/* Issue pending DMA requests */
	dma_async_issue_pending(st->dma_st.dma_chan);

	/* consider current time as DMA start time for timestamps */
	st->dma_st.dma_ts = iio_get_time_ns(indio_dev);

	dev_dbg(&indio_dev->dev, "DMA cyclic started\n");

	return 0;
}
  439. static int at91_adc_buffer_postenable(struct iio_dev *indio_dev)
  440. {
  441. int ret;
  442. ret = at91_adc_dma_start(indio_dev);
  443. if (ret) {
  444. dev_err(&indio_dev->dev, "buffer postenable failed\n");
  445. return ret;
  446. }
  447. return iio_triggered_buffer_postenable(indio_dev);
  448. }
  449. static int at91_adc_buffer_predisable(struct iio_dev *indio_dev)
  450. {
  451. struct at91_adc_state *st = iio_priv(indio_dev);
  452. int ret;
  453. u8 bit;
  454. ret = iio_triggered_buffer_predisable(indio_dev);
  455. if (ret < 0)
  456. dev_err(&indio_dev->dev, "buffer predisable failed\n");
  457. if (!st->dma_st.dma_chan)
  458. return ret;
  459. /* if we are using DMA we must clear registers and end DMA */
  460. dmaengine_terminate_sync(st->dma_st.dma_chan);
  461. /*
  462. * For each enabled channel we must read the last converted value
  463. * to clear EOC status and not get a possible interrupt later.
  464. * This value is being read by DMA from LCDR anyway
  465. */
  466. for_each_set_bit(bit, indio_dev->active_scan_mask,
  467. indio_dev->num_channels) {
  468. struct iio_chan_spec const *chan = indio_dev->channels + bit;
  469. if (st->dma_st.dma_chan)
  470. at91_adc_readl(st, chan->address);
  471. }
  472. /* read overflow register to clear possible overflow status */
  473. at91_adc_readl(st, AT91_SAMA5D2_OVER);
  474. return ret;
  475. }
  476. static const struct iio_buffer_setup_ops at91_buffer_setup_ops = {
  477. .postenable = &at91_adc_buffer_postenable,
  478. .predisable = &at91_adc_buffer_predisable,
  479. };
  480. static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *indio,
  481. char *trigger_name)
  482. {
  483. struct iio_trigger *trig;
  484. int ret;
  485. trig = devm_iio_trigger_alloc(&indio->dev, "%s-dev%d-%s", indio->name,
  486. indio->id, trigger_name);
  487. if (!trig)
  488. return NULL;
  489. trig->dev.parent = indio->dev.parent;
  490. iio_trigger_set_drvdata(trig, indio);
  491. trig->ops = &at91_adc_trigger_ops;
  492. ret = devm_iio_trigger_register(&indio->dev, trig);
  493. if (ret)
  494. return ERR_PTR(ret);
  495. return trig;
  496. }
  497. static int at91_adc_trigger_init(struct iio_dev *indio)
  498. {
  499. struct at91_adc_state *st = iio_priv(indio);
  500. st->trig = at91_adc_allocate_trigger(indio, st->selected_trig->name);
  501. if (IS_ERR(st->trig)) {
  502. dev_err(&indio->dev,
  503. "could not allocate trigger\n");
  504. return PTR_ERR(st->trig);
  505. }
  506. return 0;
  507. }
  508. static void at91_adc_trigger_handler_nodma(struct iio_dev *indio_dev,
  509. struct iio_poll_func *pf)
  510. {
  511. struct at91_adc_state *st = iio_priv(indio_dev);
  512. int i = 0;
  513. u8 bit;
  514. for_each_set_bit(bit, indio_dev->active_scan_mask,
  515. indio_dev->num_channels) {
  516. struct iio_chan_spec const *chan = indio_dev->channels + bit;
  517. st->buffer[i] = at91_adc_readl(st, chan->address);
  518. i++;
  519. }
  520. iio_push_to_buffers_with_timestamp(indio_dev, st->buffer,
  521. pf->timestamp);
  522. }
  523. static void at91_adc_trigger_handler_dma(struct iio_dev *indio_dev)
  524. {
  525. struct at91_adc_state *st = iio_priv(indio_dev);
  526. int transferred_len = at91_adc_dma_size_done(st);
  527. s64 ns = iio_get_time_ns(indio_dev);
  528. s64 interval;
  529. int sample_index = 0, sample_count, sample_size;
  530. u32 status = at91_adc_readl(st, AT91_SAMA5D2_ISR);
  531. /* if we reached this point, we cannot sample faster */
  532. if (status & AT91_SAMA5D2_IER_GOVRE)
  533. pr_info_ratelimited("%s: conversion overrun detected\n",
  534. indio_dev->name);
  535. sample_size = div_s64(st->dma_st.rx_buf_sz, st->dma_st.watermark);
  536. sample_count = div_s64(transferred_len, sample_size);
  537. /*
  538. * interval between samples is total time since last transfer handling
  539. * divided by the number of samples (total size divided by sample size)
  540. */
  541. interval = div_s64((ns - st->dma_st.dma_ts), sample_count);
  542. while (transferred_len >= sample_size) {
  543. iio_push_to_buffers_with_timestamp(indio_dev,
  544. (st->dma_st.rx_buf + st->dma_st.buf_idx),
  545. (st->dma_st.dma_ts + interval * sample_index));
  546. /* adjust remaining length */
  547. transferred_len -= sample_size;
  548. /* adjust buffer index */
  549. st->dma_st.buf_idx += sample_size;
  550. /* in case of reaching end of buffer, reset index */
  551. if (st->dma_st.buf_idx >= st->dma_st.rx_buf_sz)
  552. st->dma_st.buf_idx = 0;
  553. sample_index++;
  554. }
  555. /* adjust saved time for next transfer handling */
  556. st->dma_st.dma_ts = iio_get_time_ns(indio_dev);
  557. }
  558. static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
  559. {
  560. struct iio_poll_func *pf = p;
  561. struct iio_dev *indio_dev = pf->indio_dev;
  562. struct at91_adc_state *st = iio_priv(indio_dev);
  563. if (st->dma_st.dma_chan)
  564. at91_adc_trigger_handler_dma(indio_dev);
  565. else
  566. at91_adc_trigger_handler_nodma(indio_dev, pf);
  567. iio_trigger_notify_done(indio_dev->trig);
  568. return IRQ_HANDLED;
  569. }
  570. static int at91_adc_buffer_init(struct iio_dev *indio)
  571. {
  572. return devm_iio_triggered_buffer_setup(&indio->dev, indio,
  573. &iio_pollfunc_store_time,
  574. &at91_adc_trigger_handler, &at91_buffer_setup_ops);
  575. }
  576. static unsigned at91_adc_startup_time(unsigned startup_time_min,
  577. unsigned adc_clk_khz)
  578. {
  579. static const unsigned int startup_lookup[] = {
  580. 0, 8, 16, 24,
  581. 64, 80, 96, 112,
  582. 512, 576, 640, 704,
  583. 768, 832, 896, 960
  584. };
  585. unsigned ticks_min, i;
  586. /*
  587. * Since the adc frequency is checked before, there is no reason
  588. * to not meet the startup time constraint.
  589. */
  590. ticks_min = startup_time_min * adc_clk_khz / 1000;
  591. for (i = 0; i < ARRAY_SIZE(startup_lookup); i++)
  592. if (startup_lookup[i] > ticks_min)
  593. break;
  594. return i;
  595. }
  596. static void at91_adc_setup_samp_freq(struct at91_adc_state *st, unsigned freq)
  597. {
  598. struct iio_dev *indio_dev = iio_priv_to_dev(st);
  599. unsigned f_per, prescal, startup, mr;
  600. f_per = clk_get_rate(st->per_clk);
  601. prescal = (f_per / (2 * freq)) - 1;
  602. startup = at91_adc_startup_time(st->soc_info.startup_time,
  603. freq / 1000);
  604. mr = at91_adc_readl(st, AT91_SAMA5D2_MR);
  605. mr &= ~(AT91_SAMA5D2_MR_STARTUP_MASK | AT91_SAMA5D2_MR_PRESCAL_MASK);
  606. mr |= AT91_SAMA5D2_MR_STARTUP(startup);
  607. mr |= AT91_SAMA5D2_MR_PRESCAL(prescal);
  608. at91_adc_writel(st, AT91_SAMA5D2_MR, mr);
  609. dev_dbg(&indio_dev->dev, "freq: %u, startup: %u, prescal: %u\n",
  610. freq, startup, prescal);
  611. }
  612. static unsigned at91_adc_get_sample_freq(struct at91_adc_state *st)
  613. {
  614. unsigned f_adc, f_per = clk_get_rate(st->per_clk);
  615. unsigned mr, prescal;
  616. mr = at91_adc_readl(st, AT91_SAMA5D2_MR);
  617. prescal = (mr >> AT91_SAMA5D2_MR_PRESCAL_OFFSET)
  618. & AT91_SAMA5D2_MR_PRESCAL_MAX;
  619. f_adc = f_per / (2 * (prescal + 1));
  620. return f_adc;
  621. }
  622. static irqreturn_t at91_adc_interrupt(int irq, void *private)
  623. {
  624. struct iio_dev *indio = private;
  625. struct at91_adc_state *st = iio_priv(indio);
  626. u32 status = at91_adc_readl(st, AT91_SAMA5D2_ISR);
  627. u32 imr = at91_adc_readl(st, AT91_SAMA5D2_IMR);
  628. if (!(status & imr))
  629. return IRQ_NONE;
  630. if (iio_buffer_enabled(indio) && !st->dma_st.dma_chan) {
  631. disable_irq_nosync(irq);
  632. iio_trigger_poll(indio->trig);
  633. } else if (iio_buffer_enabled(indio) && st->dma_st.dma_chan) {
  634. disable_irq_nosync(irq);
  635. WARN(true, "Unexpected irq occurred\n");
  636. } else if (!iio_buffer_enabled(indio)) {
  637. st->conversion_value = at91_adc_readl(st, st->chan->address);
  638. st->conversion_done = true;
  639. wake_up_interruptible(&st->wq_data_available);
  640. }
  641. return IRQ_HANDLED;
  642. }
/*
 * Handle sysfs reads: raw single conversion, channel scale and sampling
 * frequency. Raw reads claim direct mode (fails while a hw trigger/buffer
 * is active) and serialize on st->lock.
 */
static int at91_adc_read_raw(struct iio_dev *indio_dev,
			     struct iio_chan_spec const *chan,
			     int *val, int *val2, long mask)
{
	struct at91_adc_state *st = iio_priv(indio_dev);
	u32 cor = 0;
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		/* we cannot use software trigger if hw trigger enabled */
		ret = iio_device_claim_direct_mode(indio_dev);
		if (ret)
			return ret;
		mutex_lock(&st->lock);

		/* the irq handler reads the value from st->chan->address */
		st->chan = chan;

		/* select differential mode for this pair in COR */
		if (chan->differential)
			cor = (BIT(chan->channel) | BIT(chan->channel2)) <<
			      AT91_SAMA5D2_COR_DIFF_OFFSET;

		at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
		at91_adc_writel(st, AT91_SAMA5D2_CHER, BIT(chan->channel));
		at91_adc_writel(st, AT91_SAMA5D2_IER, BIT(chan->channel));
		at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_START);

		/* at91_adc_interrupt() latches the value and sets the flag */
		ret = wait_event_interruptible_timeout(st->wq_data_available,
						       st->conversion_done,
						       msecs_to_jiffies(1000));
		if (ret == 0)
			ret = -ETIMEDOUT;

		if (ret > 0) {
			*val = st->conversion_value;
			/* differential results are signed, 12 valid bits */
			if (chan->scan_type.sign == 's')
				*val = sign_extend32(*val, 11);
			ret = IIO_VAL_INT;
			st->conversion_done = false;
		}

		at91_adc_writel(st, AT91_SAMA5D2_IDR, BIT(chan->channel));
		at91_adc_writel(st, AT91_SAMA5D2_CHDR, BIT(chan->channel));

		/* Needed to ACK the DRDY interruption */
		at91_adc_readl(st, AT91_SAMA5D2_LCDR);

		mutex_unlock(&st->lock);

		iio_device_release_direct_mode(indio_dev);
		return ret;

	case IIO_CHAN_INFO_SCALE:
		/* vref in mV over 2^realbits; span doubles for differential */
		*val = st->vref_uv / 1000;
		if (chan->differential)
			*val *= 2;
		*val2 = chan->scan_type.realbits;
		return IIO_VAL_FRACTIONAL_LOG2;

	case IIO_CHAN_INFO_SAMP_FREQ:
		*val = at91_adc_get_sample_freq(st);
		return IIO_VAL_INT;

	default:
		return -EINVAL;
	}
}
  697. static int at91_adc_write_raw(struct iio_dev *indio_dev,
  698. struct iio_chan_spec const *chan,
  699. int val, int val2, long mask)
  700. {
  701. struct at91_adc_state *st = iio_priv(indio_dev);
  702. if (mask != IIO_CHAN_INFO_SAMP_FREQ)
  703. return -EINVAL;
  704. if (val < st->soc_info.min_sample_rate ||
  705. val > st->soc_info.max_sample_rate)
  706. return -EINVAL;
  707. at91_adc_setup_samp_freq(st, val);
  708. return 0;
  709. }
  710. static void at91_adc_dma_init(struct platform_device *pdev)
  711. {
  712. struct iio_dev *indio_dev = platform_get_drvdata(pdev);
  713. struct at91_adc_state *st = iio_priv(indio_dev);
  714. struct dma_slave_config config = {0};
  715. /*
  716. * We make the buffer double the size of the fifo,
  717. * such that DMA uses one half of the buffer (full fifo size)
  718. * and the software uses the other half to read/write.
  719. */
  720. unsigned int pages = DIV_ROUND_UP(AT91_HWFIFO_MAX_SIZE *
  721. AT91_BUFFER_MAX_CONVERSION_BYTES * 2,
  722. PAGE_SIZE);
  723. if (st->dma_st.dma_chan)
  724. return;
  725. st->dma_st.dma_chan = dma_request_slave_channel(&pdev->dev, "rx");
  726. if (!st->dma_st.dma_chan) {
  727. dev_info(&pdev->dev, "can't get DMA channel\n");
  728. goto dma_exit;
  729. }
  730. st->dma_st.rx_buf = dma_alloc_coherent(st->dma_st.dma_chan->device->dev,
  731. pages * PAGE_SIZE,
  732. &st->dma_st.rx_dma_buf,
  733. GFP_KERNEL);
  734. if (!st->dma_st.rx_buf) {
  735. dev_info(&pdev->dev, "can't allocate coherent DMA area\n");
  736. goto dma_chan_disable;
  737. }
  738. /* Configure DMA channel to read data register */
  739. config.direction = DMA_DEV_TO_MEM;
  740. config.src_addr = (phys_addr_t)(st->dma_st.phys_addr
  741. + AT91_SAMA5D2_LCDR);
  742. config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
  743. config.src_maxburst = 1;
  744. config.dst_maxburst = 1;
  745. if (dmaengine_slave_config(st->dma_st.dma_chan, &config)) {
  746. dev_info(&pdev->dev, "can't configure DMA slave\n");
  747. goto dma_free_area;
  748. }
  749. dev_info(&pdev->dev, "using %s for rx DMA transfers\n",
  750. dma_chan_name(st->dma_st.dma_chan));
  751. return;
  752. dma_free_area:
  753. dma_free_coherent(st->dma_st.dma_chan->device->dev, pages * PAGE_SIZE,
  754. st->dma_st.rx_buf, st->dma_st.rx_dma_buf);
  755. dma_chan_disable:
  756. dma_release_channel(st->dma_st.dma_chan);
  757. st->dma_st.dma_chan = 0;
  758. dma_exit:
  759. dev_info(&pdev->dev, "continuing without DMA support\n");
  760. }
  761. static void at91_adc_dma_disable(struct platform_device *pdev)
  762. {
  763. struct iio_dev *indio_dev = platform_get_drvdata(pdev);
  764. struct at91_adc_state *st = iio_priv(indio_dev);
  765. unsigned int pages = DIV_ROUND_UP(AT91_HWFIFO_MAX_SIZE *
  766. AT91_BUFFER_MAX_CONVERSION_BYTES * 2,
  767. PAGE_SIZE);
  768. /* if we are not using DMA, just return */
  769. if (!st->dma_st.dma_chan)
  770. return;
  771. /* wait for all transactions to be terminated first*/
  772. dmaengine_terminate_sync(st->dma_st.dma_chan);
  773. dma_free_coherent(st->dma_st.dma_chan->device->dev, pages * PAGE_SIZE,
  774. st->dma_st.rx_buf, st->dma_st.rx_dma_buf);
  775. dma_release_channel(st->dma_st.dma_chan);
  776. st->dma_st.dma_chan = 0;
  777. dev_info(&pdev->dev, "continuing without DMA support\n");
  778. }
  779. static int at91_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
  780. {
  781. struct at91_adc_state *st = iio_priv(indio_dev);
  782. if (val > AT91_HWFIFO_MAX_SIZE)
  783. return -EINVAL;
  784. if (!st->selected_trig->hw_trig) {
  785. dev_dbg(&indio_dev->dev, "we need hw trigger for DMA\n");
  786. return 0;
  787. }
  788. dev_dbg(&indio_dev->dev, "new watermark is %u\n", val);
  789. st->dma_st.watermark = val;
  790. /*
  791. * The logic here is: if we have watermark 1, it means we do
  792. * each conversion with it's own IRQ, thus we don't need DMA.
  793. * If the watermark is higher, we do DMA to do all the transfers in bulk
  794. */
  795. if (val == 1)
  796. at91_adc_dma_disable(to_platform_device(&indio_dev->dev));
  797. else if (val > 1)
  798. at91_adc_dma_init(to_platform_device(&indio_dev->dev));
  799. return 0;
  800. }
/* IIO core callbacks for this driver. */
static const struct iio_info at91_adc_info = {
	.read_raw = &at91_adc_read_raw,
	.write_raw = &at91_adc_write_raw,
	.hwfifo_set_watermark = &at91_adc_set_watermark,
};
/*
 * at91_adc_hw_init() - bring the controller to a known state.
 *
 * Software-resets the IP, masks all interrupts, programs the mode
 * register and applies the slowest supported sampling frequency.
 * Called from probe and again on resume.
 */
static void at91_adc_hw_init(struct at91_adc_state *st)
{
	at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_SWRST);
	/* Mask every interrupt source; they are enabled on demand later. */
	at91_adc_writel(st, AT91_SAMA5D2_IDR, 0xffffffff);
	/*
	 * Transfer field must be set to 2 according to the datasheet and
	 * allows different analog settings for each channel.
	 */
	at91_adc_writel(st, AT91_SAMA5D2_MR,
			AT91_SAMA5D2_MR_TRANSFER(2) | AT91_SAMA5D2_MR_ANACH);

	/* Start at the minimum rate; userspace may raise it via sysfs. */
	at91_adc_setup_samp_freq(st, st->soc_info.min_sample_rate);
}
  818. static ssize_t at91_adc_get_fifo_state(struct device *dev,
  819. struct device_attribute *attr, char *buf)
  820. {
  821. struct iio_dev *indio_dev =
  822. platform_get_drvdata(to_platform_device(dev));
  823. struct at91_adc_state *st = iio_priv(indio_dev);
  824. return scnprintf(buf, PAGE_SIZE, "%d\n", !!st->dma_st.dma_chan);
  825. }
  826. static ssize_t at91_adc_get_watermark(struct device *dev,
  827. struct device_attribute *attr, char *buf)
  828. {
  829. struct iio_dev *indio_dev =
  830. platform_get_drvdata(to_platform_device(dev));
  831. struct at91_adc_state *st = iio_priv(indio_dev);
  832. return scnprintf(buf, PAGE_SIZE, "%d\n", st->dma_st.watermark);
  833. }
/* Hardware-FIFO sysfs attributes, attached to the IIO buffer in probe. */
static IIO_DEVICE_ATTR(hwfifo_enabled, 0444,
		       at91_adc_get_fifo_state, NULL, 0);
static IIO_DEVICE_ATTR(hwfifo_watermark, 0444,
		       at91_adc_get_watermark, NULL, 0);

/* Watermark 1 means no FIFO/DMA, so the usable FIFO range starts at 2. */
static IIO_CONST_ATTR(hwfifo_watermark_min, "2");
static IIO_CONST_ATTR(hwfifo_watermark_max, AT91_HWFIFO_MAX_SIZE_STR);

static const struct attribute *at91_adc_fifo_attributes[] = {
	&iio_const_attr_hwfifo_watermark_min.dev_attr.attr,
	&iio_const_attr_hwfifo_watermark_max.dev_attr.attr,
	&iio_dev_attr_hwfifo_watermark.dev_attr.attr,
	&iio_dev_attr_hwfifo_enabled.dev_attr.attr,
	NULL,
};
/*
 * at91_adc_probe() - bind the driver to a SAMA5D2 ADC instance.
 *
 * Parses the DT properties, maps registers, acquires clock/regulators/IRQ,
 * initializes the hardware and registers the IIO device.  Resources
 * acquired with devm_* are released automatically; everything else is
 * unwound through the goto chain at the bottom (reverse acquisition
 * order).
 */
static int at91_adc_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;
	struct at91_adc_state *st;
	struct resource *res;
	int ret, i;
	/* Default: no external trigger edge -> software trigger only. */
	u32 edge_type = IRQ_TYPE_NONE;

	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
	if (!indio_dev)
		return -ENOMEM;

	indio_dev->dev.parent = &pdev->dev;
	indio_dev->name = dev_name(&pdev->dev);
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->info = &at91_adc_info;
	indio_dev->channels = at91_adc_channels;
	indio_dev->num_channels = ARRAY_SIZE(at91_adc_channels);

	st = iio_priv(indio_dev);

	/* The sample-rate limits and startup time are mandatory in DT. */
	ret = of_property_read_u32(pdev->dev.of_node,
				   "atmel,min-sample-rate-hz",
				   &st->soc_info.min_sample_rate);
	if (ret) {
		dev_err(&pdev->dev,
			"invalid or missing value for atmel,min-sample-rate-hz\n");
		return ret;
	}

	ret = of_property_read_u32(pdev->dev.of_node,
				   "atmel,max-sample-rate-hz",
				   &st->soc_info.max_sample_rate);
	if (ret) {
		dev_err(&pdev->dev,
			"invalid or missing value for atmel,max-sample-rate-hz\n");
		return ret;
	}

	ret = of_property_read_u32(pdev->dev.of_node, "atmel,startup-time-ms",
				   &st->soc_info.startup_time);
	if (ret) {
		dev_err(&pdev->dev,
			"invalid or missing value for atmel,startup-time-ms\n");
		return ret;
	}

	/* Optional: absence just means no hardware trigger is available. */
	ret = of_property_read_u32(pdev->dev.of_node,
				   "atmel,trigger-edge-type", &edge_type);
	if (ret) {
		dev_dbg(&pdev->dev,
			"atmel,trigger-edge-type not specified, only software trigger available\n");
	}

	st->selected_trig = NULL;

	/* find the right trigger, or no trigger at all */
	/* +1: the list also carries the software/no-trigger entry. */
	for (i = 0; i < AT91_SAMA5D2_HW_TRIG_CNT + 1; i++)
		if (at91_adc_trigger_list[i].edge_type == edge_type) {
			st->selected_trig = &at91_adc_trigger_list[i];
			break;
		}

	if (!st->selected_trig) {
		dev_err(&pdev->dev, "invalid external trigger edge value\n");
		return -EINVAL;
	}

	init_waitqueue_head(&st->wq_data_available);
	mutex_init(&st->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	/* if we plan to use DMA, we need the physical address of the regs */
	st->dma_st.phys_addr = res->start;

	st->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(st->base))
		return PTR_ERR(st->base);

	st->irq = platform_get_irq(pdev, 0);
	if (st->irq <= 0) {
		/* Normalize "no IRQ" (0) to an error code. */
		if (!st->irq)
			st->irq = -ENXIO;

		return st->irq;
	}

	st->per_clk = devm_clk_get(&pdev->dev, "adc_clk");
	if (IS_ERR(st->per_clk))
		return PTR_ERR(st->per_clk);

	st->reg = devm_regulator_get(&pdev->dev, "vddana");
	if (IS_ERR(st->reg))
		return PTR_ERR(st->reg);

	st->vref = devm_regulator_get(&pdev->dev, "vref");
	if (IS_ERR(st->vref))
		return PTR_ERR(st->vref);

	ret = devm_request_irq(&pdev->dev, st->irq, at91_adc_interrupt, 0,
			       pdev->dev.driver->name, indio_dev);
	if (ret)
		return ret;

	/* Power rails must be up before touching the hardware. */
	ret = regulator_enable(st->reg);
	if (ret)
		return ret;

	ret = regulator_enable(st->vref);
	if (ret)
		goto reg_disable;

	st->vref_uv = regulator_get_voltage(st->vref);
	if (st->vref_uv <= 0) {
		ret = -EINVAL;
		goto vref_disable;
	}

	at91_adc_hw_init(st);

	ret = clk_prepare_enable(st->per_clk);
	if (ret)
		goto vref_disable;

	platform_set_drvdata(pdev, indio_dev);

	/* Buffer and trigger support only make sense with a hw trigger. */
	if (st->selected_trig->hw_trig) {
		ret = at91_adc_buffer_init(indio_dev);
		if (ret < 0) {
			dev_err(&pdev->dev, "couldn't initialize the buffer.\n");
			goto per_clk_disable_unprepare;
		}

		ret = at91_adc_trigger_init(indio_dev);
		if (ret < 0) {
			dev_err(&pdev->dev, "couldn't setup the triggers.\n");
			goto per_clk_disable_unprepare;
		}
		/*
		 * Initially the iio buffer has a length of 2 and
		 * a watermark of 1
		 */
		st->dma_st.watermark = 1;

		iio_buffer_set_attrs(indio_dev->buffer,
				     at91_adc_fifo_attributes);
	}

	/* DMA mask failure is not fatal; the driver falls back to IRQ mode. */
	if (dma_coerce_mask_and_coherent(&indio_dev->dev, DMA_BIT_MASK(32)))
		dev_info(&pdev->dev, "cannot set DMA mask to 32-bit\n");

	ret = iio_device_register(indio_dev);
	if (ret < 0)
		goto dma_disable;

	if (st->selected_trig->hw_trig)
		dev_info(&pdev->dev, "setting up trigger as %s\n",
			 st->selected_trig->name);

	dev_info(&pdev->dev, "version: %x\n",
		 readl_relaxed(st->base + AT91_SAMA5D2_VERSION));

	return 0;

/* Error unwind: reverse order of acquisition above. */
dma_disable:
	at91_adc_dma_disable(pdev);
per_clk_disable_unprepare:
	clk_disable_unprepare(st->per_clk);
vref_disable:
	regulator_disable(st->vref);
reg_disable:
	regulator_disable(st->reg);
	return ret;
}
/*
 * at91_adc_remove() - unbind the driver.
 *
 * Teardown mirrors probe in reverse: unregister from IIO first so no new
 * requests arrive, then stop DMA, gate the clock and cut the power rails.
 */
static int at91_adc_remove(struct platform_device *pdev)
{
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
	struct at91_adc_state *st = iio_priv(indio_dev);

	iio_device_unregister(indio_dev);

	at91_adc_dma_disable(pdev);

	clk_disable_unprepare(st->per_clk);

	regulator_disable(st->vref);
	regulator_disable(st->reg);

	return 0;
}
/*
 * at91_adc_suspend() - PM suspend hook: quiesce and power down the ADC.
 */
static __maybe_unused int at91_adc_suspend(struct device *dev)
{
	struct iio_dev *indio_dev =
			platform_get_drvdata(to_platform_device(dev));
	struct at91_adc_state *st = iio_priv(indio_dev);

	/*
	 * Do a software reset of the ADC before we go to suspend.
	 * This will ensure that all pins are free from being muxed by the ADC
	 * and can be used by other devices.
	 * Otherwise, the ADC will hog them and we can't go to suspend mode.
	 */
	at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_SWRST);

	clk_disable_unprepare(st->per_clk);
	regulator_disable(st->vref);
	regulator_disable(st->reg);

	return pinctrl_pm_select_sleep_state(dev);
}
/*
 * at91_adc_resume() - PM resume hook: restore power, clock and hardware
 * state, then re-arm the trigger if a buffered capture was running.
 * On failure the partially acquired resources are released via the goto
 * chain (reverse order).
 */
static __maybe_unused int at91_adc_resume(struct device *dev)
{
	struct iio_dev *indio_dev =
			platform_get_drvdata(to_platform_device(dev));
	struct at91_adc_state *st = iio_priv(indio_dev);
	int ret;

	ret = pinctrl_pm_select_default_state(dev);
	if (ret)
		goto resume_failed;

	ret = regulator_enable(st->reg);
	if (ret)
		goto resume_failed;

	ret = regulator_enable(st->vref);
	if (ret)
		goto reg_disable_resume;

	ret = clk_prepare_enable(st->per_clk);
	if (ret)
		goto vref_disable_resume;

	/* Registers were lost across suspend; reprogram from scratch. */
	at91_adc_hw_init(st);

	/* reconfiguring trigger hardware state */
	if (iio_buffer_enabled(indio_dev))
		at91_adc_configure_trigger(st->trig, true);

	return 0;

vref_disable_resume:
	regulator_disable(st->vref);
reg_disable_resume:
	regulator_disable(st->reg);
resume_failed:
	dev_err(&indio_dev->dev, "failed to resume\n");
	return ret;
}
/* System sleep PM ops (no runtime PM). */
static SIMPLE_DEV_PM_OPS(at91_adc_pm_ops, at91_adc_suspend, at91_adc_resume);

/* Device-tree match table. */
static const struct of_device_id at91_adc_dt_match[] = {
	{
		.compatible = "atmel,sama5d2-adc",
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, at91_adc_dt_match);
  1057. static struct platform_driver at91_adc_driver = {
  1058. .probe = at91_adc_probe,
  1059. .remove = at91_adc_remove,
  1060. .driver = {
  1061. .name = "at91-sama5d2_adc",
  1062. .of_match_table = at91_adc_dt_match,
  1063. .pm = &at91_adc_pm_ops,
  1064. },
  1065. };
  1066. module_platform_driver(at91_adc_driver)
  1067. MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
  1068. MODULE_DESCRIPTION("Atmel AT91 SAMA5D2 ADC");
  1069. MODULE_LICENSE("GPL v2");