amba-pl011.c

/*
 * Driver for AMBA serial ports
 *
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * Copyright 1999 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * This is a generic driver for ARM AMBA-type serial ports.  They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs.  If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */

#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>
#include <linux/acpi.h>

#include "amba-pl011.h"

#define UART_NR			14

#define SERIAL_AMBA_MAJOR	204
#define SERIAL_AMBA_MINOR	64
#define SERIAL_AMBA_NR		UART_NR

#define AMBA_ISR_PASS_LIMIT	256

#define UART_DR_ERROR		(UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
#define UART_DUMMY_DR_RX	(1 << 16)
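/*
 * The dummy bit above is ORed into each character taken from the RX FIFO,
 * so the value stays non-zero even for a NUL data byte with no error flags.
 */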

static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = UART011_LCRH,
	[REG_LCRH_TX] = UART011_LCRH,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
};

/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
	const u16		*reg_offset;
	unsigned int		ifls;
	bool			access_32b;
	bool			oversampling;
	bool			dma_threshold;
	bool			cts_event_workaround;
	bool			always_enabled;
	bool			fixed_options;

	unsigned int (*get_fifosize)(struct amba_device *dev);
};
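
/* ARM PL011 revisions below 3 have a 16-byte FIFO; later revisions have 32. */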
static unsigned int get_fifosize_arm(struct amba_device *dev)
{
	return amba_rev(dev) < 3 ? 16 : 32;
}

static struct vendor_data vendor_arm = {
	.reg_offset		= pl011_std_offsets,
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= false,
	.fixed_options		= false,
	.get_fifosize		= get_fifosize_arm,
};

static struct vendor_data vendor_sbsa = {
	.reg_offset		= pl011_std_offsets,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= true,
	.fixed_options		= true,
};

static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_ST_DMAWM] = ST_UART011_DMAWM,
	[REG_ST_TIMEOUT] = ST_UART011_TIMEOUT,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = ST_UART011_LCRH_RX,
	[REG_LCRH_TX] = ST_UART011_LCRH_TX,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
	[REG_ST_XFCR] = ST_UART011_XFCR,
	[REG_ST_XON1] = ST_UART011_XON1,
	[REG_ST_XON2] = ST_UART011_XON2,
	[REG_ST_XOFF1] = ST_UART011_XOFF1,
	[REG_ST_XOFF2] = ST_UART011_XOFF2,
	[REG_ST_ITCR] = ST_UART011_ITCR,
	[REG_ST_ITIP] = ST_UART011_ITIP,
	[REG_ST_ABCR] = ST_UART011_ABCR,
	[REG_ST_ABIMSC] = ST_UART011_ABIMSC,
};
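
/* ST variants have a fixed 64-byte FIFO. */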
static unsigned int get_fifosize_st(struct amba_device *dev)
{
	return 64;
}

static struct vendor_data vendor_st = {
	.reg_offset		= pl011_st_offsets,
	.ifls			= UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
	.oversampling		= true,
	.dma_threshold		= true,
	.cts_event_workaround	= true,
	.always_enabled		= false,
	.fixed_options		= false,
	.get_fifosize		= get_fifosize_st,
};

static const u16 pl011_zte_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = ZX_UART011_DR,
	[REG_FR] = ZX_UART011_FR,
	[REG_LCRH_RX] = ZX_UART011_LCRH,
	[REG_LCRH_TX] = ZX_UART011_LCRH,
	[REG_IBRD] = ZX_UART011_IBRD,
	[REG_FBRD] = ZX_UART011_FBRD,
	[REG_CR] = ZX_UART011_CR,
	[REG_IFLS] = ZX_UART011_IFLS,
	[REG_IMSC] = ZX_UART011_IMSC,
	[REG_RIS] = ZX_UART011_RIS,
	[REG_MIS] = ZX_UART011_MIS,
	[REG_ICR] = ZX_UART011_ICR,
	[REG_DMACR] = ZX_UART011_DMACR,
};

static struct vendor_data vendor_zte __maybe_unused = {
	.reg_offset		= pl011_zte_offsets,
	.access_32b		= true,
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.get_fifosize		= get_fifosize_arm,
};

/* Deals with DMA transactions */
struct pl011_sgbuf {
	struct scatterlist sg;
	char *buf;
};

struct pl011_dmarx_data {
	struct dma_chan		*chan;
	struct completion	complete;
	bool			use_buf_b;
	struct pl011_sgbuf	sgbuf_a;
	struct pl011_sgbuf	sgbuf_b;
	dma_cookie_t		cookie;
	bool			running;
	struct timer_list	timer;
	unsigned int		last_residue;
	unsigned long		last_jiffies;
	bool			auto_poll_rate;
	unsigned int		poll_rate;
	unsigned int		poll_timeout;
};

struct pl011_dmatx_data {
	struct dma_chan		*chan;
	struct scatterlist	sg;
	char			*buf;
	bool			queued;
};

/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
	struct uart_port	port;
	const u16		*reg_offset;
	struct clk		*clk;
	const struct vendor_data *vendor;
	unsigned int		dmacr;		/* dma control reg */
	unsigned int		im;		/* interrupt mask */
	unsigned int		old_status;
	unsigned int		fifosize;	/* vendor-specific */
	unsigned int		old_cr;		/* state during shutdown */
	bool			autorts;
	unsigned int		fixed_baud;	/* vendor-set fixed baud rate */
	char			type[12];
#ifdef CONFIG_DMA_ENGINE
	/* DMA stuff */
	bool			using_tx_dma;
	bool			using_rx_dma;
	struct pl011_dmarx_data dmarx;
	struct pl011_dmatx_data	dmatx;
	bool			dma_probed;
#endif
};

static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap,
	unsigned int reg)
{
	return uap->reg_offset[reg];
}

static unsigned int pl011_read(const struct uart_amba_port *uap,
	unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	return (uap->port.iotype == UPIO_MEM32) ?
		readl_relaxed(addr) : readw_relaxed(addr);
}

static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
	unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	if (uap->port.iotype == UPIO_MEM32)
		writel_relaxed(val, addr);
	else
		writew_relaxed(val, addr);
}

/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer.  Returns the number of characters
 * read from the FIFO.
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
	u16 status;
	unsigned int ch, flag, max_count = 256;
	int fifotaken = 0;

	while (max_count--) {
		status = pl011_read(uap, REG_FR);
		if (status & UART01x_FR_RXFE)
			break;

		/* Take chars from the FIFO and update status */
		ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX;
		flag = TTY_NORMAL;
		uap->port.icount.rx++;
		fifotaken++;

		if (unlikely(ch & UART_DR_ERROR)) {
			if (ch & UART011_DR_BE) {
				ch &= ~(UART011_DR_FE | UART011_DR_PE);
				uap->port.icount.brk++;
				if (uart_handle_break(&uap->port))
					continue;
			} else if (ch & UART011_DR_PE)
				uap->port.icount.parity++;
			else if (ch & UART011_DR_FE)
				uap->port.icount.frame++;
			if (ch & UART011_DR_OE)
				uap->port.icount.overrun++;

			ch &= uap->port.read_status_mask;

			if (ch & UART011_DR_BE)
				flag = TTY_BREAK;
			else if (ch & UART011_DR_PE)
				flag = TTY_PARITY;
			else if (ch & UART011_DR_FE)
				flag = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(&uap->port, ch & 255))
			continue;

		uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
	}

	return fifotaken;
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE

#define PL011_DMA_BUFFER_SIZE PAGE_SIZE

static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	sg->buf = dma_alloc_coherent(chan->device->dev,
		PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
	if (!sg->buf)
		return -ENOMEM;

	sg_init_table(&sg->sg, 1);
	sg_set_page(&sg->sg, phys_to_page(dma_addr),
		PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
	sg_dma_address(&sg->sg) = dma_addr;
	sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;

	return 0;
}

static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	if (sg->buf) {
		dma_free_coherent(chan->device->dev,
			PL011_DMA_BUFFER_SIZE, sg->buf,
			sg_dma_address(&sg->sg));
	}
}
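
/*
 * Acquire a TX channel (and optionally an RX channel), preferring channels
 * described by firmware and falling back to the platform data filter
 * function, then apply the slave configuration for each direction.
 */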
static void pl011_dma_probe(struct uart_amba_port *uap)
{
	/* DMA is the sole user of the platform data right now */
	struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
	struct device *dev = uap->port.dev;
	struct dma_slave_config tx_conf = {
		.dst_addr = uap->port.mapbase +
				 pl011_reg_to_offset(uap, REG_DR),
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.direction = DMA_MEM_TO_DEV,
		.dst_maxburst = uap->fifosize >> 1,
		.device_fc = false,
	};
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	uap->dma_probed = true;
	chan = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER) {
			uap->dma_probed = false;
			return;
		}

		/* We need platform data */
		if (!plat || !plat->dma_filter) {
			dev_info(uap->port.dev, "no DMA platform data\n");
			return;
		}

		/* Try to acquire a generic DMA engine slave TX channel */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = dma_request_channel(mask, plat->dma_filter,
						plat->dma_tx_param);
		if (!chan) {
			dev_err(uap->port.dev, "no TX DMA channel!\n");
			return;
		}
	}

	dmaengine_slave_config(chan, &tx_conf);
	uap->dmatx.chan = chan;

	dev_info(uap->port.dev, "DMA channel TX %s\n",
		 dma_chan_name(uap->dmatx.chan));

	/* Optionally make use of an RX channel as well */
	chan = dma_request_slave_channel(dev, "rx");

	if (!chan && plat && plat->dma_rx_param) {
		chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);

		if (!chan) {
			dev_err(uap->port.dev, "no RX DMA channel!\n");
			return;
		}
	}

	if (chan) {
		struct dma_slave_config rx_conf = {
			.src_addr = uap->port.mapbase +
				pl011_reg_to_offset(uap, REG_DR),
			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
			.direction = DMA_DEV_TO_MEM,
			.src_maxburst = uap->fifosize >> 2,
			.device_fc = false,
		};
		struct dma_slave_caps caps;

		/*
		 * Some DMA controllers provide information on their
		 * capabilities.  If the controller does, check for suitable
		 * residue processing, otherwise assume all is well.
		 */
		if (0 == dma_get_slave_caps(chan, &caps)) {
			if (caps.residue_granularity ==
					DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
				dma_release_channel(chan);
				dev_info(uap->port.dev,
					"RX DMA disabled - no residue processing\n");
				return;
			}
		}
		dmaengine_slave_config(chan, &rx_conf);
		uap->dmarx.chan = chan;

		uap->dmarx.auto_poll_rate = false;
		if (plat && plat->dma_rx_poll_enable) {
			/* Set poll rate if specified. */
			if (plat->dma_rx_poll_rate) {
				uap->dmarx.auto_poll_rate = false;
				uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
			} else {
				/*
				 * The poll rate defaults to 100 ms if not
				 * specified.  It will be adjusted to match
				 * the baud rate in set_termios.
				 */
				uap->dmarx.auto_poll_rate = true;
				uap->dmarx.poll_rate = 100;
			}
			/* poll_timeout defaults to 3 seconds if not specified. */
			if (plat->dma_rx_poll_timeout)
				uap->dmarx.poll_timeout =
					plat->dma_rx_poll_timeout;
			else
				uap->dmarx.poll_timeout = 3000;
		} else if (!plat && dev->of_node) {
			uap->dmarx.auto_poll_rate = of_property_read_bool(
						dev->of_node, "auto-poll");
			if (uap->dmarx.auto_poll_rate) {
				u32 x;

				if (0 == of_property_read_u32(dev->of_node,
					"poll-rate-ms", &x))
					uap->dmarx.poll_rate = x;
				else
					uap->dmarx.poll_rate = 100;
				if (0 == of_property_read_u32(dev->of_node,
					"poll-timeout-ms", &x))
					uap->dmarx.poll_timeout = x;
				else
					uap->dmarx.poll_timeout = 3000;
			}
		}
		dev_info(uap->port.dev, "DMA channel RX %s\n",
			 dma_chan_name(uap->dmarx.chan));
	}
}

static void pl011_dma_remove(struct uart_amba_port *uap)
{
	if (uap->dmatx.chan)
		dma_release_channel(uap->dmatx.chan);
	if (uap->dmarx.chan)
		dma_release_channel(uap->dmarx.chan);
}

/* Forward declare these for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);
static void pl011_start_tx_pio(struct uart_amba_port *uap);

/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 */
static void pl011_dma_tx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	unsigned long flags;
	u16 dmacr;

	spin_lock_irqsave(&uap->port.lock, flags);
	if (uap->dmatx.queued)
		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
			     DMA_TO_DEVICE);

	dmacr = uap->dmacr;
	uap->dmacr = dmacr & ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (eg, XOFF received, or we want to send an X-char.)
	 *
	 * Note: we need to be careful here of a potential race between DMA
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer is completing, we must update the TX queued status to
	 * get further refills (hence we check dmacr).
	 */
	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
	    uart_circ_empty(&uap->port.state->xmit)) {
		uap->dmatx.queued = false;
		spin_unlock_irqrestore(&uap->port.lock, flags);
		return;
	}

	if (pl011_dma_tx_refill(uap) <= 0)
		/*
		 * We didn't queue a DMA buffer for some reason, but we
		 * have data pending to be sent.  Re-enable the TX IRQ.
		 */
		pl011_start_tx_pio(uap);

	spin_unlock_irqrestore(&uap->port.lock, flags);
}

/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   1 if we queued up a TX DMA buffer.
 *   0 if we didn't want to handle this by DMA
 *  <0 on error
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	struct dma_chan *chan = dmatx->chan;
	struct dma_device *dma_dev = chan->device;
	struct dma_async_tx_descriptor *desc;
	struct circ_buf *xmit = &uap->port.state->xmit;
	unsigned int count;

	/*
	 * Try to avoid the overhead involved in using DMA if the
	 * transaction fits in the first half of the FIFO, by using
	 * the standard interrupt handling.  This ensures that we
	 * issue a uart_write_wakeup() at the appropriate time.
	 */
	count = uart_circ_chars_pending(xmit);
	if (count < (uap->fifosize >> 1)) {
		uap->dmatx.queued = false;
		return 0;
	}

	/*
	 * Bodge: don't send the last character by DMA, as this
	 * will prevent XON from notifying us to restart DMA.
	 */
	count -= 1;

	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
	if (count > PL011_DMA_BUFFER_SIZE)
		count = PL011_DMA_BUFFER_SIZE;

	if (xmit->tail < xmit->head)
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
	else {
		size_t first = UART_XMIT_SIZE - xmit->tail;
		size_t second;

		if (first > count)
			first = count;
		second = count - first;

		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
		if (second)
			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
	}

	dmatx->sg.length = count;

	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
		uap->dmatx.queued = false;
		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		/*
		 * If DMA cannot be used right now, we complete this
		 * transaction via IRQ and let the TTY layer retry.
		 */
		dev_dbg(uap->port.dev, "TX DMA busy\n");
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_tx_callback;
	desc->callback_param = uap;

	/* All errors should happen at prepare time */
	dmaengine_submit(desc);

	/* Fire the DMA transaction */
	dma_dev->device_issue_pending(chan);

	uap->dmacr |= UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmatx.queued = true;

	/*
	 * Now we know that DMA will fire, so advance the ring buffer
	 * with the stuff we just dispatched.
	 */
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	uap->port.icount.tx += count;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	return 1;
}

/*
 * We received a transmit interrupt without a pending X-char but with
 * pending characters.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want to use PIO to transmit
 *   true if we queued a DMA buffer
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	if (!uap->using_tx_dma)
		return false;

	/*
	 * If we already have a TX buffer queued, but received a
	 * TX interrupt, it will be because we've just sent an X-char.
	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
	 */
	if (uap->dmatx.queued) {
		uap->dmacr |= UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}

	/*
	 * We don't have a TX buffer queued, so try to queue one.
	 * If we successfully queued a buffer, mask the TX IRQ.
	 */
	if (pl011_dma_tx_refill(uap) > 0) {
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}

	return false;
}

/*
 * Stop the DMA transmit (eg, due to received XOFF).
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
	if (uap->dmatx.queued) {
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}

/*
 * Try to start a DMA transmit, or in the case of an XON/OFF
 * character queued for send, try to get that character out ASAP.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want the TX IRQ to be enabled
 *   true if we have a buffer queued
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	u16 dmacr;

	if (!uap->using_tx_dma)
		return false;

	if (!uap->port.x_char) {
		/* no X-char, try to push chars out in DMA mode */
		bool ret = true;

		if (!uap->dmatx.queued) {
			if (pl011_dma_tx_refill(uap) > 0) {
				uap->im &= ~UART011_TXIM;
				pl011_write(uap->im, uap, REG_IMSC);
			} else
				ret = false;
		} else if (!(uap->dmacr & UART011_TXDMAE)) {
			uap->dmacr |= UART011_TXDMAE;
			pl011_write(uap->dmacr, uap, REG_DMACR);
		}
		return ret;
	}

	/*
	 * We have an X-char to send.  Disable DMA to prevent it loading
	 * the TX fifo, and then see if we can stuff it into the FIFO.
	 */
	dmacr = uap->dmacr;
	uap->dmacr &= ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	if (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) {
		/*
		 * No space in the FIFO, so enable the transmit interrupt
		 * so we know when there is space.  Note that once we've
		 * loaded the character, we should just re-enable DMA.
		 */
		return false;
	}

	pl011_write(uap->port.x_char, uap, REG_DR);
	uap->port.icount.tx++;
	uap->port.x_char = 0;

	/* Success - restore the DMA state */
	uap->dmacr = dmacr;
	pl011_write(dmacr, uap, REG_DMACR);

	return true;
}

/*
 * Flush the transmit buffer.
 * Locking: called with port lock held and IRQs disabled.
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!uap->using_tx_dma)
		return;

	/* Avoid deadlock with the DMA engine callback */
	spin_unlock(&uap->port.lock);
	dmaengine_terminate_all(uap->dmatx.chan);
	spin_lock(&uap->port.lock);
	if (uap->dmatx.queued) {
		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
			     DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}

static void pl011_dma_rx_callback(void *data);
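
/*
 * Queue an RX DMA job on the current buffer, enable RX DMA in the UART
 * and mask the RX interrupt; a non-zero return means the caller should
 * fall back to interrupt mode.
 */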
static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	struct dma_chan *rxchan = uap->dmarx.chan;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_async_tx_descriptor *desc;
	struct pl011_sgbuf *sgbuf;

	if (!rxchan)
		return -EIO;

	/* Start the RX DMA job */
	sgbuf = uap->dmarx.use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/*
	 * If the DMA engine is busy and cannot prepare a
	 * channel, no big deal, the driver will fall back
	 * to interrupt mode as a result of this error code.
	 */
	if (!desc) {
		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_rx_callback;
	desc->callback_param = uap;
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);

	uap->dmacr |= UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = true;

	uap->im &= ~UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);

	return 0;
}

/*
 * This is called when either the DMA job is complete, or
 * the FIFO timeout interrupt occurred.  This must be called
 * with the port spinlock uap->port.lock held.
 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
			       u32 pending, bool use_buf_b,
			       bool readfifo)
{
	struct tty_port *port = &uap->port.state->port;
	struct pl011_sgbuf *sgbuf = use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	int dma_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */

	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	int dmataken = 0;

	if (uap->dmarx.poll_rate) {
		/* The data can be taken by polling */
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		/* Recalculate the pending size */
		if (pending >= dmataken)
			pending -= dmataken;
	}

	/* Pick up the remaining data from the DMA */
	if (pending) {
		/*
		 * First take all chars in the DMA pipe, then look in the FIFO.
		 * Note that tty_insert_flip_string() tries to take as many
		 * chars as it can.
		 */
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				pending);

		uap->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_warn(uap->port.dev,
				 "couldn't insert all characters (TTY is full?)\n");
	}

	/* Reset the last_residue for Rx DMA poll */
	if (uap->dmarx.poll_rate)
		dmarx->last_residue = sgbuf->sg.length;

	/*
	 * Only continue with trying to read the FIFO if all DMA chars have
	 * been taken first.
	 */
	if (dma_count == pending && readfifo) {
		/* Clear any error flags */
		pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
			    UART011_FEIS, uap, REG_ICR);

		/*
		 * If we read all the DMA'd characters, and we had an
		 * incomplete buffer, that could be due to an rx error, or
		 * maybe we just timed out.  Read any pending chars and check
		 * the error status.
		 *
		 * Error conditions will only occur in the FIFO, these will
		 * trigger an immediate interrupt and stop the DMA job, so we
		 * will always find the error in the FIFO, never in the DMA
		 * buffer.
		 */
		fifotaken = pl011_fifo_to_tty(uap);
	}

	spin_unlock(&uap->port.lock);
	dev_vdbg(uap->port.dev,
		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
		 dma_count, fifotaken);
	tty_flip_buffer_push(port);
	spin_lock(&uap->port.lock);
}
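
/*
 * Receive-timeout interrupt while an RX DMA job is running: pause the
 * DMA so its residue can be trusted, drain what has arrived so far,
 * then flip buffers and restart the job.
 */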
static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	enum dma_status dmastat;

	/*
	 * Pause the transfer so we can trust the current counter,
	 * do this before we pause the PL011 block, else we may
	 * overflow the FIFO.
	 */
	if (dmaengine_pause(rxchan))
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
	dmastat = rxchan->device->device_tx_status(rxchan,
						   dmarx->cookie, &state);
	if (dmastat != DMA_PAUSED)
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");

	/* Disable RX DMA - incoming data will wait in the FIFO */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = false;

	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	/*
	 * This will take the chars we have so far and insert
	 * into the framework.
	 */
	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);

	/* Switch buffer & re-trigger DMA job */
	dmarx->use_buf_b = !dmarx->use_buf_b;
	if (pl011_dma_rx_trigger_dma(uap)) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}

static void pl011_dma_rx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	bool lastbuf = dmarx->use_buf_b;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	int ret;

	/*
	 * This completion interrupt occurs typically when the
	 * RX buffer is totally stuffed but no timeout has yet
	 * occurred.  When that happens, we just want the RX
	 * routine to flush out the secondary DMA buffer while
	 * we immediately trigger the next DMA job.
	 */
	spin_lock_irq(&uap->port.lock);
	/*
	 * Rx data can be taken by the UART interrupts during
	 * the DMA irq handler.  So we check the residue here.
	 */
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	uap->dmarx.running = false;
	dmarx->use_buf_b = !lastbuf;
	ret = pl011_dma_rx_trigger_dma(uap);

	pl011_dma_rx_chars(uap, pending, lastbuf, false);
	spin_unlock_irq(&uap->port.lock);
	/*
	 * Do this check after we picked the DMA chars so we don't
	 * get some IRQ immediately from RX.
	 */
	if (ret) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}

/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
	/* FIXME.  Just disable the DMA enable */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
}

/*
 * Timer handler for Rx DMA polling.
 * On every poll it checks the residue in the DMA buffer and transfers
 * data to the TTY.  last_residue is also updated for the next poll.
 */
static void pl011_dma_rx_poll(unsigned long args)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)args;
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = uap->dmarx.chan;
	unsigned long flags = 0;
	unsigned int dmataken = 0;
	unsigned int size = 0;
	struct pl011_sgbuf *sgbuf;
	int dma_count;
	struct dma_tx_state state;

	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	if (likely(state.residue < dmarx->last_residue)) {
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		size = dmarx->last_residue - state.residue;
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				size);
		if (dma_count == size)
			dmarx->last_residue = state.residue;
		dmarx->last_jiffies = jiffies;
	}
	tty_flip_buffer_push(port);

	/*
	 * If no data is received in poll_timeout, the driver will fall back
	 * to interrupt mode.  We will retrigger DMA at the first interrupt.
	 */
	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
			> uap->dmarx.poll_timeout) {
		spin_lock_irqsave(&uap->port.lock, flags);
		pl011_dma_rx_stop(uap);
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		spin_unlock_irqrestore(&uap->port.lock, flags);

		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		del_timer(&uap->dmarx.timer);
	} else {
		mod_timer(&uap->dmarx.timer,
			jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
	}
}
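
/*
 * Allocate the DMA TX buffer and the two RX buffers and enable DMA on
 * the port; any allocation failure quietly degrades the port to PIO.
 */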
static void pl011_dma_startup(struct uart_amba_port *uap)
{
	int ret;

	if (!uap->dma_probed)
		pl011_dma_probe(uap);

	if (!uap->dmatx.chan)
		return;

	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
	if (!uap->dmatx.buf) {
		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
		uap->port.fifosize = uap->fifosize;
		return;
	}

	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);

	/* The DMA buffer is now the FIFO the TTY subsystem can use */
	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
	uap->using_tx_dma = true;

	if (!uap->dmarx.chan)
		goto skip_rx;

	/* Allocate and map DMA RX buffers */
	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer A", ret);
		goto skip_rx;
	}

	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer B", ret);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
				 DMA_FROM_DEVICE);
		goto skip_rx;
	}

	uap->using_rx_dma = true;

skip_rx:
	/* Turn on DMA error (RX/TX will be enabled on demand) */
	uap->dmacr |= UART011_DMAONERR;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * ST Micro variants have a specific DMA burst threshold
	 * compensation.  Set this to 16 bytes, so bursts will only
	 * be issued above/below 16 bytes.
	 */
	if (uap->vendor->dma_threshold)
		pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
			    uap, REG_ST_DMAWM);

	if (uap->using_rx_dma) {
		if (pl011_dma_rx_trigger_dma(uap))
			dev_dbg(uap->port.dev, "could not trigger initial "
				"RX DMA job, fall back to interrupt mode\n");
		if (uap->dmarx.poll_rate) {
			init_timer(&(uap->dmarx.timer));
			uap->dmarx.timer.function = pl011_dma_rx_poll;
			uap->dmarx.timer.data = (unsigned long)uap;
			mod_timer(&uap->dmarx.timer,
				jiffies +
				msecs_to_jiffies(uap->dmarx.poll_rate));
			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
			uap->dmarx.last_jiffies = jiffies;
		}
	}
}
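
/*
 * Wait for the current transmission to drain, disable DMA in the UART,
 * then release the descriptors and buffers used by both directions.
 */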
static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
	if (!(uap->using_tx_dma || uap->using_rx_dma))
		return;

	/* Disable RX and TX DMA */
	while (pl011_read(uap, REG_FR) & UART01x_FR_BUSY)
		cpu_relax();

	spin_lock_irq(&uap->port.lock);
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
	pl011_write(uap->dmacr, uap, REG_DMACR);
	spin_unlock_irq(&uap->port.lock);

	if (uap->using_tx_dma) {
		/* In theory, this should already be done by pl011_dma_flush_buffer */
		dmaengine_terminate_all(uap->dmatx.chan);
		if (uap->dmatx.queued) {
			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
				     DMA_TO_DEVICE);
			uap->dmatx.queued = false;
		}

		kfree(uap->dmatx.buf);
		uap->using_tx_dma = false;
	}

	if (uap->using_rx_dma) {
		dmaengine_terminate_all(uap->dmarx.chan);
		/* Clean up the RX DMA */
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
		if (uap->dmarx.poll_rate)
			del_timer_sync(&uap->dmarx.timer);
		uap->using_rx_dma = false;
	}
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return uap->using_rx_dma;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return uap->using_rx_dma && uap->dmarx.running;
}

#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_probe(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return false;
}

#define pl011_dma_flush_buffer	NULL
#endif

static void pl011_stop_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~UART011_TXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_dma_tx_stop(uap);
}

static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);

/* Start TX with programmed I/O only (no DMA) */
static void pl011_start_tx_pio(struct uart_amba_port *uap)
{
	uap->im |= UART011_TXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_tx_chars(uap, false);
}

static void pl011_start_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!pl011_dma_tx_start(uap))
		pl011_start_tx_pio(uap);
}

static void pl011_stop_rx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
	pl011_write(uap->im, uap, REG_IMSC);

	pl011_dma_rx_stop(uap);
}

static void pl011_enable_ms(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
	pl011_write(uap->im, uap, REG_IMSC);
}

static void pl011_rx_chars(struct uart_amba_port *uap)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	pl011_fifo_to_tty(uap);

	spin_unlock(&uap->port.lock);
	tty_flip_buffer_push(&uap->port.state->port);
	/*
	 * If we were temporarily out of DMA mode for a while,
	 * attempt to switch back to DMA mode again.
	 */
	if (pl011_dma_rx_available(uap)) {
		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev, "could not trigger RX DMA job "
				"fall back to interrupt mode again\n");
			uap->im |= UART011_RXIM;
			pl011_write(uap->im, uap, REG_IMSC);
		} else {
#ifdef CONFIG_DMA_ENGINE
			/* Start Rx DMA poll */
			if (uap->dmarx.poll_rate) {
				uap->dmarx.last_jiffies = jiffies;
				uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
				mod_timer(&uap->dmarx.timer,
					jiffies +
					msecs_to_jiffies(uap->dmarx.poll_rate));
			}
#endif
		}
	}
	spin_lock(&uap->port.lock);
}

static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
			  bool from_irq)
{
	if (unlikely(!from_irq) &&
	    pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		return false; /* unable to transmit character */

	pl011_write(c, uap, REG_DR);
	uap->port.icount.tx++;

	return true;
}

static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
{
	struct circ_buf *xmit = &uap->port.state->xmit;
	int count = uap->fifosize >> 1;

	if (uap->port.x_char) {
		if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
			return;
		uap->port.x_char = 0;
		--count;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);
		return;
	}

	/* If we are using DMA mode, try to send some characters. */
	if (pl011_dma_tx_irq(uap))
		return;

	do {
		if (likely(from_irq) && count-- == 0)
			break;

		if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq))
			break;

		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
	} while (!uart_circ_empty(xmit));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (uart_circ_empty(xmit))
		pl011_stop_tx(&uap->port);
}

static void pl011_modem_status(struct uart_amba_port *uap)
{
	unsigned int status, delta;

	status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	delta = status ^ uap->old_status;
	uap->old_status = status;

	if (!delta)
		return;

	if (delta & UART01x_FR_DCD)
		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & UART01x_FR_DSR)
		uap->port.icount.dsr++;

	if (delta & UART01x_FR_CTS)
		uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}

static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
{
	unsigned int dummy_read;

	if (!uap->vendor->cts_event_workaround)
		return;

	/* workaround to make sure that all bits are unlocked.. */
	pl011_write(0x00, uap, REG_ICR);

	/*
	 * Workaround: introduce a 26 ns (1 UART clk) delay before W1C;
	 * a single APB access incurs 2 pclk (133.12 MHz) cycles of delay,
	 * so add two dummy reads.
	 */
	dummy_read = pl011_read(uap, REG_ICR);
	dummy_read = pl011_read(uap, REG_ICR);
}
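
/*
 * Main interrupt handler.  Keeps servicing pending sources until none
 * remain or AMBA_ISR_PASS_LIMIT iterations have passed, which stops a
 * wedged UART from hogging the CPU.
 */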
  1240. static irqreturn_t pl011_int(int irq, void *dev_id)
  1241. {
  1242. struct uart_amba_port *uap = dev_id;
  1243. unsigned long flags;
  1244. unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
  1245. u16 imsc;
  1246. int handled = 0;
  1247. spin_lock_irqsave(&uap->port.lock, flags);
  1248. imsc = pl011_read(uap, REG_IMSC);
  1249. status = pl011_read(uap, REG_RIS) & imsc;
  1250. if (status) {
  1251. do {
  1252. check_apply_cts_event_workaround(uap);
  1253. pl011_write(status & ~(UART011_TXIS|UART011_RTIS|
  1254. UART011_RXIS),
  1255. uap, REG_ICR);
  1256. if (status & (UART011_RTIS|UART011_RXIS)) {
  1257. if (pl011_dma_rx_running(uap))
  1258. pl011_dma_rx_irq(uap);
  1259. else
  1260. pl011_rx_chars(uap);
  1261. }
  1262. if (status & (UART011_DSRMIS|UART011_DCDMIS|
  1263. UART011_CTSMIS|UART011_RIMIS))
  1264. pl011_modem_status(uap);
  1265. if (status & UART011_TXIS)
  1266. pl011_tx_chars(uap, true);
  1267. if (pass_counter-- == 0)
  1268. break;
  1269. status = pl011_read(uap, REG_RIS) & imsc;
  1270. } while (status != 0);
  1271. handled = 1;
  1272. }
  1273. spin_unlock_irqrestore(&uap->port.lock, flags);
  1274. return IRQ_RETVAL(handled);
  1275. }
  1276. static unsigned int pl011_tx_empty(struct uart_port *port)
  1277. {
  1278. struct uart_amba_port *uap =
  1279. container_of(port, struct uart_amba_port, port);
  1280. unsigned int status = pl011_read(uap, REG_FR);
  1281. return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
  1282. }
static unsigned int pl011_get_mctrl(struct uart_port *port)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);
        unsigned int result = 0;
        unsigned int status = pl011_read(uap, REG_FR);

#define TIOCMBIT(uartbit, tiocmbit)     \
        if (status & uartbit)           \
                result |= tiocmbit

        TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
        TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
        TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS);
        TIOCMBIT(UART011_FR_RI, TIOCM_RNG);
#undef TIOCMBIT
        return result;
}

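/*
 * Map the requested TIOCM bits onto control-register bits. When
 * auto-RTS is active, the RTS request additionally toggles the RTSEN
 * (hardware flow control) bit rather than only driving the line.
 */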
static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);
        unsigned int cr;

        cr = pl011_read(uap, REG_CR);

#define TIOCMBIT(tiocmbit, uartbit)     \
        if (mctrl & tiocmbit)           \
                cr |= uartbit;          \
        else                            \
                cr &= ~uartbit

        TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
        TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
        TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
        TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
        TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);

        if (uap->autorts) {
                /* We need to disable auto-RTS if we want to turn RTS off */
                TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
        }
#undef TIOCMBIT

        pl011_write(cr, uap, REG_CR);
}

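/*
 * Start or stop transmission of a break condition by setting or
 * clearing the BRK bit in the line-control register.
 */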
static void pl011_break_ctl(struct uart_port *port, int break_state)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);
        unsigned long flags;
        unsigned int lcr_h;

        spin_lock_irqsave(&uap->port.lock, flags);
        lcr_h = pl011_read(uap, REG_LCRH_TX);
        if (break_state == -1)
                lcr_h |= UART01x_LCRH_BRK;
        else
                lcr_h &= ~UART01x_LCRH_BRK;
        pl011_write(lcr_h, uap, REG_LCRH_TX);
        spin_unlock_irqrestore(&uap->port.lock, flags);
}

#ifdef CONFIG_CONSOLE_POLL

static void pl011_quiesce_irqs(struct uart_port *port)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);

        pl011_write(pl011_read(uap, REG_MIS), uap, REG_ICR);
        /*
         * There is no way to clear TXIM as this is the "ready to transmit
         * IRQ", so we simply mask it. start_tx() will unmask it.
         *
         * Note we can race with start_tx(), and if the race happens, the
         * polling user might get another interrupt just after we clear it.
         * But that should be OK and can happen even without the race, e.g.
         * if the controller immediately receives some new data and raises
         * the IRQ.
         *
         * And whoever uses the polling routines assumes that it manages the
         * device (including the tx queue), so we're also fine with
         * start_tx()'s caller side.
         */
        pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap,
                    REG_IMSC);
}

static int pl011_get_poll_char(struct uart_port *port)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);
        unsigned int status;

        /*
         * The caller might need IRQs lowered, e.g. if used with the KDB NMI
         * debugger.
         */
        pl011_quiesce_irqs(port);

        status = pl011_read(uap, REG_FR);
        if (status & UART01x_FR_RXFE)
                return NO_POLL_CHAR;

        return pl011_read(uap, REG_DR);
}

static void pl011_put_poll_char(struct uart_port *port,
                                unsigned char ch)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);

        while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
                cpu_relax();

        pl011_write(ch, uap, REG_DR);
}

#endif /* CONFIG_CONSOLE_POLL */

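/*
 * One-time hardware bring-up shared by startup() and the polling
 * console: select the default pin state, enable the clock, clear
 * pending error/RX interrupts and save the current interrupt mask.
 */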
static int pl011_hwinit(struct uart_port *port)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);
        int retval;

        /* Optionally enable pins to be muxed in and configured */
        pinctrl_pm_select_default_state(port->dev);

        /*
         * Try to enable the clock producer.
         */
        retval = clk_prepare_enable(uap->clk);
        if (retval)
                return retval;

        uap->port.uartclk = clk_get_rate(uap->clk);

        /* Clear pending error and receive interrupts */
        pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
                    UART011_FEIS | UART011_RTIS | UART011_RXIS,
                    uap, REG_ICR);

        /*
         * Save the interrupt enable mask, and enable RX interrupts in
         * case the interrupt is used for NMI entry.
         */
        uap->im = pl011_read(uap, REG_IMSC);
        pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC);

        if (dev_get_platdata(uap->port.dev)) {
                struct amba_pl011_data *plat;

                plat = dev_get_platdata(uap->port.dev);
                if (plat->init)
                        plat->init();
        }
        return 0;
}

static bool pl011_split_lcrh(const struct uart_amba_port *uap)
{
        return pl011_reg_to_offset(uap, REG_LCRH_RX) !=
               pl011_reg_to_offset(uap, REG_LCRH_TX);
}

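/*
 * On variants with a split line-control register (RX and TX at
 * different offsets), the same value has to be written to both
 * registers, with a short delay in between.
 */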
static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
{
        pl011_write(lcr_h, uap, REG_LCRH_RX);
        if (pl011_split_lcrh(uap)) {
                int i;
                /*
                 * Wait 10 PCLKs before writing the LCRH_TX register;
                 * to get this delay, write a read-only register 10 times.
                 */
                for (i = 0; i < 10; ++i)
                        pl011_write(0xff, uap, REG_MIS);
                pl011_write(lcr_h, uap, REG_LCRH_TX);
        }
}

static int pl011_allocate_irq(struct uart_amba_port *uap)
{
        pl011_write(uap->im, uap, REG_IMSC);

        return request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
}

/*
 * Enable interrupts: only the RX timeout interrupt when using DMA;
 * if the initial RX DMA job failed, start in interrupt mode as well.
 */
static void pl011_enable_interrupts(struct uart_amba_port *uap)
{
        spin_lock_irq(&uap->port.lock);

        /* Clear out any spuriously appearing RX interrupts */
        pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
        uap->im = UART011_RTIM;
        if (!pl011_dma_rx_running(uap))
                uap->im |= UART011_RXIM;
        pl011_write(uap->im, uap, REG_IMSC);
        spin_unlock_irq(&uap->port.lock);
}

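/*
 * Full port bring-up: hardware init, IRQ allocation, FIFO trigger
 * levels, restoring RTS/DTR, DMA start-up and finally unmasking the
 * RX interrupts.
 */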
static int pl011_startup(struct uart_port *port)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);
        unsigned int cr;
        int retval;

        retval = pl011_hwinit(port);
        if (retval)
                goto clk_dis;

        retval = pl011_allocate_irq(uap);
        if (retval)
                goto clk_dis;

        pl011_write(uap->vendor->ifls, uap, REG_IFLS);

        spin_lock_irq(&uap->port.lock);

        /* restore RTS and DTR */
        cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
        cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
        pl011_write(cr, uap, REG_CR);

        spin_unlock_irq(&uap->port.lock);

        /*
         * initialise the old status of the modem signals
         */
        uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

        /* Startup DMA */
        pl011_dma_startup(uap);

        pl011_enable_interrupts(uap);

        return 0;

 clk_dis:
        clk_disable_unprepare(uap->clk);
        return retval;
}

static int sbsa_uart_startup(struct uart_port *port)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);
        int retval;

        retval = pl011_hwinit(port);
        if (retval)
                return retval;

        retval = pl011_allocate_irq(uap);
        if (retval)
                return retval;

        /* The SBSA UART does not support any modem status lines. */
        uap->old_status = 0;

        pl011_enable_interrupts(uap);

        return 0;
}

static void pl011_shutdown_channel(struct uart_amba_port *uap,
                                   unsigned int lcrh)
{
        unsigned long val;

        val = pl011_read(uap, lcrh);
        val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
        pl011_write(val, uap, lcrh);
}

/*
 * Disable the port. This must not disable RTS and DTR;
 * their state must also be preserved so that it can be
 * restored during startup().
 */
static void pl011_disable_uart(struct uart_amba_port *uap)
{
        unsigned int cr;

        uap->autorts = false;
        spin_lock_irq(&uap->port.lock);
        cr = pl011_read(uap, REG_CR);
        uap->old_cr = cr;
        cr &= UART011_CR_RTS | UART011_CR_DTR;
        cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
        pl011_write(cr, uap, REG_CR);
        spin_unlock_irq(&uap->port.lock);

        /*
         * disable break condition and fifos
         */
        pl011_shutdown_channel(uap, REG_LCRH_RX);
        if (pl011_split_lcrh(uap))
                pl011_shutdown_channel(uap, REG_LCRH_TX);
}

static void pl011_disable_interrupts(struct uart_amba_port *uap)
{
        spin_lock_irq(&uap->port.lock);

        /* mask all interrupts and clear all pending ones */
        uap->im = 0;
        pl011_write(uap->im, uap, REG_IMSC);
        pl011_write(0xffff, uap, REG_ICR);

        spin_unlock_irq(&uap->port.lock);
}

static void pl011_shutdown(struct uart_port *port)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);

        pl011_disable_interrupts(uap);

        pl011_dma_shutdown(uap);

        free_irq(uap->port.irq, uap);

        pl011_disable_uart(uap);

        /*
         * Shut down the clock producer
         */
        clk_disable_unprepare(uap->clk);
        /* Optionally let pins go into sleep states */
        pinctrl_pm_select_sleep_state(port->dev);

        if (dev_get_platdata(uap->port.dev)) {
                struct amba_pl011_data *plat;

                plat = dev_get_platdata(uap->port.dev);
                if (plat->exit)
                        plat->exit();
        }

        if (uap->port.ops->flush_buffer)
                uap->port.ops->flush_buffer(port);
}

static void sbsa_uart_shutdown(struct uart_port *port)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);

        pl011_disable_interrupts(uap);

        free_irq(uap->port.irq, uap);

        if (uap->port.ops->flush_buffer)
                uap->port.ops->flush_buffer(port);
}

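/*
 * Derive the receive status masks from the termios settings: which
 * error bits the core should see (read_status_mask) and which received
 * characters/errors to drop entirely (ignore_status_mask).
 */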
static void
pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios)
{
        port->read_status_mask = UART011_DR_OE | 255;
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
        if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= UART011_DR_BE;

        /*
         * Characters to ignore
         */
        port->ignore_status_mask = 0;
        if (termios->c_iflag & IGNPAR)
                port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
        if (termios->c_iflag & IGNBRK) {
                port->ignore_status_mask |= UART011_DR_BE;
                /*
                 * If we're ignoring parity and break indicators,
                 * ignore overruns too (for real raw support).
                 */
                if (termios->c_iflag & IGNPAR)
                        port->ignore_status_mask |= UART011_DR_OE;
        }

        /*
         * Ignore all characters if CREAD is not set.
         */
        if ((termios->c_cflag & CREAD) == 0)
                port->ignore_status_mask |= UART_DUMMY_DR_RX;
}

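/*
 * Apply new line settings: pick the divisor (using 8x oversampling on
 * vendors that support it), build the LCRH word from the character
 * format, then reprogram the baud divisors, line control and control
 * registers under the port lock with the UART briefly disabled.
 */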
static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
                  struct ktermios *old)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);
        unsigned int lcr_h, old_cr;
        unsigned long flags;
        unsigned int baud, quot, clkdiv;

        if (uap->vendor->oversampling)
                clkdiv = 8;
        else
                clkdiv = 16;

        /*
         * Ask the core to calculate the divisor for us.
         */
        baud = uart_get_baud_rate(port, termios, old, 0,
                                  port->uartclk / clkdiv);
#ifdef CONFIG_DMA_ENGINE
        /*
         * Adjust the RX DMA polling rate with the baud rate if not specified.
         */
        if (uap->dmarx.auto_poll_rate)
                uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
#endif

        if (baud > port->uartclk / 16)
                quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
        else
                quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);

        switch (termios->c_cflag & CSIZE) {
        case CS5:
                lcr_h = UART01x_LCRH_WLEN_5;
                break;
        case CS6:
                lcr_h = UART01x_LCRH_WLEN_6;
                break;
        case CS7:
                lcr_h = UART01x_LCRH_WLEN_7;
                break;
        default: /* CS8 */
                lcr_h = UART01x_LCRH_WLEN_8;
                break;
        }
        if (termios->c_cflag & CSTOPB)
                lcr_h |= UART01x_LCRH_STP2;
        if (termios->c_cflag & PARENB) {
                lcr_h |= UART01x_LCRH_PEN;
                if (!(termios->c_cflag & PARODD))
                        lcr_h |= UART01x_LCRH_EPS;
                if (termios->c_cflag & CMSPAR)
                        lcr_h |= UART011_LCRH_SPS;
        }
        if (uap->fifosize > 1)
                lcr_h |= UART01x_LCRH_FEN;

        spin_lock_irqsave(&port->lock, flags);

        /*
         * Update the per-port timeout.
         */
        uart_update_timeout(port, termios->c_cflag, baud);

        pl011_setup_status_masks(port, termios);

        if (UART_ENABLE_MS(port, termios->c_cflag))
                pl011_enable_ms(port);

        /* first, disable everything */
        old_cr = pl011_read(uap, REG_CR);
        pl011_write(0, uap, REG_CR);

        if (termios->c_cflag & CRTSCTS) {
                if (old_cr & UART011_CR_RTS)
                        old_cr |= UART011_CR_RTSEN;

                old_cr |= UART011_CR_CTSEN;
                uap->autorts = true;
        } else {
                old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
                uap->autorts = false;
        }

        if (uap->vendor->oversampling) {
                if (baud > port->uartclk / 16)
                        old_cr |= ST_UART011_CR_OVSFACT;
                else
                        old_cr &= ~ST_UART011_CR_OVSFACT;
        }

        /*
         * Workaround for the ST Micro oversampling variants to
         * increase the bitrate slightly, by lowering the divisor,
         * to avoid delayed sampling of the start bit at high speeds,
         * else we see data corruption.
         */
        if (uap->vendor->oversampling) {
                if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
                        quot -= 1;
                else if ((baud > 3250000) && (quot > 2))
                        quot -= 2;
        }
        /* Set baud rate */
        pl011_write(quot & 0x3f, uap, REG_FBRD);
        pl011_write(quot >> 6, uap, REG_IBRD);

        /*
         * ----------v----------v----------v----------v-----
         * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER
         * REG_FBRD & REG_IBRD.
         * ----------^----------^----------^----------^-----
         */
        pl011_write_lcr_h(uap, lcr_h);
        pl011_write(old_cr, uap, REG_CR);

        spin_unlock_irqrestore(&port->lock, flags);
}

static void
sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
                      struct ktermios *old)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);
        unsigned long flags;

        tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud);

        /* The SBSA UART only supports 8n1 without hardware flow control. */
        termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
        termios->c_cflag &= ~(CMSPAR | CRTSCTS);
        termios->c_cflag |= CS8 | CLOCAL;

        spin_lock_irqsave(&port->lock, flags);
        uart_update_timeout(port, CS8, uap->fixed_baud);
        pl011_setup_status_masks(port, termios);
        spin_unlock_irqrestore(&port->lock, flags);
}

static const char *pl011_type(struct uart_port *port)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);
        return uap->port.type == PORT_AMBA ? uap->type : NULL;
}

/*
 * Release the memory region(s) being used by 'port'
 */
static void pl011_release_port(struct uart_port *port)
{
        release_mem_region(port->mapbase, SZ_4K);
}

/*
 * Request the memory region(s) being used by 'port'
 */
static int pl011_request_port(struct uart_port *port)
{
        return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
                        != NULL ? 0 : -EBUSY;
}

/*
 * Configure/autoconfigure the port.
 */
static void pl011_config_port(struct uart_port *port, int flags)
{
        if (flags & UART_CONFIG_TYPE) {
                port->type = PORT_AMBA;
                pl011_request_port(port);
        }
}

/*
 * Verify the new serial_struct (for TIOCSSERIAL).
 */
static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
{
        int ret = 0;

        if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
                ret = -EINVAL;
        if (ser->irq < 0 || ser->irq >= nr_irqs)
                ret = -EINVAL;
        if (ser->baud_base < 9600)
                ret = -EINVAL;
        return ret;
}

static struct uart_ops amba_pl011_pops = {
        .tx_empty       = pl011_tx_empty,
        .set_mctrl      = pl011_set_mctrl,
        .get_mctrl      = pl011_get_mctrl,
        .stop_tx        = pl011_stop_tx,
        .start_tx       = pl011_start_tx,
        .stop_rx        = pl011_stop_rx,
        .enable_ms      = pl011_enable_ms,
        .break_ctl      = pl011_break_ctl,
        .startup        = pl011_startup,
        .shutdown       = pl011_shutdown,
        .flush_buffer   = pl011_dma_flush_buffer,
        .set_termios    = pl011_set_termios,
        .type           = pl011_type,
        .release_port   = pl011_release_port,
        .request_port   = pl011_request_port,
        .config_port    = pl011_config_port,
        .verify_port    = pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
        .poll_init      = pl011_hwinit,
        .poll_get_char  = pl011_get_poll_char,
        .poll_put_char  = pl011_put_poll_char,
#endif
};

static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}

static unsigned int sbsa_uart_get_mctrl(struct uart_port *port)
{
        return 0;
}

static const struct uart_ops sbsa_uart_pops = {
        .tx_empty       = pl011_tx_empty,
        .set_mctrl      = sbsa_uart_set_mctrl,
        .get_mctrl      = sbsa_uart_get_mctrl,
        .stop_tx        = pl011_stop_tx,
        .start_tx       = pl011_start_tx,
        .stop_rx        = pl011_stop_rx,
        .startup        = sbsa_uart_startup,
        .shutdown       = sbsa_uart_shutdown,
        .set_termios    = sbsa_uart_set_termios,
        .type           = pl011_type,
        .release_port   = pl011_release_port,
        .request_port   = pl011_request_port,
        .config_port    = pl011_config_port,
        .verify_port    = pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
        .poll_init      = pl011_hwinit,
        .poll_get_char  = pl011_get_poll_char,
        .poll_put_char  = pl011_put_poll_char,
#endif
};

static struct uart_amba_port *amba_ports[UART_NR];

#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE

static void pl011_console_putchar(struct uart_port *port, int ch)
{
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);

        while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
                cpu_relax();
        pl011_write(ch, uap, REG_DR);
}

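/*
 * Console write: take the port lock unless we are in a sysrq or oops
 * path, force the transmitter on for the duration of the write, then
 * busy-wait for the FIFO to drain and restore the original control
 * register.
 */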
static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
        struct uart_amba_port *uap = amba_ports[co->index];
        unsigned int old_cr = 0, new_cr;
        unsigned long flags;
        int locked = 1;

        clk_enable(uap->clk);

        local_irq_save(flags);
        if (uap->port.sysrq)
                locked = 0;
        else if (oops_in_progress)
                locked = spin_trylock(&uap->port.lock);
        else
                spin_lock(&uap->port.lock);

        /*
         * First save the CR then disable the interrupts
         */
        if (!uap->vendor->always_enabled) {
                old_cr = pl011_read(uap, REG_CR);
                new_cr = old_cr & ~UART011_CR_CTSEN;
                new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
                pl011_write(new_cr, uap, REG_CR);
        }

        uart_console_write(&uap->port, s, count, pl011_console_putchar);

        /*
         * Finally, wait for the transmitter to become empty
         * and restore the CR
         */
        while (pl011_read(uap, REG_FR) & UART01x_FR_BUSY)
                cpu_relax();
        if (!uap->vendor->always_enabled)
                pl011_write(old_cr, uap, REG_CR);

        if (locked)
                spin_unlock(&uap->port.lock);
        local_irq_restore(flags);

        clk_disable(uap->clk);
}

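/*
 * If the UART is already enabled (e.g. by the boot loader), recover
 * the baud rate, parity and word length from the current register
 * state so the console can continue with the same settings.
 */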
static void __init
pl011_console_get_options(struct uart_amba_port *uap, int *baud,
                          int *parity, int *bits)
{
        if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
                unsigned int lcr_h, ibrd, fbrd;

                lcr_h = pl011_read(uap, REG_LCRH_TX);

                *parity = 'n';
                if (lcr_h & UART01x_LCRH_PEN) {
                        if (lcr_h & UART01x_LCRH_EPS)
                                *parity = 'e';
                        else
                                *parity = 'o';
                }

                if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
                        *bits = 7;
                else
                        *bits = 8;

                ibrd = pl011_read(uap, REG_IBRD);
                fbrd = pl011_read(uap, REG_FBRD);

                *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);

                if (uap->vendor->oversampling) {
                        if (pl011_read(uap, REG_CR)
                                  & ST_UART011_CR_OVSFACT)
                                *baud *= 2;
                }
        }
}

static int __init pl011_console_setup(struct console *co, char *options)
{
        struct uart_amba_port *uap;
        int baud = 38400;
        int bits = 8;
        int parity = 'n';
        int flow = 'n';
        int ret;

        /*
         * Check whether an invalid uart number has been specified, and
         * if so, search for the first available port that does have
         * console support.
         */
        if (co->index >= UART_NR)
                co->index = 0;
        uap = amba_ports[co->index];
        if (!uap)
                return -ENODEV;

        /* Allow pins to be muxed in and configured */
        pinctrl_pm_select_default_state(uap->port.dev);

        ret = clk_prepare(uap->clk);
        if (ret)
                return ret;

        if (dev_get_platdata(uap->port.dev)) {
                struct amba_pl011_data *plat;

                plat = dev_get_platdata(uap->port.dev);
                if (plat->init)
                        plat->init();
        }

        uap->port.uartclk = clk_get_rate(uap->clk);

        if (uap->vendor->fixed_options) {
                baud = uap->fixed_baud;
        } else {
                if (options)
                        uart_parse_options(options,
                                           &baud, &parity, &bits, &flow);
                else
                        pl011_console_get_options(uap, &baud, &parity, &bits);
        }

        return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}

static struct uart_driver amba_reg;
static struct console amba_console = {
        .name           = "ttyAMA",
        .write          = pl011_console_write,
        .device         = uart_console_device,
        .setup          = pl011_console_setup,
        .flags          = CON_PRINTBUFFER,
        .index          = -1,
        .data           = &amba_reg,
};

#define AMBA_CONSOLE    (&amba_console)

static void pl011_putc(struct uart_port *port, int c)
{
        while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
                cpu_relax();
        if (port->iotype == UPIO_MEM32)
                writel(c, port->membase + UART01x_DR);
        else
                writeb(c, port->membase + UART01x_DR);
        while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
                cpu_relax();
}

static void pl011_early_write(struct console *con, const char *s, unsigned n)
{
        struct earlycon_device *dev = con->data;

        uart_console_write(&dev->port, s, n, pl011_putc);
}

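/*
 * Early console hook: once registered below via OF_EARLYCON_DECLARE,
 * it is selected through the device tree "stdout-path" property or
 * explicitly on the kernel command line, e.g. "earlycon=pl011,<mmio-addr>".
 */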
static int __init pl011_early_console_setup(struct earlycon_device *device,
                                            const char *opt)
{
        if (!device->port.membase)
                return -ENODEV;

        device->con->write = pl011_early_write;

        return 0;
}
OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);

#else
#define AMBA_CONSOLE    NULL
#endif

static struct uart_driver amba_reg = {
        .owner          = THIS_MODULE,
        .driver_name    = "ttyAMA",
        .dev_name       = "ttyAMA",
        .major          = SERIAL_AMBA_MAJOR,
        .minor          = SERIAL_AMBA_MINOR,
        .nr             = UART_NR,
        .cons           = AMBA_CONSOLE,
};

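/*
 * Prefer the port number from a "serial<n>" alias in the device tree;
 * fall back to the probe-order index, and warn when aliased and
 * non-aliased devices are mixed, since enumeration then becomes
 * unpredictable.
 */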
static int pl011_probe_dt_alias(int index, struct device *dev)
{
        struct device_node *np;
        static bool seen_dev_with_alias = false;
        static bool seen_dev_without_alias = false;
        int ret = index;

        if (!IS_ENABLED(CONFIG_OF))
                return ret;

        np = dev->of_node;
        if (!np)
                return ret;

        ret = of_alias_get_id(np, "serial");
        if (IS_ERR_VALUE(ret)) {
                seen_dev_without_alias = true;
                ret = index;
        } else {
                seen_dev_with_alias = true;
                if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
                        dev_warn(dev, "requested serial port %d not available.\n", ret);
                        ret = index;
                }
        }

        if (seen_dev_with_alias && seen_dev_without_alias)
                dev_warn(dev,
                         "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");

        return ret;
}

/* Also unregisters the driver if no more ports are left */
static void pl011_unregister_port(struct uart_amba_port *uap)
{
        int i;
        bool busy = false;

        for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
                if (amba_ports[i] == uap)
                        amba_ports[i] = NULL;
                else if (amba_ports[i])
                        busy = true;
        }
        pl011_dma_remove(uap);
        if (!busy)
                uart_unregister_driver(&amba_reg);
}

static int pl011_find_free_port(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
                if (amba_ports[i] == NULL)
                        return i;

        return -EBUSY;
}

static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
                            struct resource *mmiobase, int index)
{
        void __iomem *base;

        base = devm_ioremap_resource(dev, mmiobase);
        if (IS_ERR(base))
                return PTR_ERR(base);

        index = pl011_probe_dt_alias(index, dev);

        uap->old_cr = 0;
        uap->port.dev = dev;
        uap->port.mapbase = mmiobase->start;
        uap->port.membase = base;
        uap->port.fifosize = uap->fifosize;
        uap->port.flags = UPF_BOOT_AUTOCONF;
        uap->port.line = index;

        amba_ports[index] = uap;

        return 0;
}

static int pl011_register_port(struct uart_amba_port *uap)
{
        int ret;

        /* Ensure interrupts from this UART are masked and cleared */
        pl011_write(0, uap, REG_IMSC);
        pl011_write(0xffff, uap, REG_ICR);

        if (!amba_reg.state) {
                ret = uart_register_driver(&amba_reg);
                if (ret < 0) {
                        dev_err(uap->port.dev,
                                "Failed to register AMBA-PL011 driver\n");
                        return ret;
                }
        }

        ret = uart_add_one_port(&amba_reg, &uap->port);
        if (ret)
                pl011_unregister_port(uap);

        return ret;
}

static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
        struct uart_amba_port *uap;
        struct vendor_data *vendor = id->data;
        int portnr, ret;

        portnr = pl011_find_free_port();
        if (portnr < 0)
                return portnr;

        uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
                           GFP_KERNEL);
        if (!uap)
                return -ENOMEM;

        uap->clk = devm_clk_get(&dev->dev, NULL);
        if (IS_ERR(uap->clk))
                return PTR_ERR(uap->clk);

        uap->reg_offset = vendor->reg_offset;
        uap->vendor = vendor;
        uap->fifosize = vendor->get_fifosize(dev);
        uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
        uap->port.irq = dev->irq[0];
        uap->port.ops = &amba_pl011_pops;

        snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));

        ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
        if (ret)
                return ret;

        amba_set_drvdata(dev, uap);

        return pl011_register_port(uap);
}

static int pl011_remove(struct amba_device *dev)
{
        struct uart_amba_port *uap = amba_get_drvdata(dev);

        uart_remove_one_port(&amba_reg, &uap->port);
        pl011_unregister_port(uap);
        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int pl011_suspend(struct device *dev)
{
        struct uart_amba_port *uap = dev_get_drvdata(dev);

        if (!uap)
                return -EINVAL;

        return uart_suspend_port(&amba_reg, &uap->port);
}

static int pl011_resume(struct device *dev)
{
        struct uart_amba_port *uap = dev_get_drvdata(dev);

        if (!uap)
                return -EINVAL;

        return uart_resume_port(&amba_reg, &uap->port);
}
#endif

static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);

static int sbsa_uart_probe(struct platform_device *pdev)
{
        struct uart_amba_port *uap;
        struct resource *r;
        int portnr, ret;
        int baudrate;

        /*
         * Check the mandatory baud rate parameter in the DT node early
         * so that we can easily exit with the error.
         */
        if (pdev->dev.of_node) {
                struct device_node *np = pdev->dev.of_node;

                ret = of_property_read_u32(np, "current-speed", &baudrate);
                if (ret)
                        return ret;
        } else {
                baudrate = 115200;
        }

        portnr = pl011_find_free_port();
        if (portnr < 0)
                return portnr;

        uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
                           GFP_KERNEL);
        if (!uap)
                return -ENOMEM;

        uap->reg_offset = vendor_sbsa.reg_offset;
        uap->vendor = &vendor_sbsa;
        uap->fifosize = 32;
        uap->port.iotype = vendor_sbsa.access_32b ? UPIO_MEM32 : UPIO_MEM;
        uap->port.irq = platform_get_irq(pdev, 0);
        uap->port.ops = &sbsa_uart_pops;
        uap->fixed_baud = baudrate;

        snprintf(uap->type, sizeof(uap->type), "SBSA");

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        ret = pl011_setup_port(&pdev->dev, uap, r, portnr);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, uap);

        return pl011_register_port(uap);
}

static int sbsa_uart_remove(struct platform_device *pdev)
{
        struct uart_amba_port *uap = platform_get_drvdata(pdev);

        uart_remove_one_port(&amba_reg, &uap->port);
        pl011_unregister_port(uap);
        return 0;
}

static const struct of_device_id sbsa_uart_of_match[] = {
        { .compatible = "arm,sbsa-uart", },
        {},
};
MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);

static const struct acpi_device_id sbsa_uart_acpi_match[] = {
        { "ARMH0011", 0 },
        {},
};
MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);

static struct platform_driver arm_sbsa_uart_platform_driver = {
        .probe          = sbsa_uart_probe,
        .remove         = sbsa_uart_remove,
        .driver = {
                .name   = "sbsa-uart",
                .of_match_table = of_match_ptr(sbsa_uart_of_match),
                .acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
        },
};

static struct amba_id pl011_ids[] = {
        {
                .id     = 0x00041011,
                .mask   = 0x000fffff,
                .data   = &vendor_arm,
        },
        {
                .id     = 0x00380802,
                .mask   = 0x00ffffff,
                .data   = &vendor_st,
        },
        { 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl011_ids);

static struct amba_driver pl011_driver = {
        .drv = {
                .name   = "uart-pl011",
                .pm     = &pl011_dev_pm_ops,
        },
        .id_table       = pl011_ids,
        .probe          = pl011_probe,
        .remove         = pl011_remove,
};

static int __init pl011_init(void)
{
        printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");

        if (platform_driver_register(&arm_sbsa_uart_platform_driver))
                pr_warn("could not register SBSA UART platform driver\n");
        return amba_driver_register(&pl011_driver);
}

static void __exit pl011_exit(void)
{
        platform_driver_unregister(&arm_sbsa_uart_platform_driver);
        amba_driver_unregister(&pl011_driver);
}

/*
 * While this can be a module, if built-in it's most likely the console,
 * so let's leave module_exit but move module_init to an earlier place.
 */
arch_initcall(pl011_init);
module_exit(pl011_exit);

MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");