/* octeon_device.c */
/**********************************************************************
* Author: Cavium, Inc.
*
* Contact: support@cavium.com
* Please include "LiquidIO" in the subject.
*
* Copyright (c) 2003-2015 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/crc32.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"
  45. /** Default configuration
  46. * for CN66XX OCTEON Models.
  47. */
  48. static struct octeon_config default_cn66xx_conf = {
  49. .card_type = LIO_210SV,
  50. .card_name = LIO_210SV_NAME,
  51. /** IQ attributes */
  52. .iq = {
  53. .max_iqs = CN6XXX_CFG_IO_QUEUES,
  54. .pending_list_size =
  55. (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
  56. .instr_type = OCTEON_64BYTE_INSTR,
  57. .db_min = CN6XXX_DB_MIN,
  58. .db_timeout = CN6XXX_DB_TIMEOUT,
  59. }
  60. ,
  61. /** OQ attributes */
  62. .oq = {
  63. .max_oqs = CN6XXX_CFG_IO_QUEUES,
  64. .info_ptr = OCTEON_OQ_INFOPTR_MODE,
  65. .refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
  66. .oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
  67. .oq_intr_time = CN6XXX_OQ_INTR_TIME,
  68. .pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
  69. }
  70. ,
  71. .num_nic_ports = DEFAULT_NUM_NIC_PORTS_66XX,
  72. .num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
  73. .num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
  74. .def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,
  75. /* For ethernet interface 0: Port cfg Attributes */
  76. .nic_if_cfg[0] = {
  77. /* Max Txqs: Half for each of the two ports :max_iq/2 */
  78. .max_txqs = MAX_TXQS_PER_INTF,
  79. /* Actual configured value. Range could be: 1...max_txqs */
  80. .num_txqs = DEF_TXQS_PER_INTF,
  81. /* Max Rxqs: Half for each of the two ports :max_oq/2 */
  82. .max_rxqs = MAX_RXQS_PER_INTF,
  83. /* Actual configured value. Range could be: 1...max_rxqs */
  84. .num_rxqs = DEF_RXQS_PER_INTF,
  85. /* Num of desc for rx rings */
  86. .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
  87. /* Num of desc for tx rings */
  88. .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
  89. /* SKB size, We need not change buf size even for Jumbo frames.
  90. * Octeon can send jumbo frames in 4 consecutive descriptors,
  91. */
  92. .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
  93. .base_queue = BASE_QUEUE_NOT_REQUESTED,
  94. .gmx_port_id = 0,
  95. },
  96. .nic_if_cfg[1] = {
  97. /* Max Txqs: Half for each of the two ports :max_iq/2 */
  98. .max_txqs = MAX_TXQS_PER_INTF,
  99. /* Actual configured value. Range could be: 1...max_txqs */
  100. .num_txqs = DEF_TXQS_PER_INTF,
  101. /* Max Rxqs: Half for each of the two ports :max_oq/2 */
  102. .max_rxqs = MAX_RXQS_PER_INTF,
  103. /* Actual configured value. Range could be: 1...max_rxqs */
  104. .num_rxqs = DEF_RXQS_PER_INTF,
  105. /* Num of desc for rx rings */
  106. .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
  107. /* Num of desc for tx rings */
  108. .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
  109. /* SKB size, We need not change buf size even for Jumbo frames.
  110. * Octeon can send jumbo frames in 4 consecutive descriptors,
  111. */
  112. .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
  113. .base_queue = BASE_QUEUE_NOT_REQUESTED,
  114. .gmx_port_id = 1,
  115. },
  116. /** Miscellaneous attributes */
  117. .misc = {
  118. /* Host driver link query interval */
  119. .oct_link_query_interval = 100,
  120. /* Octeon link query interval */
  121. .host_link_query_interval = 500,
  122. .enable_sli_oq_bp = 0,
  123. /* Control queue group */
  124. .ctrlq_grp = 1,
  125. }
  126. ,
  127. };
  128. /** Default configuration
  129. * for CN68XX OCTEON Model.
  130. */
  131. static struct octeon_config default_cn68xx_conf = {
  132. .card_type = LIO_410NV,
  133. .card_name = LIO_410NV_NAME,
  134. /** IQ attributes */
  135. .iq = {
  136. .max_iqs = CN6XXX_CFG_IO_QUEUES,
  137. .pending_list_size =
  138. (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
  139. .instr_type = OCTEON_64BYTE_INSTR,
  140. .db_min = CN6XXX_DB_MIN,
  141. .db_timeout = CN6XXX_DB_TIMEOUT,
  142. }
  143. ,
  144. /** OQ attributes */
  145. .oq = {
  146. .max_oqs = CN6XXX_CFG_IO_QUEUES,
  147. .info_ptr = OCTEON_OQ_INFOPTR_MODE,
  148. .refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
  149. .oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
  150. .oq_intr_time = CN6XXX_OQ_INTR_TIME,
  151. .pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
  152. }
  153. ,
  154. .num_nic_ports = DEFAULT_NUM_NIC_PORTS_68XX,
  155. .num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
  156. .num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
  157. .def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,
  158. .nic_if_cfg[0] = {
  159. /* Max Txqs: Half for each of the two ports :max_iq/2 */
  160. .max_txqs = MAX_TXQS_PER_INTF,
  161. /* Actual configured value. Range could be: 1...max_txqs */
  162. .num_txqs = DEF_TXQS_PER_INTF,
  163. /* Max Rxqs: Half for each of the two ports :max_oq/2 */
  164. .max_rxqs = MAX_RXQS_PER_INTF,
  165. /* Actual configured value. Range could be: 1...max_rxqs */
  166. .num_rxqs = DEF_RXQS_PER_INTF,
  167. /* Num of desc for rx rings */
  168. .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
  169. /* Num of desc for tx rings */
  170. .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
  171. /* SKB size, We need not change buf size even for Jumbo frames.
  172. * Octeon can send jumbo frames in 4 consecutive descriptors,
  173. */
  174. .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
  175. .base_queue = BASE_QUEUE_NOT_REQUESTED,
  176. .gmx_port_id = 0,
  177. },
  178. .nic_if_cfg[1] = {
  179. /* Max Txqs: Half for each of the two ports :max_iq/2 */
  180. .max_txqs = MAX_TXQS_PER_INTF,
  181. /* Actual configured value. Range could be: 1...max_txqs */
  182. .num_txqs = DEF_TXQS_PER_INTF,
  183. /* Max Rxqs: Half for each of the two ports :max_oq/2 */
  184. .max_rxqs = MAX_RXQS_PER_INTF,
  185. /* Actual configured value. Range could be: 1...max_rxqs */
  186. .num_rxqs = DEF_RXQS_PER_INTF,
  187. /* Num of desc for rx rings */
  188. .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
  189. /* Num of desc for tx rings */
  190. .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
  191. /* SKB size, We need not change buf size even for Jumbo frames.
  192. * Octeon can send jumbo frames in 4 consecutive descriptors,
  193. */
  194. .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
  195. .base_queue = BASE_QUEUE_NOT_REQUESTED,
  196. .gmx_port_id = 1,
  197. },
  198. .nic_if_cfg[2] = {
  199. /* Max Txqs: Half for each of the two ports :max_iq/2 */
  200. .max_txqs = MAX_TXQS_PER_INTF,
  201. /* Actual configured value. Range could be: 1...max_txqs */
  202. .num_txqs = DEF_TXQS_PER_INTF,
  203. /* Max Rxqs: Half for each of the two ports :max_oq/2 */
  204. .max_rxqs = MAX_RXQS_PER_INTF,
  205. /* Actual configured value. Range could be: 1...max_rxqs */
  206. .num_rxqs = DEF_RXQS_PER_INTF,
  207. /* Num of desc for rx rings */
  208. .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
  209. /* Num of desc for tx rings */
  210. .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
  211. /* SKB size, We need not change buf size even for Jumbo frames.
  212. * Octeon can send jumbo frames in 4 consecutive descriptors,
  213. */
  214. .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
  215. .base_queue = BASE_QUEUE_NOT_REQUESTED,
  216. .gmx_port_id = 2,
  217. },
  218. .nic_if_cfg[3] = {
  219. /* Max Txqs: Half for each of the two ports :max_iq/2 */
  220. .max_txqs = MAX_TXQS_PER_INTF,
  221. /* Actual configured value. Range could be: 1...max_txqs */
  222. .num_txqs = DEF_TXQS_PER_INTF,
  223. /* Max Rxqs: Half for each of the two ports :max_oq/2 */
  224. .max_rxqs = MAX_RXQS_PER_INTF,
  225. /* Actual configured value. Range could be: 1...max_rxqs */
  226. .num_rxqs = DEF_RXQS_PER_INTF,
  227. /* Num of desc for rx rings */
  228. .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
  229. /* Num of desc for tx rings */
  230. .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
  231. /* SKB size, We need not change buf size even for Jumbo frames.
  232. * Octeon can send jumbo frames in 4 consecutive descriptors,
  233. */
  234. .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
  235. .base_queue = BASE_QUEUE_NOT_REQUESTED,
  236. .gmx_port_id = 3,
  237. },
  238. /** Miscellaneous attributes */
  239. .misc = {
  240. /* Host driver link query interval */
  241. .oct_link_query_interval = 100,
  242. /* Octeon link query interval */
  243. .host_link_query_interval = 500,
  244. .enable_sli_oq_bp = 0,
  245. /* Control queue group */
  246. .ctrlq_grp = 1,
  247. }
  248. ,
  249. };
  250. /** Default configuration
  251. * for CN68XX OCTEON Model.
  252. */
  253. static struct octeon_config default_cn68xx_210nv_conf = {
  254. .card_type = LIO_210NV,
  255. .card_name = LIO_210NV_NAME,
  256. /** IQ attributes */
  257. .iq = {
  258. .max_iqs = CN6XXX_CFG_IO_QUEUES,
  259. .pending_list_size =
  260. (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
  261. .instr_type = OCTEON_64BYTE_INSTR,
  262. .db_min = CN6XXX_DB_MIN,
  263. .db_timeout = CN6XXX_DB_TIMEOUT,
  264. }
  265. ,
  266. /** OQ attributes */
  267. .oq = {
  268. .max_oqs = CN6XXX_CFG_IO_QUEUES,
  269. .info_ptr = OCTEON_OQ_INFOPTR_MODE,
  270. .refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
  271. .oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
  272. .oq_intr_time = CN6XXX_OQ_INTR_TIME,
  273. .pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
  274. }
  275. ,
  276. .num_nic_ports = DEFAULT_NUM_NIC_PORTS_68XX_210NV,
  277. .num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
  278. .num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
  279. .def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,
  280. .nic_if_cfg[0] = {
  281. /* Max Txqs: Half for each of the two ports :max_iq/2 */
  282. .max_txqs = MAX_TXQS_PER_INTF,
  283. /* Actual configured value. Range could be: 1...max_txqs */
  284. .num_txqs = DEF_TXQS_PER_INTF,
  285. /* Max Rxqs: Half for each of the two ports :max_oq/2 */
  286. .max_rxqs = MAX_RXQS_PER_INTF,
  287. /* Actual configured value. Range could be: 1...max_rxqs */
  288. .num_rxqs = DEF_RXQS_PER_INTF,
  289. /* Num of desc for rx rings */
  290. .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
  291. /* Num of desc for tx rings */
  292. .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
  293. /* SKB size, We need not change buf size even for Jumbo frames.
  294. * Octeon can send jumbo frames in 4 consecutive descriptors,
  295. */
  296. .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
  297. .base_queue = BASE_QUEUE_NOT_REQUESTED,
  298. .gmx_port_id = 0,
  299. },
  300. .nic_if_cfg[1] = {
  301. /* Max Txqs: Half for each of the two ports :max_iq/2 */
  302. .max_txqs = MAX_TXQS_PER_INTF,
  303. /* Actual configured value. Range could be: 1...max_txqs */
  304. .num_txqs = DEF_TXQS_PER_INTF,
  305. /* Max Rxqs: Half for each of the two ports :max_oq/2 */
  306. .max_rxqs = MAX_RXQS_PER_INTF,
  307. /* Actual configured value. Range could be: 1...max_rxqs */
  308. .num_rxqs = DEF_RXQS_PER_INTF,
  309. /* Num of desc for rx rings */
  310. .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
  311. /* Num of desc for tx rings */
  312. .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
  313. /* SKB size, We need not change buf size even for Jumbo frames.
  314. * Octeon can send jumbo frames in 4 consecutive descriptors,
  315. */
  316. .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
  317. .base_queue = BASE_QUEUE_NOT_REQUESTED,
  318. .gmx_port_id = 1,
  319. },
  320. /** Miscellaneous attributes */
  321. .misc = {
  322. /* Host driver link query interval */
  323. .oct_link_query_interval = 100,
  324. /* Octeon link query interval */
  325. .host_link_query_interval = 500,
  326. .enable_sli_oq_bp = 0,
  327. /* Control queue group */
  328. .ctrlq_grp = 1,
  329. }
  330. ,
  331. };
  332. enum {
  333. OCTEON_CONFIG_TYPE_DEFAULT = 0,
  334. NUM_OCTEON_CONFS,
  335. };
  336. static struct octeon_config_ptr {
  337. u32 conf_type;
  338. } oct_conf_info[MAX_OCTEON_DEVICES] = {
  339. {
  340. OCTEON_CONFIG_TYPE_DEFAULT,
  341. }, {
  342. OCTEON_CONFIG_TYPE_DEFAULT,
  343. }, {
  344. OCTEON_CONFIG_TYPE_DEFAULT,
  345. }, {
  346. OCTEON_CONFIG_TYPE_DEFAULT,
  347. },
  348. };
  349. static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = {
  350. "BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
  351. "IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE",
  352. "DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
  353. "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
  354. "INVALID"
  355. };
  356. static char oct_dev_app_str[CVM_DRV_APP_COUNT + 1][32] = {
  357. "BASE", "NIC", "UNKNOWN"};
  358. static struct octeon_device *octeon_device[MAX_OCTEON_DEVICES];
  359. static u32 octeon_device_count;
  360. static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES];
  361. static void oct_set_config_info(int oct_id, int conf_type)
  362. {
  363. if (conf_type < 0 || conf_type > (NUM_OCTEON_CONFS - 1))
  364. conf_type = OCTEON_CONFIG_TYPE_DEFAULT;
  365. oct_conf_info[oct_id].conf_type = conf_type;
  366. }
  367. void octeon_init_device_list(int conf_type)
  368. {
  369. int i;
  370. memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES));
  371. for (i = 0; i < MAX_OCTEON_DEVICES; i++)
  372. oct_set_config_info(i, conf_type);
  373. }
  374. static void *__retrieve_octeon_config_info(struct octeon_device *oct,
  375. u16 card_type)
  376. {
  377. u32 oct_id = oct->octeon_id;
  378. void *ret = NULL;
  379. switch (oct_conf_info[oct_id].conf_type) {
  380. case OCTEON_CONFIG_TYPE_DEFAULT:
  381. if (oct->chip_id == OCTEON_CN66XX) {
  382. ret = (void *)&default_cn66xx_conf;
  383. } else if ((oct->chip_id == OCTEON_CN68XX) &&
  384. (card_type == LIO_210NV)) {
  385. ret = (void *)&default_cn68xx_210nv_conf;
  386. } else if ((oct->chip_id == OCTEON_CN68XX) &&
  387. (card_type == LIO_410NV)) {
  388. ret = (void *)&default_cn68xx_conf;
  389. }
  390. break;
  391. default:
  392. break;
  393. }
  394. return ret;
  395. }
  396. static int __verify_octeon_config_info(struct octeon_device *oct, void *conf)
  397. {
  398. switch (oct->chip_id) {
  399. case OCTEON_CN66XX:
  400. case OCTEON_CN68XX:
  401. return lio_validate_cn6xxx_config_info(oct, conf);
  402. default:
  403. break;
  404. }
  405. return 1;
  406. }
  407. void *oct_get_config_info(struct octeon_device *oct, u16 card_type)
  408. {
  409. void *conf = NULL;
  410. conf = __retrieve_octeon_config_info(oct, card_type);
  411. if (!conf)
  412. return NULL;
  413. if (__verify_octeon_config_info(oct, conf)) {
  414. dev_err(&oct->pci_dev->dev, "Configuration verification failed\n");
  415. return NULL;
  416. }
  417. return conf;
  418. }
  419. char *lio_get_state_string(atomic_t *state_ptr)
  420. {
  421. s32 istate = (s32)atomic_read(state_ptr);
  422. if (istate > OCT_DEV_STATES || istate < 0)
  423. return oct_dev_state_str[OCT_DEV_STATE_INVALID];
  424. return oct_dev_state_str[istate];
  425. }
  426. static char *get_oct_app_string(u32 app_mode)
  427. {
  428. if (app_mode <= CVM_DRV_APP_END)
  429. return oct_dev_app_str[app_mode - CVM_DRV_APP_START];
  430. return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START];
  431. }
  432. int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
  433. size_t size)
  434. {
  435. int ret = 0;
  436. u8 *p;
  437. u8 *buffer;
  438. u32 crc32_result;
  439. u64 load_addr;
  440. u32 image_len;
  441. struct octeon_firmware_file_header *h;
  442. u32 i;
  443. if (size < sizeof(struct octeon_firmware_file_header)) {
  444. dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n",
  445. (u32)size,
  446. (u32)sizeof(struct octeon_firmware_file_header));
  447. return -EINVAL;
  448. }
  449. h = (struct octeon_firmware_file_header *)data;
  450. if (be32_to_cpu(h->magic) != LIO_NIC_MAGIC) {
  451. dev_err(&oct->pci_dev->dev, "Unrecognized firmware file.\n");
  452. return -EINVAL;
  453. }
  454. crc32_result =
  455. crc32(~0, data,
  456. sizeof(struct octeon_firmware_file_header) -
  457. sizeof(u32)) ^ ~0U;
  458. if (crc32_result != be32_to_cpu(h->crc32)) {
  459. dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
  460. crc32_result, be32_to_cpu(h->crc32));
  461. return -EINVAL;
  462. }
  463. if (memcmp(LIQUIDIO_VERSION, h->version, strlen(LIQUIDIO_VERSION))) {
  464. dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s, got %s.\n",
  465. LIQUIDIO_VERSION, h->version);
  466. return -EINVAL;
  467. }
  468. if (be32_to_cpu(h->num_images) > LIO_MAX_IMAGES) {
  469. dev_err(&oct->pci_dev->dev, "Too many images in firmware file (%d).\n",
  470. be32_to_cpu(h->num_images));
  471. return -EINVAL;
  472. }
  473. dev_info(&oct->pci_dev->dev, "Firmware version: %s\n", h->version);
  474. snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
  475. h->version);
  476. buffer = kmemdup(data, size, GFP_KERNEL);
  477. if (!buffer)
  478. return -ENOMEM;
  479. p = buffer + sizeof(struct octeon_firmware_file_header);
  480. /* load all images */
  481. for (i = 0; i < be32_to_cpu(h->num_images); i++) {
  482. load_addr = be64_to_cpu(h->desc[i].addr);
  483. image_len = be32_to_cpu(h->desc[i].len);
  484. /* validate the image */
  485. crc32_result = crc32(~0, p, image_len) ^ ~0U;
  486. if (crc32_result != be32_to_cpu(h->desc[i].crc32)) {
  487. dev_err(&oct->pci_dev->dev,
  488. "Firmware CRC mismatch in image %d (0x%08x != 0x%08x).\n",
  489. i, crc32_result,
  490. be32_to_cpu(h->desc[i].crc32));
  491. ret = -EINVAL;
  492. goto done_downloading;
  493. }
  494. /* download the image */
  495. octeon_pci_write_core_mem(oct, load_addr, p, image_len);
  496. p += image_len;
  497. dev_dbg(&oct->pci_dev->dev,
  498. "Downloaded image %d (%d bytes) to address 0x%016llx\n",
  499. i, image_len, load_addr);
  500. }
  501. /* Invoke the bootcmd */
  502. ret = octeon_console_send_cmd(oct, h->bootcmd, 50);
  503. done_downloading:
  504. kfree(buffer);
  505. return ret;
  506. }
  507. void octeon_free_device_mem(struct octeon_device *oct)
  508. {
  509. u32 i;
  510. for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
  511. /* could check mask as well */
  512. vfree(oct->droq[i]);
  513. }
  514. for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
  515. /* could check mask as well */
  516. vfree(oct->instr_queue[i]);
  517. }
  518. i = oct->octeon_id;
  519. vfree(oct);
  520. octeon_device[i] = NULL;
  521. octeon_device_count--;
  522. }
  523. static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
  524. u32 priv_size)
  525. {
  526. struct octeon_device *oct;
  527. u8 *buf = NULL;
  528. u32 octdevsize = 0, configsize = 0, size;
  529. switch (pci_id) {
  530. case OCTEON_CN68XX:
  531. case OCTEON_CN66XX:
  532. configsize = sizeof(struct octeon_cn6xxx);
  533. break;
  534. default:
  535. pr_err("%s: Unknown PCI Device: 0x%x\n",
  536. __func__,
  537. pci_id);
  538. return NULL;
  539. }
  540. if (configsize & 0x7)
  541. configsize += (8 - (configsize & 0x7));
  542. octdevsize = sizeof(struct octeon_device);
  543. if (octdevsize & 0x7)
  544. octdevsize += (8 - (octdevsize & 0x7));
  545. if (priv_size & 0x7)
  546. priv_size += (8 - (priv_size & 0x7));
  547. size = octdevsize + priv_size + configsize +
  548. (sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE);
  549. buf = vmalloc(size);
  550. if (!buf)
  551. return NULL;
  552. memset(buf, 0, size);
  553. oct = (struct octeon_device *)buf;
  554. oct->priv = (void *)(buf + octdevsize);
  555. oct->chip = (void *)(buf + octdevsize + priv_size);
  556. oct->dispatch.dlist = (struct octeon_dispatch *)
  557. (buf + octdevsize + priv_size + configsize);
  558. return oct;
  559. }
  560. struct octeon_device *octeon_allocate_device(u32 pci_id,
  561. u32 priv_size)
  562. {
  563. u32 oct_idx = 0;
  564. struct octeon_device *oct = NULL;
  565. for (oct_idx = 0; oct_idx < MAX_OCTEON_DEVICES; oct_idx++)
  566. if (!octeon_device[oct_idx])
  567. break;
  568. if (oct_idx == MAX_OCTEON_DEVICES)
  569. return NULL;
  570. oct = octeon_allocate_device_mem(pci_id, priv_size);
  571. if (!oct)
  572. return NULL;
  573. spin_lock_init(&oct->pci_win_lock);
  574. spin_lock_init(&oct->mem_access_lock);
  575. octeon_device_count++;
  576. octeon_device[oct_idx] = oct;
  577. oct->octeon_id = oct_idx;
  578. snprintf((oct->device_name), sizeof(oct->device_name),
  579. "LiquidIO%d", (oct->octeon_id));
  580. return oct;
  581. }
  582. /* this function is only for setting up the first queue */
  583. int octeon_setup_instr_queues(struct octeon_device *oct)
  584. {
  585. u32 num_iqs = 0;
  586. u32 num_descs = 0;
  587. u32 iq_no = 0;
  588. union oct_txpciq txpciq;
  589. int numa_node = cpu_to_node(iq_no % num_online_cpus());
  590. num_iqs = 1;
  591. /* this causes queue 0 to be default queue */
  592. if (OCTEON_CN6XXX(oct))
  593. num_descs =
  594. CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
  595. oct->num_iqs = 0;
  596. oct->instr_queue[0] = vmalloc_node(sizeof(*oct->instr_queue[0]),
  597. numa_node);
  598. if (!oct->instr_queue[0])
  599. oct->instr_queue[0] =
  600. vmalloc(sizeof(struct octeon_instr_queue));
  601. if (!oct->instr_queue[0])
  602. return 1;
  603. memset(oct->instr_queue[0], 0, sizeof(struct octeon_instr_queue));
  604. oct->instr_queue[0]->q_index = 0;
  605. oct->instr_queue[0]->app_ctx = (void *)(size_t)0;
  606. oct->instr_queue[0]->ifidx = 0;
  607. txpciq.u64 = 0;
  608. txpciq.s.q_no = iq_no;
  609. txpciq.s.use_qpg = 0;
  610. txpciq.s.qpg = 0;
  611. if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
  612. /* prevent memory leak */
  613. vfree(oct->instr_queue[0]);
  614. return 1;
  615. }
  616. oct->num_iqs++;
  617. return 0;
  618. }
  619. int octeon_setup_output_queues(struct octeon_device *oct)
  620. {
  621. u32 num_oqs = 0;
  622. u32 num_descs = 0;
  623. u32 desc_size = 0;
  624. u32 oq_no = 0;
  625. int numa_node = cpu_to_node(oq_no % num_online_cpus());
  626. num_oqs = 1;
  627. /* this causes queue 0 to be default queue */
  628. if (OCTEON_CN6XXX(oct)) {
  629. num_descs =
  630. CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
  631. desc_size =
  632. CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn6xxx, conf));
  633. }
  634. oct->num_oqs = 0;
  635. oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node);
  636. if (!oct->droq[0])
  637. oct->droq[0] = vmalloc(sizeof(*oct->droq[0]));
  638. if (!oct->droq[0])
  639. return 1;
  640. if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL))
  641. return 1;
  642. oct->num_oqs++;
  643. return 0;
  644. }
  645. void octeon_set_io_queues_off(struct octeon_device *oct)
  646. {
  647. /* Disable the i/p and o/p queues for this Octeon. */
  648. octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
  649. octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
  650. }
  651. void octeon_set_droq_pkt_op(struct octeon_device *oct,
  652. u32 q_no,
  653. u32 enable)
  654. {
  655. u32 reg_val = 0;
  656. /* Disable the i/p and o/p queues for this Octeon. */
  657. reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
  658. if (enable)
  659. reg_val = reg_val | (1 << q_no);
  660. else
  661. reg_val = reg_val & (~(1 << q_no));
  662. octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val);
  663. }
  664. int octeon_init_dispatch_list(struct octeon_device *oct)
  665. {
  666. u32 i;
  667. oct->dispatch.count = 0;
  668. for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
  669. oct->dispatch.dlist[i].opcode = 0;
  670. INIT_LIST_HEAD(&oct->dispatch.dlist[i].list);
  671. }
  672. for (i = 0; i <= REQTYPE_LAST; i++)
  673. octeon_register_reqtype_free_fn(oct, i, NULL);
  674. spin_lock_init(&oct->dispatch.lock);
  675. return 0;
  676. }
  677. void octeon_delete_dispatch_list(struct octeon_device *oct)
  678. {
  679. u32 i;
  680. struct list_head freelist, *temp, *tmp2;
  681. INIT_LIST_HEAD(&freelist);
  682. spin_lock_bh(&oct->dispatch.lock);
  683. for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
  684. struct list_head *dispatch;
  685. dispatch = &oct->dispatch.dlist[i].list;
  686. while (dispatch->next != dispatch) {
  687. temp = dispatch->next;
  688. list_del(temp);
  689. list_add_tail(temp, &freelist);
  690. }
  691. oct->dispatch.dlist[i].opcode = 0;
  692. }
  693. oct->dispatch.count = 0;
  694. spin_unlock_bh(&oct->dispatch.lock);
  695. list_for_each_safe(temp, tmp2, &freelist) {
  696. list_del(temp);
  697. vfree(temp);
  698. }
  699. }
  700. octeon_dispatch_fn_t
  701. octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
  702. u16 subcode)
  703. {
  704. u32 idx;
  705. struct list_head *dispatch;
  706. octeon_dispatch_fn_t fn = NULL;
  707. u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
  708. idx = combined_opcode & OCTEON_OPCODE_MASK;
  709. spin_lock_bh(&octeon_dev->dispatch.lock);
  710. if (octeon_dev->dispatch.count == 0) {
  711. spin_unlock_bh(&octeon_dev->dispatch.lock);
  712. return NULL;
  713. }
  714. if (!(octeon_dev->dispatch.dlist[idx].opcode)) {
  715. spin_unlock_bh(&octeon_dev->dispatch.lock);
  716. return NULL;
  717. }
  718. if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
  719. fn = octeon_dev->dispatch.dlist[idx].dispatch_fn;
  720. } else {
  721. list_for_each(dispatch,
  722. &octeon_dev->dispatch.dlist[idx].list) {
  723. if (((struct octeon_dispatch *)dispatch)->opcode ==
  724. combined_opcode) {
  725. fn = ((struct octeon_dispatch *)
  726. dispatch)->dispatch_fn;
  727. break;
  728. }
  729. }
  730. }
  731. spin_unlock_bh(&octeon_dev->dispatch.lock);
  732. return fn;
  733. }
/* octeon_register_dispatch_fn
 * Parameters:
 *   oct     - the octeon device.
 *   opcode  - opcode for which driver should call the registered function
 *   subcode - subcode for which driver should call the registered function
 *   fn      - The function to call when a packet with "opcode" arrives in
 *             octeon output queues.
 *   fn_arg  - The argument to be passed when calling function "fn".
 * Description:
 *   Registers a function and its argument to be called when a packet
 *   arrives in Octeon output queues with "opcode".
 * Returns:
 *   Success: 0
 *   Failure: 1
 * Locks:
 *   No locks are held.
 */
  751. int
  752. octeon_register_dispatch_fn(struct octeon_device *oct,
  753. u16 opcode,
  754. u16 subcode,
  755. octeon_dispatch_fn_t fn, void *fn_arg)
  756. {
  757. u32 idx;
  758. octeon_dispatch_fn_t pfn;
  759. u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
  760. idx = combined_opcode & OCTEON_OPCODE_MASK;
  761. spin_lock_bh(&oct->dispatch.lock);
  762. /* Add dispatch function to first level of lookup table */
  763. if (oct->dispatch.dlist[idx].opcode == 0) {
  764. oct->dispatch.dlist[idx].opcode = combined_opcode;
  765. oct->dispatch.dlist[idx].dispatch_fn = fn;
  766. oct->dispatch.dlist[idx].arg = fn_arg;
  767. oct->dispatch.count++;
  768. spin_unlock_bh(&oct->dispatch.lock);
  769. return 0;
  770. }
  771. spin_unlock_bh(&oct->dispatch.lock);
  772. /* Check if there was a function already registered for this
  773. * opcode/subcode.
  774. */
  775. pfn = octeon_get_dispatch(oct, opcode, subcode);
  776. if (!pfn) {
  777. struct octeon_dispatch *dispatch;
  778. dev_dbg(&oct->pci_dev->dev,
  779. "Adding opcode to dispatch list linked list\n");
  780. dispatch = (struct octeon_dispatch *)
  781. vmalloc(sizeof(struct octeon_dispatch));
  782. if (!dispatch) {
  783. dev_err(&oct->pci_dev->dev,
  784. "No memory to add dispatch function\n");
  785. return 1;
  786. }
  787. dispatch->opcode = combined_opcode;
  788. dispatch->dispatch_fn = fn;
  789. dispatch->arg = fn_arg;
  790. /* Add dispatch function to linked list of fn ptrs
  791. * at the hashed index.
  792. */
  793. spin_lock_bh(&oct->dispatch.lock);
  794. list_add(&dispatch->list, &oct->dispatch.dlist[idx].list);
  795. oct->dispatch.count++;
  796. spin_unlock_bh(&oct->dispatch.lock);
  797. } else {
  798. dev_err(&oct->pci_dev->dev,
  799. "Found previously registered dispatch fn for opcode/subcode: %x/%x\n",
  800. opcode, subcode);
  801. return 1;
  802. }
  803. return 0;
  804. }
  805. /* octeon_unregister_dispatch_fn
  806. * Parameters:
  807. * oct - octeon device
  808. * opcode - driver should unregister the function for this opcode
  809. * subcode - driver should unregister the function for this subcode
  810. * Description:
  811. * Unregister the function set for this opcode+subcode.
  812. * Returns:
  813. * Success: 0
  814. * Failure: 1
  815. * Locks:
  816. * No locks are held.
  817. */
  818. int
  819. octeon_unregister_dispatch_fn(struct octeon_device *oct, u16 opcode,
  820. u16 subcode)
  821. {
  822. int retval = 0;
  823. u32 idx;
  824. struct list_head *dispatch, *dfree = NULL, *tmp2;
  825. u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
  826. idx = combined_opcode & OCTEON_OPCODE_MASK;
  827. spin_lock_bh(&oct->dispatch.lock);
  828. if (oct->dispatch.count == 0) {
  829. spin_unlock_bh(&oct->dispatch.lock);
  830. dev_err(&oct->pci_dev->dev,
  831. "No dispatch functions registered for this device\n");
  832. return 1;
  833. }
  834. if (oct->dispatch.dlist[idx].opcode == combined_opcode) {
  835. dispatch = &oct->dispatch.dlist[idx].list;
  836. if (dispatch->next != dispatch) {
  837. dispatch = dispatch->next;
  838. oct->dispatch.dlist[idx].opcode =
  839. ((struct octeon_dispatch *)dispatch)->opcode;
  840. oct->dispatch.dlist[idx].dispatch_fn =
  841. ((struct octeon_dispatch *)
  842. dispatch)->dispatch_fn;
  843. oct->dispatch.dlist[idx].arg =
  844. ((struct octeon_dispatch *)dispatch)->arg;
  845. list_del(dispatch);
  846. dfree = dispatch;
  847. } else {
  848. oct->dispatch.dlist[idx].opcode = 0;
  849. oct->dispatch.dlist[idx].dispatch_fn = NULL;
  850. oct->dispatch.dlist[idx].arg = NULL;
  851. }
  852. } else {
  853. retval = 1;
  854. list_for_each_safe(dispatch, tmp2,
  855. &(oct->dispatch.dlist[idx].
  856. list)) {
  857. if (((struct octeon_dispatch *)dispatch)->opcode ==
  858. combined_opcode) {
  859. list_del(dispatch);
  860. dfree = dispatch;
  861. retval = 0;
  862. }
  863. }
  864. }
  865. if (!retval)
  866. oct->dispatch.count--;
  867. spin_unlock_bh(&oct->dispatch.lock);
  868. vfree(dfree);
  869. return retval;
  870. }
  871. int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
  872. {
  873. u32 i;
  874. char app_name[16];
  875. struct octeon_device *oct = (struct octeon_device *)buf;
  876. struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
  877. struct octeon_core_setup *cs = NULL;
  878. u32 num_nic_ports = 0;
  879. if (OCTEON_CN6XXX(oct))
  880. num_nic_ports =
  881. CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn6xxx, conf));
  882. if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) {
  883. dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n",
  884. atomic_read(&oct->status));
  885. goto core_drv_init_err;
  886. }
  887. strncpy(app_name,
  888. get_oct_app_string(
  889. (u32)recv_pkt->rh.r_core_drv_init.app_mode),
  890. sizeof(app_name) - 1);
  891. oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
  892. if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP) {
  893. oct->fw_info.max_nic_ports =
  894. (u32)recv_pkt->rh.r_core_drv_init.max_nic_ports;
  895. oct->fw_info.num_gmx_ports =
  896. (u32)recv_pkt->rh.r_core_drv_init.num_gmx_ports;
  897. }
  898. if (oct->fw_info.max_nic_ports < num_nic_ports) {
  899. dev_err(&oct->pci_dev->dev,
  900. "Config has more ports than firmware allows (%d > %d).\n",
  901. num_nic_ports, oct->fw_info.max_nic_ports);
  902. goto core_drv_init_err;
  903. }
  904. oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags;
  905. oct->fw_info.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
  906. atomic_set(&oct->status, OCT_DEV_CORE_OK);
  907. cs = &core_setup[oct->octeon_id];
  908. if (recv_pkt->buffer_size[0] != sizeof(*cs)) {
  909. dev_dbg(&oct->pci_dev->dev, "Core setup bytes expected %u found %d\n",
  910. (u32)sizeof(*cs),
  911. recv_pkt->buffer_size[0]);
  912. }
  913. memcpy(cs, get_rbd(recv_pkt->buffer_ptr[0]), sizeof(*cs));
  914. strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME);
  915. strncpy(oct->boardinfo.serial_number, cs->board_serial_number,
  916. OCT_SERIAL_LEN);
  917. octeon_swap_8B_data((u64 *)cs, (sizeof(*cs) >> 3));
  918. oct->boardinfo.major = cs->board_rev_major;
  919. oct->boardinfo.minor = cs->board_rev_minor;
  920. dev_info(&oct->pci_dev->dev,
  921. "Running %s (%llu Hz)\n",
  922. app_name, CVM_CAST64(cs->corefreq));
  923. core_drv_init_err:
  924. for (i = 0; i < recv_pkt->buffer_count; i++)
  925. recv_buffer_free(recv_pkt->buffer_ptr[i]);
  926. octeon_free_recv_info(recv_info);
  927. return 0;
  928. }
  929. int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
  930. {
  931. if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES) &&
  932. (oct->io_qmask.iq & (1UL << q_no)))
  933. return oct->instr_queue[q_no]->max_count;
  934. return -1;
  935. }
  936. int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
  937. {
  938. if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES) &&
  939. (oct->io_qmask.oq & (1UL << q_no)))
  940. return oct->droq[q_no]->max_count;
  941. return -1;
  942. }
  943. /* Retruns the host firmware handshake OCTEON specific configuration */
  944. struct octeon_config *octeon_get_conf(struct octeon_device *oct)
  945. {
  946. struct octeon_config *default_oct_conf = NULL;
  947. /* check the OCTEON Device model & return the corresponding octeon
  948. * configuration
  949. */
  950. if (OCTEON_CN6XXX(oct)) {
  951. default_oct_conf =
  952. (struct octeon_config *)(CHIP_FIELD(oct, cn6xxx, conf));
  953. }
  954. return default_oct_conf;
  955. }
/* The scratch register address is the same in all OCT-II and CN70XX models */
#define CNXX_SLI_SCRATCH1 0x3C0
  958. /** Get the octeon device pointer.
  959. * @param octeon_id - The id for which the octeon device pointer is required.
  960. * @return Success: Octeon device pointer.
  961. * @return Failure: NULL.
  962. */
  963. struct octeon_device *lio_get_device(u32 octeon_id)
  964. {
  965. if (octeon_id >= MAX_OCTEON_DEVICES)
  966. return NULL;
  967. else
  968. return octeon_device[octeon_id];
  969. }
/* Read a 64-bit value from Octeon-internal address @addr through the PCI
 * window registers.  The window read/data register sequence must not be
 * interleaved between callers, so the whole access is serialized under
 * pci_win_lock with interrupts disabled.
 */
u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
{
	u64 val64;
	unsigned long flags;
	u32 val32, addrhi;

	spin_lock_irqsave(&oct->pci_win_lock, flags);

	/* The windowed read happens when the LSB of the addr is written.
	 * So write MSB first
	 */
	addrhi = (addr >> 32);
	/* NOTE(review): extra upper address bits for CN66XX/CN68XX only;
	 * presumably selects the correct internal bus region on those
	 * models - confirm against the hardware reference manual.
	 */
	if ((oct->chip_id == OCTEON_CN66XX) || (oct->chip_id == OCTEON_CN68XX))
		addrhi |= 0x00060000;
	writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);

	/* Read back to preserve ordering of writes */
	val32 = readl(oct->reg_list.pci_win_rd_addr_hi);
	writel(addr & 0xffffffff, oct->reg_list.pci_win_rd_addr_lo);
	/* Read back again so the low-address write (which triggers the
	 * windowed read) has posted before sampling the data register;
	 * the value read back is intentionally unused.
	 */
	val32 = readl(oct->reg_list.pci_win_rd_addr_lo);

	val64 = readq(oct->reg_list.pci_win_rd_data);

	spin_unlock_irqrestore(&oct->pci_win_lock, flags);

	return val64;
}
/* Write the 64-bit value @val to Octeon-internal address @addr through the
 * PCI window registers.  Serialized under pci_win_lock (IRQs disabled) so
 * the addr/data register sequence of concurrent callers cannot interleave.
 */
void lio_pci_writeq(struct octeon_device *oct,
		    u64 val,
		    u64 addr)
{
	u32 val32;
	unsigned long flags;

	spin_lock_irqsave(&oct->pci_win_lock, flags);

	writeq(addr, oct->reg_list.pci_win_wr_addr);

	/* The write happens when the LSB is written. So write MSB first. */
	writel(val >> 32, oct->reg_list.pci_win_wr_data_hi);
	/* Read the MSB to ensure ordering of writes. */
	val32 = readl(oct->reg_list.pci_win_wr_data_hi);

	/* This low-half write is what actually commits the 64-bit value. */
	writel(val & 0xffffffff, oct->reg_list.pci_win_wr_data_lo);

	spin_unlock_irqrestore(&oct->pci_win_lock, flags);
}
  1006. int octeon_mem_access_ok(struct octeon_device *oct)
  1007. {
  1008. u64 access_okay = 0;
  1009. /* Check to make sure a DDR interface is enabled */
  1010. u64 lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
  1011. access_okay = (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
  1012. return access_okay ? 0 : 1;
  1013. }
/* Poll octeon_mem_access_ok() until DDR is accessible or the time budget
 * pointed to by @timeout (in ms) is exhausted.
 * Returns 0 on success, non-zero if DDR never became accessible (or if
 * @timeout is NULL).
 */
int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
{
	int ret = 1;
	u32 ms;

	if (!timeout)
		return ret;

	/* NOTE(review): spins (in 100 ms naps) until *timeout becomes
	 * non-zero - presumably another context (module param / probe
	 * thread) fills it in asynchronously; confirm against callers.
	 */
	while (*timeout == 0)
		schedule_timeout_uninterruptible(HZ / 10);

	/* NOTE(review): the (*timeout == 0) clause below is dead after the
	 * loop above, unless *timeout can be cleared concurrently - in
	 * which case 0 means "wait forever".  TODO confirm intent.
	 */
	for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout));
	     ms += HZ / 10) {
		ret = octeon_mem_access_ok(oct);

		/* wait 100 ms */
		if (ret)
			schedule_timeout_uninterruptible(HZ / 10);
	}

	return ret;
}
  1031. /** Get the octeon id assigned to the octeon device passed as argument.
  1032. * This function is exported to other modules.
  1033. * @param dev - octeon device pointer passed as a void *.
  1034. * @return octeon device id
  1035. */
  1036. int lio_get_device_id(void *dev)
  1037. {
  1038. struct octeon_device *octeon_dev = (struct octeon_device *)dev;
  1039. u32 i;
  1040. for (i = 0; i < MAX_OCTEON_DEVICES; i++)
  1041. if (octeon_device[i] == octeon_dev)
  1042. return octeon_dev->octeon_id;
  1043. return -1;
  1044. }