platform.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076
  1. /*
  2. * Copyright(c) 2015, 2016 Intel Corporation.
  3. *
  4. * This file is provided under a dual BSD/GPLv2 license. When using or
  5. * redistributing this file, you may do so under either license.
  6. *
  7. * GPL LICENSE SUMMARY
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of version 2 of the GNU General Public License as
  11. * published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * General Public License for more details.
  17. *
  18. * BSD LICENSE
  19. *
  20. * Redistribution and use in source and binary forms, with or without
  21. * modification, are permitted provided that the following conditions
  22. * are met:
  23. *
  24. * - Redistributions of source code must retain the above copyright
  25. * notice, this list of conditions and the following disclaimer.
  26. * - Redistributions in binary form must reproduce the above copyright
  27. * notice, this list of conditions and the following disclaimer in
  28. * the documentation and/or other materials provided with the
  29. * distribution.
  30. * - Neither the name of Intel Corporation nor the names of its
  31. * contributors may be used to endorse or promote products derived
  32. * from this software without specific prior written permission.
  33. *
  34. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  35. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  36. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  37. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  38. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  39. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  40. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  41. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  42. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  43. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  44. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  45. *
  46. */
  47. #include <linux/firmware.h>
  48. #include "hfi.h"
  49. #include "efivar.h"
  50. #include "eprom.h"
  51. #define DEFAULT_PLATFORM_CONFIG_NAME "hfi1_platform.dat"
/*
 * Validate the configuration-bitmap checksum stored in the ASIC scratch
 * registers.
 *
 * Returns 1 if the checksum over the scratch configuration bitmap is
 * valid, 0 otherwise (bitmap uninitialized by BIOS, or corrupted).
 */
static int validate_scratch_checksum(struct hfi1_devdata *dd)
{
	u64 checksum = 0, temp_scratch = 0;
	int i, j, version;

	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
	version = (temp_scratch & BITMAP_VERSION_SMASK) >> BITMAP_VERSION_SHIFT;

	/* Prevent power on default of all zeroes from passing checksum */
	if (!version) {
		dd_dev_err(dd, "%s: Config bitmap uninitialized\n", __func__);
		dd_dev_err(dd,
			   "%s: Please update your BIOS to support active channels\n",
			   __func__);
		return 0;
	}

	/*
	 * ASIC scratch 0 only contains the checksum and bitmap version as
	 * fields of interest, both of which are handled separately from the
	 * loop below, so skip it
	 */
	checksum += version;
	for (i = 1; i < ASIC_NUM_SCRATCH; i++) {
		temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH + (8 * i));
		/* accumulate each 64-bit register as four 16-bit words */
		for (j = sizeof(u64); j != 0; j -= 2) {
			checksum += (temp_scratch & 0xFFFF);
			temp_scratch >>= 16;
		}
	}

	/* fold carries back in until the sum fits in 16 bits */
	while (checksum >> 16)
		checksum = (checksum & CHECKSUM_MASK) + (checksum >> 16);

	/* extract the stored checksum field from scratch 0 */
	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
	temp_scratch &= CHECKSUM_SMASK;
	temp_scratch >>= CHECKSUM_SHIFT;

	/* computed sum plus stored checksum must total exactly 0xFFFF */
	if (checksum + temp_scratch == 0xFFFF)
		return 1;

	dd_dev_err(dd, "%s: Configuration bitmap corrupted\n", __func__);
	return 0;
}
  89. static void save_platform_config_fields(struct hfi1_devdata *dd)
  90. {
  91. struct hfi1_pportdata *ppd = dd->pport;
  92. u64 temp_scratch = 0, temp_dest = 0;
  93. temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH_1);
  94. temp_dest = temp_scratch &
  95. (dd->hfi1_id ? PORT1_PORT_TYPE_SMASK :
  96. PORT0_PORT_TYPE_SMASK);
  97. ppd->port_type = temp_dest >>
  98. (dd->hfi1_id ? PORT1_PORT_TYPE_SHIFT :
  99. PORT0_PORT_TYPE_SHIFT);
  100. temp_dest = temp_scratch &
  101. (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SMASK :
  102. PORT0_LOCAL_ATTEN_SMASK);
  103. ppd->local_atten = temp_dest >>
  104. (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SHIFT :
  105. PORT0_LOCAL_ATTEN_SHIFT);
  106. temp_dest = temp_scratch &
  107. (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SMASK :
  108. PORT0_REMOTE_ATTEN_SMASK);
  109. ppd->remote_atten = temp_dest >>
  110. (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SHIFT :
  111. PORT0_REMOTE_ATTEN_SHIFT);
  112. temp_dest = temp_scratch &
  113. (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SMASK :
  114. PORT0_DEFAULT_ATTEN_SMASK);
  115. ppd->default_atten = temp_dest >>
  116. (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SHIFT :
  117. PORT0_DEFAULT_ATTEN_SHIFT);
  118. temp_scratch = read_csr(dd, dd->hfi1_id ? ASIC_CFG_SCRATCH_3 :
  119. ASIC_CFG_SCRATCH_2);
  120. ppd->tx_preset_eq = (temp_scratch & TX_EQ_SMASK) >> TX_EQ_SHIFT;
  121. ppd->tx_preset_noeq = (temp_scratch & TX_NO_EQ_SMASK) >> TX_NO_EQ_SHIFT;
  122. ppd->rx_preset = (temp_scratch & RX_SMASK) >> RX_SHIFT;
  123. ppd->max_power_class = (temp_scratch & QSFP_MAX_POWER_SMASK) >>
  124. QSFP_MAX_POWER_SHIFT;
  125. ppd->config_from_scratch = true;
  126. }
  127. void get_platform_config(struct hfi1_devdata *dd)
  128. {
  129. int ret = 0;
  130. u8 *temp_platform_config = NULL;
  131. u32 esize;
  132. const struct firmware *platform_config_file = NULL;
  133. if (is_integrated(dd)) {
  134. if (validate_scratch_checksum(dd)) {
  135. save_platform_config_fields(dd);
  136. return;
  137. }
  138. } else {
  139. ret = eprom_read_platform_config(dd,
  140. (void **)&temp_platform_config,
  141. &esize);
  142. if (!ret) {
  143. /* success */
  144. dd->platform_config.data = temp_platform_config;
  145. dd->platform_config.size = esize;
  146. return;
  147. }
  148. }
  149. dd_dev_err(dd,
  150. "%s: Failed to get platform config, falling back to sub-optimal default file\n",
  151. __func__);
  152. ret = request_firmware(&platform_config_file,
  153. DEFAULT_PLATFORM_CONFIG_NAME,
  154. &dd->pcidev->dev);
  155. if (ret) {
  156. dd_dev_err(dd,
  157. "%s: No default platform config file found\n",
  158. __func__);
  159. return;
  160. }
  161. /*
  162. * Allocate separate memory block to store data and free firmware
  163. * structure. This allows free_platform_config to treat EPROM and
  164. * fallback configs in the same manner.
  165. */
  166. dd->platform_config.data = kmemdup(platform_config_file->data,
  167. platform_config_file->size,
  168. GFP_KERNEL);
  169. dd->platform_config.size = platform_config_file->size;
  170. release_firmware(platform_config_file);
  171. }
  172. void free_platform_config(struct hfi1_devdata *dd)
  173. {
  174. /* Release memory allocated for eprom or fallback file read. */
  175. kfree(dd->platform_config.data);
  176. }
  177. void get_port_type(struct hfi1_pportdata *ppd)
  178. {
  179. int ret;
  180. u32 temp;
  181. ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
  182. PORT_TABLE_PORT_TYPE, &temp,
  183. 4);
  184. if (ret) {
  185. ppd->port_type = PORT_TYPE_UNKNOWN;
  186. return;
  187. }
  188. ppd->port_type = temp;
  189. }
  190. int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
  191. {
  192. u8 tx_ctrl_byte = on ? 0x0 : 0xF;
  193. int ret = 0;
  194. ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_TX_CTRL_BYTE_OFFS,
  195. &tx_ctrl_byte, 1);
  196. /* we expected 1, so consider 0 an error */
  197. if (ret == 0)
  198. ret = -EIO;
  199. else if (ret == 1)
  200. ret = 0;
  201. return ret;
  202. }
  203. static int qual_power(struct hfi1_pportdata *ppd)
  204. {
  205. u32 cable_power_class = 0, power_class_max = 0;
  206. u8 *cache = ppd->qsfp_info.cache;
  207. int ret = 0;
  208. ret = get_platform_config_field(
  209. ppd->dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0,
  210. SYSTEM_TABLE_QSFP_POWER_CLASS_MAX, &power_class_max, 4);
  211. if (ret)
  212. return ret;
  213. cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
  214. if (cable_power_class > power_class_max)
  215. ppd->offline_disabled_reason =
  216. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY);
  217. if (ppd->offline_disabled_reason ==
  218. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) {
  219. dd_dev_err(
  220. ppd->dd,
  221. "%s: Port disabled due to system power restrictions\n",
  222. __func__);
  223. ret = -EPERM;
  224. }
  225. return ret;
  226. }
  227. static int qual_bitrate(struct hfi1_pportdata *ppd)
  228. {
  229. u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
  230. u8 *cache = ppd->qsfp_info.cache;
  231. if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G) &&
  232. cache[QSFP_NOM_BIT_RATE_250_OFFS] < 0x64)
  233. ppd->offline_disabled_reason =
  234. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);
  235. if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G) &&
  236. cache[QSFP_NOM_BIT_RATE_100_OFFS] < 0x7D)
  237. ppd->offline_disabled_reason =
  238. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);
  239. if (ppd->offline_disabled_reason ==
  240. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY)) {
  241. dd_dev_err(
  242. ppd->dd,
  243. "%s: Cable failed bitrate check, disabling port\n",
  244. __func__);
  245. return -EPERM;
  246. }
  247. return 0;
  248. }
/*
 * Bring the QSFP module out of low-power mode when its power class
 * requires it, by rewriting the module's power control byte.
 *
 * Returns 0 on success, -EIO if a QSFP write fails.
 */
static int set_qsfp_high_power(struct hfi1_pportdata *ppd)
{
	u8 cable_power_class = 0, power_ctrl_byte = 0;
	u8 *cache = ppd->qsfp_info.cache;
	int ret;

	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	/* power class 1 modules need no override; leave them alone */
	if (cable_power_class > QSFP_POWER_CLASS_1) {
		power_ctrl_byte = cache[QSFP_PWR_CTRL_BYTE_OFFS];
		/*
		 * Set bit 0, clear bit 1 -- per SFF-8636 these are the
		 * power override and power set (LPMode) controls; confirm
		 * against the spec revision in use.
		 */
		power_ctrl_byte |= 1;
		power_ctrl_byte &= ~(0x2);

		ret = qsfp_write(ppd, ppd->dd->hfi1_id,
				 QSFP_PWR_CTRL_BYTE_OFFS,
				 &power_ctrl_byte, 1);
		if (ret != 1)
			return -EIO;

		/* classes above 4 additionally require bit 2 set */
		if (cable_power_class > QSFP_POWER_CLASS_4) {
			power_ctrl_byte |= (1 << 2);
			ret = qsfp_write(ppd, ppd->dd->hfi1_id,
					 QSFP_PWR_CTRL_BYTE_OFFS,
					 &power_ctrl_byte, 1);
			if (ret != 1)
				return -EIO;
		}

		/* SFF 8679 rev 1.7 LPMode Deassert time */
		msleep(300);
	}
	return 0;
}
  277. static void apply_rx_cdr(struct hfi1_pportdata *ppd,
  278. u32 rx_preset_index,
  279. u8 *cdr_ctrl_byte)
  280. {
  281. u32 rx_preset;
  282. u8 *cache = ppd->qsfp_info.cache;
  283. int cable_power_class;
  284. if (!((cache[QSFP_MOD_PWR_OFFS] & 0x4) &&
  285. (cache[QSFP_CDR_INFO_OFFS] & 0x40)))
  286. return;
  287. /* RX CDR present, bypass supported */
  288. cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
  289. if (cable_power_class <= QSFP_POWER_CLASS_3) {
  290. /* Power class <= 3, ignore config & turn RX CDR on */
  291. *cdr_ctrl_byte |= 0xF;
  292. return;
  293. }
  294. get_platform_config_field(
  295. ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
  296. rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
  297. &rx_preset, 4);
  298. if (!rx_preset) {
  299. dd_dev_info(
  300. ppd->dd,
  301. "%s: RX_CDR_APPLY is set to disabled\n",
  302. __func__);
  303. return;
  304. }
  305. get_platform_config_field(
  306. ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
  307. rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR,
  308. &rx_preset, 4);
  309. /* Expand cdr setting to all 4 lanes */
  310. rx_preset = (rx_preset | (rx_preset << 1) |
  311. (rx_preset << 2) | (rx_preset << 3));
  312. if (rx_preset) {
  313. *cdr_ctrl_byte |= rx_preset;
  314. } else {
  315. *cdr_ctrl_byte &= rx_preset;
  316. /* Preserve current TX CDR status */
  317. *cdr_ctrl_byte |= (cache[QSFP_CDR_CTRL_BYTE_OFFS] & 0xF0);
  318. }
  319. }
  320. static void apply_tx_cdr(struct hfi1_pportdata *ppd,
  321. u32 tx_preset_index,
  322. u8 *cdr_ctrl_byte)
  323. {
  324. u32 tx_preset;
  325. u8 *cache = ppd->qsfp_info.cache;
  326. int cable_power_class;
  327. if (!((cache[QSFP_MOD_PWR_OFFS] & 0x8) &&
  328. (cache[QSFP_CDR_INFO_OFFS] & 0x80)))
  329. return;
  330. /* TX CDR present, bypass supported */
  331. cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
  332. if (cable_power_class <= QSFP_POWER_CLASS_3) {
  333. /* Power class <= 3, ignore config & turn TX CDR on */
  334. *cdr_ctrl_byte |= 0xF0;
  335. return;
  336. }
  337. get_platform_config_field(
  338. ppd->dd,
  339. PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
  340. TX_PRESET_TABLE_QSFP_TX_CDR_APPLY, &tx_preset, 4);
  341. if (!tx_preset) {
  342. dd_dev_info(
  343. ppd->dd,
  344. "%s: TX_CDR_APPLY is set to disabled\n",
  345. __func__);
  346. return;
  347. }
  348. get_platform_config_field(
  349. ppd->dd,
  350. PLATFORM_CONFIG_TX_PRESET_TABLE,
  351. tx_preset_index,
  352. TX_PRESET_TABLE_QSFP_TX_CDR, &tx_preset, 4);
  353. /* Expand cdr setting to all 4 lanes */
  354. tx_preset = (tx_preset | (tx_preset << 1) |
  355. (tx_preset << 2) | (tx_preset << 3));
  356. if (tx_preset)
  357. *cdr_ctrl_byte |= (tx_preset << 4);
  358. else
  359. /* Preserve current/determined RX CDR status */
  360. *cdr_ctrl_byte &= ((tx_preset << 4) | 0xF);
  361. }
  362. static void apply_cdr_settings(
  363. struct hfi1_pportdata *ppd, u32 rx_preset_index,
  364. u32 tx_preset_index)
  365. {
  366. u8 *cache = ppd->qsfp_info.cache;
  367. u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
  368. apply_rx_cdr(ppd, rx_preset_index, &cdr_ctrl_byte);
  369. apply_tx_cdr(ppd, tx_preset_index, &cdr_ctrl_byte);
  370. qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
  371. &cdr_ctrl_byte, 1);
  372. }
/*
 * Disable the module's adaptive (automatic) TX equalization, if the
 * cable advertises that capability; otherwise do nothing.
 */
static void apply_tx_eq_auto(struct hfi1_pportdata *ppd)
{
	u8 *cache = ppd->qsfp_info.cache;
	u8 tx_eq;

	/* bit 3 of the EQ info byte advertises adaptive TX EQ support */
	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x8))
		return;
	/* Disable adaptive TX EQ if present */
	tx_eq = cache[(128 * 3) + 241];	/* cached copy of upper page 03, byte 241 */
	tx_eq &= 0xF0;	/* clear the low nibble -- presumably per-lane enables; confirm vs. SFF-8636 */
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 241, &tx_eq, 1);
}
  384. static void apply_tx_eq_prog(struct hfi1_pportdata *ppd, u32 tx_preset_index)
  385. {
  386. u8 *cache = ppd->qsfp_info.cache;
  387. u32 tx_preset;
  388. u8 tx_eq;
  389. if (!(cache[QSFP_EQ_INFO_OFFS] & 0x4))
  390. return;
  391. get_platform_config_field(
  392. ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
  393. tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ_APPLY,
  394. &tx_preset, 4);
  395. if (!tx_preset) {
  396. dd_dev_info(
  397. ppd->dd,
  398. "%s: TX_EQ_APPLY is set to disabled\n",
  399. __func__);
  400. return;
  401. }
  402. get_platform_config_field(
  403. ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
  404. tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ,
  405. &tx_preset, 4);
  406. if (((cache[(128 * 3) + 224] & 0xF0) >> 4) < tx_preset) {
  407. dd_dev_info(
  408. ppd->dd,
  409. "%s: TX EQ %x unsupported\n",
  410. __func__, tx_preset);
  411. dd_dev_info(
  412. ppd->dd,
  413. "%s: Applying EQ %x\n",
  414. __func__, cache[608] & 0xF0);
  415. tx_preset = (cache[608] & 0xF0) >> 4;
  416. }
  417. tx_eq = tx_preset | (tx_preset << 4);
  418. qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 234, &tx_eq, 1);
  419. qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 235, &tx_eq, 1);
  420. }
  421. static void apply_rx_eq_emp(struct hfi1_pportdata *ppd, u32 rx_preset_index)
  422. {
  423. u32 rx_preset;
  424. u8 rx_eq, *cache = ppd->qsfp_info.cache;
  425. if (!(cache[QSFP_EQ_INFO_OFFS] & 0x2))
  426. return;
  427. get_platform_config_field(
  428. ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
  429. rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP_APPLY,
  430. &rx_preset, 4);
  431. if (!rx_preset) {
  432. dd_dev_info(
  433. ppd->dd,
  434. "%s: RX_EMP_APPLY is set to disabled\n",
  435. __func__);
  436. return;
  437. }
  438. get_platform_config_field(
  439. ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
  440. rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP,
  441. &rx_preset, 4);
  442. if ((cache[(128 * 3) + 224] & 0xF) < rx_preset) {
  443. dd_dev_info(
  444. ppd->dd,
  445. "%s: Requested RX EMP %x\n",
  446. __func__, rx_preset);
  447. dd_dev_info(
  448. ppd->dd,
  449. "%s: Applying supported EMP %x\n",
  450. __func__, cache[608] & 0xF);
  451. rx_preset = cache[608] & 0xF;
  452. }
  453. rx_eq = rx_preset | (rx_preset << 4);
  454. qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 236, &rx_eq, 1);
  455. qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 237, &rx_eq, 1);
  456. }
/*
 * Apply all equalization-related settings (adaptive TX EQ disable,
 * programmed TX EQ, RX emphasis) to the QSFP module. Skipped entirely
 * when the module lacks upper page 03, where those controls live.
 */
static void apply_eq_settings(struct hfi1_pportdata *ppd,
			      u32 rx_preset_index, u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;

	/* no point going on w/o a page 3 */
	if (cache[2] & 4) {
		/* byte 2 bit 2 set -> flat memory, no upper page 03 */
		dd_dev_info(ppd->dd,
			    "%s: Upper page 03 not present\n",
			    __func__);
		return;
	}

	apply_tx_eq_auto(ppd);
	apply_tx_eq_prog(ppd, tx_preset_index);
	apply_rx_eq_emp(ppd, rx_preset_index);
}
  472. static void apply_rx_amplitude_settings(
  473. struct hfi1_pportdata *ppd, u32 rx_preset_index,
  474. u32 tx_preset_index)
  475. {
  476. u32 rx_preset;
  477. u8 rx_amp = 0, i = 0, preferred = 0, *cache = ppd->qsfp_info.cache;
  478. /* no point going on w/o a page 3 */
  479. if (cache[2] & 4) {
  480. dd_dev_info(ppd->dd,
  481. "%s: Upper page 03 not present\n",
  482. __func__);
  483. return;
  484. }
  485. if (!(cache[QSFP_EQ_INFO_OFFS] & 0x1)) {
  486. dd_dev_info(ppd->dd,
  487. "%s: RX_AMP_APPLY is set to disabled\n",
  488. __func__);
  489. return;
  490. }
  491. get_platform_config_field(ppd->dd,
  492. PLATFORM_CONFIG_RX_PRESET_TABLE,
  493. rx_preset_index,
  494. RX_PRESET_TABLE_QSFP_RX_AMP_APPLY,
  495. &rx_preset, 4);
  496. if (!rx_preset) {
  497. dd_dev_info(ppd->dd,
  498. "%s: RX_AMP_APPLY is set to disabled\n",
  499. __func__);
  500. return;
  501. }
  502. get_platform_config_field(ppd->dd,
  503. PLATFORM_CONFIG_RX_PRESET_TABLE,
  504. rx_preset_index,
  505. RX_PRESET_TABLE_QSFP_RX_AMP,
  506. &rx_preset, 4);
  507. dd_dev_info(ppd->dd,
  508. "%s: Requested RX AMP %x\n",
  509. __func__,
  510. rx_preset);
  511. for (i = 0; i < 4; i++) {
  512. if (cache[(128 * 3) + 225] & (1 << i)) {
  513. preferred = i;
  514. if (preferred == rx_preset)
  515. break;
  516. }
  517. }
  518. /*
  519. * Verify that preferred RX amplitude is not just a
  520. * fall through of the default
  521. */
  522. if (!preferred && !(cache[(128 * 3) + 225] & 0x1)) {
  523. dd_dev_info(ppd->dd, "No supported RX AMP, not applying\n");
  524. return;
  525. }
  526. dd_dev_info(ppd->dd,
  527. "%s: Applying RX AMP %x\n", __func__, preferred);
  528. rx_amp = preferred | (preferred << 4);
  529. qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 238, &rx_amp, 1);
  530. qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 239, &rx_amp, 1);
  531. }
  532. #define OPA_INVALID_INDEX 0xFFF
  533. static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
  534. u32 config_data, const char *message)
  535. {
  536. u8 i;
  537. int ret = HCMD_SUCCESS;
  538. for (i = 0; i < 4; i++) {
  539. ret = load_8051_config(ppd->dd, field_id, i, config_data);
  540. if (ret != HCMD_SUCCESS) {
  541. dd_dev_err(
  542. ppd->dd,
  543. "%s: %s for lane %u failed\n",
  544. message, __func__, i);
  545. }
  546. }
  547. }
  548. /*
  549. * Return a special SerDes setting for low power AOC cables. The power class
  550. * threshold and setting being used were all found by empirical testing.
  551. *
  552. * Summary of the logic:
  553. *
  554. * if (QSFP and QSFP_TYPE == AOC and QSFP_POWER_CLASS < 4)
  555. * return 0xe
  556. * return 0; // leave at default
  557. */
  558. static u8 aoc_low_power_setting(struct hfi1_pportdata *ppd)
  559. {
  560. u8 *cache = ppd->qsfp_info.cache;
  561. int power_class;
  562. /* QSFP only */
  563. if (ppd->port_type != PORT_TYPE_QSFP)
  564. return 0; /* leave at default */
  565. /* active optical cables only */
  566. switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
  567. case 0x0 ... 0x9: /* fallthrough */
  568. case 0xC: /* fallthrough */
  569. case 0xE:
  570. /* active AOC */
  571. power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
  572. if (power_class < QSFP_POWER_CLASS_4)
  573. return 0xe;
  574. }
  575. return 0; /* leave at default */
  576. }
/*
 * Push the tuning method, channel loss, cable capabilities, and (for
 * limiting-active channels) the TX preset coefficients to the 8051
 * firmware.
 *
 * @ppd: port data
 * @tx_preset_index: row in the platform TX preset table, or
 *	OPA_INVALID_INDEX when no preset applies
 * @tuning_method: OPA_PASSIVE_TUNING / OPA_ACTIVE_TUNING value to hand
 *	to the firmware
 * @total_atten: total channel attenuation, applied to both TX and RX
 * @limiting_active: non-zero for limiting active channels; only affects
 *	whether a missing TX preset is reported as an error
 */
static void apply_tunings(
	struct hfi1_pportdata *ppd, u32 tx_preset_index,
	u8 tuning_method, u32 total_atten, u8 limiting_active)
{
	int ret = 0;
	u32 config_data = 0, tx_preset = 0;
	u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0;
	u8 *cache = ppd->qsfp_info.cache;

	/* Pass tuning method to 8051 */
	read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
			 &config_data);
	/* replace only the tuning-method byte, keep the rest */
	config_data &= ~(0xff << TUNING_METHOD_SHIFT);
	config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT);
	ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
			       config_data);
	if (ret != HCMD_SUCCESS)
		dd_dev_err(ppd->dd, "%s: Failed to set tuning method\n",
			   __func__);

	/* Set same channel loss for both TX and RX */
	config_data = 0 | (total_atten << 16) | (total_atten << 24);
	apply_tx_lanes(ppd, CHANNEL_LOSS_SETTINGS, config_data,
		       "Setting channel loss");

	/* Inform 8051 of cable capabilities */
	if (ppd->qsfp_info.cache_valid) {
		/* pack CDR-present and EQ-capability bits into one byte */
		external_device_config =
			((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) |
			((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) |
			((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) |
			(cache[QSFP_EQ_INFO_OFFS] & 0x4);
		ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
				       GENERAL_CONFIG, &config_data);
		/* Clear, then set the external device config field */
		config_data &= ~(u32)0xFF;
		config_data |= external_device_config;
		ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
				       GENERAL_CONFIG, config_data);
		if (ret != HCMD_SUCCESS)
			dd_dev_err(ppd->dd,
				   "%s: Failed set ext device config params\n",
				   __func__);
	}

	if (tx_preset_index == OPA_INVALID_INDEX) {
		/* only an error when a QSFP limiting-active channel needs one */
		if (ppd->port_type == PORT_TYPE_QSFP && limiting_active)
			dd_dev_err(ppd->dd, "%s: Invalid Tx preset index\n",
				   __func__);
		return;
	}

	/* Following for limiting active channels only */
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
		TX_PRESET_TABLE_PRECUR, &tx_preset, 4);
	precur = tx_preset;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_ATTN, &tx_preset, 4);
	attn = tx_preset;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4);
	postcur = tx_preset;

	/*
	 * NOTES:
	 * o The aoc_low_power_setting is applied to all lanes even
	 * though only lane 0's value is examined by the firmware.
	 * o A lingering low power setting after a cable swap does
	 * not occur. On cable unplug the 8051 is reset and
	 * restarted on cable insert. This resets all settings to
	 * their default, erasing any previous low power setting.
	 */
	config_data = precur | (attn << 8) | (postcur << 16) |
		      (aoc_low_power_setting(ppd) << 24);

	apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data,
		       "Applying TX settings");
}
  651. /* Must be holding the QSFP i2c resource */
  652. static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
  653. u32 *ptr_rx_preset, u32 *ptr_total_atten)
  654. {
  655. int ret;
  656. u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
  657. u8 *cache = ppd->qsfp_info.cache;
  658. ppd->qsfp_info.limiting_active = 1;
  659. ret = set_qsfp_tx(ppd, 0);
  660. if (ret)
  661. return ret;
  662. ret = qual_power(ppd);
  663. if (ret)
  664. return ret;
  665. ret = qual_bitrate(ppd);
  666. if (ret)
  667. return ret;
  668. /*
  669. * We'll change the QSFP memory contents from here on out, thus we set a
  670. * flag here to remind ourselves to reset the QSFP module. This prevents
  671. * reuse of stale settings established in our previous pass through.
  672. */
  673. if (ppd->qsfp_info.reset_needed) {
  674. ret = reset_qsfp(ppd);
  675. if (ret)
  676. return ret;
  677. refresh_qsfp_cache(ppd, &ppd->qsfp_info);
  678. } else {
  679. ppd->qsfp_info.reset_needed = 1;
  680. }
  681. ret = set_qsfp_high_power(ppd);
  682. if (ret)
  683. return ret;
  684. if (cache[QSFP_EQ_INFO_OFFS] & 0x4) {
  685. ret = get_platform_config_field(
  686. ppd->dd,
  687. PLATFORM_CONFIG_PORT_TABLE, 0,
  688. PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ,
  689. ptr_tx_preset, 4);
  690. if (ret) {
  691. *ptr_tx_preset = OPA_INVALID_INDEX;
  692. return ret;
  693. }
  694. } else {
  695. ret = get_platform_config_field(
  696. ppd->dd,
  697. PLATFORM_CONFIG_PORT_TABLE, 0,
  698. PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ,
  699. ptr_tx_preset, 4);
  700. if (ret) {
  701. *ptr_tx_preset = OPA_INVALID_INDEX;
  702. return ret;
  703. }
  704. }
  705. ret = get_platform_config_field(
  706. ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
  707. PORT_TABLE_RX_PRESET_IDX, ptr_rx_preset, 4);
  708. if (ret) {
  709. *ptr_rx_preset = OPA_INVALID_INDEX;
  710. return ret;
  711. }
  712. if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
  713. get_platform_config_field(
  714. ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
  715. PORT_TABLE_LOCAL_ATTEN_25G, ptr_total_atten, 4);
  716. else if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G))
  717. get_platform_config_field(
  718. ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
  719. PORT_TABLE_LOCAL_ATTEN_12G, ptr_total_atten, 4);
  720. apply_cdr_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
  721. apply_eq_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
  722. apply_rx_amplitude_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
  723. ret = set_qsfp_tx(ppd, 1);
  724. return ret;
  725. }
/*
 * Dispatch QSFP tuning by cable technology (high nibble of the module
 * technology byte): passive copper gets an attenuation sum, active
 * cables get the full active tuning path, everything else is rejected.
 *
 * Outputs via pointers: preset indices, tuning method, total attenuation.
 * Returns 0 on success or a negative error.
 */
static int tune_qsfp(struct hfi1_pportdata *ppd,
		     u32 *ptr_tx_preset, u32 *ptr_rx_preset,
		     u8 *ptr_tuning_method, u32 *ptr_total_atten)
{
	u32 cable_atten = 0, remote_atten = 0, platform_atten = 0;
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	int ret = 0;
	u8 *cache = ppd->qsfp_info.cache;

	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
	case 0xA ... 0xB:
		/* passive copper cable */
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G,
			&platform_atten, 4);
		if (ret)
			return ret;

		/*
		 * NOTE(review): 25G reads the _12G_ offset and 12.5G the
		 * _7G_ offset -- looks intentional (offset names track the
		 * SFF attenuation bytes, not the link speed) but confirm
		 * against the QSFP field definitions.
		 */
		if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
			cable_atten = cache[QSFP_CU_ATTEN_12G_OFFS];
		else if ((lss & OPA_LINK_SPEED_12_5G) &&
			 (lse & OPA_LINK_SPEED_12_5G))
			cable_atten = cache[QSFP_CU_ATTEN_7G_OFFS];

		/* Fallback to configured attenuation if cable memory is bad */
		if (cable_atten == 0 || cable_atten > 36) {
			ret = get_platform_config_field(
				ppd->dd,
				PLATFORM_CONFIG_SYSTEM_TABLE, 0,
				SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G,
				&cable_atten, 4);
			if (ret)
				return ret;
		}

		ret = get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
		if (ret)
			return ret;

		*ptr_total_atten = platform_atten + cable_atten + remote_atten;

		*ptr_tuning_method = OPA_PASSIVE_TUNING;
		break;
	case 0x0 ... 0x9: /* fallthrough */
	case 0xC: /* fallthrough */
	case 0xE:
		/* active cable (AOC or active copper) */
		ret = tune_active_qsfp(ppd, ptr_tx_preset, ptr_rx_preset,
				       ptr_total_atten);
		if (ret)
			return ret;

		*ptr_tuning_method = OPA_ACTIVE_TUNING;
		break;
	case 0xD: /* fallthrough */
	case 0xF:
	default:
		dd_dev_warn(ppd->dd, "%s: Unknown/unsupported cable\n",
			    __func__);
		break;
	}
	return ret;
}
/*
 * This function communicates its success or failure via
 * ppd->driver_link_ready.  It is therefore coupled to start_link(...),
 * which checks driver_link_ready before proceeding with the link
 * negotiation and initialization process.
 */
/*
 * Select SerDes tuning parameters for this port based on its port type
 * and (for QSFP ports) the attached cable, then apply them and mark the
 * driver ready for link negotiation.
 *
 * No return value: success/failure is reported through
 * ppd->driver_link_ready (set to 1 on success, 0 on any bail-out).
 * Failure paths also record a reason in ppd->offline_disabled_reason.
 */
void tune_serdes(struct hfi1_pportdata *ppd)
{
	int ret = 0;
	u32 total_atten = 0;
	u32 remote_atten = 0, platform_atten = 0;
	u32 rx_preset_index, tx_preset_index;
	u8 tuning_method = 0, limiting_active = 0;
	struct hfi1_devdata *dd = ppd->dd;

	/* no presets chosen yet; these stay invalid for non-QSFP ports */
	rx_preset_index = OPA_INVALID_INDEX;
	tx_preset_index = OPA_INVALID_INDEX;

	/* the link defaults to enabled */
	ppd->link_enabled = 1;
	/* the driver link ready state defaults to not ready */
	ppd->driver_link_ready = 0;
	ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);

	/* Skip the tuning for testing (loopback != none) and simulations */
	if (loopback != LOOPBACK_NONE ||
	    ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
		ppd->driver_link_ready = 1;

		if (qsfp_mod_present(ppd)) {
			/*
			 * Lock the shared i2c chain before touching the
			 * QSFP module; released below after the cache
			 * refresh.
			 */
			ret = acquire_chip_resource(ppd->dd,
						    qsfp_resource(ppd->dd),
						    QSFP_WAIT);
			if (ret) {
				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
					   __func__, (int)ppd->dd->hfi1_id);
				goto bail;
			}

			refresh_qsfp_cache(ppd, &ppd->qsfp_info);
			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
		}

		return;
	}

	switch (ppd->port_type) {
	case PORT_TYPE_DISCONNECTED:
		/* nothing is attached; disable the port and bail */
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_DISCONNECTED);
		dd_dev_warn(dd, "%s: Port disconnected, disabling port\n",
			    __func__);
		goto bail;
	case PORT_TYPE_FIXED:
		/* platform_atten, remote_atten pre-zeroed to catch error */
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G, &platform_atten, 4);

		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);

		total_atten = platform_atten + remote_atten;

		tuning_method = OPA_PASSIVE_TUNING;
		break;
	case PORT_TYPE_VARIABLE:
		if (qsfp_mod_present(ppd)) {
			/*
			 * platform_atten, remote_atten pre-zeroed to
			 * catch error
			 */
			get_platform_config_field(
				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
				PORT_TABLE_LOCAL_ATTEN_25G,
				&platform_atten, 4);

			get_platform_config_field(
				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
				PORT_TABLE_REMOTE_ATTEN_25G,
				&remote_atten, 4);

			total_atten = platform_atten + remote_atten;

			tuning_method = OPA_PASSIVE_TUNING;
		} else {
			/* variable port with no module: config error */
			ppd->offline_disabled_reason =
			     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_CHASSIS_CONFIG);
			goto bail;
		}
		break;
	case PORT_TYPE_QSFP:
		if (qsfp_mod_present(ppd)) {
			/* serialize i2c access while reading/tuning the QSFP */
			ret = acquire_chip_resource(ppd->dd,
						    qsfp_resource(ppd->dd),
						    QSFP_WAIT);
			if (ret) {
				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
					   __func__, (int)ppd->dd->hfi1_id);
				goto bail;
			}
			refresh_qsfp_cache(ppd, &ppd->qsfp_info);

			if (ppd->qsfp_info.cache_valid) {
				ret = tune_qsfp(ppd,
						&tx_preset_index,
						&rx_preset_index,
						&tuning_method,
						&total_atten);

				/*
				 * We may have modified the QSFP memory, so
				 * update the cache to reflect the changes
				 */
				refresh_qsfp_cache(ppd, &ppd->qsfp_info);
				limiting_active =
						ppd->qsfp_info.limiting_active;
			} else {
				dd_dev_err(dd,
					   "%s: Reading QSFP memory failed\n",
					   __func__);
				ret = -EINVAL; /* a fail indication */
			}
			/* release the chain on both success and failure */
			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
			if (ret)
				goto bail;
		} else {
			/* no module installed in the QSFP cage */
			ppd->offline_disabled_reason =
			   HFI1_ODR_MASK(
				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
			goto bail;
		}
		break;
	default:
		dd_dev_warn(ppd->dd, "%s: Unknown port type\n", __func__);
		ppd->port_type = PORT_TYPE_UNKNOWN;
		tuning_method = OPA_UNKNOWN_TUNING;
		total_atten = 0;
		limiting_active = 0;
		tx_preset_index = OPA_INVALID_INDEX;
		break;
	}

	/* only apply the tunings if nothing above disabled the port */
	if (ppd->offline_disabled_reason ==
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
		apply_tunings(ppd, tx_preset_index, tuning_method,
			      total_atten, limiting_active);

	if (!ret)
		ppd->driver_link_ready = 1;

	return;

bail:
	ppd->driver_link_ready = 0;
}