platform.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899
  1. /*
  2. * Copyright(c) 2015, 2016 Intel Corporation.
  3. *
  4. * This file is provided under a dual BSD/GPLv2 license. When using or
  5. * redistributing this file, you may do so under either license.
  6. *
  7. * GPL LICENSE SUMMARY
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of version 2 of the GNU General Public License as
  11. * published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * General Public License for more details.
  17. *
  18. * BSD LICENSE
  19. *
  20. * Redistribution and use in source and binary forms, with or without
  21. * modification, are permitted provided that the following conditions
  22. * are met:
  23. *
  24. * - Redistributions of source code must retain the above copyright
  25. * notice, this list of conditions and the following disclaimer.
  26. * - Redistributions in binary form must reproduce the above copyright
  27. * notice, this list of conditions and the following disclaimer in
  28. * the documentation and/or other materials provided with the
  29. * distribution.
  30. * - Neither the name of Intel Corporation nor the names of its
  31. * contributors may be used to endorse or promote products derived
  32. * from this software without specific prior written permission.
  33. *
  34. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  35. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  36. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  37. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  38. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  39. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  40. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  41. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  42. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  43. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  44. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  45. *
  46. */
  47. #include "hfi.h"
  48. #include "efivar.h"
  49. void get_platform_config(struct hfi1_devdata *dd)
  50. {
  51. int ret = 0;
  52. unsigned long size = 0;
  53. u8 *temp_platform_config = NULL;
  54. ret = read_hfi1_efi_var(dd, "configuration", &size,
  55. (void **)&temp_platform_config);
  56. if (ret) {
  57. dd_dev_info(dd,
  58. "%s: Failed to get platform config from UEFI, falling back to request firmware\n",
  59. __func__);
  60. /* fall back to request firmware */
  61. platform_config_load = 1;
  62. goto bail;
  63. }
  64. dd->platform_config.data = temp_platform_config;
  65. dd->platform_config.size = size;
  66. bail:
  67. /* exit */;
  68. }
  69. void free_platform_config(struct hfi1_devdata *dd)
  70. {
  71. if (!platform_config_load) {
  72. /*
  73. * was loaded from EFI, release memory
  74. * allocated by read_efi_var
  75. */
  76. kfree(dd->platform_config.data);
  77. }
  78. /*
  79. * else do nothing, dispose_firmware will release
  80. * struct firmware platform_config on driver exit
  81. */
  82. }
  83. void get_port_type(struct hfi1_pportdata *ppd)
  84. {
  85. int ret;
  86. ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
  87. PORT_TABLE_PORT_TYPE, &ppd->port_type,
  88. 4);
  89. if (ret)
  90. ppd->port_type = PORT_TYPE_UNKNOWN;
  91. }
  92. int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
  93. {
  94. u8 tx_ctrl_byte = on ? 0x0 : 0xF;
  95. int ret = 0;
  96. ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_TX_CTRL_BYTE_OFFS,
  97. &tx_ctrl_byte, 1);
  98. /* we expected 1, so consider 0 an error */
  99. if (ret == 0)
  100. ret = -EIO;
  101. else if (ret == 1)
  102. ret = 0;
  103. return ret;
  104. }
  105. static int qual_power(struct hfi1_pportdata *ppd)
  106. {
  107. u32 cable_power_class = 0, power_class_max = 0;
  108. u8 *cache = ppd->qsfp_info.cache;
  109. int ret = 0;
  110. ret = get_platform_config_field(
  111. ppd->dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0,
  112. SYSTEM_TABLE_QSFP_POWER_CLASS_MAX, &power_class_max, 4);
  113. if (ret)
  114. return ret;
  115. cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
  116. if (cable_power_class > power_class_max)
  117. ppd->offline_disabled_reason =
  118. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY);
  119. if (ppd->offline_disabled_reason ==
  120. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) {
  121. dd_dev_info(
  122. ppd->dd,
  123. "%s: Port disabled due to system power restrictions\n",
  124. __func__);
  125. ret = -EPERM;
  126. }
  127. return ret;
  128. }
/*
 * Qualify the cable's nominal bit rate against the enabled link speed.
 *
 * The 250-offset byte is in units of 250 Mbps (0x64 -> 25 Gbps) and the
 * 100-offset byte in units of 100 Mbps (0x7D -> 12.5 Gbps) — presumed
 * from the threshold values; confirm against SFF-8636.  A too-slow
 * cable disables the port (offline reason LINKSPEED_POLICY, -EPERM).
 * Note the final check also fires if the reason was already
 * LINKSPEED_POLICY on entry.
 */
static int qual_bitrate(struct hfi1_pportdata *ppd)
{
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	u8 *cache = ppd->qsfp_info.cache;

	/* 25G enabled: nominal rate must be at least 0x64 */
	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G) &&
	    cache[QSFP_NOM_BIT_RATE_250_OFFS] < 0x64)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);

	/* 12.5G enabled: nominal rate must be at least 0x7D */
	if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G) &&
	    cache[QSFP_NOM_BIT_RATE_100_OFFS] < 0x7D)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);

	if (ppd->offline_disabled_reason ==
	    HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY)) {
		dd_dev_info(
			ppd->dd,
			"%s: Cable failed bitrate check, disabling port\n",
			__func__);
		return -EPERM;
	}
	return 0;
}
/*
 * Raise the QSFP module into its required (high) power class.
 *
 * For power classes above 1 the low bit of the power control byte is
 * set and bit 1 cleared (assumed to be the SFF-8636 power-override /
 * LPMode pair — TODO confirm against the spec); classes above 4
 * additionally require bit 2.  Returns 0 on success, -EIO when a QSFP
 * write does not transfer the expected single byte.
 */
static int set_qsfp_high_power(struct hfi1_pportdata *ppd)
{
	u8 cable_power_class = 0, power_ctrl_byte = 0;
	u8 *cache = ppd->qsfp_info.cache;
	int ret;

	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class > QSFP_POWER_CLASS_1) {
		power_ctrl_byte = cache[QSFP_PWR_CTRL_BYTE_OFFS];

		power_ctrl_byte |= 1;		/* set bit 0 */
		power_ctrl_byte &= ~(0x2);	/* clear bit 1 */

		ret = qsfp_write(ppd, ppd->dd->hfi1_id,
				 QSFP_PWR_CTRL_BYTE_OFFS,
				 &power_ctrl_byte, 1);
		if (ret != 1)
			return -EIO;

		if (cable_power_class > QSFP_POWER_CLASS_4) {
			/* classes 5+ need the extra enable bit as well */
			power_ctrl_byte |= (1 << 2);
			ret = qsfp_write(ppd, ppd->dd->hfi1_id,
					 QSFP_PWR_CTRL_BYTE_OFFS,
					 &power_ctrl_byte, 1);
			if (ret != 1)
				return -EIO;
		}

		/* SFF 8679 rev 1.7 LPMode Deassert time */
		msleep(300);
	}
	return 0;
}
/*
 * Compute the RX CDR (clock/data recovery) bits — the low nibble of the
 * in/out CDR control byte — for a module that implements RX CDR and
 * supports bypassing it.  The caller is responsible for writing the
 * resulting byte back to the module.
 */
static void apply_rx_cdr(struct hfi1_pportdata *ppd,
			 u32 rx_preset_index,
			 u8 *cdr_ctrl_byte)
{
	u32 rx_preset;
	u8 *cache = ppd->qsfp_info.cache;
	int cable_power_class;

	/* nothing to do unless both capability bits are advertised */
	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x4) &&
	      (cache[QSFP_CDR_INFO_OFFS] & 0x40)))
		return;

	/* RX CDR present, bypass supported */
	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class <= QSFP_POWER_CLASS_3) {
		/* Power class <= 3, ignore config & turn RX CDR on */
		*cdr_ctrl_byte |= 0xF;
		return;
	}

	/* does the platform config ask for the preset to be applied? */
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
		&rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: RX_CDR_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR,
		&rx_preset, 4);

	/* Expand cdr setting to all 4 lanes */
	rx_preset = (rx_preset | (rx_preset << 1) |
		     (rx_preset << 2) | (rx_preset << 3));

	if (rx_preset) {
		*cdr_ctrl_byte |= rx_preset;
	} else {
		/* rx_preset == 0 here: this clears the whole byte ... */
		*cdr_ctrl_byte &= rx_preset;
		/* ... then Preserve current TX CDR status (high nibble) */
		*cdr_ctrl_byte |= (cache[QSFP_CDR_CTRL_BYTE_OFFS] & 0xF0);
	}
}
/*
 * Compute the TX CDR bits — the high nibble of the in/out CDR control
 * byte — for a module that implements TX CDR and supports bypassing
 * it.  Mirrors apply_rx_cdr(); the caller writes the byte back.
 */
static void apply_tx_cdr(struct hfi1_pportdata *ppd,
			 u32 tx_preset_index,
			 u8 *cdr_ctrl_byte)
{
	u32 tx_preset;
	u8 *cache = ppd->qsfp_info.cache;
	int cable_power_class;

	/* nothing to do unless both capability bits are advertised */
	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x8) &&
	      (cache[QSFP_CDR_INFO_OFFS] & 0x80)))
		return;

	/* TX CDR present, bypass supported */
	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class <= QSFP_POWER_CLASS_3) {
		/* Power class <= 3, ignore config & turn TX CDR on */
		*cdr_ctrl_byte |= 0xF0;
		return;
	}

	/* does the platform config ask for the preset to be applied? */
	get_platform_config_field(
		ppd->dd,
		PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
		TX_PRESET_TABLE_QSFP_TX_CDR_APPLY, &tx_preset, 4);

	if (!tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX_CDR_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd,
		PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index,
		TX_PRESET_TABLE_QSFP_TX_CDR, &tx_preset, 4);

	/* Expand cdr setting to all 4 lanes */
	tx_preset = (tx_preset | (tx_preset << 1) |
		     (tx_preset << 2) | (tx_preset << 3));

	if (tx_preset)
		*cdr_ctrl_byte |= (tx_preset << 4);
	else
		/* tx_preset == 0 here: clear the high nibble, keeping the
		 * current/determined RX CDR status in the low nibble */
		*cdr_ctrl_byte &= ((tx_preset << 4) | 0xF);
}
  264. static void apply_cdr_settings(
  265. struct hfi1_pportdata *ppd, u32 rx_preset_index,
  266. u32 tx_preset_index)
  267. {
  268. u8 *cache = ppd->qsfp_info.cache;
  269. u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
  270. apply_rx_cdr(ppd, rx_preset_index, &cdr_ctrl_byte);
  271. apply_tx_cdr(ppd, tx_preset_index, &cdr_ctrl_byte);
  272. qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
  273. &cdr_ctrl_byte, 1);
  274. }
/*
 * Disable the module's adaptive TX equalization, when advertised
 * (QSFP_EQ_INFO_OFFS bit 3), by clearing the low nibble of page-3
 * byte 241.
 */
static void apply_tx_eq_auto(struct hfi1_pportdata *ppd)
{
	u8 *cache = ppd->qsfp_info.cache;
	u8 tx_eq;

	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x8))
		return;
	/* Disable adaptive TX EQ if present */
	tx_eq = cache[(128 * 3) + 241];	/* cached copy: 128-byte pages */
	tx_eq &= 0xF0;
	/* write address uses 256-byte page stride; presumably maps to the
	 * same page-3 byte 241 — NOTE(review): confirm addressing scheme */
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 241, &tx_eq, 1);
}
/*
 * Apply the programmable TX equalization preset from the platform
 * config, clamped to the maximum EQ level the module advertises.
 */
static void apply_tx_eq_prog(struct hfi1_pportdata *ppd, u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;
	u32 tx_preset;
	u8 tx_eq;

	/* bit 2: module supports programmable TX EQ */
	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x4))
		return;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ_APPLY,
		&tx_preset, 4);
	if (!tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX_EQ_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ,
		&tx_preset, 4);

	/* clamp to the module's max TX EQ (high nibble of page-3 byte 224;
	 * note cache[608] == cache[(128 * 3) + 224]) */
	if (((cache[(128 * 3) + 224] & 0xF0) >> 4) < tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX EQ %x unsupported\n",
			__func__, tx_preset);

		dd_dev_info(
			ppd->dd,
			"%s: Applying EQ %x\n",
			__func__, cache[608] & 0xF0);

		tx_preset = (cache[608] & 0xF0) >> 4;
	}

	/* replicate the setting to both lanes in each control byte */
	tx_eq = tx_preset | (tx_preset << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 234, &tx_eq, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 235, &tx_eq, 1);
}
/*
 * Apply the RX emphasis preset from the platform config, clamped to
 * the maximum emphasis level the module advertises.
 */
static void apply_rx_eq_emp(struct hfi1_pportdata *ppd, u32 rx_preset_index)
{
	u32 rx_preset;
	u8 rx_eq, *cache = ppd->qsfp_info.cache;

	/* bit 1: module supports RX emphasis control */
	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x2))
		return;
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP_APPLY,
		&rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: RX_EMP_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP,
		&rx_preset, 4);

	/* clamp to the module's max RX emphasis (low nibble of page-3
	 * byte 224; note cache[608] == cache[(128 * 3) + 224]) */
	if ((cache[(128 * 3) + 224] & 0xF) < rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: Requested RX EMP %x\n",
			__func__, rx_preset);

		dd_dev_info(
			ppd->dd,
			"%s: Applying supported EMP %x\n",
			__func__, cache[608] & 0xF);

		rx_preset = cache[608] & 0xF;
	}

	/* replicate the setting to both lanes in each control byte */
	rx_eq = rx_preset | (rx_preset << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 236, &rx_eq, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 237, &rx_eq, 1);
}
/*
 * Apply all TX/RX equalization settings from the platform presets.
 * All three helpers operate on the module's upper page 3, so bail out
 * early when the module reports flat memory (byte 2 bit 2 set).
 */
static void apply_eq_settings(struct hfi1_pportdata *ppd,
			      u32 rx_preset_index, u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;

	/* no point going on w/o a page 3 */
	if (cache[2] & 4) {
		dd_dev_info(ppd->dd,
			    "%s: Upper page 03 not present\n",
			    __func__);
		return;
	}

	apply_tx_eq_auto(ppd);
	apply_tx_eq_prog(ppd, tx_preset_index);
	apply_rx_eq_emp(ppd, rx_preset_index);
}
  374. static void apply_rx_amplitude_settings(
  375. struct hfi1_pportdata *ppd, u32 rx_preset_index,
  376. u32 tx_preset_index)
  377. {
  378. u32 rx_preset;
  379. u8 rx_amp = 0, i = 0, preferred = 0, *cache = ppd->qsfp_info.cache;
  380. /* no point going on w/o a page 3 */
  381. if (cache[2] & 4) {
  382. dd_dev_info(ppd->dd,
  383. "%s: Upper page 03 not present\n",
  384. __func__);
  385. return;
  386. }
  387. if (!(cache[QSFP_EQ_INFO_OFFS] & 0x1)) {
  388. dd_dev_info(ppd->dd,
  389. "%s: RX_AMP_APPLY is set to disabled\n",
  390. __func__);
  391. return;
  392. }
  393. get_platform_config_field(ppd->dd,
  394. PLATFORM_CONFIG_RX_PRESET_TABLE,
  395. rx_preset_index,
  396. RX_PRESET_TABLE_QSFP_RX_AMP_APPLY,
  397. &rx_preset, 4);
  398. if (!rx_preset) {
  399. dd_dev_info(ppd->dd,
  400. "%s: RX_AMP_APPLY is set to disabled\n",
  401. __func__);
  402. return;
  403. }
  404. get_platform_config_field(ppd->dd,
  405. PLATFORM_CONFIG_RX_PRESET_TABLE,
  406. rx_preset_index,
  407. RX_PRESET_TABLE_QSFP_RX_AMP,
  408. &rx_preset, 4);
  409. dd_dev_info(ppd->dd,
  410. "%s: Requested RX AMP %x\n",
  411. __func__,
  412. rx_preset);
  413. for (i = 0; i < 4; i++) {
  414. if (cache[(128 * 3) + 225] & (1 << i)) {
  415. preferred = i;
  416. if (preferred == rx_preset)
  417. break;
  418. }
  419. }
  420. /*
  421. * Verify that preferred RX amplitude is not just a
  422. * fall through of the default
  423. */
  424. if (!preferred && !(cache[(128 * 3) + 225] & 0x1)) {
  425. dd_dev_info(ppd->dd, "No supported RX AMP, not applying\n");
  426. return;
  427. }
  428. dd_dev_info(ppd->dd,
  429. "%s: Applying RX AMP %x\n", __func__, preferred);
  430. rx_amp = preferred | (preferred << 4);
  431. qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 238, &rx_amp, 1);
  432. qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 239, &rx_amp, 1);
  433. }
  434. #define OPA_INVALID_INDEX 0xFFF
  435. static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
  436. u32 config_data, const char *message)
  437. {
  438. u8 i;
  439. int ret = HCMD_SUCCESS;
  440. for (i = 0; i < 4; i++) {
  441. ret = load_8051_config(ppd->dd, field_id, i, config_data);
  442. if (ret != HCMD_SUCCESS) {
  443. dd_dev_err(
  444. ppd->dd,
  445. "%s: %s for lane %u failed\n",
  446. message, __func__, i);
  447. }
  448. }
  449. }
/*
 * Program the 8051 firmware with the tuning method, the total channel
 * attenuation (same value for TX and RX), the cable's CDR/EQ
 * capabilities, and — for limiting-active channels with a valid TX
 * preset — the precursor/attenuation/postcursor TX EQ settings.
 */
static void apply_tunings(
	struct hfi1_pportdata *ppd, u32 tx_preset_index,
	u8 tuning_method, u32 total_atten, u8 limiting_active)
{
	int ret = 0;
	u32 config_data = 0, tx_preset = 0;
	u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0;
	u8 *cache = ppd->qsfp_info.cache;

	/* Pass tuning method to 8051 */
	read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
			 &config_data);
	/* read-modify-write: replace only the tuning-method field */
	config_data &= ~(0xff << TUNING_METHOD_SHIFT);
	config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT);
	ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
			       config_data);
	if (ret != HCMD_SUCCESS)
		dd_dev_err(ppd->dd, "%s: Failed to set tuning method\n",
			   __func__);

	/* Set same channel loss for both TX and RX */
	config_data = 0 | (total_atten << 16) | (total_atten << 24);
	apply_tx_lanes(ppd, CHANNEL_LOSS_SETTINGS, config_data,
		       "Setting channel loss");

	/* Inform 8051 of cable capabilities */
	if (ppd->qsfp_info.cache_valid) {
		/* pack RX/TX CDR and EQ capability bits into one byte */
		external_device_config =
			((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) |
			((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) |
			((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) |
			(cache[QSFP_EQ_INFO_OFFS] & 0x4);
		ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
				       GENERAL_CONFIG, &config_data);
		/* Clear, then set the external device config field */
		config_data &= ~(u32)0xFF;
		config_data |= external_device_config;
		ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
				       GENERAL_CONFIG, config_data);
		if (ret != HCMD_SUCCESS)
			dd_dev_info(ppd->dd,
				    "%s: Failed set ext device config params\n",
				    __func__);
	}

	if (tx_preset_index == OPA_INVALID_INDEX) {
		/* only warn when a preset would actually have been used */
		if (ppd->port_type == PORT_TYPE_QSFP && limiting_active)
			dd_dev_info(ppd->dd, "%s: Invalid Tx preset index\n",
				    __func__);
		return;
	}

	/* Following for limiting active channels only */
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
		TX_PRESET_TABLE_PRECUR, &tx_preset, 4);
	precur = tx_preset;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_ATTN, &tx_preset, 4);
	attn = tx_preset;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4);
	postcur = tx_preset;

	/* precur in byte 0, attn in byte 1, postcur in byte 2 */
	config_data = precur | (attn << 8) | (postcur << 16);
	apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data,
		       "Applying TX settings");
}
/*
 * Tune an active/optical QSFP module: qualify power and bitrate,
 * reset the module if a prior pass changed it, raise the power class,
 * look up the TX/RX preset indices and local attenuation from the
 * platform config, and apply the CDR/EQ/amplitude settings.
 *
 * On preset-lookup failure the corresponding output index is set to
 * OPA_INVALID_INDEX before returning the error.
 *
 * Must be holding the QSFP i2c resource.
 */
static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
			    u32 *ptr_rx_preset, u32 *ptr_total_atten)
{
	int ret;
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	u8 *cache = ppd->qsfp_info.cache;

	ppd->qsfp_info.limiting_active = 1;

	/* quiesce TX lanes while reconfiguring the module */
	ret = set_qsfp_tx(ppd, 0);
	if (ret)
		return ret;

	ret = qual_power(ppd);
	if (ret)
		return ret;

	ret = qual_bitrate(ppd);
	if (ret)
		return ret;

	/*
	 * We'll change the QSFP memory contents from here on out, thus we set a
	 * flag here to remind ourselves to reset the QSFP module. This prevents
	 * reuse of stale settings established in our previous pass through.
	 */
	if (ppd->qsfp_info.reset_needed) {
		reset_qsfp(ppd);
		refresh_qsfp_cache(ppd, &ppd->qsfp_info);
	} else {
		ppd->qsfp_info.reset_needed = 1;
	}

	ret = set_qsfp_high_power(ppd);
	if (ret)
		return ret;

	/* TX preset choice depends on whether the cable has TX EQ (bit 2) */
	if (cache[QSFP_EQ_INFO_OFFS] & 0x4) {
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ,
			ptr_tx_preset, 4);
		if (ret) {
			*ptr_tx_preset = OPA_INVALID_INDEX;
			return ret;
		}
	} else {
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ,
			ptr_tx_preset, 4);
		if (ret) {
			*ptr_tx_preset = OPA_INVALID_INDEX;
			return ret;
		}
	}

	ret = get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
		PORT_TABLE_RX_PRESET_IDX, ptr_rx_preset, 4);
	if (ret) {
		*ptr_rx_preset = OPA_INVALID_INDEX;
		return ret;
	}

	/* local attenuation keyed off the usable link speed; note errors
	 * from these lookups are intentionally not propagated */
	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G, ptr_total_atten, 4);
	else if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G))
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_12G, ptr_total_atten, 4);

	apply_cdr_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	apply_eq_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	apply_rx_amplitude_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	/* re-enable TX now that tuning is complete */
	ret = set_qsfp_tx(ppd, 1);

	return ret;
}
/*
 * Dispatch on the QSFP technology code (upper nibble of the module
 * technology byte): 0xA-0xB are passive copper (compute attenuation
 * from platform + cable + remote), most other codes go through the
 * active-cable tuning path, and 0xD/0xF are unsupported.
 */
static int tune_qsfp(struct hfi1_pportdata *ppd,
		     u32 *ptr_tx_preset, u32 *ptr_rx_preset,
		     u8 *ptr_tuning_method, u32 *ptr_total_atten)
{
	u32 cable_atten = 0, remote_atten = 0, platform_atten = 0;
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	int ret = 0;
	u8 *cache = ppd->qsfp_info.cache;

	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
	case 0xA ... 0xB:	/* passive copper */
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G,
			&platform_atten, 4);
		if (ret)
			return ret;

		/*
		 * Cable-reported attenuation; presumably the 12G/7G offsets
		 * are named after measurement frequency, with the 12G value
		 * used for 25G operation — NOTE(review): confirm against
		 * SFF-8636 byte definitions.
		 */
		if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
			cable_atten = cache[QSFP_CU_ATTEN_12G_OFFS];
		else if ((lss & OPA_LINK_SPEED_12_5G) &&
			 (lse & OPA_LINK_SPEED_12_5G))
			cable_atten = cache[QSFP_CU_ATTEN_7G_OFFS];

		/* Fallback to configured attenuation if cable memory is bad */
		if (cable_atten == 0 || cable_atten > 36) {
			ret = get_platform_config_field(
				ppd->dd,
				PLATFORM_CONFIG_SYSTEM_TABLE, 0,
				SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G,
				&cable_atten, 4);
			if (ret)
				return ret;
		}

		ret = get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
		if (ret)
			return ret;

		*ptr_total_atten = platform_atten + cable_atten + remote_atten;

		*ptr_tuning_method = OPA_PASSIVE_TUNING;
		break;
	case 0x0 ... 0x9: /* fallthrough */
	case 0xC: /* fallthrough */
	case 0xE:
		/* active or optical cable: full tuning pass */
		ret = tune_active_qsfp(ppd, ptr_tx_preset, ptr_rx_preset,
				       ptr_total_atten);
		if (ret)
			return ret;

		*ptr_tuning_method = OPA_ACTIVE_TUNING;
		break;
	case 0xD: /* fallthrough */
	case 0xF:
	default:
		dd_dev_info(ppd->dd, "%s: Unknown/unsupported cable\n",
			    __func__);
		break;
	}
	return ret;
}
/*
 * Tune the SerDes for the port's media type and apply the results to
 * the 8051.
 *
 * This function communicates its success or failure via
 * ppd->driver_link_ready.  Thus, it depends on its association with
 * start_link(...) which checks driver_link_ready before proceeding
 * with the link negotiation and initialization process.
 */
void tune_serdes(struct hfi1_pportdata *ppd)
{
	int ret = 0;
	u32 total_atten = 0;
	u32 remote_atten = 0, platform_atten = 0;
	u32 rx_preset_index, tx_preset_index;
	u8 tuning_method = 0, limiting_active = 0;
	struct hfi1_devdata *dd = ppd->dd;

	rx_preset_index = OPA_INVALID_INDEX;
	tx_preset_index = OPA_INVALID_INDEX;

	/* the link defaults to enabled */
	ppd->link_enabled = 1;
	/* the driver link ready state defaults to not ready */
	ppd->driver_link_ready = 0;
	ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);

	/* Skip the tuning for testing (loopback != none) and simulations */
	if (loopback != LOOPBACK_NONE ||
	    ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
		ppd->driver_link_ready = 1;
		return;
	}

	switch (ppd->port_type) {
	case PORT_TYPE_DISCONNECTED:
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_DISCONNECTED);
		dd_dev_info(dd, "%s: Port disconnected, disabling port\n",
			    __func__);
		goto bail;
	case PORT_TYPE_FIXED:
		/* platform_atten, remote_atten pre-zeroed to catch error */
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G, &platform_atten, 4);

		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);

		total_atten = platform_atten + remote_atten;

		tuning_method = OPA_PASSIVE_TUNING;
		break;
	case PORT_TYPE_VARIABLE:
		if (qsfp_mod_present(ppd)) {
			/*
			 * platform_atten, remote_atten pre-zeroed to
			 * catch error
			 */
			get_platform_config_field(
				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
				PORT_TABLE_LOCAL_ATTEN_25G,
				&platform_atten, 4);

			get_platform_config_field(
				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
				PORT_TABLE_REMOTE_ATTEN_25G,
				&remote_atten, 4);

			total_atten = platform_atten + remote_atten;

			tuning_method = OPA_PASSIVE_TUNING;
		} else {
			/* variable port but no module installed */
			ppd->offline_disabled_reason =
				HFI1_ODR_MASK(OPA_LINKDOWN_REASON_CHASSIS_CONFIG);
			goto bail;
		}
		break;
	case PORT_TYPE_QSFP:
		if (qsfp_mod_present(ppd)) {
			/* serialize access to the shared QSFP i2c chain */
			ret = acquire_chip_resource(ppd->dd,
						    qsfp_resource(ppd->dd),
						    QSFP_WAIT);
			if (ret) {
				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
					   __func__, (int)ppd->dd->hfi1_id);
				goto bail;
			}
			refresh_qsfp_cache(ppd, &ppd->qsfp_info);

			if (ppd->qsfp_info.cache_valid) {
				ret = tune_qsfp(ppd,
						&tx_preset_index,
						&rx_preset_index,
						&tuning_method,
						&total_atten);

				/*
				 * We may have modified the QSFP memory, so
				 * update the cache to reflect the changes
				 */
				refresh_qsfp_cache(ppd, &ppd->qsfp_info);
				limiting_active =
					ppd->qsfp_info.limiting_active;
			} else {
				dd_dev_err(dd,
					   "%s: Reading QSFP memory failed\n",
					   __func__);
				ret = -EINVAL; /* a fail indication */
			}
			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
			if (ret)
				goto bail;
		} else {
			ppd->offline_disabled_reason =
				HFI1_ODR_MASK(
				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
			goto bail;
		}
		break;
	default:
		dd_dev_info(ppd->dd, "%s: Unknown port type\n", __func__);
		ppd->port_type = PORT_TYPE_UNKNOWN;
		tuning_method = OPA_UNKNOWN_TUNING;
		total_atten = 0;
		limiting_active = 0;
		tx_preset_index = OPA_INVALID_INDEX;
		break;
	}

	/* only program the 8051 when nothing above disabled the port */
	if (ppd->offline_disabled_reason ==
	    HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
		apply_tunings(ppd, tx_preset_index, tuning_method,
			      total_atten, limiting_active);

	if (!ret)
		ppd->driver_link_ready = 1;

	return;
bail:
	ppd->driver_link_ready = 0;
}