platform.c

/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "hfi.h"
#include "efivar.h"
#include "eprom.h"

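/*
 * Added annotation: validate the checksum held in ASIC_CFG_SCRATCH against
 * the other scratch registers.  The bitmap version plus every 16-bit word
 * of the remaining scratch CSRs are summed and folded with end-around
 * carry; the fold plus the stored checksum must equal 0xFFFF.
 *
 * Returns 1 if the checksum verifies, 0 otherwise (including the
 * all-zeroes power-on default, which is rejected up front).
 */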
static int validate_scratch_checksum(struct hfi1_devdata *dd)
{
	u64 checksum = 0, temp_scratch = 0;
	int i, j, version;

	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
	version = (temp_scratch & BITMAP_VERSION_SMASK) >> BITMAP_VERSION_SHIFT;

	/* Prevent power on default of all zeroes from passing checksum */
	if (!version)
		return 0;

	/*
	 * ASIC scratch 0 only contains the checksum and bitmap version as
	 * fields of interest, both of which are handled separately from the
	 * loop below, so skip it
	 */
	checksum += version;
	for (i = 1; i < ASIC_NUM_SCRATCH; i++) {
		temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH + (8 * i));
		for (j = sizeof(u64); j != 0; j -= 2) {
			checksum += (temp_scratch & 0xFFFF);
			temp_scratch >>= 16;
		}
	}

	while (checksum >> 16)
		checksum = (checksum & CHECKSUM_MASK) + (checksum >> 16);

	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
	temp_scratch &= CHECKSUM_SMASK;
	temp_scratch >>= CHECKSUM_SHIFT;

	if (checksum + temp_scratch == 0xFFFF)
		return 1;
	return 0;
}

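/*
 * Added annotation: pull the per-port tuning fields (port type,
 * local/remote/default attenuation, TX/RX presets, QSFP max power class)
 * out of the ASIC scratch registers and cache them in the port data.
 * Which shift/mask pair and which scratch CSR are used depends on whether
 * this device is HFI 0 or HFI 1.
 */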
static void save_platform_config_fields(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 temp_scratch = 0, temp_dest = 0;

	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH_1);

	temp_dest = temp_scratch &
		    (dd->hfi1_id ? PORT1_PORT_TYPE_SMASK :
		     PORT0_PORT_TYPE_SMASK);
	ppd->port_type = temp_dest >>
			 (dd->hfi1_id ? PORT1_PORT_TYPE_SHIFT :
			  PORT0_PORT_TYPE_SHIFT);

	temp_dest = temp_scratch &
		    (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SMASK :
		     PORT0_LOCAL_ATTEN_SMASK);
	ppd->local_atten = temp_dest >>
			   (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SHIFT :
			    PORT0_LOCAL_ATTEN_SHIFT);

	temp_dest = temp_scratch &
		    (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SMASK :
		     PORT0_REMOTE_ATTEN_SMASK);
	ppd->remote_atten = temp_dest >>
			    (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SHIFT :
			     PORT0_REMOTE_ATTEN_SHIFT);

	temp_dest = temp_scratch &
		    (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SMASK :
		     PORT0_DEFAULT_ATTEN_SMASK);
	ppd->default_atten = temp_dest >>
			     (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SHIFT :
			      PORT0_DEFAULT_ATTEN_SHIFT);

	temp_scratch = read_csr(dd, dd->hfi1_id ? ASIC_CFG_SCRATCH_3 :
				ASIC_CFG_SCRATCH_2);

	ppd->tx_preset_eq = (temp_scratch & TX_EQ_SMASK) >> TX_EQ_SHIFT;
	ppd->tx_preset_noeq = (temp_scratch & TX_NO_EQ_SMASK) >> TX_NO_EQ_SHIFT;
	ppd->rx_preset = (temp_scratch & RX_SMASK) >> RX_SHIFT;

	ppd->max_power_class = (temp_scratch & QSFP_MAX_POWER_SMASK) >>
				QSFP_MAX_POWER_SHIFT;
}

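/*
 * Added annotation: locate the platform configuration.  Integrated devices
 * use the scratch register bitmap written by the BIOS; discrete devices try
 * the EPROM first, then the "configuration" EFI variable.  If every source
 * fails, platform_config_load is set so that a default configuration file
 * is requested through the firmware loader instead.
 */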
void get_platform_config(struct hfi1_devdata *dd)
{
	int ret = 0;
	unsigned long size = 0;
	u8 *temp_platform_config = NULL;
	u32 esize;

	if (is_integrated(dd)) {
		if (validate_scratch_checksum(dd)) {
			save_platform_config_fields(dd);
			return;
		}
		dd_dev_err(dd, "%s: Config bitmap corrupted/uninitialized\n",
			   __func__);
		dd_dev_err(dd,
			   "%s: Please update your BIOS to support active channels\n",
			   __func__);
	} else {
		ret = eprom_read_platform_config(dd,
						 (void **)&temp_platform_config,
						 &esize);
		if (!ret) {
			/* success */
			dd->platform_config.data = temp_platform_config;
			dd->platform_config.size = esize;
			return;
		}
		/* fail, try EFI variable */
		ret = read_hfi1_efi_var(dd, "configuration", &size,
					(void **)&temp_platform_config);
		if (!ret) {
			dd->platform_config.data = temp_platform_config;
			dd->platform_config.size = size;
			return;
		}
	}
	dd_dev_err(dd,
		   "%s: Failed to get platform config, falling back to sub-optimal default file\n",
		   __func__);
	/* fall back to request firmware */
	platform_config_load = 1;
}

void free_platform_config(struct hfi1_devdata *dd)
{
	if (!platform_config_load) {
		/*
		 * was loaded from EFI or the EPROM, release memory
		 * allocated by read_efi_var/eprom_read_platform_config
		 */
		kfree(dd->platform_config.data);
	}
	/*
	 * else do nothing, dispose_firmware will release
	 * struct firmware platform_config on driver exit
	 */
}

void get_port_type(struct hfi1_pportdata *ppd)
{
	int ret;
	u32 temp;

	ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
					PORT_TABLE_PORT_TYPE, &temp,
					4);
	if (ret) {
		ppd->port_type = PORT_TYPE_UNKNOWN;
		return;
	}
	ppd->port_type = temp;
}

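/*
 * Added annotation: enable or disable the transmitters on all four QSFP
 * lanes by writing the TX_DISABLE control byte (QSFP_TX_CTRL_BYTE_OFFS,
 * byte 86 per the SFF-8636 memory map): 0x0 enables, 0xF disables.
 *
 * Returns 0 on success, -EIO if the single-byte write did not complete.
 */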
int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
{
	u8 tx_ctrl_byte = on ? 0x0 : 0xF;
	int ret = 0;

	ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_TX_CTRL_BYTE_OFFS,
			 &tx_ctrl_byte, 1);
	/* we expected 1, so consider 0 an error */
	if (ret == 0)
		ret = -EIO;
	else if (ret == 1)
		ret = 0;
	return ret;
}

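/*
 * Added annotation: compare the cable's advertised power class against the
 * platform's configured maximum.  If the cable needs more power than the
 * system allows, the port is marked offline with a POWER_POLICY reason and
 * -EPERM is returned; otherwise 0.
 */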
static int qual_power(struct hfi1_pportdata *ppd)
{
	u32 cable_power_class = 0, power_class_max = 0;
	u8 *cache = ppd->qsfp_info.cache;
	int ret = 0;

	ret = get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0,
		SYSTEM_TABLE_QSFP_POWER_CLASS_MAX, &power_class_max, 4);
	if (ret)
		return ret;

	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class > power_class_max)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY);

	if (ppd->offline_disabled_reason ==
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) {
		dd_dev_info(
			ppd->dd,
			"%s: Port disabled due to system power restrictions\n",
			__func__);
		ret = -EPERM;
	}
	return ret;
}

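/*
 * Added annotation: check the cable's nominal bit rate against the enabled
 * link speeds.  The two nominal bit rate bytes are in units of 250 Mbps and
 * 100 Mbps respectively (per SFF-8636), so 0x64 (100) corresponds to
 * 25 Gbps and 0x7D (125) to 12.5 Gbps.  A cable that cannot carry an
 * enabled speed disables the port with a LINKSPEED_POLICY reason.
 */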
static int qual_bitrate(struct hfi1_pportdata *ppd)
{
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	u8 *cache = ppd->qsfp_info.cache;

	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G) &&
	    cache[QSFP_NOM_BIT_RATE_250_OFFS] < 0x64)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);

	if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G) &&
	    cache[QSFP_NOM_BIT_RATE_100_OFFS] < 0x7D)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);

	if (ppd->offline_disabled_reason ==
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY)) {
		dd_dev_info(
			ppd->dd,
			"%s: Cable failed bitrate check, disabling port\n",
			__func__);
		return -EPERM;
	}
	return 0;
}

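/*
 * Added annotation: raise the module out of low-power mode when its power
 * class requires it.  The power control byte (QSFP_PWR_CTRL_BYTE_OFFS,
 * byte 93 in SFF-8636) is written with the power-override bit set and the
 * low-power bit cleared; power classes above 4 additionally need the
 * high-power-class enable bit.  The 300 ms sleep honours the SFF-8679
 * LPMode deassert time before the module is used.
 */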
static int set_qsfp_high_power(struct hfi1_pportdata *ppd)
{
	u8 cable_power_class = 0, power_ctrl_byte = 0;
	u8 *cache = ppd->qsfp_info.cache;
	int ret;

	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class > QSFP_POWER_CLASS_1) {
		power_ctrl_byte = cache[QSFP_PWR_CTRL_BYTE_OFFS];

		power_ctrl_byte |= 1;
		power_ctrl_byte &= ~(0x2);

		ret = qsfp_write(ppd, ppd->dd->hfi1_id,
				 QSFP_PWR_CTRL_BYTE_OFFS,
				 &power_ctrl_byte, 1);
		if (ret != 1)
			return -EIO;

		if (cable_power_class > QSFP_POWER_CLASS_4) {
			power_ctrl_byte |= (1 << 2);
			ret = qsfp_write(ppd, ppd->dd->hfi1_id,
					 QSFP_PWR_CTRL_BYTE_OFFS,
					 &power_ctrl_byte, 1);
			if (ret != 1)
				return -EIO;
		}

		/* SFF 8679 rev 1.7 LPMode Deassert time */
		msleep(300);
	}
	return 0;
}

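/*
 * Added annotation: program the RX half of the module's CDR control byte
 * from the platform RX preset table.  Nothing is done unless the module
 * both implements an RX CDR and allows it to be bypassed; modules at power
 * class 3 or below simply get all four RX CDR bits turned on.  When the
 * CDRs are bypassed, the TX half of the byte is preserved.
 */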
static void apply_rx_cdr(struct hfi1_pportdata *ppd,
			 u32 rx_preset_index,
			 u8 *cdr_ctrl_byte)
{
	u32 rx_preset;
	u8 *cache = ppd->qsfp_info.cache;
	int cable_power_class;

	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x4) &&
	      (cache[QSFP_CDR_INFO_OFFS] & 0x40)))
		return;

	/* RX CDR present, bypass supported */
	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class <= QSFP_POWER_CLASS_3) {
		/* Power class <= 3, ignore config & turn RX CDR on */
		*cdr_ctrl_byte |= 0xF;
		return;
	}

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
		&rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: RX_CDR_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR,
		&rx_preset, 4);

	/* Expand cdr setting to all 4 lanes */
	rx_preset = (rx_preset | (rx_preset << 1) |
			(rx_preset << 2) | (rx_preset << 3));

	if (rx_preset) {
		*cdr_ctrl_byte |= rx_preset;
	} else {
		*cdr_ctrl_byte &= rx_preset;
		/* Preserve current TX CDR status */
		*cdr_ctrl_byte |= (cache[QSFP_CDR_CTRL_BYTE_OFFS] & 0xF0);
	}
}

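/*
 * Added annotation: mirror image of apply_rx_cdr() for the TX CDRs; the
 * upper nibble of the CDR control byte is set from the platform TX preset
 * table, with the same power-class-3 shortcut and bypass handling.
 */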
static void apply_tx_cdr(struct hfi1_pportdata *ppd,
			 u32 tx_preset_index,
			 u8 *cdr_ctrl_byte)
{
	u32 tx_preset;
	u8 *cache = ppd->qsfp_info.cache;
	int cable_power_class;

	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x8) &&
	      (cache[QSFP_CDR_INFO_OFFS] & 0x80)))
		return;

	/* TX CDR present, bypass supported */
	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class <= QSFP_POWER_CLASS_3) {
		/* Power class <= 3, ignore config & turn TX CDR on */
		*cdr_ctrl_byte |= 0xF0;
		return;
	}

	get_platform_config_field(
		ppd->dd,
		PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
		TX_PRESET_TABLE_QSFP_TX_CDR_APPLY, &tx_preset, 4);

	if (!tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX_CDR_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd,
		PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index,
		TX_PRESET_TABLE_QSFP_TX_CDR, &tx_preset, 4);

	/* Expand cdr setting to all 4 lanes */
	tx_preset = (tx_preset | (tx_preset << 1) |
			(tx_preset << 2) | (tx_preset << 3));

	if (tx_preset)
		*cdr_ctrl_byte |= (tx_preset << 4);
	else
		/* Preserve current/determined RX CDR status */
		*cdr_ctrl_byte &= ((tx_preset << 4) | 0xF);
}

static void apply_cdr_settings(
		struct hfi1_pportdata *ppd, u32 rx_preset_index,
		u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;
	u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];

	apply_rx_cdr(ppd, rx_preset_index, &cdr_ctrl_byte);

	apply_tx_cdr(ppd, tx_preset_index, &cdr_ctrl_byte);

	qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
		   &cdr_ctrl_byte, 1);
}

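/*
 * Added annotation: if the module advertises adaptive TX equalization, turn
 * it off so the fixed equalization programmed by apply_tx_eq_prog() takes
 * effect.  Byte 241 of upper page 03 appears to hold the adaptive TX EQ
 * enable bits in its low nibble, which is why only the upper nibble of the
 * cached value is written back.
 */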
static void apply_tx_eq_auto(struct hfi1_pportdata *ppd)
{
	u8 *cache = ppd->qsfp_info.cache;
	u8 tx_eq;

	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x8))
		return;
	/* Disable adaptive TX EQ if present */
	tx_eq = cache[(128 * 3) + 241];
	tx_eq &= 0xF0;
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 241, &tx_eq, 1);
}

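/*
 * Added annotation: program fixed TX equalization from the platform TX
 * preset table.  The requested value is clamped to the maximum the module
 * advertises in upper page 03 (cache offset (128 * 3) + 224, high nibble),
 * then replicated to all four lanes through the two per-lane-pair EQ
 * control bytes at page 03 offsets 234 and 235.
 */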
static void apply_tx_eq_prog(struct hfi1_pportdata *ppd, u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;
	u32 tx_preset;
	u8 tx_eq;

	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x4))
		return;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ_APPLY,
		&tx_preset, 4);
	if (!tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX_EQ_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ,
		&tx_preset, 4);

	if (((cache[(128 * 3) + 224] & 0xF0) >> 4) < tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX EQ %x unsupported\n",
			__func__, tx_preset);

		dd_dev_info(
			ppd->dd,
			"%s: Applying EQ %x\n",
			__func__, cache[608] & 0xF0);

		tx_preset = (cache[608] & 0xF0) >> 4;
	}

	tx_eq = tx_preset | (tx_preset << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 234, &tx_eq, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 235, &tx_eq, 1);
}

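/*
 * Added annotation: program RX output emphasis from the platform RX preset
 * table, again clamped to the module's advertised maximum (low nibble of
 * cache offset 608) and replicated to all four lanes through page 03
 * bytes 236 and 237.
 */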
static void apply_rx_eq_emp(struct hfi1_pportdata *ppd, u32 rx_preset_index)
{
	u32 rx_preset;
	u8 rx_eq, *cache = ppd->qsfp_info.cache;

	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x2))
		return;
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP_APPLY,
		&rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: RX_EMP_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP,
		&rx_preset, 4);

	if ((cache[(128 * 3) + 224] & 0xF) < rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: Requested RX EMP %x\n",
			__func__, rx_preset);

		dd_dev_info(
			ppd->dd,
			"%s: Applying supported EMP %x\n",
			__func__, cache[608] & 0xF);

		rx_preset = cache[608] & 0xF;
	}

	rx_eq = rx_preset | (rx_preset << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 236, &rx_eq, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 237, &rx_eq, 1);
}

static void apply_eq_settings(struct hfi1_pportdata *ppd,
			      u32 rx_preset_index, u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;

	/* no point going on w/o a page 3 */
	if (cache[2] & 4) {
		dd_dev_info(ppd->dd,
			    "%s: Upper page 03 not present\n",
			    __func__);
		return;
	}

	apply_tx_eq_auto(ppd);

	apply_tx_eq_prog(ppd, tx_preset_index);

	apply_rx_eq_emp(ppd, rx_preset_index);
}

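/*
 * Added annotation: choose an RX output amplitude.  The platform supplies
 * the requested code; the loop over the module's supported-amplitude bitmap
 * (cache offset (128 * 3) + 225) picks the requested code if it is
 * supported, otherwise the highest code the module does support.  The
 * result is written to page 03 bytes 238 and 239 for all four lanes.
 */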
static void apply_rx_amplitude_settings(
		struct hfi1_pportdata *ppd, u32 rx_preset_index,
		u32 tx_preset_index)
{
	u32 rx_preset;
	u8 rx_amp = 0, i = 0, preferred = 0, *cache = ppd->qsfp_info.cache;

	/* no point going on w/o a page 3 */
	if (cache[2] & 4) {
		dd_dev_info(ppd->dd,
			    "%s: Upper page 03 not present\n",
			    __func__);
		return;
	}
	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x1)) {
		dd_dev_info(ppd->dd,
			    "%s: RX_AMP_APPLY is set to disabled\n",
			    __func__);
		return;
	}

	get_platform_config_field(ppd->dd,
				  PLATFORM_CONFIG_RX_PRESET_TABLE,
				  rx_preset_index,
				  RX_PRESET_TABLE_QSFP_RX_AMP_APPLY,
				  &rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(ppd->dd,
			    "%s: RX_AMP_APPLY is set to disabled\n",
			    __func__);
		return;
	}
	get_platform_config_field(ppd->dd,
				  PLATFORM_CONFIG_RX_PRESET_TABLE,
				  rx_preset_index,
				  RX_PRESET_TABLE_QSFP_RX_AMP,
				  &rx_preset, 4);

	dd_dev_info(ppd->dd,
		    "%s: Requested RX AMP %x\n",
		    __func__,
		    rx_preset);

	for (i = 0; i < 4; i++) {
		if (cache[(128 * 3) + 225] & (1 << i)) {
			preferred = i;
			if (preferred == rx_preset)
				break;
		}
	}

	/*
	 * Verify that preferred RX amplitude is not just a
	 * fall through of the default
	 */
	if (!preferred && !(cache[(128 * 3) + 225] & 0x1)) {
		dd_dev_info(ppd->dd, "No supported RX AMP, not applying\n");
		return;
	}

	dd_dev_info(ppd->dd,
		    "%s: Applying RX AMP %x\n", __func__, preferred);

	rx_amp = preferred | (preferred << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 238, &rx_amp, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 239, &rx_amp, 1);
}

#define OPA_INVALID_INDEX 0xFFF

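/*
 * Added annotation: write the same 8051 configuration value for all four
 * lanes of the given field, logging (but not propagating) any per-lane
 * failure.
 */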
static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
			   u32 config_data, const char *message)
{
	u8 i;
	int ret = HCMD_SUCCESS;

	for (i = 0; i < 4; i++) {
		ret = load_8051_config(ppd->dd, field_id, i, config_data);
		if (ret != HCMD_SUCCESS) {
			dd_dev_err(
				ppd->dd,
				"%s: %s for lane %u failed\n",
				message, __func__, i);
		}
	}
}

/*
 * Return a special SerDes setting for low power AOC cables.  The power class
 * threshold and setting being used were all found by empirical testing.
 *
 * Summary of the logic:
 *
 * if (QSFP and QSFP_TYPE == AOC and QSFP_POWER_CLASS < 4)
 *     return 0xe
 * return 0; // leave at default
 */
static u8 aoc_low_power_setting(struct hfi1_pportdata *ppd)
{
	u8 *cache = ppd->qsfp_info.cache;
	int power_class;

	/* QSFP only */
	if (ppd->port_type != PORT_TYPE_QSFP)
		return 0; /* leave at default */

	/* active optical cables only */
	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
	case 0x0 ... 0x9: /* fallthrough */
	case 0xC: /* fallthrough */
	case 0xE:
		/* active AOC */
		power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
		if (power_class < QSFP_POWER_CLASS_4)
			return 0xe;
	}
	return 0; /* leave at default */
}

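/*
 * Added annotation: hand the computed tuning parameters to the 8051
 * firmware: the tuning method, the total channel loss (applied to both TX
 * and RX), the cable's CDR/EQ capability bits, and, when a valid TX preset
 * exists for a limiting active channel, the precursor/attenuation/postcursor
 * settings plus any low power AOC override.
 */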
static void apply_tunings(
		struct hfi1_pportdata *ppd, u32 tx_preset_index,
		u8 tuning_method, u32 total_atten, u8 limiting_active)
{
	int ret = 0;
	u32 config_data = 0, tx_preset = 0;
	u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0;
	u8 *cache = ppd->qsfp_info.cache;

	/* Pass tuning method to 8051 */
	read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
			 &config_data);
	config_data &= ~(0xff << TUNING_METHOD_SHIFT);
	config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT);
	ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
			       config_data);
	if (ret != HCMD_SUCCESS)
		dd_dev_err(ppd->dd, "%s: Failed to set tuning method\n",
			   __func__);

	/* Set same channel loss for both TX and RX */
	config_data = 0 | (total_atten << 16) | (total_atten << 24);
	apply_tx_lanes(ppd, CHANNEL_LOSS_SETTINGS, config_data,
		       "Setting channel loss");

	/* Inform 8051 of cable capabilities */
	if (ppd->qsfp_info.cache_valid) {
		external_device_config =
			((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) |
			((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) |
			((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) |
			(cache[QSFP_EQ_INFO_OFFS] & 0x4);
		ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
				       GENERAL_CONFIG, &config_data);
		/* Clear, then set the external device config field */
		config_data &= ~(u32)0xFF;
		config_data |= external_device_config;
		ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
				       GENERAL_CONFIG, config_data);
		if (ret != HCMD_SUCCESS)
			dd_dev_info(ppd->dd,
				    "%s: Failed set ext device config params\n",
				    __func__);
	}

	if (tx_preset_index == OPA_INVALID_INDEX) {
		if (ppd->port_type == PORT_TYPE_QSFP && limiting_active)
			dd_dev_info(ppd->dd, "%s: Invalid Tx preset index\n",
				    __func__);
		return;
	}

	/* Following for limiting active channels only */
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
		TX_PRESET_TABLE_PRECUR, &tx_preset, 4);
	precur = tx_preset;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_ATTN, &tx_preset, 4);
	attn = tx_preset;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4);
	postcur = tx_preset;

	/*
	 * NOTES:
	 * o The aoc_low_power_setting is applied to all lanes even
	 *   though only lane 0's value is examined by the firmware.
	 * o A lingering low power setting after a cable swap does
	 *   not occur. On cable unplug the 8051 is reset and
	 *   restarted on cable insert. This resets all settings to
	 *   their default, erasing any previous low power setting.
	 */
	config_data = precur | (attn << 8) | (postcur << 16) |
		      (aoc_low_power_setting(ppd) << 24);

	apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data,
		       "Applying TX settings");
}

/* Must be holding the QSFP i2c resource */
static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
			    u32 *ptr_rx_preset, u32 *ptr_total_atten)
{
	int ret;
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	u8 *cache = ppd->qsfp_info.cache;

	ppd->qsfp_info.limiting_active = 1;

	ret = set_qsfp_tx(ppd, 0);
	if (ret)
		return ret;

	ret = qual_power(ppd);
	if (ret)
		return ret;

	ret = qual_bitrate(ppd);
	if (ret)
		return ret;

	/*
	 * We'll change the QSFP memory contents from here on out, thus we set a
	 * flag here to remind ourselves to reset the QSFP module. This prevents
	 * reuse of stale settings established in our previous pass through.
	 */
	if (ppd->qsfp_info.reset_needed) {
		reset_qsfp(ppd);
		refresh_qsfp_cache(ppd, &ppd->qsfp_info);
	} else {
		ppd->qsfp_info.reset_needed = 1;
	}

	ret = set_qsfp_high_power(ppd);
	if (ret)
		return ret;

	if (cache[QSFP_EQ_INFO_OFFS] & 0x4) {
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ,
			ptr_tx_preset, 4);
		if (ret) {
			*ptr_tx_preset = OPA_INVALID_INDEX;
			return ret;
		}
	} else {
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ,
			ptr_tx_preset, 4);
		if (ret) {
			*ptr_tx_preset = OPA_INVALID_INDEX;
			return ret;
		}
	}

	ret = get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
		PORT_TABLE_RX_PRESET_IDX, ptr_rx_preset, 4);
	if (ret) {
		*ptr_rx_preset = OPA_INVALID_INDEX;
		return ret;
	}

	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G, ptr_total_atten, 4);
	else if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G))
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_12G, ptr_total_atten, 4);

	apply_cdr_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	apply_eq_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	apply_rx_amplitude_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	ret = set_qsfp_tx(ppd, 1);

	return ret;
}

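/*
 * Added annotation: dispatch on the QSFP technology nibble.  Copper/passive
 * cables (codes 0xA-0xB) get passive tuning with attenuation taken from the
 * cable memory, falling back to the platform default when the stored value
 * is zero or implausibly large; active/optical cables go through
 * tune_active_qsfp(); anything else is reported as unsupported.
 */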
static int tune_qsfp(struct hfi1_pportdata *ppd,
		     u32 *ptr_tx_preset, u32 *ptr_rx_preset,
		     u8 *ptr_tuning_method, u32 *ptr_total_atten)
{
	u32 cable_atten = 0, remote_atten = 0, platform_atten = 0;
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	int ret = 0;
	u8 *cache = ppd->qsfp_info.cache;

	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
	case 0xA ... 0xB:
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G,
			&platform_atten, 4);
		if (ret)
			return ret;

		if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
			cable_atten = cache[QSFP_CU_ATTEN_12G_OFFS];
		else if ((lss & OPA_LINK_SPEED_12_5G) &&
			 (lse & OPA_LINK_SPEED_12_5G))
			cable_atten = cache[QSFP_CU_ATTEN_7G_OFFS];

		/* Fallback to configured attenuation if cable memory is bad */
		if (cable_atten == 0 || cable_atten > 36) {
			ret = get_platform_config_field(
				ppd->dd,
				PLATFORM_CONFIG_SYSTEM_TABLE, 0,
				SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G,
				&cable_atten, 4);
			if (ret)
				return ret;
		}

		ret = get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
		if (ret)
			return ret;

		*ptr_total_atten = platform_atten + cable_atten + remote_atten;

		*ptr_tuning_method = OPA_PASSIVE_TUNING;
		break;
	case 0x0 ... 0x9: /* fallthrough */
	case 0xC: /* fallthrough */
	case 0xE:
		ret = tune_active_qsfp(ppd, ptr_tx_preset, ptr_rx_preset,
				       ptr_total_atten);
		if (ret)
			return ret;

		*ptr_tuning_method = OPA_ACTIVE_TUNING;
		break;
	case 0xD: /* fallthrough */
	case 0xF:
	default:
		dd_dev_info(ppd->dd, "%s: Unknown/unsupported cable\n",
			    __func__);
		break;
	}
	return ret;
}

/*
 * This function communicates its success or failure via ppd->driver_link_ready
 * Thus, it depends on its association with start_link(...) which checks
 * driver_link_ready before proceeding with the link negotiation and
 * initialization process.
 */
void tune_serdes(struct hfi1_pportdata *ppd)
{
	int ret = 0;
	u32 total_atten = 0;
	u32 remote_atten = 0, platform_atten = 0;
	u32 rx_preset_index, tx_preset_index;
	u8 tuning_method = 0, limiting_active = 0;
	struct hfi1_devdata *dd = ppd->dd;

	rx_preset_index = OPA_INVALID_INDEX;
	tx_preset_index = OPA_INVALID_INDEX;

	/* the link defaults to enabled */
	ppd->link_enabled = 1;
	/* the driver link ready state defaults to not ready */
	ppd->driver_link_ready = 0;
	ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);

	/* Skip the tuning for testing (loopback != none) and simulations */
	if (loopback != LOOPBACK_NONE ||
	    ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
		ppd->driver_link_ready = 1;
		return;
	}

	switch (ppd->port_type) {
	case PORT_TYPE_DISCONNECTED:
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_DISCONNECTED);
		dd_dev_info(dd, "%s: Port disconnected, disabling port\n",
			    __func__);
		goto bail;
	case PORT_TYPE_FIXED:
		/* platform_atten, remote_atten pre-zeroed to catch error */
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G, &platform_atten, 4);

		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);

		total_atten = platform_atten + remote_atten;

		tuning_method = OPA_PASSIVE_TUNING;
		break;
	case PORT_TYPE_VARIABLE:
		if (qsfp_mod_present(ppd)) {
			/*
			 * platform_atten, remote_atten pre-zeroed to
			 * catch error
			 */
			get_platform_config_field(
				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
				PORT_TABLE_LOCAL_ATTEN_25G,
				&platform_atten, 4);

			get_platform_config_field(
				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
				PORT_TABLE_REMOTE_ATTEN_25G,
				&remote_atten, 4);

			total_atten = platform_atten + remote_atten;

			tuning_method = OPA_PASSIVE_TUNING;
		} else {
			ppd->offline_disabled_reason =
			     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_CHASSIS_CONFIG);
			goto bail;
		}
		break;
	case PORT_TYPE_QSFP:
		if (qsfp_mod_present(ppd)) {
			ret = acquire_chip_resource(ppd->dd,
						    qsfp_resource(ppd->dd),
						    QSFP_WAIT);
			if (ret) {
				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
					   __func__, (int)ppd->dd->hfi1_id);
				goto bail;
			}
			refresh_qsfp_cache(ppd, &ppd->qsfp_info);

			if (ppd->qsfp_info.cache_valid) {
				ret = tune_qsfp(ppd,
						&tx_preset_index,
						&rx_preset_index,
						&tuning_method,
						&total_atten);

				/*
				 * We may have modified the QSFP memory, so
				 * update the cache to reflect the changes
				 */
				refresh_qsfp_cache(ppd, &ppd->qsfp_info);
				limiting_active =
						ppd->qsfp_info.limiting_active;
			} else {
				dd_dev_err(dd,
					   "%s: Reading QSFP memory failed\n",
					   __func__);
				ret = -EINVAL; /* a fail indication */
			}
			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
			if (ret)
				goto bail;
		} else {
			ppd->offline_disabled_reason =
			   HFI1_ODR_MASK(
				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
			goto bail;
		}
		break;
	default:
		dd_dev_info(ppd->dd, "%s: Unknown port type\n", __func__);
		ppd->port_type = PORT_TYPE_UNKNOWN;
		tuning_method = OPA_UNKNOWN_TUNING;
		total_atten = 0;
		limiting_active = 0;
		tx_preset_index = OPA_INVALID_INDEX;
		break;
	}

	if (ppd->offline_disabled_reason ==
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
		apply_tunings(ppd, tx_preset_index, tuning_method,
			      total_atten, limiting_active);

	if (!ret)
		ppd->driver_link_ready = 1;

	return;
bail:
	ppd->driver_link_ready = 0;
}