cmd.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/errno.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/device.h>
#include <linux/semaphore.h>
#include <rdma/ib_smi.h>
#include <linux/delay.h>

#include <asm/io.h>

#include "mlx4.h"
#include "fw.h"

#define CMD_POLL_TOKEN 0xffff
#define INBOX_MASK	0xffffffffffffff00ULL

#define CMD_CHAN_VER 1
#define CMD_CHAN_IF_REV 1

enum {
	/* command completed successfully: */
	CMD_STAT_OK		= 0x00,
	/* Internal error (such as a bus error) occurred while processing command: */
	CMD_STAT_INTERNAL_ERR	= 0x01,
	/* Operation/command not supported or opcode modifier not supported: */
	CMD_STAT_BAD_OP		= 0x02,
	/* Parameter not supported or parameter out of range: */
	CMD_STAT_BAD_PARAM	= 0x03,
	/* System not enabled or bad system state: */
	CMD_STAT_BAD_SYS_STATE	= 0x04,
	/* Attempt to access reserved or unallocated resource: */
	CMD_STAT_BAD_RESOURCE	= 0x05,
	/* Requested resource is currently executing a command, or is otherwise busy: */
	CMD_STAT_RESOURCE_BUSY	= 0x06,
	/* Required capability exceeds device limits: */
	CMD_STAT_EXCEED_LIM	= 0x08,
	/* Resource is not in the appropriate state or ownership: */
	CMD_STAT_BAD_RES_STATE	= 0x09,
	/* Index out of range: */
	CMD_STAT_BAD_INDEX	= 0x0a,
	/* FW image corrupted: */
	CMD_STAT_BAD_NVMEM	= 0x0b,
	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
	CMD_STAT_ICM_ERROR	= 0x0c,
	/* Attempt to modify a QP/EE which is not in the presumed state: */
	CMD_STAT_BAD_QP_STATE	= 0x10,
	/* Bad segment parameters (Address/Size): */
	CMD_STAT_BAD_SEG_PARAM	= 0x20,
	/* Memory Region has Memory Windows bound to it: */
	CMD_STAT_REG_BOUND	= 0x21,
	/* HCA local attached memory not present: */
	CMD_STAT_LAM_NOT_PRE	= 0x22,
	/* Bad management packet (silently discarded): */
	CMD_STAT_BAD_PKT	= 0x30,
	/* More outstanding CQEs in CQ than new CQ size: */
	CMD_STAT_BAD_SIZE	= 0x40,
	/* Multi Function device support required: */
	CMD_STAT_MULTI_FUNC_REQ	= 0x50,
};

enum {
	HCR_IN_PARAM_OFFSET	= 0x00,
	HCR_IN_MODIFIER_OFFSET	= 0x08,
	HCR_OUT_PARAM_OFFSET	= 0x0c,
	HCR_TOKEN_OFFSET	= 0x14,
	HCR_STATUS_OFFSET	= 0x18,

	HCR_OPMOD_SHIFT		= 12,
	HCR_T_BIT		= 21,
	HCR_E_BIT		= 22,
	HCR_GO_BIT		= 23
};

enum {
	GO_BIT_TIMEOUT_MSECS	= 10000
};

enum mlx4_vlan_transition {
	MLX4_VLAN_TRANSITION_VST_VST = 0,
	MLX4_VLAN_TRANSITION_VST_VGT = 1,
	MLX4_VLAN_TRANSITION_VGT_VST = 2,
	MLX4_VLAN_TRANSITION_VGT_VGT = 3,
};

struct mlx4_cmd_context {
	struct completion	done;
	int			result;
	int			next;
	u64			out_param;
	u16			token;
	u8			fw_status;
};

static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
				    struct mlx4_vhcr_cmd *in_vhcr);
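/* Map a firmware command status byte to a negative errno value. Statuses
 * beyond trans_table, or non-OK statuses with no mapping, fall back to -EIO.
 */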
static int mlx4_status_to_errno(u8 status)
{
	static const int trans_table[] = {
		[CMD_STAT_INTERNAL_ERR]	  = -EIO,
		[CMD_STAT_BAD_OP]	  = -EPERM,
		[CMD_STAT_BAD_PARAM]	  = -EINVAL,
		[CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
		[CMD_STAT_BAD_RESOURCE]	  = -EBADF,
		[CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
		[CMD_STAT_EXCEED_LIM]	  = -ENOMEM,
		[CMD_STAT_BAD_RES_STATE]  = -EBADF,
		[CMD_STAT_BAD_INDEX]	  = -EBADF,
		[CMD_STAT_BAD_NVMEM]	  = -EFAULT,
		[CMD_STAT_ICM_ERROR]	  = -ENFILE,
		[CMD_STAT_BAD_QP_STATE]	  = -EINVAL,
		[CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
		[CMD_STAT_REG_BOUND]	  = -EBUSY,
		[CMD_STAT_LAM_NOT_PRE]	  = -EAGAIN,
		[CMD_STAT_BAD_PKT]	  = -EINVAL,
		[CMD_STAT_BAD_SIZE]	  = -ENOMEM,
		[CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
	};

	if (status >= ARRAY_SIZE(trans_table) ||
	    (status != CMD_STAT_OK && trans_table[status] == 0))
		return -EIO;

	return trans_table[status];
}

static u8 mlx4_errno_to_status(int errno)
{
	switch (errno) {
	case -EPERM:
		return CMD_STAT_BAD_OP;
	case -EINVAL:
		return CMD_STAT_BAD_PARAM;
	case -ENXIO:
		return CMD_STAT_BAD_SYS_STATE;
	case -EBUSY:
		return CMD_STAT_RESOURCE_BUSY;
	case -ENOMEM:
		return CMD_STAT_EXCEED_LIM;
	case -ENFILE:
		return CMD_STAT_ICM_ERROR;
	default:
		return CMD_STAT_INTERNAL_ERR;
	}
}

static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
				       u8 op_modifier)
{
	switch (op) {
	case MLX4_CMD_UNMAP_ICM:
	case MLX4_CMD_UNMAP_ICM_AUX:
	case MLX4_CMD_UNMAP_FA:
	case MLX4_CMD_2RST_QP:
	case MLX4_CMD_HW2SW_EQ:
	case MLX4_CMD_HW2SW_CQ:
	case MLX4_CMD_HW2SW_SRQ:
	case MLX4_CMD_HW2SW_MPT:
	case MLX4_CMD_CLOSE_HCA:
	case MLX4_QP_FLOW_STEERING_DETACH:
	case MLX4_CMD_FREE_RES:
	case MLX4_CMD_CLOSE_PORT:
		return CMD_STAT_OK;

	case MLX4_CMD_QP_ATTACH:
		/* In the detach case, return success */
		if (op_modifier == 0)
			return CMD_STAT_OK;
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);

	default:
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
	}
}

static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
{
	/* Any error during the closing commands below is considered fatal */
	if (op == MLX4_CMD_CLOSE_HCA ||
	    op == MLX4_CMD_HW2SW_EQ ||
	    op == MLX4_CMD_HW2SW_CQ ||
	    op == MLX4_CMD_2RST_QP ||
	    op == MLX4_CMD_HW2SW_SRQ ||
	    op == MLX4_CMD_SYNC_TPT ||
	    op == MLX4_CMD_UNMAP_ICM ||
	    op == MLX4_CMD_UNMAP_ICM_AUX ||
	    op == MLX4_CMD_UNMAP_FA)
		return 1;
	/* Error on MLX4_CMD_HW2SW_MPT is fatal except when fw status equals
	 * CMD_STAT_REG_BOUND.
	 * This status indicates that the memory region has memory windows
	 * bound to it, which may result from invalid user space usage and
	 * is not fatal.
	 */
	if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
		return 1;
	return 0;
}

static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
			       int err)
{
	/* The return code is based on the command only if the reset flow is
	 * really active; otherwise the current error code is returned.
	 */
	if (mlx4_internal_err_reset) {
		mlx4_enter_error_state(dev->persist);
		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
	}
	return err;
}
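/* The comm channel tracks ownership with a toggle bit (bit 31 of the
 * slave_read/slave_write words): a command is still pending as long as the
 * toggle seen in slave_read differs from our cached comm_toggle.
 */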
static int comm_pending(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 status = readl(&priv->mfunc.comm->slave_read);

	return (swab32(status) >> 31) != priv->cmd.comm_toggle;
}
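/* Post a command on the slave->master comm channel: flip our toggle and
 * write {toggle, cmd, param} as a single big-endian word to slave_write.
 */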
static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 val;

	/* To avoid writing to unknown addresses after the device state was
	 * changed to internal error and the function was reset,
	 * check the INTERNAL_ERROR flag which is updated under
	 * device_state_mutex lock.
	 */
	mutex_lock(&dev->persist->device_state_mutex);

	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		mutex_unlock(&dev->persist->device_state_mutex);
		return -EIO;
	}

	priv->cmd.comm_toggle ^= 1;
	val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
	__raw_writel((__force u32) cpu_to_be32(val),
		     &priv->mfunc.comm->slave_write);
	mmiowb();
	mutex_unlock(&dev->persist->device_state_mutex);
	return 0;
}

static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
			      unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	unsigned long end;
	int err = 0;
	int ret_from_pending = 0;

	/* First, verify that the master reports correct status */
	if (comm_pending(dev)) {
		mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
			  priv->cmd.comm_toggle, cmd);
		return -EAGAIN;
	}

	/* Write command */
	down(&priv->cmd.poll_sem);
	if (mlx4_comm_cmd_post(dev, cmd, param)) {
		/* Only in case the device state is INTERNAL_ERROR,
		 * mlx4_comm_cmd_post returns with an error
		 */
		err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
		goto out;
	}

	end = msecs_to_jiffies(timeout) + jiffies;
	while (comm_pending(dev) && time_before(jiffies, end))
		cond_resched();
	ret_from_pending = comm_pending(dev);
	if (ret_from_pending) {
		/* check if the slave is trying to boot in the middle of
		 * FLR process. The only non-zero result in the RESET command
		 * is MLX4_DELAY_RESET_SLAVE.
		 */
		if (cmd == MLX4_COMM_CMD_RESET) {
			err = MLX4_DELAY_RESET_SLAVE;
			goto out;
		} else {
			mlx4_warn(dev, "Communication channel command 0x%x timed out\n",
				  cmd);
			err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
		}
	}

	if (err)
		mlx4_enter_error_state(dev->persist);
out:
	up(&priv->cmd.poll_sem);
	return err;
}
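/* Event-driven variant of the comm-channel flow: take a free command context
 * with a fresh token, post the command, and sleep until the completion is
 * signalled (or the timeout expires, which triggers the reset flow).
 */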
static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd,
			      u16 param, u16 op, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	unsigned long end;
	int err = 0;

	down(&cmd->event_sem);

	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	reinit_completion(&context->done);

	if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) {
		/* Only in case the device state is INTERNAL_ERROR,
		 * mlx4_comm_cmd_post returns with an error
		 */
		err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
		goto out;
	}

	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n",
			  vhcr_cmd, op);
		goto out_reset;
	}

	err = context->result;
	if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 vhcr_cmd, context->fw_status);
		if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
			goto out_reset;
	}

	/* wait for comm channel ready
	 * this is necessary to prevent a race
	 * when switching between event and polling mode.
	 * Skip this section when the device is in FATAL_ERROR state;
	 * in that state, no commands are sent via the comm channel until
	 * the device has returned from reset.
	 */
	if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
		end = msecs_to_jiffies(timeout) + jiffies;
		while (comm_pending(dev) && time_before(jiffies, end))
			cond_resched();
	}
	goto out;

out_reset:
	err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
	mlx4_enter_error_state(dev->persist);
out:
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}

int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
		  u16 op, unsigned long timeout)
{
	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);

	if (mlx4_priv(dev)->cmd.use_events)
		return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout);
	return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
}

static int cmd_pending(struct mlx4_dev *dev)
{
	u32 status;

	if (pci_channel_offline(dev->persist->pdev))
		return -EIO;

	status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);

	return (status & swab32(1 << HCR_GO_BIT)) ||
		(mlx4_priv(dev)->cmd.toggle ==
		 !!(status & swab32(1 << HCR_T_BIT)));
}
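/* Write a command to the HCR (Host Command Register). The HCR is seven
 * big-endian 32-bit words: in_param (two words), in_modifier, out_param
 * (two words), token, and finally the word carrying the go bit, toggle bit,
 * event bit, opcode modifier and opcode. Writing the last word with the go
 * bit set hands ownership of the HCR to the firmware.
 */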
static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
			 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
			 int event)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	u32 __iomem *hcr = cmd->hcr;
	int ret = -EIO;
	unsigned long end;

	mutex_lock(&dev->persist->device_state_mutex);
	/* To avoid writing to unknown addresses after the device state was
	 * changed to internal error and the chip was reset,
	 * check the INTERNAL_ERROR flag which is updated under
	 * device_state_mutex lock.
	 */
	if (pci_channel_offline(dev->persist->pdev) ||
	    (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		goto out;
	}

	end = jiffies;
	if (event)
		end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

	while (cmd_pending(dev)) {
		if (pci_channel_offline(dev->persist->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			goto out;
		}

		if (time_after_eq(jiffies, end)) {
			mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
			goto out;
		}
		cond_resched();
	}

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),		  hcr + 0);
	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
	__raw_writel((__force u32) cpu_to_be32(in_modifier),		  hcr + 2);
	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),	  hcr + 3);
	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
	__raw_writel((__force u32) cpu_to_be32(token << 16),		  hcr + 5);

	/* __raw_writel may not order writes. */
	wmb();

	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)		|
					       (cmd->toggle << HCR_T_BIT)	|
					       (event ? (1 << HCR_E_BIT) : 0)	|
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op), hcr + 6);

	/*
	 * Make sure that our HCR writes don't get mixed in with
	 * writes from another CPU starting a FW command.
	 */
	mmiowb();

	cmd->toggle = cmd->toggle ^ 1;

	ret = 0;

out:
	if (ret)
		mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
			  op, ret, in_param, in_modifier, op_modifier);
	mutex_unlock(&dev->persist->device_state_mutex);

	return ret;
}
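/* Execute a command on behalf of a slave function. The command is written
 * into the VHCR (virtual HCR) mailbox; the master processes it directly via
 * mlx4_master_process_vhcr(), while a VF signals the master over the comm
 * channel with MLX4_COMM_CMD_VHCR_POST and waits for the result.
 */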
static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			  int out_is_imm, u32 in_modifier, u8 op_modifier,
			  u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
	int ret;

	mutex_lock(&priv->cmd.slave_cmd_mutex);

	vhcr->in_param = cpu_to_be64(in_param);
	vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
	vhcr->in_modifier = cpu_to_be32(in_modifier);
	vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
	vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
	vhcr->status = 0;
	vhcr->flags = !!(priv->cmd.use_events) << 6;

	if (mlx4_is_master(dev)) {
		ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
		if (!ret) {
			if (out_is_imm) {
				if (out_param)
					*out_param =
						be64_to_cpu(vhcr->out_param);
				else {
					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
						 op);
					vhcr->status = CMD_STAT_BAD_PARAM;
				}
			}
			ret = mlx4_status_to_errno(vhcr->status);
		}
		if (ret &&
		    dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
			ret = mlx4_internal_err_ret_value(dev, op, op_modifier);
	} else {
		ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op,
				    MLX4_COMM_TIME + timeout);
		if (!ret) {
			if (out_is_imm) {
				if (out_param)
					*out_param =
						be64_to_cpu(vhcr->out_param);
				else {
					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
						 op);
					vhcr->status = CMD_STAT_BAD_PARAM;
				}
			}
			ret = mlx4_status_to_errno(vhcr->status);
		} else {
			if (dev->persist->state &
			    MLX4_DEVICE_STATE_INTERNAL_ERROR)
				ret = mlx4_internal_err_ret_value(dev, op,
								  op_modifier);
			else
				mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op);
		}
	}

	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return ret;
}
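/* Polling-mode command execution: post the command to the HCR, busy-wait
 * (with cond_resched()) until the go bit clears or the timeout expires, then
 * read the immediate out_param and the status straight from the HCR.
 */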
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	void __iomem *hcr = priv->cmd.hcr;
	int err = 0;
	unsigned long end;
	u32 stat;

	down(&priv->cmd.poll_sem);

	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
		goto out;
	}

	if (out_is_imm && !out_param) {
		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
			 op);
		err = -EINVAL;
		goto out;
	}

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
	if (err)
		goto out_reset;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (cmd_pending(dev) && time_before(jiffies, end)) {
		if (pci_channel_offline(dev->persist->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			err = -EIO;
			goto out_reset;
		}

		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
			err = mlx4_internal_err_ret_value(dev, op, op_modifier);
			goto out;
		}

		cond_resched();
	}

	if (cmd_pending(dev)) {
		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
			  op);
		err = -EIO;
		goto out_reset;
	}

	if (out_is_imm)
		*out_param =
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
	stat = be32_to_cpu((__force __be32)
			   __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
	err = mlx4_status_to_errno(stat);
	if (err) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 op, stat);
		if (mlx4_closing_cmd_fatal_error(op, stat))
			goto out_reset;
		goto out;
	}

out_reset:
	if (err)
		err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
out:
	up(&priv->cmd.poll_sem);
	return err;
}
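/* Completion handler invoked from the EQ code when a command-interface event
 * arrives: match the token against the waiting context and wake the sleeper
 * in mlx4_cmd_wait().
 */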
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_context *context =
		&priv->cmd.context[token & priv->cmd.token_mask];

	/* previously timed out command completing at long last */
	if (token != context->token)
		return;

	context->fw_status = status;
	context->result    = mlx4_status_to_errno(status);
	context->out_param = out_param;

	complete(&context->done);
}
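/* Event-mode command execution: allocate a command context with a fresh
 * token, post the command with the event bit set, and sleep until
 * mlx4_cmd_event() completes the context or the timeout expires.
 */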
static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	int err = 0;

	down(&cmd->event_sem);

	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	if (out_is_imm && !out_param) {
		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
			 op);
		err = -EINVAL;
		goto out;
	}

	reinit_completion(&context->done);

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, context->token, 1);
	if (err)
		goto out_reset;

	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
			  op);
		err = -EIO;
		goto out_reset;
	}

	err = context->result;
	if (err) {
		/* Since we do not want to have this error message always
		 * displayed at driver start when there are ConnectX2 HCAs
		 * on the host, we demote the error message for this
		 * specific command/input_mod/opcode_mod/fw-status to debug.
		 */
		if (op == MLX4_CMD_SET_PORT && in_modifier == 1 &&
		    op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE)
			mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
				 op, context->fw_status);
		else
			mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
				 op, context->fw_status);

		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
			err = mlx4_internal_err_ret_value(dev, op, op_modifier);
		else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
			goto out_reset;

		goto out;
	}

	if (out_is_imm)
		*out_param = context->out_param;

out_reset:
	if (err)
		err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
out:
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}
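/* Top-level command dispatcher: native commands on a single-function device
 * (or on the master, when requested) go directly to the HCR in event or
 * polling mode; everything else is funneled through the slave/VHCR path.
 */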
int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
	       int out_is_imm, u32 in_modifier, u8 op_modifier,
	       u16 op, unsigned long timeout, int native)
{
	if (pci_channel_offline(dev->persist->pdev))
		return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);

	if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
			return mlx4_internal_err_ret_value(dev, op,
							   op_modifier);
		if (mlx4_priv(dev)->cmd.use_events)
			return mlx4_cmd_wait(dev, in_param, out_param,
					     out_is_imm, in_modifier,
					     op_modifier, op, timeout);
		else
			return mlx4_cmd_poll(dev, in_param, out_param,
					     out_is_imm, in_modifier,
					     op_modifier, op, timeout);
	}
	return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
			      in_modifier, op_modifier, op, timeout);
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);

int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
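/* DMA between master and slave memory. Both addresses must be 4K-aligned,
 * the slave id must fit in 7 bits and the size must be a multiple of 256
 * bytes; the direction decides which side is encoded in in_param and which
 * in out_param.
 */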
static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
			   int slave, u64 slave_addr,
			   int size, int is_read)
{
	u64 in_param;
	u64 out_param;

	if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
	    (slave & ~0x7f) | (size & 0xff)) {
		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
			 slave_addr, master_addr, slave, size);
		return -EINVAL;
	}

	if (is_read) {
		in_param = (u64) slave | slave_addr;
		out_param = (u64) dev->caps.function | master_addr;
	} else {
		in_param = (u64) dev->caps.function | master_addr;
		out_param = (u64) slave | slave_addr;
	}

	return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
			    MLX4_CMD_ACCESS_MEM,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox)
{
	struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
	struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
	int err;
	int i;

	if (index & 0x1f)
		return -EINVAL;

	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (err)
		return err;

	for (i = 0; i < 32; ++i)
		pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);

	return err;
}

static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox)
{
	int i;
	int err;

	for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
		err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
		if (err)
			return err;
	}

	return 0;
}

#define PORT_CAPABILITY_LOCATION_IN_SMP 20
#define PORT_STATE_OFFSET 32

static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
{
	if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
		return IB_PORT_ACTIVE;
	else
		return IB_PORT_DOWN;
}
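/* Paravirtualize MAD_IFC for slaves: pkey-table queries are remapped through
 * the per-slave virt2phys pkey table, PortInfo responses get the slave's
 * port state and capability mask, GUIDInfo is reduced to the slave's own
 * GID at index 0, and NodeInfo carries the slave's node GUID.
 */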
static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct ib_smp *smp = inbox->buf;
	u32 index;
	u8 port;
	u8 opcode_modifier;
	u16 *table;
	int err;
	int vidx, pidx;
	int network_view;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct ib_smp *outsmp = outbox->buf;
	__be16 *outtab = (__be16 *)(outsmp->data);
	__be32 slave_cap_mask;
	__be64 slave_node_guid;

	port = vhcr->in_modifier;

	/* network-view bit is for driver use only, and should not be passed to FW */
	opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
	network_view = !!(vhcr->op_modifier & 0x8);

	if (smp->base_version == 1 &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
	    smp->class_version == 1) {
		/* host view is paravirtualized */
		if (!network_view && smp->method == IB_MGMT_METHOD_GET) {
			if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
				index = be32_to_cpu(smp->attr_mod);
				if (port < 1 || port > dev->caps.num_ports)
					return -EINVAL;
				table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
						sizeof(*table) * 32, GFP_KERNEL);
				if (!table)
					return -ENOMEM;
				/* need to get the full pkey table because the paravirtualized
				 * pkeys may be scattered among several pkey blocks.
				 */
				err = get_full_pkey_table(dev, port, table, inbox, outbox);
				if (!err) {
					for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
						pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
						outtab[vidx % 32] = cpu_to_be16(table[pidx]);
					}
				}
				kfree(table);
				return err;
			}
			if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
				/* get the slave-specific caps */
				/* do the command */
				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
						   vhcr->in_modifier, opcode_modifier,
						   vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
				/* modify the response for slaves */
				if (!err && slave != mlx4_master_func_num(dev)) {
					u8 *state = outsmp->data + PORT_STATE_OFFSET;

					*state = (*state & 0xf0) | vf_port_state(dev, port, slave);
					slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
					memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
				}
				return err;
			}
			if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
				/* compute slave's gid block */
				smp->attr_mod = cpu_to_be32(slave / 8);
				/* execute cmd */
				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
						   vhcr->in_modifier, opcode_modifier,
						   vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
				if (!err) {
					/* if needed, move slave gid to index 0 */
					if (slave % 8)
						memcpy(outsmp->data,
						       outsmp->data + (slave % 8) * 8, 8);
					/* delete all other gids */
					memset(outsmp->data + 8, 0, 56);
				}
				return err;
			}
			if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
						   vhcr->in_modifier, opcode_modifier,
						   vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
				if (!err) {
					slave_node_guid = mlx4_get_slave_node_guid(dev, slave);
					memcpy(outsmp->data + 12, &slave_node_guid, 8);
				}
				return err;
			}
		}
	}

	/* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs.
	 * These are the MADs used by ib verbs (such as ib_query_gids).
	 */
	if (slave != mlx4_master_func_num(dev) &&
	    !mlx4_vf_smi_enabled(dev, slave, port)) {
		if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
		      smp->method == IB_MGMT_METHOD_GET) || network_view) {
			mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
				 slave, smp->mgmt_class, smp->method,
				 network_view ? "Network" : "Host",
				 be16_to_cpu(smp->attr_id));
			return -EPERM;
		}
	}

	return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
			    vhcr->in_modifier, opcode_modifier,
			    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}

static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
				  struct mlx4_vhcr *vhcr,
				  struct mlx4_cmd_mailbox *inbox,
				  struct mlx4_cmd_mailbox *outbox,
				  struct mlx4_cmd_info *cmd)
{
	return -EPERM;
}

int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
		     struct mlx4_vhcr *vhcr,
		     struct mlx4_cmd_mailbox *inbox,
		     struct mlx4_cmd_mailbox *outbox,
		     struct mlx4_cmd_info *cmd)
{
	u64 in_param;
	u64 out_param;
	int err;

	in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
	out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
	if (cmd->encode_slave_id) {
		in_param &= 0xffffffffffffff00ll;
		in_param |= slave;
	}

	err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
			 vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
			 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	if (cmd->out_is_imm)
		vhcr->out_param = out_param;

	return err;
}
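/* Dispatch table for commands arriving from slaves. Each entry describes the
 * mailbox layout (inbox/outbox/immediate out_param), whether the slave id is
 * encoded into in_param, and the wrapper that mlx4_master_process_vhcr()
 * invokes to verify and/or paravirtualize the command.
 */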
  895. static struct mlx4_cmd_info cmd_info[] = {
  896. {
  897. .opcode = MLX4_CMD_QUERY_FW,
  898. .has_inbox = false,
  899. .has_outbox = true,
  900. .out_is_imm = false,
  901. .encode_slave_id = false,
  902. .verify = NULL,
  903. .wrapper = mlx4_QUERY_FW_wrapper
  904. },
  905. {
  906. .opcode = MLX4_CMD_QUERY_HCA,
  907. .has_inbox = false,
  908. .has_outbox = true,
  909. .out_is_imm = false,
  910. .encode_slave_id = false,
  911. .verify = NULL,
  912. .wrapper = NULL
  913. },
  914. {
  915. .opcode = MLX4_CMD_QUERY_DEV_CAP,
  916. .has_inbox = false,
  917. .has_outbox = true,
  918. .out_is_imm = false,
  919. .encode_slave_id = false,
  920. .verify = NULL,
  921. .wrapper = mlx4_QUERY_DEV_CAP_wrapper
  922. },
  923. {
  924. .opcode = MLX4_CMD_QUERY_FUNC_CAP,
  925. .has_inbox = false,
  926. .has_outbox = true,
  927. .out_is_imm = false,
  928. .encode_slave_id = false,
  929. .verify = NULL,
  930. .wrapper = mlx4_QUERY_FUNC_CAP_wrapper
  931. },
  932. {
  933. .opcode = MLX4_CMD_QUERY_ADAPTER,
  934. .has_inbox = false,
  935. .has_outbox = true,
  936. .out_is_imm = false,
  937. .encode_slave_id = false,
  938. .verify = NULL,
  939. .wrapper = NULL
  940. },
  941. {
  942. .opcode = MLX4_CMD_INIT_PORT,
  943. .has_inbox = false,
  944. .has_outbox = false,
  945. .out_is_imm = false,
  946. .encode_slave_id = false,
  947. .verify = NULL,
  948. .wrapper = mlx4_INIT_PORT_wrapper
  949. },
  950. {
  951. .opcode = MLX4_CMD_CLOSE_PORT,
  952. .has_inbox = false,
  953. .has_outbox = false,
  954. .out_is_imm = false,
  955. .encode_slave_id = false,
  956. .verify = NULL,
  957. .wrapper = mlx4_CLOSE_PORT_wrapper
  958. },
  959. {
  960. .opcode = MLX4_CMD_QUERY_PORT,
  961. .has_inbox = false,
  962. .has_outbox = true,
  963. .out_is_imm = false,
  964. .encode_slave_id = false,
  965. .verify = NULL,
  966. .wrapper = mlx4_QUERY_PORT_wrapper
  967. },
  968. {
  969. .opcode = MLX4_CMD_SET_PORT,
  970. .has_inbox = true,
  971. .has_outbox = false,
  972. .out_is_imm = false,
  973. .encode_slave_id = false,
  974. .verify = NULL,
  975. .wrapper = mlx4_SET_PORT_wrapper
  976. },
  977. {
  978. .opcode = MLX4_CMD_MAP_EQ,
  979. .has_inbox = false,
  980. .has_outbox = false,
  981. .out_is_imm = false,
  982. .encode_slave_id = false,
  983. .verify = NULL,
  984. .wrapper = mlx4_MAP_EQ_wrapper
  985. },
  986. {
  987. .opcode = MLX4_CMD_SW2HW_EQ,
  988. .has_inbox = true,
  989. .has_outbox = false,
  990. .out_is_imm = false,
  991. .encode_slave_id = true,
  992. .verify = NULL,
  993. .wrapper = mlx4_SW2HW_EQ_wrapper
  994. },
  995. {
  996. .opcode = MLX4_CMD_HW_HEALTH_CHECK,
  997. .has_inbox = false,
  998. .has_outbox = false,
  999. .out_is_imm = false,
  1000. .encode_slave_id = false,
  1001. .verify = NULL,
  1002. .wrapper = NULL
  1003. },
  1004. {
  1005. .opcode = MLX4_CMD_NOP,
  1006. .has_inbox = false,
  1007. .has_outbox = false,
  1008. .out_is_imm = false,
  1009. .encode_slave_id = false,
  1010. .verify = NULL,
  1011. .wrapper = NULL
  1012. },
  1013. {
  1014. .opcode = MLX4_CMD_CONFIG_DEV,
  1015. .has_inbox = false,
  1016. .has_outbox = true,
  1017. .out_is_imm = false,
  1018. .encode_slave_id = false,
  1019. .verify = NULL,
  1020. .wrapper = mlx4_CONFIG_DEV_wrapper
  1021. },
  1022. {
  1023. .opcode = MLX4_CMD_ALLOC_RES,
  1024. .has_inbox = false,
  1025. .has_outbox = false,
  1026. .out_is_imm = true,
  1027. .encode_slave_id = false,
  1028. .verify = NULL,
  1029. .wrapper = mlx4_ALLOC_RES_wrapper
  1030. },
  1031. {
  1032. .opcode = MLX4_CMD_FREE_RES,
  1033. .has_inbox = false,
  1034. .has_outbox = false,
  1035. .out_is_imm = false,
  1036. .encode_slave_id = false,
  1037. .verify = NULL,
  1038. .wrapper = mlx4_FREE_RES_wrapper
  1039. },
  1040. {
  1041. .opcode = MLX4_CMD_SW2HW_MPT,
  1042. .has_inbox = true,
  1043. .has_outbox = false,
  1044. .out_is_imm = false,
  1045. .encode_slave_id = true,
  1046. .verify = NULL,
  1047. .wrapper = mlx4_SW2HW_MPT_wrapper
  1048. },
  1049. {
  1050. .opcode = MLX4_CMD_QUERY_MPT,
  1051. .has_inbox = false,
  1052. .has_outbox = true,
  1053. .out_is_imm = false,
  1054. .encode_slave_id = false,
  1055. .verify = NULL,
  1056. .wrapper = mlx4_QUERY_MPT_wrapper
  1057. },
  1058. {
  1059. .opcode = MLX4_CMD_HW2SW_MPT,
  1060. .has_inbox = false,
  1061. .has_outbox = false,
  1062. .out_is_imm = false,
  1063. .encode_slave_id = false,
  1064. .verify = NULL,
  1065. .wrapper = mlx4_HW2SW_MPT_wrapper
  1066. },
  1067. {
  1068. .opcode = MLX4_CMD_READ_MTT,
  1069. .has_inbox = false,
  1070. .has_outbox = true,
  1071. .out_is_imm = false,
  1072. .encode_slave_id = false,
  1073. .verify = NULL,
  1074. .wrapper = NULL
  1075. },
  1076. {
  1077. .opcode = MLX4_CMD_WRITE_MTT,
  1078. .has_inbox = true,
  1079. .has_outbox = false,
  1080. .out_is_imm = false,
  1081. .encode_slave_id = false,
  1082. .verify = NULL,
  1083. .wrapper = mlx4_WRITE_MTT_wrapper
  1084. },
  1085. {
  1086. .opcode = MLX4_CMD_SYNC_TPT,
  1087. .has_inbox = true,
  1088. .has_outbox = false,
  1089. .out_is_imm = false,
  1090. .encode_slave_id = false,
  1091. .verify = NULL,
  1092. .wrapper = NULL
  1093. },
  1094. {
  1095. .opcode = MLX4_CMD_HW2SW_EQ,
  1096. .has_inbox = false,
  1097. .has_outbox = false,
  1098. .out_is_imm = false,
  1099. .encode_slave_id = true,
  1100. .verify = NULL,
  1101. .wrapper = mlx4_HW2SW_EQ_wrapper
  1102. },
  1103. {
  1104. .opcode = MLX4_CMD_QUERY_EQ,
  1105. .has_inbox = false,
  1106. .has_outbox = true,
  1107. .out_is_imm = false,
  1108. .encode_slave_id = true,
  1109. .verify = NULL,
  1110. .wrapper = mlx4_QUERY_EQ_wrapper
  1111. },
  1112. {
  1113. .opcode = MLX4_CMD_SW2HW_CQ,
  1114. .has_inbox = true,
  1115. .has_outbox = false,
  1116. .out_is_imm = false,
  1117. .encode_slave_id = true,
  1118. .verify = NULL,
  1119. .wrapper = mlx4_SW2HW_CQ_wrapper
  1120. },
  1121. {
  1122. .opcode = MLX4_CMD_HW2SW_CQ,
  1123. .has_inbox = false,
  1124. .has_outbox = false,
  1125. .out_is_imm = false,
  1126. .encode_slave_id = false,
  1127. .verify = NULL,
  1128. .wrapper = mlx4_HW2SW_CQ_wrapper
  1129. },
  1130. {
  1131. .opcode = MLX4_CMD_QUERY_CQ,
  1132. .has_inbox = false,
  1133. .has_outbox = true,
  1134. .out_is_imm = false,
  1135. .encode_slave_id = false,
  1136. .verify = NULL,
  1137. .wrapper = mlx4_QUERY_CQ_wrapper
  1138. },
  1139. {
  1140. .opcode = MLX4_CMD_MODIFY_CQ,
  1141. .has_inbox = true,
  1142. .has_outbox = false,
  1143. .out_is_imm = true,
  1144. .encode_slave_id = false,
  1145. .verify = NULL,
  1146. .wrapper = mlx4_MODIFY_CQ_wrapper
  1147. },
  1148. {
  1149. .opcode = MLX4_CMD_SW2HW_SRQ,
  1150. .has_inbox = true,
  1151. .has_outbox = false,
  1152. .out_is_imm = false,
  1153. .encode_slave_id = true,
  1154. .verify = NULL,
  1155. .wrapper = mlx4_SW2HW_SRQ_wrapper
  1156. },
  1157. {
  1158. .opcode = MLX4_CMD_HW2SW_SRQ,
  1159. .has_inbox = false,
  1160. .has_outbox = false,
  1161. .out_is_imm = false,
  1162. .encode_slave_id = false,
  1163. .verify = NULL,
  1164. .wrapper = mlx4_HW2SW_SRQ_wrapper
  1165. },
  1166. {
  1167. .opcode = MLX4_CMD_QUERY_SRQ,
  1168. .has_inbox = false,
  1169. .has_outbox = true,
  1170. .out_is_imm = false,
  1171. .encode_slave_id = false,
  1172. .verify = NULL,
  1173. .wrapper = mlx4_QUERY_SRQ_wrapper
  1174. },
  1175. {
  1176. .opcode = MLX4_CMD_ARM_SRQ,
  1177. .has_inbox = false,
  1178. .has_outbox = false,
  1179. .out_is_imm = false,
  1180. .encode_slave_id = false,
  1181. .verify = NULL,
  1182. .wrapper = mlx4_ARM_SRQ_wrapper
  1183. },
  1184. {
  1185. .opcode = MLX4_CMD_RST2INIT_QP,
  1186. .has_inbox = true,
  1187. .has_outbox = false,
  1188. .out_is_imm = false,
  1189. .encode_slave_id = true,
  1190. .verify = NULL,
  1191. .wrapper = mlx4_RST2INIT_QP_wrapper
  1192. },
  1193. {
  1194. .opcode = MLX4_CMD_INIT2INIT_QP,
  1195. .has_inbox = true,
  1196. .has_outbox = false,
  1197. .out_is_imm = false,
  1198. .encode_slave_id = false,
  1199. .verify = NULL,
  1200. .wrapper = mlx4_INIT2INIT_QP_wrapper
  1201. },
  1202. {
  1203. .opcode = MLX4_CMD_INIT2RTR_QP,
  1204. .has_inbox = true,
  1205. .has_outbox = false,
  1206. .out_is_imm = false,
  1207. .encode_slave_id = false,
  1208. .verify = NULL,
  1209. .wrapper = mlx4_INIT2RTR_QP_wrapper
  1210. },
  1211. {
  1212. .opcode = MLX4_CMD_RTR2RTS_QP,
  1213. .has_inbox = true,
  1214. .has_outbox = false,
  1215. .out_is_imm = false,
  1216. .encode_slave_id = false,
  1217. .verify = NULL,
  1218. .wrapper = mlx4_RTR2RTS_QP_wrapper
  1219. },
  1220. {
  1221. .opcode = MLX4_CMD_RTS2RTS_QP,
  1222. .has_inbox = true,
  1223. .has_outbox = false,
  1224. .out_is_imm = false,
  1225. .encode_slave_id = false,
  1226. .verify = NULL,
  1227. .wrapper = mlx4_RTS2RTS_QP_wrapper
  1228. },
  1229. {
  1230. .opcode = MLX4_CMD_SQERR2RTS_QP,
  1231. .has_inbox = true,
  1232. .has_outbox = false,
  1233. .out_is_imm = false,
  1234. .encode_slave_id = false,
  1235. .verify = NULL,
  1236. .wrapper = mlx4_SQERR2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_2ERR_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_RTS2SQD_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQD2SQD_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SQD2SQD_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQD2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SQD2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_2RST_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_2RST_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_QP,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SUSPEND_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_UNSUSPEND_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_UPDATE_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_UPDATE_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_GET_OP_REQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper,
	},
	{
		.opcode = MLX4_CMD_CONF_SPECIAL_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL, /* XXX verify: only demux can do this */
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_MAD_IFC,
		.has_inbox = true,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_MAD_IFC_wrapper
	},
	{
		.opcode = MLX4_CMD_MAD_DEMUX,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_IF_STAT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_IF_STAT_wrapper
	},
	{
		.opcode = MLX4_CMD_ACCESS_REG,
		.has_inbox = true,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_ACCESS_REG_wrapper,
	},
	{
		.opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper,
	},
	/* Native multicast commands are not available for guests */
	{
		.opcode = MLX4_CMD_QP_ATTACH,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QP_ATTACH_wrapper
	},
	{
		.opcode = MLX4_CMD_PROMISC,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_PROMISC_wrapper
	},
	/* Ethernet specific commands */
	{
		.opcode = MLX4_CMD_SET_VLAN_FLTR,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SET_VLAN_FLTR_wrapper
	},
	{
		.opcode = MLX4_CMD_SET_MCAST_FLTR,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SET_MCAST_FLTR_wrapper
	},
	{
		.opcode = MLX4_CMD_DUMP_ETH_STATS,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_DUMP_ETH_STATS_wrapper
	},
	{
		.opcode = MLX4_CMD_INFORM_FLR_DONE,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	/* flow steering commands */
	{
		.opcode = MLX4_QP_FLOW_STEERING_ATTACH,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = true,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
	},
	{
		.opcode = MLX4_QP_FLOW_STEERING_DETACH,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
	},
	{
		.opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper
	},
	{
		.opcode = MLX4_CMD_VIRT_PORT_MAP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper
	},
};
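
/* Process a command posted by a slave through its virtual HCR (vHCR).
 * Overall flow, as implemented below: DMA the slave's vHCR into the
 * master's buffer (unless an in-kernel vHCR was passed in), decode it
 * into a software mlx4_vhcr, look the opcode up in cmd_info[], copy in
 * the inbox mailbox if the command has one, run the optional verifier,
 * execute either the command's wrapper or the command natively via
 * __mlx4_cmd(), and finally DMA the outbox and the vHCR status back to
 * the slave.
 */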
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
				    struct mlx4_vhcr_cmd *in_vhcr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_info *cmd = NULL;
	struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
	struct mlx4_vhcr *vhcr;
	struct mlx4_cmd_mailbox *inbox = NULL;
	struct mlx4_cmd_mailbox *outbox = NULL;
	u64 in_param;
	u64 out_param;
	int ret = 0;
	int i;
	int err = 0;

	/* Create sw representation of Virtual HCR */
	vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
	if (!vhcr)
		return -ENOMEM;

	/* DMA in the vHCR */
	if (!in_vhcr) {
		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
				      priv->mfunc.master.slave_state[slave].vhcr_dma,
				      ALIGN(sizeof(struct mlx4_vhcr_cmd),
					    MLX4_ACCESS_MEM_ALIGN), 1);
		if (ret) {
			if (!(dev->persist->state &
			      MLX4_DEVICE_STATE_INTERNAL_ERROR))
				mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
					 __func__, ret);
			kfree(vhcr);
			return ret;
		}
	}

	/* Fill SW VHCR fields */
	vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
	vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
	vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
	vhcr->token = be16_to_cpu(vhcr_cmd->token);
	vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
	vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
	vhcr->e_bit = vhcr_cmd->flags & (1 << 6);

	/* Lookup command */
	for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
		if (vhcr->op == cmd_info[i].opcode) {
			cmd = &cmd_info[i];
			break;
		}
	}
	if (!cmd) {
		mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
			 vhcr->op, slave);
		vhcr_cmd->status = CMD_STAT_BAD_PARAM;
		goto out_status;
	}
	/* Read inbox */
	if (cmd->has_inbox) {
		vhcr->in_param &= INBOX_MASK;
		inbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(inbox)) {
			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
			inbox = NULL;
			goto out_status;
		}

		ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
				      vhcr->in_param,
				      MLX4_MAILBOX_SIZE, 1);
		if (ret) {
			if (!(dev->persist->state &
			      MLX4_DEVICE_STATE_INTERNAL_ERROR))
				mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
					 __func__, cmd->opcode);
			vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
			goto out_status;
		}
	}

	/* Apply permission and bound checks if applicable */
	if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
		mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
			  vhcr->op, slave, vhcr->in_modifier);
		vhcr_cmd->status = CMD_STAT_BAD_OP;
		goto out_status;
	}

	/* Allocate outbox */
	if (cmd->has_outbox) {
		outbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(outbox)) {
			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
			outbox = NULL;
			goto out_status;
		}
	}

	/* Execute the command! */
	if (cmd->wrapper) {
		err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
				   cmd);
		if (cmd->out_is_imm)
			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
	} else {
		in_param = cmd->has_inbox ? (u64) inbox->dma :
			vhcr->in_param;
		out_param = cmd->has_outbox ? (u64) outbox->dma :
			vhcr->out_param;
		err = __mlx4_cmd(dev, in_param, &out_param,
				 cmd->out_is_imm, vhcr->in_modifier,
				 vhcr->op_modifier, vhcr->op,
				 MLX4_CMD_TIME_CLASS_A,
				 MLX4_CMD_NATIVE);
		if (cmd->out_is_imm) {
			vhcr->out_param = out_param;
			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
		}
	}

	if (err) {
		if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR))
			mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
				  vhcr->op, slave, vhcr->errno, err);
		vhcr_cmd->status = mlx4_errno_to_status(err);
		goto out_status;
	}

	/* Write outbox if command completed successfully */
	if (cmd->has_outbox && !vhcr_cmd->status) {
		ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
				      vhcr->out_param,
				      MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
		if (ret) {
			/* If we failed to write back the outbox after the
			 * command was successfully executed, we must fail
			 * this slave, as it is now in an undefined state.
			 */
			if (!(dev->persist->state &
			      MLX4_DEVICE_STATE_INTERNAL_ERROR))
				mlx4_err(dev, "%s: Failed writing outbox\n", __func__);
			goto out;
		}
	}
out_status:
	/* DMA back vhcr result */
	if (!in_vhcr) {
		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
				      priv->mfunc.master.slave_state[slave].vhcr_dma,
				      ALIGN(sizeof(struct mlx4_vhcr),
					    MLX4_ACCESS_MEM_ALIGN),
				      MLX4_CMD_WRAPPED);
		if (ret)
			mlx4_err(dev, "%s: Failed writing vhcr result\n",
				 __func__);
		else if (vhcr->e_bit &&
			 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
			mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
				  slave);
	}

out:
	kfree(vhcr);
	mlx4_free_cmd_mailbox(dev, inbox);
	mlx4_free_cmd_mailbox(dev, outbox);
	return ret;
}
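
/* Try to apply a new admin vlan/qos/link-state configuration to a VF
 * immediately, without waiting for the VF to restart. If nothing
 * changed this is a no-op; if the slave is inactive or UPDATE_QP is
 * unsupported, only the link state is recorded. Otherwise the actual
 * per-QP update is deferred to mlx4_vf_immed_vlan_work_handler() on
 * the comm workqueue.
 */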
static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
						   int slave, int port)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_vport_state *vp_admin;
	struct mlx4_vf_immed_vlan_work *work;
	struct mlx4_dev *dev = &(priv->dev);
	int err;
	int admin_vlan_ix = NO_INDX;

	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
	    vp_oper->state.default_qos == vp_admin->default_qos &&
	    vp_oper->state.link_state == vp_admin->link_state)
		return 0;

	if (!(priv->mfunc.master.slave_state[slave].active &&
	      dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
		/* even if the UPDATE_QP command isn't supported, we still want
		 * to set this VF link according to the admin directive
		 */
		vp_oper->state.link_state = vp_admin->link_state;
		return -1;
	}

	mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
		 slave, port);
	mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
		 vp_admin->default_vlan, vp_admin->default_qos,
		 vp_admin->link_state);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
		if (MLX4_VGT != vp_admin->default_vlan) {
			err = __mlx4_register_vlan(&priv->dev, port,
						   vp_admin->default_vlan,
						   &admin_vlan_ix);
			if (err) {
				kfree(work);
				mlx4_warn(&priv->dev,
					  "No vlan resources slave %d, port %d\n",
					  slave, port);
				return err;
			}
		} else {
			admin_vlan_ix = NO_INDX;
		}
		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
		mlx4_dbg(&priv->dev,
			 "alloc vlan %d idx %d slave %d port %d\n",
			 (int)(vp_admin->default_vlan),
			 admin_vlan_ix, slave, port);
	}

	/* save original vlan ix and vlan id */
	work->orig_vlan_id = vp_oper->state.default_vlan;
	work->orig_vlan_ix = vp_oper->vlan_idx;

	/* handle new qos */
	if (vp_oper->state.default_qos != vp_admin->default_qos)
		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;

	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
		vp_oper->vlan_idx = admin_vlan_ix;
	vp_oper->state.default_vlan = vp_admin->default_vlan;
	vp_oper->state.default_qos = vp_admin->default_qos;
	vp_oper->state.link_state = vp_admin->link_state;

	if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;

	/* iterate over QPs owned by this slave, using UPDATE_QP */
	work->port = port;
	work->slave = slave;
	work->qos = vp_oper->state.default_qos;
	work->vlan_id = vp_oper->state.default_vlan;
	work->vlan_ix = vp_oper->vlan_idx;
	work->priv = priv;
	INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
	queue_work(priv->mfunc.master.comm_wq, &work->work);

	return 0;
}
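
/* Copy the admin (requested) vport state into the operational state for
 * each of the slave's active ports, registering the default vlan and,
 * when spoof checking is on, the MAC with the device as needed.
 */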
static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
{
	int port, err;
	struct mlx4_vport_state *vp_admin;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
			&priv->dev, slave);
	int min_port = find_first_bit(actv_ports.ports,
				      priv->dev.caps.num_ports) + 1;
	int max_port = min_port - 1 +
		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);

	for (port = min_port; port <= max_port; port++) {
		if (!test_bit(port - 1, actv_ports.ports))
			continue;
		priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
			priv->mfunc.master.vf_admin[slave].enable_smi[port];
		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
		vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
		vp_oper->state = *vp_admin;
		if (MLX4_VGT != vp_admin->default_vlan) {
			err = __mlx4_register_vlan(&priv->dev, port,
						   vp_admin->default_vlan,
						   &(vp_oper->vlan_idx));
			if (err) {
				vp_oper->vlan_idx = NO_INDX;
				mlx4_warn(&priv->dev,
					  "No vlan resources slave %d, port %d\n",
					  slave, port);
				return err;
			}
			mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
				 (int)(vp_oper->state.default_vlan),
				 vp_oper->vlan_idx, slave, port);
		}
		if (vp_admin->spoofchk) {
			vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
							       port,
							       vp_admin->mac);
			if (0 > vp_oper->mac_idx) {
				err = vp_oper->mac_idx;
				vp_oper->mac_idx = NO_INDX;
				mlx4_warn(&priv->dev,
					  "No mac resources slave %d, port %d\n",
					  slave, port);
				return err;
			}
			mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n",
				 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
		}
	}
	return 0;
}
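
/* Undo mlx4_master_activate_admin_state(): release any vlan and MAC
 * registrations held by the slave's operational vport state and disable
 * SMI on all of its active ports.
 */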
static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
{
	int port;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
			&priv->dev, slave);
	int min_port = find_first_bit(actv_ports.ports,
				      priv->dev.caps.num_ports) + 1;
	int max_port = min_port - 1 +
		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);

	for (port = min_port; port <= max_port; port++) {
		if (!test_bit(port - 1, actv_ports.ports))
			continue;
		priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
			MLX4_VF_SMI_DISABLED;
		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
		if (NO_INDX != vp_oper->vlan_idx) {
			__mlx4_unregister_vlan(&priv->dev,
					       port, vp_oper->state.default_vlan);
			vp_oper->vlan_idx = NO_INDX;
		}
		if (NO_INDX != vp_oper->mac_idx) {
			__mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
			vp_oper->mac_idx = NO_INDX;
		}
	}
}
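
/* Handle a single command received on the comm channel from a slave.
 * The one-bit toggle in the channel word must alternate on every
 * command; a toggle mismatch, an out-of-order command, or a failure to
 * process the command resets the slave (see reset_slave below).
 */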
static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
			       u16 param, u8 toggle)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	u32 reply;
	u8 is_going_down = 0;
	int i;
	unsigned long flags;

	slave_state[slave].comm_toggle ^= 1;
	reply = (u32) slave_state[slave].comm_toggle << 31;
	if (toggle != slave_state[slave].comm_toggle) {
		mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
			  toggle, slave);
		goto reset_slave;
	}
	if (cmd == MLX4_COMM_CMD_RESET) {
		mlx4_warn(dev, "Received reset from slave:%d\n", slave);
		slave_state[slave].active = false;
		slave_state[slave].old_vlan_api = false;
		mlx4_master_deactivate_admin_state(priv, slave);
		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
			slave_state[slave].event_eq[i].eqn = -1;
			slave_state[slave].event_eq[i].token = 0;
		}
		/* Check if we are in the middle of an FLR process;
		 * if so, return "retry" status to the slave.
		 */
		if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
			goto inform_slave_state;

		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);

		/* write the version in the event field */
		reply |= mlx4_comm_get_version();

		goto reset_slave;
	}
	/* command from slave in the middle of FLR */
	if (cmd != MLX4_COMM_CMD_RESET &&
	    MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
		mlx4_warn(dev, "slave:%d is trying to run cmd(0x%x) in the middle of FLR\n",
			  slave, cmd);
		return;
	}
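	/* The slave hands the master the DMA address of its vHCR in four
	 * 16-bit chunks, most significant first: VHCR0 (bits 63:48),
	 * VHCR1 (47:32), VHCR2 (31:16) and VHCR_EN (15:0). VHCR_EN also
	 * activates the slave; VHCR_POST then asks the master to process
	 * the posted vHCR.
	 */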
	switch (cmd) {
	case MLX4_COMM_CMD_VHCR0:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
			goto reset_slave;
		slave_state[slave].vhcr_dma = ((u64) param) << 48;
		priv->mfunc.master.slave_state[slave].cookie = 0;
		mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
		break;
	case MLX4_COMM_CMD_VHCR1:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= ((u64) param) << 32;
		break;
	case MLX4_COMM_CMD_VHCR2:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= ((u64) param) << 16;
		break;
	case MLX4_COMM_CMD_VHCR_EN:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= param;
		if (mlx4_master_activate_admin_state(priv, slave))
			goto reset_slave;
		slave_state[slave].active = true;
		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
		break;
	case MLX4_COMM_CMD_VHCR_POST:
		if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
		    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
			mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
				  slave, cmd, slave_state[slave].last_cmd);
			goto reset_slave;
		}

		mutex_lock(&priv->cmd.slave_cmd_mutex);
		if (mlx4_master_process_vhcr(dev, slave, NULL)) {
			mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
				 slave);
			mutex_unlock(&priv->cmd.slave_cmd_mutex);
			goto reset_slave;
		}
		mutex_unlock(&priv->cmd.slave_cmd_mutex);
		break;
	default:
		mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
		goto reset_slave;
	}

	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
	if (!slave_state[slave].is_slave_going_down)
		slave_state[slave].last_cmd = cmd;
	else
		is_going_down = 1;
	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
	if (is_going_down) {
		mlx4_warn(dev, "Slave is going down, aborting command(%d) executing from slave:%d\n",
			  cmd, slave);
		return;
	}
	__raw_writel((__force u32) cpu_to_be32(reply),
		     &priv->mfunc.comm[slave].slave_read);
	mmiowb();

	return;

reset_slave:
	/* cleanup any slave resources */
	if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_delete_all_resources_for_slave(dev, slave);

	if (cmd != MLX4_COMM_CMD_RESET) {
		mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
			  slave, cmd);
		/* Turn on internal error, letting the slave reset itself
		 * immediately; otherwise it might take until the command
		 * timeout passes.
		 */
		reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
	}

	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
	if (!slave_state[slave].is_slave_going_down)
		slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
	/* With the slave in the middle of FLR, there is no need to clean
	 * resources again.
	 */
inform_slave_state:
	memset(&slave_state[slave].event_eq, 0,
	       sizeof(struct mlx4_slave_event_eq_info));
	__raw_writel((__force u32) cpu_to_be32(reply),
		     &priv->mfunc.comm[slave].slave_read);
	wmb();
}

/* master command processing */
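/* Scan the armed-slave bit vector; for each slave whose bit is set,
 * read its comm-channel word and, if the write toggle differs from the
 * last toggle we acked in slave_read, dispatch the encoded command
 * (bits 23:16) and parameter (bits 15:0) to mlx4_master_do_cmd().
 */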
void mlx4_master_comm_channel(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work,
			     struct mlx4_mfunc_master_ctx,
			     comm_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv =
		container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	__be32 *bit_vec;
	u32 comm_cmd;
	u32 vec;
	int i, j, slave;
	int toggle;
	int served = 0;
	int reported = 0;
	u32 slt;

	bit_vec = master->comm_arm_bit_vector;
	for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
		vec = be32_to_cpu(bit_vec[i]);
		for (j = 0; j < 32; j++) {
			if (!(vec & (1 << j)))
				continue;
			++reported;
			slave = (i * 32) + j;
			comm_cmd = swab32(readl(
					  &mfunc->comm[slave].slave_write));
			slt = swab32(readl(&mfunc->comm[slave].slave_read))
				>> 31;
			toggle = comm_cmd >> 31;
			if (toggle != slt) {
				if (master->slave_state[slave].comm_toggle
				    != slt) {
					pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resyncing.\n",
						slave, slt,
						master->slave_state[slave].comm_toggle);
					master->slave_state[slave].comm_toggle =
						slt;
				}
				mlx4_master_do_cmd(dev, slave,
						   comm_cmd >> 16 & 0xff,
						   comm_cmd & 0xffff, toggle);
				++served;
			}
		}
	}

	if (reported && reported != served)
		mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
			  reported, served);

	if (mlx4_ARM_COMM_CHANNEL(dev))
		mlx4_warn(dev, "Failed to arm comm channel events\n");
}
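
/* Slave-side helper: poll until the toggle bit in slave_read matches
 * the one in slave_write, then adopt it as the current comm_toggle. An
 * all-ones readback is treated as the PCI device being temporarily
 * offline. If the toggles never converge, zero both channel words to
 * recover the channel.
 */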
static int sync_toggles(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 wr_toggle;
	u32 rd_toggle;
	unsigned long end;

	wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write));
	if (wr_toggle == 0xffffffff)
		end = jiffies + msecs_to_jiffies(30000);
	else
		end = jiffies + msecs_to_jiffies(5000);

	while (time_before(jiffies, end)) {
		rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
		if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
			/* PCI might be offline */
			msleep(100);
			wr_toggle = swab32(readl(&priv->mfunc.comm->
					   slave_write));
			continue;
		}

		if (rd_toggle >> 31 == wr_toggle >> 31) {
			priv->cmd.comm_toggle = rd_toggle >> 31;
			return 0;
		}

		cond_resched();
	}

	/*
	 * We could reach here if, for example, the previous VM using this
	 * function misbehaved and left the channel in an unsynced state. We
	 * should fix this here and give this VM a chance to use a properly
	 * synced channel.
	 */
	mlx4_warn(dev, "recovering from previously misbehaved VM\n");
	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
	priv->cmd.comm_toggle = 0;

	return 0;
}
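
/* Map the comm channel and, on the master, allocate and initialize the
 * per-slave state (slave_state, vf_admin, vf_oper and per-port vlan
 * filters), the comm/FLR work items and the "mlx4_comm" workqueue, and
 * start the resource tracker. A slave only needs to sync the channel
 * toggles.
 */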
int mlx4_multi_func_init(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i, j, err, port;

	if (mlx4_is_master(dev))
		priv->mfunc.comm =
			ioremap(pci_resource_start(dev->persist->pdev,
						   priv->fw.comm_bar) +
				priv->fw.comm_base, MLX4_COMM_PAGESIZE);
	else
		priv->mfunc.comm =
			ioremap(pci_resource_start(dev->persist->pdev, 2) +
				MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
	if (!priv->mfunc.comm) {
		mlx4_err(dev, "Couldn't map communication vector\n");
		goto err_vhcr;
	}

	if (mlx4_is_master(dev)) {
		priv->mfunc.master.slave_state =
			kzalloc(dev->num_slaves *
				sizeof(struct mlx4_slave_state), GFP_KERNEL);
		if (!priv->mfunc.master.slave_state)
			goto err_comm;

		priv->mfunc.master.vf_admin =
			kzalloc(dev->num_slaves *
				sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
		if (!priv->mfunc.master.vf_admin)
			goto err_comm_admin;

		priv->mfunc.master.vf_oper =
			kzalloc(dev->num_slaves *
				sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
		if (!priv->mfunc.master.vf_oper)
			goto err_comm_oper;

		for (i = 0; i < dev->num_slaves; ++i) {
			s_state = &priv->mfunc.master.slave_state[i];
			s_state->last_cmd = MLX4_COMM_CMD_RESET;
			for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
				s_state->event_eq[j].eqn = -1;
			__raw_writel((__force u32) 0,
				     &priv->mfunc.comm[i].slave_write);
			__raw_writel((__force u32) 0,
				     &priv->mfunc.comm[i].slave_read);
			mmiowb();
			for (port = 1; port <= MLX4_MAX_PORTS; port++) {
				s_state->vlan_filter[port] =
					kzalloc(sizeof(struct mlx4_vlan_fltr),
						GFP_KERNEL);
				if (!s_state->vlan_filter[port]) {
					if (--port)
						kfree(s_state->vlan_filter[port]);
					goto err_slaves;
				}
				INIT_LIST_HEAD(&s_state->mcast_filters[port]);
				priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT;
				priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT;
				priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX;
				priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX;
			}
			spin_lock_init(&s_state->lock);
		}

		memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
		INIT_WORK(&priv->mfunc.master.comm_work,
			  mlx4_master_comm_channel);
		INIT_WORK(&priv->mfunc.master.slave_event_work,
			  mlx4_gen_slave_eqe);
		INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
			  mlx4_master_handle_slave_flr);
		spin_lock_init(&priv->mfunc.master.slave_state_lock);
		spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
		priv->mfunc.master.comm_wq =
			create_singlethread_workqueue("mlx4_comm");
		if (!priv->mfunc.master.comm_wq)
			goto err_slaves;

		if (mlx4_init_resource_tracker(dev))
			goto err_thread;

	} else {
		err = sync_toggles(dev);
		if (err) {
			mlx4_err(dev, "Couldn't sync toggles\n");
			goto err_comm;
		}
	}
	return 0;

err_thread:
	flush_workqueue(priv->mfunc.master.comm_wq);
	destroy_workqueue(priv->mfunc.master.comm_wq);
err_slaves:
	while (--i) {
		for (port = 1; port <= MLX4_MAX_PORTS; port++)
			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
	}
	kfree(priv->mfunc.master.vf_oper);
err_comm_oper:
	kfree(priv->mfunc.master.vf_admin);
err_comm_admin:
	kfree(priv->mfunc.master.slave_state);
err_comm:
	iounmap(priv->mfunc.comm);
err_vhcr:
	dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
			  priv->mfunc.vhcr,
			  priv->mfunc.vhcr_dma);
	priv->mfunc.vhcr = NULL;
	return -ENOMEM;
}
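
/* One-time command interface setup. Each piece (struct init, HCR
 * mapping on non-slaves, vHCR page for multi-function devices, mailbox
 * pci_pool) is set up only if not already present, and the flags mask
 * records what must be torn down by mlx4_cmd_cleanup() on failure.
 */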
int mlx4_cmd_init(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int flags = 0;

	if (!priv->cmd.initialized) {
		mutex_init(&priv->cmd.slave_cmd_mutex);
		sema_init(&priv->cmd.poll_sem, 1);
		priv->cmd.use_events = 0;
		priv->cmd.toggle = 1;
		priv->cmd.initialized = 1;
		flags |= MLX4_CMD_CLEANUP_STRUCT;
	}

	if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
		priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
					0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
		if (!priv->cmd.hcr) {
			mlx4_err(dev, "Couldn't map command register\n");
			goto err;
		}
		flags |= MLX4_CMD_CLEANUP_HCR;
	}

	if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
		priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
						      PAGE_SIZE,
						      &priv->mfunc.vhcr_dma,
						      GFP_KERNEL);
		if (!priv->mfunc.vhcr)
			goto err;
		flags |= MLX4_CMD_CLEANUP_VHCR;
	}

	if (!priv->cmd.pool) {
		priv->cmd.pool = pci_pool_create("mlx4_cmd",
						 dev->persist->pdev,
						 MLX4_MAILBOX_SIZE,
						 MLX4_MAILBOX_SIZE, 0);
		if (!priv->cmd.pool)
			goto err;
		flags |= MLX4_CMD_CLEANUP_POOL;
	}
	return 0;

err:
	mlx4_cmd_cleanup(dev, flags);
	return -ENOMEM;
}
void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int slave;
	u32 slave_read;

	/* Report an internal error event to all
	 * communication channels.
	 */
	for (slave = 0; slave < dev->num_slaves; slave++) {
		slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
		slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
		__raw_writel((__force u32)cpu_to_be32(slave_read),
			     &priv->mfunc.comm[slave].slave_read);
		/* Make sure that our comm channel write doesn't
		 * get mixed in with writes from another CPU.
		 */
		mmiowb();
	}
}

void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, port;

	if (mlx4_is_master(dev)) {
		flush_workqueue(priv->mfunc.master.comm_wq);
		destroy_workqueue(priv->mfunc.master.comm_wq);
		for (i = 0; i < dev->num_slaves; i++) {
			for (port = 1; port <= MLX4_MAX_PORTS; port++)
				kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
		}
		kfree(priv->mfunc.master.slave_state);
		kfree(priv->mfunc.master.vf_admin);
		kfree(priv->mfunc.master.vf_oper);
		dev->num_slaves = 0;
	}

	iounmap(priv->mfunc.comm);
}

void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
		pci_pool_destroy(priv->cmd.pool);
		priv->cmd.pool = NULL;
	}

	if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
	    (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
		iounmap(priv->cmd.hcr);
		priv->cmd.hcr = NULL;
	}
	if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
	    (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
		priv->mfunc.vhcr = NULL;
	}
	if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
		priv->cmd.initialized = 0;
}

/*
 * Switch to using events to issue FW commands (can only be called
 * after event queue for command events has been initialized).
 */
int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int err = 0;

	priv->cmd.context = kmalloc(priv->cmd.max_cmds *
				    sizeof(struct mlx4_cmd_context),
				    GFP_KERNEL);
	if (!priv->cmd.context)
		return -ENOMEM;

	for (i = 0; i < priv->cmd.max_cmds; ++i) {
		priv->cmd.context[i].token = i;
		priv->cmd.context[i].next = i + 1;
		/* To support fatal error flow, initialize all
		 * cmd contexts to allow simulating completions
		 * with complete() at any time.
		 */
		init_completion(&priv->cmd.context[i].done);
	}

	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
	priv->cmd.free_head = 0;

	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
	spin_lock_init(&priv->cmd.context_lock);
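
	/* token_mask becomes the smallest (power of two) - 1 that covers
	 * max_cmds, so a token can be masked down to a context index.
	 */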
	for (priv->cmd.token_mask = 1;
	     priv->cmd.token_mask < priv->cmd.max_cmds;
	     priv->cmd.token_mask <<= 1)
		; /* nothing */
	--priv->cmd.token_mask;

	down(&priv->cmd.poll_sem);
	priv->cmd.use_events = 1;

	return err;
}

/*
 * Switch back to polling (used when shutting down the device)
 */
void mlx4_cmd_use_polling(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	priv->cmd.use_events = 0;

	for (i = 0; i < priv->cmd.max_cmds; ++i)
		down(&priv->cmd.event_sem);

	kfree(priv->cmd.context);

	up(&priv->cmd.poll_sem);
}

struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
{
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
				      &mailbox->dma);
	if (!mailbox->buf) {
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}

	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);

	return mailbox;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);

void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
			   struct mlx4_cmd_mailbox *mailbox)
{
	if (!mailbox)
		return;

	pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);

u32 mlx4_comm_get_version(void)
{
	return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
}

static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
{
	if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
		mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
			 vf, dev->persist->num_vfs);
		return -EINVAL;
	}

	return vf + 1;
}

int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
{
	if (slave < 1 || slave > dev->persist->num_vfs) {
		mlx4_err(dev,
			 "Bad slave number:%d (number of activated slaves: %lu)\n",
			 slave, dev->num_slaves);
		return -EINVAL;
	}
	return slave - 1;
}

void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_context *context;
	int i;

	spin_lock(&priv->cmd.context_lock);
	if (priv->cmd.context) {
		for (i = 0; i < priv->cmd.max_cmds; ++i) {
			context = &priv->cmd.context[i];
			context->fw_status = CMD_STAT_INTERNAL_ERR;
			context->result =
				mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
			complete(&context->done);
		}
	}
	spin_unlock(&priv->cmd.context_lock);
}
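
/* Return the bitmap of physical ports visible to @slave: all ports for
 * the PF (slave 0), otherwise the VF's assigned port range from
 * dev->dev_vfs.
 */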
struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
{
	struct mlx4_active_ports actv_ports;
	int vf;

	bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);

	if (slave == 0) {
		bitmap_fill(actv_ports.ports, dev->caps.num_ports);
		return actv_ports;
	}

	vf = mlx4_get_vf_indx(dev, slave);
	if (vf < 0)
		return actv_ports;

	bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
		   min((int)dev->dev_vfs[vf].n_ports,
		       dev->caps.num_ports));

	return actv_ports;
}
EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
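
/* Map a slave-relative port number onto the physical port numbering,
 * shifting past any physical ports that precede the slave's first
 * active port.
 */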
int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
{
	unsigned n;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);

	if (port <= 0 || port > m)
		return -EINVAL;

	n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
	if (port <= n)
		port = n + 1;

	return port;
}
EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);

int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	if (test_bit(port - 1, actv_ports.ports))
		return port -
			find_first_bit(actv_ports.ports, dev->caps.num_ports);

	return -1;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);

struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
						   int port)
{
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;

	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);

	if (port <= 0 || port > dev->caps.num_ports)
		return slaves_pport;

	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, i);
		if (test_bit(port - 1, actv_ports.ports))
			set_bit(i, slaves_pport.slaves);
	}

	return slaves_pport;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);

struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
		struct mlx4_dev *dev,
		const struct mlx4_active_ports *crit_ports)
{
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;

	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);

	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, i);
		if (bitmap_equal(crit_ports->ports, actv_ports.ports,
				 dev->caps.num_ports))
			set_bit(i, slaves_pport.slaves);
	}

	return slaves_pport;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
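
/* Clamp @port into the slave's active port range so callers can safely
 * index per-port admin state.
 */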
static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
			+ 1;
	int max_port = min_port +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports);

	if (port < min_port)
		port = min_port;
	else if (port >= max_port)
		port = max_port - 1;

	return port;
}

int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	s_info->mac = mac;
	mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
		  vf, port, s_info->mac);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *vf_admin;
	int slave;

	if ((!mlx4_is_master(dev)) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
		return -EPROTONOSUPPORT;

	if ((vlan > 4095) || (qos > 7))
		return -EINVAL;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	if ((0 == vlan) && (0 == qos))
		vf_admin->default_vlan = MLX4_VGT;
	else
		vf_admin->default_vlan = vlan;
	vf_admin->default_qos = qos;

	if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
		mlx4_info(dev,
			  "updating vf %d port %d config will take effect on next VF restart\n",
			  vf, port);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
/* mlx4_get_slave_default_vlan -
 * return true if the slave is in VST mode (has a default vlan);
 * if so, also return the vlan and qos through the pointers (if not NULL)
 */
bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
				 u16 *vlan, u8 *qos)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;

	priv = mlx4_priv(dev);
	port = mlx4_slaves_closest_port(dev, slave, port);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		if (vlan)
			*vlan = vp_oper->state.default_vlan;
		if (qos)
			*qos = vp_oper->state.default_qos;
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);

int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if ((!mlx4_is_master(dev)) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	s_info->spoofchk = setting;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);

int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	ivf->vf = vf;

	/* unpack the 64-bit admin MAC into the byte array, MSB first */
	ivf->mac[0] = ((s_info->mac >> (5 * 8)) & 0xff);
	ivf->mac[1] = ((s_info->mac >> (4 * 8)) & 0xff);
	ivf->mac[2] = ((s_info->mac >> (3 * 8)) & 0xff);
	ivf->mac[3] = ((s_info->mac >> (2 * 8)) & 0xff);
	ivf->mac[4] = ((s_info->mac >> (1 * 8)) & 0xff);
	ivf->mac[5] = ((s_info->mac) & 0xff);

	ivf->vlan = s_info->default_vlan;
	ivf->qos = s_info->default_qos;
	ivf->max_tx_rate = s_info->tx_rate;
	ivf->min_tx_rate = 0;
	ivf->spoofchk = s_info->spoofchk;
	ivf->linkstate = s_info->link_state;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;
	u8 link_stat_event;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		/* get current link state */
		if (!priv->sense.do_sense_port[port])
			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		else
			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;

	case IFLA_VF_LINK_STATE_ENABLE:
		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		break;

	case IFLA_VF_LINK_STATE_DISABLE:
		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;

	default:
		mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
			  link_state, slave, port);
		return -EINVAL;
	}
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	s_info->link_state = link_state;

	/* send event */
	mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);

	if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
		mlx4_dbg(dev,
			 "updating vf %d port %d no link state HW enforcement\n",
			 vf, port);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS)
		return 0;

	return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
		MLX4_VF_SMI_ENABLED;
}
EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled);

int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave == mlx4_master_func_num(dev))
		return 1;

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS)
		return 0;

	return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
		MLX4_VF_SMI_ENABLED;
}
EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin);

int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
				 int enabled)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave == mlx4_master_func_num(dev))
		return 0;

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS ||
	    enabled < 0 || enabled > 1)
		return -EINVAL;

	priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin);