vnic_dev.c

/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>

#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_stats.h"
#include "enic.h"

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}

static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		vdev_err(vdev, "vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh  = bar->vaddr;
	mrh = bar->vaddr;
	if (!rh) {
		vdev_err(vdev, "vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* Check for mgmt vnic in addition to normal vnic */
	if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
	    (ioread32(&rh->version) != VNIC_RES_VERSION)) {
		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
		    (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
			vdev_err(vdev, "vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
				 VNIC_RES_MAGIC, VNIC_RES_VERSION,
				 MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
				 ioread32(&rh->magic), ioread32(&rh->version));
			return -EINVAL;
		}
	}

	if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
		r = (struct vnic_resource __iomem *)(mrh + 1);
	else
		r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar[bar_num].len) {
				vdev_err(vdev, "vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
					 type, bar_offset, len,
					 bar[bar_num].len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
		case RES_TYPE_DEVCMD2:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
			bar_offset;
		vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
	}

	return 0;
}

unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}
EXPORT_SYMBOL(vnic_dev_get_res_count);

void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}
EXPORT_SYMBOL(vnic_dev_get_res);

static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors.  A
	 * count of 0 means the maximum 4096 descriptors.  Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = ALIGN(desc_count, count_align);

	ring->desc_size = ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}
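
/* For illustration: with desc_count = 0 and desc_size = 16, the sizing
 * above yields desc_count = 4096, size = 4096 * 16 = 65536 bytes, and
 * size_unaligned = 65536 + 512, leaving room to round the base address
 * up to a 512-byte boundary.
 */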
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
		ring->size_unaligned,
		&ring->base_addr_unaligned);

	if (!ring->descs_unaligned) {
		vdev_err(vdev, "Failed to allocate ring (size=%d), aborting\n",
			 (int)ring->size);
		return -ENOMEM;
	}

	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}

void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		pci_free_consistent(vdev->pdev,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}

static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	u32 status;
	int err;

	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {
		vdev_neterr(vdev, "Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		wmb();
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {
			if (status & STAT_ERROR) {
				err = (int)readq(&devcmd->args[0]);
				if (err == ERR_EINVAL &&
				    cmd == CMD_CAPABILITY)
					return -err;
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					vdev_neterr(vdev, "Error %d devcmd %d\n",
						    err, _CMD_N(cmd));
				return -err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb();
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

	vdev_neterr(vdev, "Timedout devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
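
/* devcmd2 transport: unlike _vnic_dev_cmd() above, which talks to firmware
 * through a fixed register window, _vnic_dev_cmd2() below posts each
 * command as a descriptor on a work queue and polls a results ring for
 * the completion, matching it by a color bit that flips each time the
 * ring wraps.
 */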
static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;
	struct devcmd2_result *result;
	u8 color;
	unsigned int i;
	int delay, err;
	u32 fetch_index, new_posted;
	u32 posted = dc2c->posted;

	fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index);

	if (fetch_index == 0xFFFFFFFF)
		return -ENODEV;

	new_posted = (posted + 1) % DEVCMD2_RING_SIZE;

	if (new_posted == fetch_index) {
		vdev_neterr(vdev, "devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n",
			    _CMD_N(cmd), fetch_index, posted);
		return -EBUSY;
	}
	dc2c->cmd_ring[posted].cmd = cmd;
	dc2c->cmd_ring[posted].flags = 0;

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE)
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			dc2c->cmd_ring[posted].args[i] = vdev->args[i];

	/* Adding write memory barrier prevents compiler and/or CPU reordering,
	 * thus avoiding descriptor posting before descriptor is initialized.
	 * Otherwise, hardware can read stale descriptor fields.
	 */
	wmb();
	iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);
	dc2c->posted = new_posted;

	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
		return 0;

	result = dc2c->result + dc2c->next_result;
	color = dc2c->color;

	dc2c->next_result++;
	if (dc2c->next_result == dc2c->result_size) {
		dc2c->next_result = 0;
		dc2c->color = dc2c->color ? 0 : 1;
	}

	for (delay = 0; delay < wait; delay++) {
		if (result->color == color) {
			if (result->error) {
				err = result->error;
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					vdev_neterr(vdev, "Error %d devcmd %d\n",
						    err, _CMD_N(cmd));
				return -err;
			}
			if (_CMD_DIR(cmd) & _CMD_DIR_READ)
				for (i = 0; i < VNIC_DEVCMD2_NARGS; i++)
					vdev->args[i] = result->results[i];

			return 0;
		}
		udelay(100);
	}

	vdev_neterr(vdev, "devcmd %d timed out\n", _CMD_N(cmd));

	return -ETIMEDOUT;
}

static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
{
	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		return -ENODEV;
	vdev->devcmd_rtn = _vnic_dev_cmd;

	return 0;
}

static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
	int err;
	unsigned int fetch_index;

	if (vdev->devcmd2)
		return 0;

	vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_KERNEL);
	if (!vdev->devcmd2)
		return -ENOMEM;

	vdev->devcmd2->color = 1;
	vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;
	err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE,
				    DEVCMD2_DESC_SIZE);
	if (err)
		goto err_free_devcmd2;

	fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
		vdev_err(vdev, "Fatal error in devcmd2 init - hardware surprise removal\n");
		err = -ENODEV;
		goto err_free_wq;
	}

	enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0,
			   0);
	vdev->devcmd2->posted = fetch_index;
	vnic_wq_enable(&vdev->devcmd2->wq);

	err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
				       DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
	if (err)
		goto err_disable_wq;

	vdev->devcmd2->result = vdev->devcmd2->results_ring.descs;
	vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs;
	vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
	vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr |
			VNIC_PADDR_TARGET;
	vdev->args[1] = DEVCMD2_RING_SIZE;

	err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
	if (err)
		goto err_free_desc_ring;

	vdev->devcmd_rtn = _vnic_dev_cmd2;

	return 0;

err_free_desc_ring:
	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
err_disable_wq:
	vnic_wq_disable(&vdev->devcmd2->wq);
err_free_wq:
	vnic_wq_free(&vdev->devcmd2->wq);
err_free_devcmd2:
	kfree(vdev->devcmd2);
	vdev->devcmd2 = NULL;

	return err;
}

static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
	vnic_wq_disable(&vdev->devcmd2->wq);
	vnic_wq_free(&vdev->devcmd2->wq);
	kfree(vdev->devcmd2);
}

static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	u32 status;
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index;
	vdev->args[1] = cmd;
	vdev->args[2] = *a0;
	vdev->args[3] = *a1;

	err = vdev->devcmd_rtn(vdev, proxy_cmd, wait);
	if (err)
		return err;

	status = (u32)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			vdev_neterr(vdev, "Error %d proxy devcmd %d\n",
				    err, _CMD_N(cmd));
		return err;
	}

	*a0 = vdev->args[1];
	*a1 = vdev->args[2];

	return 0;
}

static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
{
	int err;

	vdev->args[0] = *a0;
	vdev->args[1] = *a1;

	err = vdev->devcmd_rtn(vdev, cmd, wait);

	*a0 = vdev->args[0];
	*a1 = vdev->args[1];

	return err;
}

void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index)
{
	vdev->proxy = PROXY_BY_INDEX;
	vdev->proxy_index = index;
}

void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
{
	vdev->proxy = PROXY_NONE;
	vdev->proxy_index = 0;
}

int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
				a0, a1, wait);
	case PROXY_BY_BDF:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
				a0, a1, wait);
	case PROXY_NONE:
	default:
		return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
	}
}
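
/* Typical call pattern for vnic_dev_cmd(), as used by the wrappers below
 * (illustrative sketch, not part of the original file):
 *
 *	u64 a0 = 0, a1 = 0;
 *	int wait = 1000;	(polled in 100 usec steps, so ~100 ms max)
 *	err = vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
 *
 * a0/a1 carry arguments in and, for read-direction commands, results out.
 */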
static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
	u64 a0 = (u32)cmd, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

	return !(err || a0);
}

int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = pci_zalloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa);
		if (!vdev->fw_info)
			return -ENOMEM;

		a0 = vdev->fw_info_pa;
		a1 = sizeof(struct vnic_devcmd_fw_info);

		/* only get fw_info once and cache it */
		if (vnic_dev_capable(vdev, CMD_MCPU_FW_INFO))
			err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO,
					   &a0, &a1, wait);
		else
			err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
					   &a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}

int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
	void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1: *(u8 *)value = (u8)a0; break;
	case 2: *(u16 *)value = (u16)a0; break;
	case 4: *(u32 *)value = (u32)a0; break;
	case 8: *(u64 *)value = a0; break;
	default: BUG(); break;
	}

	return err;
}

int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->stats) {
		vdev->stats = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_stats), &vdev->stats_pa);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}

int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
		return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	else
		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}

int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int err;

	if (vnic_dev_capable(vdev, CMD_HANG_RESET)) {
		return vnic_dev_cmd(vdev, CMD_HANG_RESET,
				&a0, &a1, wait);
	} else {
		err = vnic_dev_soft_reset(vdev, arg);
		if (err)
			return err;
		return vnic_dev_init(vdev, 0);
	}
}

int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	if (vnic_dev_capable(vdev, CMD_HANG_RESET_STATUS)) {
		err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS,
				&a0, &a1, wait);
		if (err)
			return err;
	} else {
		return vnic_dev_soft_reset_done(vdev, done);
	}

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}

int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0, a1;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];

	return 0;
}

int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		vdev_neterr(vdev, "Can't set packet filter\n");

	return err;
}

int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		vdev_neterr(vdev, "Can't add addr [%pM], %d\n", addr, err);

	return err;
}

int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		vdev_neterr(vdev, "Can't del addr [%pM], %d\n", addr, err);

	return err;
}

int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
	u8 ig_vlan_rewrite_mode)
{
	u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
		return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
				&a0, &a1, wait);
	else
		return 0;
}

static int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
	void *notify_addr, dma_addr_t notify_pa, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;
	int r;

	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	vdev->notify = notify_addr;
	vdev->notify_pa = notify_pa;

	a0 = (u64)notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	vdev->notify_sz = (r == 0) ? (u32)a1 : 0;

	return r;
}

int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	void *notify_addr;
	dma_addr_t notify_pa;

	if (vdev->notify || vdev->notify_pa) {
		vdev_neterr(vdev, "notify block %p still allocated\n",
			    vdev->notify);
		return -EINVAL;
	}

	notify_addr = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			&notify_pa);
	if (!notify_addr)
		return -ENOMEM;

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}

static int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = 0;  /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	vdev->notify = NULL;
	vdev->notify_pa = 0;
	vdev->notify_sz = 0;

	return err;
}

int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify) {
		pci_free_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			vdev->notify,
			vdev->notify_pa);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}
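
/* The notify block is updated asynchronously by firmware; word 0 holds a
 * checksum over the remaining words.  vnic_dev_notify_ready() below
 * re-copies the block until the checksum matches, i.e. until it has read
 * a consistent snapshot.
 */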
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}

int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}
	return r;
}

int vnic_dev_deinit(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);
}

void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
	/* Default: hardware intr coal timer is in units of 1.5 usecs */
	vdev->intr_coal_timer_info.mul = 2;
	vdev->intr_coal_timer_info.div = 3;
	vdev->intr_coal_timer_info.max_usec =
		vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
}

int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
{
	int wait = 1000;
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));

	if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT))
		err = vdev->devcmd_rtn(vdev, CMD_INTR_COAL_CONVERT, wait);
	else
		err = ERR_ECMDUNKNOWN;

	/* Use defaults when firmware doesn't support the devcmd at all or
	 * supports it for only specific hardware
	 */
	if ((err == ERR_ECMDUNKNOWN) ||
	    (!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
		vdev_netwarn(vdev, "Using default conversion factor for interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(vdev);
		return 0;
	}

	if (!err) {
		vdev->intr_coal_timer_info.mul = (u32) vdev->args[0];
		vdev->intr_coal_timer_info.div = (u32) vdev->args[1];
		vdev->intr_coal_timer_info.max_usec = (u32) vdev->args[2];
	}

	return err;
}

int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.msglvl;
}

u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}

void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
	struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}

u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
{
	return (usec * vdev->intr_coal_timer_info.mul) /
		vdev->intr_coal_timer_info.div;
}

u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
{
	return (hw_cycles * vdev->intr_coal_timer_info.div) /
		vdev->intr_coal_timer_info.mul;
}
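
/* For illustration: with the default mul = 2, div = 3 (one hw tick =
 * 1.5 usec), vnic_dev_intr_coal_timer_usec_to_hw(vdev, 150) returns
 * 150 * 2 / 3 = 100 hw ticks, and hw_to_usec(vdev, 0xffff) returns
 * 65535 * 3 / 2 = 98302 usec, which becomes max_usec in
 * vnic_dev_intr_coal_timer_info_default().
 */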
u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
	return vdev->intr_coal_timer_info.max_usec;
}

void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->stats)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		if (vdev->devcmd2)
			vnic_dev_deinit_devcmd2(vdev);

		kfree(vdev);
	}
}
EXPORT_SYMBOL(vnic_dev_unregister);

struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
	unsigned int num_bars)
{
	if (!vdev) {
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar, num_bars))
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}
EXPORT_SYMBOL(vnic_dev_register);

struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev)
{
	return vdev->pdev;
}
EXPORT_SYMBOL(vnic_dev_get_pdev);

int vnic_devcmd_init(struct vnic_dev *vdev)
{
	void __iomem *res;
	int err;

	res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (res) {
		err = vnic_dev_init_devcmd2(vdev);
		if (err)
			vdev_warn(vdev, "DEVCMD2 init failed: %d, Using DEVCMD1\n",
				  err);
		else
			return 0;
	} else {
		vdev_warn(vdev, "DEVCMD2 resource not found (old firmware?) Using DEVCMD1\n");
	}
	err = vnic_dev_init_devcmd1(vdev);
	if (err)
		vdev_err(vdev, "DEVCMD1 initialization failed: %d\n", err);

	return err;
}

int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
{
	u64 a0, a1 = len;
	int wait = 1000;
	dma_addr_t prov_pa;
	void *prov_buf;
	int ret;

	prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
	if (!prov_buf)
		return -ENOMEM;

	memcpy(prov_buf, buf, len);

	a0 = prov_pa;

	ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO2, &a0, &a1, wait);

	pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);

	return ret;
}

int vnic_dev_enable2(struct vnic_dev *vdev, int active)
{
	u64 a0, a1 = 0;
	int wait = 1000;

	a0 = (active ? CMD_ENABLE2_ACTIVE : 0);

	return vnic_dev_cmd(vdev, CMD_ENABLE2, &a0, &a1, wait);
}

static int vnic_dev_cmd_status(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int *status)
{
	u64 a0 = cmd, a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_STATUS, &a0, &a1, wait);
	if (!ret)
		*status = (int)a0;

	return ret;
}

int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status)
{
	return vnic_dev_cmd_status(vdev, CMD_ENABLE2, status);
}

int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status)
{
	return vnic_dev_cmd_status(vdev, CMD_DEINIT, status);
}

int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0, a1;
	int wait = 1000;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = mac_addr[i];

	return vnic_dev_cmd(vdev, CMD_SET_MAC_ADDR, &a0, &a1, wait);
}

/* vnic_dev_classifier: Add/Delete classifier entries
 * @vdev: vdev of the device
 * @cmd: CLSF_ADD for Add filter
 *	 CLSF_DEL for Delete filter
 * @entry: In case of ADD filter, the caller passes the RQ number in this
 *	   variable.
 *
 *	   This function stores the filter_id returned by the firmware in the
 *	   same variable before return;
 *
 *	   In case of DEL filter, the caller passes the RQ number. Return
 *	   value is irrelevant.
 * @data: filter data
 */
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
	struct filter *data)
{
	u64 a0, a1;
	int wait = 1000;
	dma_addr_t tlv_pa;
	int ret = -EINVAL;
	struct filter_tlv *tlv, *tlv_va;
	struct filter_action *action;
	u64 tlv_size;

	if (cmd == CLSF_ADD) {
		tlv_size = sizeof(struct filter) +
			   sizeof(struct filter_action) +
			   2 * sizeof(struct filter_tlv);
		tlv_va = pci_alloc_consistent(vdev->pdev, tlv_size, &tlv_pa);
		if (!tlv_va)
			return -ENOMEM;
		tlv = tlv_va;
		a0 = tlv_pa;
		a1 = tlv_size;
		memset(tlv, 0, tlv_size);
		tlv->type = CLSF_TLV_FILTER;
		tlv->length = sizeof(struct filter);
		*(struct filter *)&tlv->val = *data;

		tlv = (struct filter_tlv *)((char *)tlv +
					    sizeof(struct filter_tlv) +
					    sizeof(struct filter));

		tlv->type = CLSF_TLV_ACTION;
		tlv->length = sizeof(struct filter_action);
		action = (struct filter_action *)&tlv->val;
		action->type = FILTER_ACTION_RQ_STEERING;
		action->u.rq_idx = *entry;

		ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
		*entry = (u16)a0;
		pci_free_consistent(vdev->pdev, tlv_size, tlv_va, tlv_pa);
	} else if (cmd == CLSF_DEL) {
		a0 = *entry;
		ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
	}

	return ret;
}
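
/* Usage sketch (illustrative only; rq_index and flt are hypothetical):
 *
 *	u16 entry = rq_index;
 *	ret = vnic_dev_classifier(vdev, CLSF_ADD, &entry, &flt);
 *	(on success, entry now holds the firmware-assigned filter_id)
 *	...
 *	ret = vnic_dev_classifier(vdev, CLSF_DEL, &entry, NULL);
 *
 * data is unused on the CLSF_DEL path, so NULL is fine there.
 */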
int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config)
{
	u64 a0 = overlay;
	u64 a1 = config;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
}

int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
	u16 vxlan_udp_port_number)
{
	u64 a1 = vxlan_udp_port_number;
	u64 a0 = overlay;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
}

int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature,
	u64 *supported_versions)
{
	u64 a0 = feature;
	int wait = 1000;
	u64 a1 = 0;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
	if (!ret)
		*supported_versions = a0;

	return ret;
}