/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more details.
 ***********************************************************************/
/*! \file octeon_main.h
 *  \brief Host Driver: This file is included by all host driver source files
 *  to provide common definitions.
 */
#ifndef _OCTEON_MAIN_H_
#define _OCTEON_MAIN_H_

#include <linux/sched/signal.h>

#if BITS_PER_LONG == 32
#define CVM_CAST64(v) ((long long)(v))
#elif BITS_PER_LONG == 64
#define CVM_CAST64(v) ((long long)(long)(v))
#else
#error "Unknown system architecture"
#endif
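/* Example (hypothetical names): CVM_CAST64() lets a value of
 * platform-dependent width be printed with a single format string on
 * both 32- and 64-bit kernels:
 *
 *	dev_info(&oct->pci_dev->dev, "tx bytes: %lld\n",
 *		 CVM_CAST64(stats->tx_tot_bytes));
 */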
#define DRV_NAME "LiquidIO"

/** This structure is used by the NIC driver to store information required
 * to free the sk_buff when the packet has been fetched by Octeon.
 * Byte offsets below assume the worst case of a 64-bit system.
 */
struct octnet_buf_free_info {
	/** Bytes 1-8. Pointer to network device private structure. */
	struct lio *lio;

	/** Bytes 9-16. Pointer to sk_buff. */
	struct sk_buff *skb;

	/** Bytes 17-24. Pointer to gather list. */
	struct octnic_gather *g;

	/** Bytes 25-32. Physical address of skb->data or gather list. */
	u64 dptr;

	/** Bytes 33-40. Piggybacked soft command, if any. */
	struct octeon_soft_command *sc;
};
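/* Sketch of typical use (assuming the driver stashes this in skb->cb,
 * which is at least 48 bytes and so large enough to hold it):
 *
 *	struct octnet_buf_free_info *finfo;
 *
 *	finfo = (struct octnet_buf_free_info *)skb->cb;
 *	finfo->lio = lio;
 *	finfo->skb = skb;
 */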
/* BQL-related functions */
void octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
void octeon_update_tx_completion_counters(void *buf, int reqtype,
					  unsigned int *pkts_compl,
					  unsigned int *bytes_compl);
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl);
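/* Sketch of the expected BQL flow (hypothetical caller): accumulate
 * per-buffer counts while draining completions, then report the totals
 * for the queue once:
 *
 *	unsigned int pkts_compl = 0, bytes_compl = 0;
 *
 *	octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
 *					     &bytes_compl);
 *	...
 *	octeon_report_tx_completion_to_bql(txq, pkts_compl, bytes_compl);
 */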
void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac);

/** Swap 8B blocks */
static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
{
	while (blocks) {
		cpu_to_be64s(data);
		blocks--;
		data++;
	}
}
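/* Example (hypothetical buffer): convert three 8-byte blocks to
 * big-endian in place before handing them to the Octeon firmware; on a
 * big-endian host cpu_to_be64s() is a no-op:
 *
 *	u64 resp[3];
 *
 *	octeon_swap_8B_data(resp, 3);
 */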
/**
 * \brief unmaps a PCI BAR
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 */
static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx)
{
	dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n",
		baridx);

	if (oct->mmio[baridx].done)
		iounmap(oct->mmio[baridx].hw_addr);

	if (oct->mmio[baridx].start)
		pci_release_region(oct->pci_dev, baridx * 2);
}
/**
 * \brief maps a PCI BAR
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 * @param max_map_len maximum length of mapped memory
 */
static inline int octeon_map_pci_barx(struct octeon_device *oct,
				      int baridx, int max_map_len)
{
	u32 mapped_len = 0;

	if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) {
		dev_err(&oct->pci_dev->dev, "pci_request_region failed for bar %d\n",
			baridx);
		return 1;
	}

	oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2);
	oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2);

	mapped_len = oct->mmio[baridx].len;
	if (!mapped_len)
		goto err_release_region;

	if (max_map_len && (mapped_len > max_map_len))
		mapped_len = max_map_len;

	oct->mmio[baridx].hw_addr =
		ioremap(oct->mmio[baridx].start, mapped_len);
	oct->mmio[baridx].mapped_len = mapped_len;

	dev_dbg(&oct->pci_dev->dev, "BAR%d start: 0x%llx mapped %u of %u bytes\n",
		baridx, oct->mmio[baridx].start, mapped_len,
		oct->mmio[baridx].len);

	if (!oct->mmio[baridx].hw_addr) {
		dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n",
			baridx);
		goto err_release_region;
	}
	oct->mmio[baridx].done = 1;

	return 0;

err_release_region:
	pci_release_region(oct->pci_dev, baridx * 2);
	return 1;
}
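/* Sketch of the probe-time pairing (assuming BAR0 and no length cap;
 * the function returns 0 on success and 1 on failure):
 *
 *	if (octeon_map_pci_barx(oct, 0, 0))
 *		return -ENOMEM;
 *	...
 *	octeon_unmap_pci_barx(oct, 0);
 */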
static inline void *
cnnic_numa_alloc_aligned_dma(u32 size,
			     u32 *alloc_size,
			     size_t *orig_ptr,
			     int numa_node)
{
	int retries = 0;
	void *ptr = NULL;

#define OCTEON_MAX_ALLOC_RETRIES	1
	do {
		struct page *page = NULL;

		page = alloc_pages_node(numa_node,
					GFP_KERNEL,
					get_order(size));
		if (!page)
			page = alloc_pages(GFP_KERNEL,
					   get_order(size));
		/* Bail out rather than dereference a failed allocation. */
		if (!page)
			break;
		ptr = (void *)page_address(page);
		if ((unsigned long)ptr & 0x07) {
			__free_pages(page, get_order(size));
			ptr = NULL;
			/* Increment the size required if the first
			 * attempt failed.
			 */
			if (!retries)
				size += 7;
		}
		retries++;
	} while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr);

	*alloc_size = size;
	*orig_ptr = (unsigned long)ptr;
	if ((unsigned long)ptr & 0x07)
		ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL));
	return ptr;
}

#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
		free_pages(orig_ptr, get_order(size))
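/* Sketch of the alloc/free pairing (hypothetical size and names; note
 * that only the size and orig_ptr arguments are used by the free macro):
 *
 *	u32 alloc_size;
 *	size_t orig_ptr;
 *	void *buf;
 *
 *	buf = cnnic_numa_alloc_aligned_dma(1024, &alloc_size, &orig_ptr,
 *					   numa_node);
 *	...
 *	if (buf)
 *		cnnic_free_aligned_dma(oct->pci_dev, buf, alloc_size,
 *				       orig_ptr, 0);
 */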
static inline int
sleep_cond(wait_queue_head_t *wait_queue, int *condition)
{
	int errno = 0;
	wait_queue_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(wait_queue, &we);
	while (!(READ_ONCE(*condition))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			errno = -EINTR;
			goto out;
		}
		schedule();
	}
out:
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wait_queue, &we);
	return errno;
}
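/* Sketch of the expected wait/wake pairing (hypothetical names): the
 * waiter sleeps until the completion path sets the flag and wakes the
 * queue; a nonzero return means the sleep was interrupted by a signal:
 *
 *	Waiter:
 *		if (sleep_cond(&wq, &done))
 *			return -EINTR;
 *
 *	Completion path:
 *		WRITE_ONCE(done, 1);
 *		wake_up(&wq);
 */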
/* Gives up the CPU for at most a timeout period.
 * Sleeps only if the condition is not already true at the time of the
 * check; a wake_up() on the queue can end the sleep early.
 */
static inline void
sleep_timeout_cond(wait_queue_head_t *wait_queue,
		   int *condition,
		   int timeout)
{
	wait_queue_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(wait_queue, &we);
	set_current_state(TASK_INTERRUPTIBLE);
	if (!(*condition))
		schedule_timeout(timeout);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wait_queue, &we);
}
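/* Sketch (hypothetical names): wait up to 100 jiffies for a flag,
 * returning early if the flag is already set or the queue is woken:
 *
 *	sleep_timeout_cond(&wq, &done, 100);
 */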
#ifndef ROUNDUP4
#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
#endif

#ifndef ROUNDUP8
#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8)
#endif

#ifndef ROUNDUP16
#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0)
#endif

#ifndef ROUNDUP128
#define ROUNDUP128(val) (((val) + 127) & 0xffffff80)
#endif
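/* Worked example: ROUNDUP8(13) == (13 + 7) & 0xfffffff8 == 16, and
 * ROUNDUP8(16) == 16. Note the 32-bit masks: these macros are only
 * valid for values that fit in 32 bits.
 */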
#endif /* _OCTEON_MAIN_H_ */