ring_buffer.c

/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>

#include "hyperv_vmbus.h"

void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
        rbi->ring_buffer->interrupt_mask = 1;
        mb();
}

u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
        u32 read;
        u32 write;

        rbi->ring_buffer->interrupt_mask = 0;
        mb();

        /*
         * Now check to see if the ring buffer is still empty.
         * If it is not, we raced and we need to process new
         * incoming messages.
         */
        hv_get_ringbuffer_availbytes(rbi, &read, &write);

        return read;
}
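
/*
 * Typical guest-side use of the two masking helpers above, as a sketch only
 * (packets_available() and process_packet() are hypothetical placeholders,
 * not functions from this driver):
 *
 *      hv_begin_read(rbi);                     // mask interrupts while draining
 *      do {
 *              while (packets_available(rbi))
 *                      process_packet(rbi);
 *      } while (hv_end_read(rbi));             // unmask; loop again if data raced in
 *
 * hv_end_read() returns the number of bytes still available to read after the
 * mask is cleared, so a non-zero return after a full drain means new data
 * arrived in the window and must be processed to avoid losing a wakeup.
 */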

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *      1. The host guarantees that while it is draining the
 *         ring buffer, it will set the interrupt_mask to
 *         indicate it does not need to be interrupted when
 *         new data is placed.
 *
 *      2. The host guarantees that it will completely drain
 *         the ring buffer before exiting the read loop. Further,
 *         once the ring buffer is empty, it will clear the
 *         interrupt_mask and re-check to see if new data has
 *         arrived.
 */
static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
        mb();
        if (rbi->ring_buffer->interrupt_mask)
                return false;

        /* check interrupt_mask before read_index */
        rmb();

        /*
         * This is the only case we need to signal when the
         * ring transitions from being empty to non-empty.
         */
        if (old_write == rbi->ring_buffer->read_index)
                return true;

        return false;
}
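
/*
 * A small worked illustration of the check above: suppose read_index == 100
 * and, before our write, write_index was also 100 (ring empty). The write
 * advances write_index past 100 while old_write stays at 100. If the host has
 * not moved read_index in the meantime, old_write == read_index still holds:
 * the ring just went from empty to non-empty and the host may be parked
 * waiting for an interrupt, so a signal is required. If the host was already
 * busy draining older data, read_index will differ from old_write and no
 * signal is needed.
 */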

/*
 * To optimize the flow management on the send-side,
 * when the sender is blocked because of lack of
 * sufficient space in the ring buffer, the consumer
 * of the ring buffer can potentially signal the producer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz is set to indicate if
 *    the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */
static bool hv_need_to_signal_on_read(u32 old_rd,
                                      struct hv_ring_buffer_info *rbi)
{
        u32 prev_write_sz;
        u32 cur_write_sz;
        u32 r_size;
        u32 write_loc = rbi->ring_buffer->write_index;
        u32 read_loc = rbi->ring_buffer->read_index;
        u32 pending_sz = rbi->ring_buffer->pending_send_sz;

        /*
         * If the other end is not blocked on write don't bother.
         */
        if (pending_sz == 0)
                return false;

        r_size = rbi->ring_datasize;
        cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
                        read_loc - write_loc;

        prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
                        old_rd - write_loc;

        if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
                return true;

        return false;
}
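
/*
 * The free-space computation above is the standard circular-buffer formula.
 * A quick worked example with ring_datasize == 4096:
 *
 *      write_index == 3000, read_index == 1000
 *              -> bytes in use = 3000 - 1000 = 2000
 *              -> bytes free   = 4096 - 2000 = 2096
 *
 *      write_index == 1000, read_index == 3000 (writer has wrapped)
 *              -> bytes free   = 3000 - 1000 = 2000
 *
 * The blocked producer is signaled only when this read takes the free space
 * from below pending_send_sz to at or above it.
 */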

/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->write_index;

        return next;
}

/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
                           u32 next_write_location)
{
        ring_info->ring_buffer->write_index = next_write_location;
}

/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->read_index;

        return next;
}

/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip 'offset' bytes of already-known data
 * (such as a packet descriptor) before copying.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
                                    u32 offset)
{
        u32 next = ring_info->ring_buffer->read_index;

        next += offset;
        next %= ring_info->ring_datasize;

        return next;
}

/*
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
                          u32 next_read_location)
{
        ring_info->ring_buffer->read_index = next_read_location;
}

/*
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
        return (void *)ring_info->ring_buffer->buffer;
}

/*
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_datasize;
}

/*
 * hv_get_ring_bufferindices()
 *
 * Get the read and write indices of the specified ring buffer, packed
 * into a single u64 (the write index occupies the upper 32 bits).
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
        return (u64)ring_info->ring_buffer->write_index << 32;
}
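
/*
 * The value returned above is what hv_ringbuffer_write() appends as the
 * 8-byte trailer after every packet ("prev_indices"). Its layout, for
 * reference:
 *
 *      bits 63..32: write_index at the time the packet was committed
 *      bits 31..0 : 0
 */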

/*
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy from the ring buffer into a destination buffer.
 * Assumes there is enough data available. Handles wrap-around on the
 * source (ring) side only.
 */
static u32 hv_copyfrom_ringbuffer(
        struct hv_ring_buffer_info *ring_info,
        void *dest,
        u32 destlen,
        u32 start_read_offset)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
        u32 frag_len;

        /* wrap-around detected at the src */
        if (destlen > ring_buffer_size - start_read_offset) {
                frag_len = ring_buffer_size - start_read_offset;
                memcpy(dest, ring_buffer + start_read_offset, frag_len);
                memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
        } else
                memcpy(dest, ring_buffer + start_read_offset, destlen);

        start_read_offset += destlen;
        start_read_offset %= ring_buffer_size;

        return start_read_offset;
}

/*
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy from a source buffer into the ring buffer.
 * Assumes there is enough room. Handles wrap-around on the destination
 * (ring) side only.
 */
static u32 hv_copyto_ringbuffer(
        struct hv_ring_buffer_info *ring_info,
        u32 start_write_offset,
        void *src,
        u32 srclen)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
        u32 frag_len;

        /* wrap-around detected! */
        if (srclen > ring_buffer_size - start_write_offset) {
                frag_len = ring_buffer_size - start_write_offset;
                memcpy(ring_buffer + start_write_offset, src, frag_len);
                memcpy(ring_buffer, src + frag_len, srclen - frag_len);
        } else
                memcpy(ring_buffer + start_write_offset, src, srclen);

        start_write_offset += srclen;
        start_write_offset %= ring_buffer_size;

        return start_write_offset;
}
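
/*
 * Wrap-around in the two copy helpers above, illustrated with
 * ring_buffer_size == 4096, a start offset of 4000 and a 200-byte copy:
 *
 *      frag_len = 4096 - 4000 = 96        // bytes until the end of the ring
 *      copy bytes   0..95  at ring offsets 4000..4095
 *      copy bytes  96..199 at ring offsets    0..103
 *      returned offset = (4000 + 200) % 4096 = 104
 *
 * Only the ring side of the copy wraps; the flat buffer supplied by the
 * caller is always contiguous.
 */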

/*
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
                                 struct hv_ring_buffer_debug_info *debug_info)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;

        if (ring_info->ring_buffer) {
                hv_get_ringbuffer_availbytes(ring_info,
                                             &bytes_avail_toread,
                                             &bytes_avail_towrite);

                debug_info->bytes_avail_toread = bytes_avail_toread;
                debug_info->bytes_avail_towrite = bytes_avail_towrite;
                debug_info->current_read_index =
                        ring_info->ring_buffer->read_index;
                debug_info->current_write_index =
                        ring_info->ring_buffer->write_index;
                debug_info->current_interrupt_mask =
                        ring_info->ring_buffer->interrupt_mask;
        }
}

/*
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                       void *buffer, u32 buflen)
{
        if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
                return -EINVAL;

        memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

        ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
        ring_info->ring_buffer->read_index =
                ring_info->ring_buffer->write_index = 0;

        /*
         * Set the feature bit for enabling flow control.
         */
        ring_info->ring_buffer->feature_bits.value = 1;

        ring_info->ring_size = buflen;
        ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

        spin_lock_init(&ring_info->ring_lock);

        return 0;
}
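
/*
 * Resulting memory layout, assuming the usual case where the caller hands in
 * a page-aligned allocation of 'buflen' bytes:
 *
 *      +----------------------------+  offset 0
 *      | struct hv_ring_buffer      |  control page: read_index, write_index,
 *      | (exactly PAGE_SIZE bytes)  |  interrupt_mask, pending_send_sz, ...
 *      +----------------------------+  offset PAGE_SIZE
 *      | data region                |  ring_datasize =
 *      | (buflen - PAGE_SIZE bytes) |  buflen - sizeof(struct hv_ring_buffer)
 *      +----------------------------+  offset buflen
 *
 * The sizeof() check above enforces that the control structure occupies
 * exactly one page, so the data region starts on a page boundary.
 */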

/*
 * hv_ringbuffer_cleanup()
 *
 * Cleanup the ring buffer
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/*
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
                        struct kvec *kv_list, u32 kv_count, bool *signal)
{
        int i = 0;
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 totalbytes_towrite = 0;
        u32 next_write_location;
        u32 old_write;
        u64 prev_indices = 0;
        unsigned long flags;

        for (i = 0; i < kv_count; i++)
                totalbytes_towrite += kv_list[i].iov_len;

        totalbytes_towrite += sizeof(u64);

        spin_lock_irqsave(&outring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(outring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        /*
         * If there is only room for the packet, assume it is full.
         * Otherwise, the next time around, we think the ring buffer
         * is empty since the read index == write index.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
                spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }

        /* Write to the ring buffer */
        next_write_location = hv_get_next_write_location(outring_info);

        old_write = next_write_location;

        for (i = 0; i < kv_count; i++) {
                next_write_location = hv_copyto_ringbuffer(outring_info,
                                                           next_write_location,
                                                           kv_list[i].iov_base,
                                                           kv_list[i].iov_len);
        }

        /* Set previous packet start */
        prev_indices = hv_get_ring_bufferindices(outring_info);

        next_write_location = hv_copyto_ringbuffer(outring_info,
                                                   next_write_location,
                                                   &prev_indices,
                                                   sizeof(u64));

        /* Issue a full memory barrier before updating the write index */
        mb();

        /* Now, update the write location */
        hv_set_next_write_location(outring_info, next_write_location);

        spin_unlock_irqrestore(&outring_info->ring_lock, flags);

        *signal = hv_need_to_signal(old_write, outring_info);
        return 0;
}
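
/*
 * Sketch of how a sender is expected to drive hv_ringbuffer_write(). This
 * mirrors what the channel code (e.g. vmbus_sendpacket()) does, but the
 * descriptor setup and notify_host() helper below are illustrative, not
 * copied from that code:
 *
 *      struct kvec bufferlist[2];
 *      bool signal = false;
 *
 *      bufferlist[0].iov_base = &desc;         // packet descriptor
 *      bufferlist[0].iov_len  = sizeof(desc);
 *      bufferlist[1].iov_base = payload;
 *      bufferlist[1].iov_len  = payload_len;
 *
 *      ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 2, &signal);
 *      if (ret == 0 && signal)
 *              notify_host(channel);           // hypothetical: raise the
 *                                              // channel's event/interrupt
 *
 * -EAGAIN means the ring was too full for the packet plus its 8-byte
 * trailer; the caller decides whether to retry or propagate the error.
 */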

/*
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
                       void *buffer, u32 buflen)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        unsigned long flags;

        spin_lock_irqsave(&inring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(inring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        /* Make sure there is something to read */
        if (bytes_avail_toread < buflen) {
                spin_unlock_irqrestore(&inring_info->ring_lock, flags);
                return -EAGAIN;
        }

        /* Convert to byte offset */
        next_read_location = hv_get_next_read_location(inring_info);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    buffer,
                                                    buflen,
                                                    next_read_location);

        spin_unlock_irqrestore(&inring_info->ring_lock, flags);

        return 0;
}

/*
 * hv_ringbuffer_read()
 *
 * Read and advance the read index
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
                       u32 buflen, u32 offset, bool *signal)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        u64 prev_indices = 0;
        unsigned long flags;
        u32 old_read;

        if (buflen == 0)
                return -EINVAL;

        spin_lock_irqsave(&inring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(inring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        old_read = bytes_avail_toread;

        /* Make sure there is something to read */
        if (bytes_avail_toread < buflen) {
                spin_unlock_irqrestore(&inring_info->ring_lock, flags);
                return -EAGAIN;
        }

        next_read_location =
                hv_get_next_readlocation_withoffset(inring_info, offset);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    buffer,
                                                    buflen,
                                                    next_read_location);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    &prev_indices,
                                                    sizeof(u64),
                                                    next_read_location);

        /*
         * Make sure all reads are done before we update the read index since
         * the writer may start writing to the read area once the read index
         * is updated.
         */
        mb();

        /* Update the read index */
        hv_set_next_read_location(inring_info, next_read_location);

        spin_unlock_irqrestore(&inring_info->ring_lock, flags);

        *signal = hv_need_to_signal_on_read(old_read, inring_info);

        return 0;
}
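
/*
 * Receive-side usage sketch: callers such as vmbus_recvpacket() first peek
 * at the fixed-size packet descriptor without advancing the ring, then read
 * the payload with an offset so the descriptor is skipped. The helper
 * notify_host() below is hypothetical; the rest follows that pattern as an
 * illustration only:
 *
 *      struct vmpacket_descriptor desc;
 *      bool signal = false;
 *
 *      ret = hv_ringbuffer_peek(&channel->inbound, &desc, sizeof(desc));
 *      if (ret)
 *              return ret;                     // nothing to read yet
 *
 *      packetlen  = desc.len8 << 3;            // descriptor counts 8-byte units
 *      payloadlen = packetlen - (desc.offset8 << 3);
 *      ret = hv_ringbuffer_read(&channel->inbound, buffer, payloadlen,
 *                               desc.offset8 << 3, &signal);
 *      if (ret == 0 && signal)
 *              notify_host(channel);           // producer was blocked; wake it
 *
 * Each read consumes offset + buflen + 8 bytes from the ring: the skipped
 * descriptor, the payload, and the prev_indices trailer written by
 * hv_ringbuffer_write().
 */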