@@ -112,9 +112,7 @@ static bool hv_need_to_signal_on_read(u32 prev_write_sz,
u32 read_loc = rbi->ring_buffer->read_index;
u32 pending_sz = rbi->ring_buffer->pending_send_sz;

- /*
- * If the other end is not blocked on write don't bother.
- */
+ /* If the other end is not blocked on write don't bother. */
if (pending_sz == 0)
return false;
@@ -128,12 +126,7 @@ static bool hv_need_to_signal_on_read(u32 prev_write_sz,
return false;
}

-/*
- * hv_get_next_write_location()
- *
- * Get the next write location for the specified ring buffer
- *
- */
+/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
@@ -142,12 +135,7 @@ hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
return next;
}

-/*
- * hv_set_next_write_location()
- *
- * Set the next write location for the specified ring buffer
- *
- */
+/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
u32 next_write_location)
@@ -155,11 +143,7 @@ hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
ring_info->ring_buffer->write_index = next_write_location;
}

-/*
- * hv_get_next_read_location()
- *
- * Get the next read location for the specified ring buffer
- */
+/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
@@ -169,10 +153,8 @@ hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
}

/*
- * hv_get_next_readlocation_withoffset()
- *
* Get the next read location + offset for the specified ring buffer.
- * This allows the caller to skip
+ * This allows the caller to skip.
*/
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
@@ -186,13 +168,7 @@ hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
return next;
}

-/*
- *
- * hv_set_next_read_location()
- *
- * Set the next read location for the specified ring buffer
- *
- */
+/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
u32 next_read_location)
@@ -201,12 +177,7 @@ hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
}


-/*
- *
- * hv_get_ring_buffer()
- *
- * Get the start of the ring buffer
- */
+/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
@@ -214,25 +185,14 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
}


-/*
- *
- * hv_get_ring_buffersize()
- *
- * Get the size of the ring buffer
- */
+/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
return ring_info->ring_datasize;
}

-/*
- *
- * hv_get_ring_bufferindices()
- *
- * Get the read and write indices as u64 of the specified ring buffer
- *
- */
+/* Get the read and write indices as u64 of the specified ring buffer. */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
@@ -240,12 +200,8 @@ hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
}

/*
- *
- * hv_copyfrom_ringbuffer()
- *
* Helper routine to copy to source from ring buffer.
* Assume there is enough room. Handles wrap-around in src case only!!
- *
*/
static u32 hv_copyfrom_ringbuffer(
struct hv_ring_buffer_info *ring_info,
@@ -277,12 +233,8 @@ static u32 hv_copyfrom_ringbuffer(


/*
- *
- * hv_copyto_ringbuffer()
- *
* Helper routine to copy from source to ring buffer.
* Assume there is enough room. Handles wrap-around in dest case only!!
- *
*/
static u32 hv_copyto_ringbuffer(
struct hv_ring_buffer_info *ring_info,
@@ -308,13 +260,7 @@ static u32 hv_copyto_ringbuffer(
return start_write_offset;
}

-/*
- *
- * hv_ringbuffer_get_debuginfo()
- *
- * Get various debug metrics for the specified ring buffer
- *
- */
+/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info)
{
@@ -337,13 +283,7 @@ void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
}
}

-/*
- *
- * hv_ringbuffer_init()
- *
- *Initialize the ring buffer
- *
- */
+/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
void *buffer, u32 buflen)
{
@@ -356,9 +296,7 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
ring_info->ring_buffer->read_index =
ring_info->ring_buffer->write_index = 0;

- /*
- * Set the feature bit for enabling flow control.
- */
+ /* Set the feature bit for enabling flow control. */
ring_info->ring_buffer->feature_bits.value = 1;

ring_info->ring_size = buflen;
@@ -369,24 +307,12 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
return 0;
}

-/*
- *
- * hv_ringbuffer_cleanup()
- *
- * Cleanup the ring buffer
- *
- */
+/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

-/*
- *
- * hv_ringbuffer_write()
- *
- * Write to the ring buffer
- *
- */
+/* Write to the ring buffer. */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
struct kvec *kv_list, u32 kv_count, bool *signal)
{
@@ -411,10 +337,11 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
&bytes_avail_toread,
&bytes_avail_towrite);

-
- /* If there is only room for the packet, assume it is full. */
- /* Otherwise, the next time around, we think the ring buffer */
- /* is empty since the read index == write index */
+ /*
+ * If there is only room for the packet, assume it is full.
+ * Otherwise, the next time around, we think the ring buffer
+ * is empty since the read index == write index.
+ */
if (bytes_avail_towrite <= totalbytes_towrite) {
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
return -EAGAIN;
@@ -454,13 +381,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
}


-/*
- *
- * hv_ringbuffer_peek()
- *
- * Read without advancing the read index
- *
- */
+/* Read without advancing the read index. */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
void *Buffer, u32 buflen)
{
@@ -497,13 +418,7 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
}


-/*
- *
- * hv_ringbuffer_read()
- *
- * Read and advance the read index
- *
- */
+/* Read and advance the read index. */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
u32 buflen, u32 offset, bool *signal)
{
@@ -542,9 +457,11 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
sizeof(u64),
next_read_location);

- /* Make sure all reads are done before we update the read index since */
- /* the writer may start writing to the read area once the read index */
- /*is updated */
+ /*
+ * Make sure all reads are done before we update the read index since
+ * the writer may start writing to the read area once the read index
+ * is updated.
+ */
mb();

/* Update the read index */