path: root/include/linux/hyperv.h
author  K. Y. Srinivasan <kys@microsoft.com>  2016-04-03 03:59:50 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2016-05-01 00:00:19 +0300
commit  687f32e6d9bd1d63c5e557e877809eb446f1a6e8 (patch)
tree  3dd23c5be7b299f845974fe6e86eee370968caff /include/linux/hyperv.h
parent  5cc472477f928fb8584eb8e08245c9cf9002d74a (diff)
download  linux-687f32e6d9bd1d63c5e557e877809eb446f1a6e8.tar.xz
Drivers: hv: vmbus: Move some ring buffer functions to hyperv.h
In preparation for implementing APIs for in-place consumption of VMBUS packets, move some ring buffer functionality into hyperv.h.

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'include/linux/hyperv.h')
-rw-r--r--  include/linux/hyperv.h  54
1 file changed, 54 insertions, 0 deletions
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 40fd608475f7..eb7c0b215ba4 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1366,4 +1366,58 @@ extern __u32 vmbus_proto_version;
int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
const uuid_le *shv_host_servie_id);
void vmbus_set_event(struct vmbus_channel *channel);
+
+/* Get the start of the ring buffer. */
+static inline void *
+hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
+{
+ return (void *)ring_info->ring_buffer->buffer;
+}
+
+/*
+ * To optimize flow management on the send side,
+ * when the sender is blocked because there is not
+ * sufficient space in the ring buffer, the
+ * consumer of the ring buffer can signal the producer.
+ * This is controlled by the following parameters:
+ *
+ * 1. pending_send_sz: the size in bytes that the
+ *    producer is trying to send.
+ * 2. The feature bit feat_pending_send_sz is set to indicate that
+ *    the consumer of the ring will signal when the ring
+ *    state transitions from being full to a state where
+ *    there is room for the producer to send the pending packet.
+ */
+
+static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
+{
+ u32 cur_write_sz;
+ u32 pending_sz;
+
+ /*
+ * Issue a full memory barrier before making the signaling decision.
+ * Here is the reason for having this barrier:
+ * if the read of pending_send_sz (in this function)
+ * were reordered and performed before we commit the new read
+ * index (in the calling function), we could have a problem:
+ * the host could set pending_send_sz after we have already
+ * sampled it and then block before we commit the read index,
+ * and we would miss sending the interrupt. The full memory
+ * barrier addresses this.
+ */
+ virt_mb();
+
+ pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
+ /* If the other end is not blocked on write don't bother. */
+ if (pending_sz == 0)
+ return false;
+
+ cur_write_sz = hv_get_bytes_to_write(rbi);
+
+ if (cur_write_sz >= pending_sz)
+ return true;
+
+ return false;
+}
+
#endif /* _HYPERV_H */
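
For readers who want to see the ordering requirement outside of kernel context, the sketch below is a minimal, hypothetical userspace model of the reader-side decision. struct ring_model, rb_bytes_avail_to_write() and rb_signal_after_read() are illustrative names and are not part of hyperv.h, and atomic_thread_fence() stands in for virt_mb(); the free-space calculation is deliberately simplified. It only demonstrates the order that hv_need_to_signal_on_read() depends on: publish the new read index, issue a full barrier, then sample pending_send_sz and signal only if enough space has been freed.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified model of the shared ring state. */
struct ring_model {
	_Atomic uint32_t read_index;      /* consumer-owned cursor            */
	_Atomic uint32_t write_index;     /* producer-owned cursor            */
	_Atomic uint32_t pending_send_sz; /* bytes the blocked producer needs */
	uint32_t size;                    /* total data size of the ring      */
};

/* Free space the producer could use, given the current cursors (simplified). */
static uint32_t rb_bytes_avail_to_write(struct ring_model *rb)
{
	uint32_t r = atomic_load_explicit(&rb->read_index, memory_order_relaxed);
	uint32_t w = atomic_load_explicit(&rb->write_index, memory_order_relaxed);

	return (r > w) ? (r - w) : (rb->size - (w - r));
}

/*
 * Reader side: publish the new read index *before* the barrier, then
 * sample pending_send_sz.  This mirrors the ordering the kernel comment
 * describes: without the full barrier, the pending_send_sz load could be
 * reordered ahead of the read-index store and the wakeup could be lost.
 */
static bool rb_signal_after_read(struct ring_model *rb, uint32_t new_read_index)
{
	uint32_t pending_sz, cur_write_sz;

	atomic_store_explicit(&rb->read_index, new_read_index,
			      memory_order_relaxed);

	atomic_thread_fence(memory_order_seq_cst);   /* analogue of virt_mb() */

	pending_sz = atomic_load_explicit(&rb->pending_send_sz,
					  memory_order_relaxed);
	if (pending_sz == 0)               /* producer is not blocked */
		return false;

	cur_write_sz = rb_bytes_avail_to_write(rb);

	return cur_write_sz >= pending_sz; /* enough room freed: signal */
}

int main(void)
{
	struct ring_model rb = { .size = 4096 };

	atomic_store(&rb.write_index, 4000);
	atomic_store(&rb.pending_send_sz, 512);

	/* Consuming up to offset 1024 frees enough space for 512 bytes. */
	printf("signal producer: %s\n",
	       rb_signal_after_read(&rb, 1024) ? "yes" : "no");
	return 0;
}

Built with a C11 toolchain, this model prints "signal producer: yes", since consuming up to offset 1024 frees more than the 512 pending bytes; the important point is only the store/fence/load order, which is the same order the in-kernel caller follows around hv_need_to_signal_on_read().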