mirror of https://gitee.com/openkylin/linux.git
Drivers: hv: vmbus: Give control over how the ring access is serialized
On the channel send side, many of the VMBUS device drivers explicitly serialize access to the outgoing ring buffer. Give more control to the VMBUS device drivers in terms of how to serialize access to the outgoing ring buffer. The default behavior will be to acquire the ring lock to preserve the current behavior. Signed-off-by: K. Y. Srinivasan <kys@microsoft.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
3eba9a77d5
commit
fe760e4d64
|
@ -639,6 +639,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
|
|||
u64 aligned_data = 0;
|
||||
int ret;
|
||||
bool signal = false;
|
||||
bool lock = channel->acquire_ring_lock;
|
||||
int num_vecs = ((bufferlen != 0) ? 3 : 1);
|
||||
|
||||
|
||||
|
@ -658,7 +659,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
|
|||
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
|
||||
|
||||
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
|
||||
&signal);
|
||||
&signal, lock);
|
||||
|
||||
/*
|
||||
* Signalling the host is conditional on many factors:
|
||||
|
@ -738,6 +739,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
|
|||
struct kvec bufferlist[3];
|
||||
u64 aligned_data = 0;
|
||||
bool signal = false;
|
||||
bool lock = channel->acquire_ring_lock;
|
||||
|
||||
if (pagecount > MAX_PAGE_BUFFER_COUNT)
|
||||
return -EINVAL;
|
||||
|
@ -774,7 +776,8 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
|
|||
bufferlist[2].iov_base = &aligned_data;
|
||||
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
|
||||
|
||||
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
|
||||
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
|
||||
&signal, lock);
|
||||
|
||||
/*
|
||||
* Signalling the host is conditional on many factors:
|
||||
|
@ -837,6 +840,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
|
|||
struct kvec bufferlist[3];
|
||||
u64 aligned_data = 0;
|
||||
bool signal = false;
|
||||
bool lock = channel->acquire_ring_lock;
|
||||
|
||||
packetlen = desc_size + bufferlen;
|
||||
packetlen_aligned = ALIGN(packetlen, sizeof(u64));
|
||||
|
@ -856,7 +860,8 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
|
|||
bufferlist[2].iov_base = &aligned_data;
|
||||
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
|
||||
|
||||
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
|
||||
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
|
||||
&signal, lock);
|
||||
|
||||
if (ret == 0 && signal)
|
||||
vmbus_setevent(channel);
|
||||
|
@ -881,6 +886,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
|
|||
struct kvec bufferlist[3];
|
||||
u64 aligned_data = 0;
|
||||
bool signal = false;
|
||||
bool lock = channel->acquire_ring_lock;
|
||||
u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
|
||||
multi_pagebuffer->len);
|
||||
|
||||
|
@ -919,7 +925,8 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
|
|||
bufferlist[2].iov_base = &aligned_data;
|
||||
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
|
||||
|
||||
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
|
||||
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
|
||||
&signal, lock);
|
||||
|
||||
if (ret == 0 && signal)
|
||||
vmbus_setevent(channel);
|
||||
|
|
|
@ -259,6 +259,7 @@ static struct vmbus_channel *alloc_channel(void)
|
|||
return NULL;
|
||||
|
||||
channel->id = atomic_inc_return(&chan_num);
|
||||
channel->acquire_ring_lock = true;
|
||||
spin_lock_init(&channel->inbound_lock);
|
||||
spin_lock_init(&channel->lock);
|
||||
|
||||
|
|
|
@ -529,7 +529,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
|
|||
|
||||
int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
|
||||
struct kvec *kv_list,
|
||||
u32 kv_count, bool *signal);
|
||||
u32 kv_count, bool *signal, bool lock);
|
||||
|
||||
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
|
||||
void *buffer, u32 buflen, u32 *buffer_actual_len,
|
||||
|
|
|
@ -314,7 +314,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
|
|||
|
||||
/* Write to the ring buffer. */
|
||||
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
|
||||
struct kvec *kv_list, u32 kv_count, bool *signal)
|
||||
struct kvec *kv_list, u32 kv_count, bool *signal, bool lock)
|
||||
{
|
||||
int i = 0;
|
||||
u32 bytes_avail_towrite;
|
||||
|
@ -324,14 +324,15 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
|
|||
u32 next_write_location;
|
||||
u32 old_write;
|
||||
u64 prev_indices = 0;
|
||||
unsigned long flags;
|
||||
unsigned long flags = 0;
|
||||
|
||||
for (i = 0; i < kv_count; i++)
|
||||
totalbytes_towrite += kv_list[i].iov_len;
|
||||
|
||||
totalbytes_towrite += sizeof(u64);
|
||||
|
||||
spin_lock_irqsave(&outring_info->ring_lock, flags);
|
||||
if (lock)
|
||||
spin_lock_irqsave(&outring_info->ring_lock, flags);
|
||||
|
||||
hv_get_ringbuffer_availbytes(outring_info,
|
||||
&bytes_avail_toread,
|
||||
|
@ -343,7 +344,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
|
|||
* is empty since the read index == write index.
|
||||
*/
|
||||
if (bytes_avail_towrite <= totalbytes_towrite) {
|
||||
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
|
||||
if (lock)
|
||||
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
|
@ -374,7 +376,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
|
|||
hv_set_next_write_location(outring_info, next_write_location);
|
||||
|
||||
|
||||
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
|
||||
if (lock)
|
||||
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
|
||||
|
||||
*signal = hv_need_to_signal(old_write, outring_info);
|
||||
return 0;
|
||||
|
|
|
@ -811,8 +811,24 @@ struct vmbus_channel {
|
|||
* signaling control.
|
||||
*/
|
||||
enum hv_signal_policy signal_policy;
|
||||
/*
|
||||
* On the channel send side, many of the VMBUS
|
||||
* device drivers explicitly serialize access to the
|
||||
* outgoing ring buffer. Give more control to the
|
||||
* VMBUS device drivers in terms of how to serialize
|
||||
* access to the outgoing ring buffer.
|
||||
* The default behavior will be to acquire the
|
||||
* ring lock to preserve the current behavior.
|
||||
*/
|
||||
bool acquire_ring_lock;
|
||||
|
||||
};
|
||||
|
||||
/*
 * Choose whether hv_ringbuffer_write() takes the outbound ring_lock for this
 * channel (state == true, the default set in alloc_channel()) or relies on the
 * device driver's own serialization of the outgoing ring buffer.
 */
static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
|
||||
{
|
||||
c->acquire_ring_lock = state;
|
||||
}
|
||||
|
||||
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
|
||||
{
|
||||
return !!(c->offermsg.offer.chn_flags &
|
||||
|
|
Loading…
Reference in New Issue