greybus: Revert "greybus: don't use spin_lock_irq()"

This reverts commit 469fbe5da0229edcb42aa08bef8e10feaa37e6d7.

It isn't correct in places.

Reported-by: Gjorgji Rosikopulos <rosikopulos_gjorgji@projectara.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman 2016-06-23 14:20:02 -07:00
parent 1211915127
commit 19cdabcf0b
3 changed files with 33 additions and 45 deletions

View File

@@ -150,7 +150,6 @@ _gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
unsigned long flags) unsigned long flags)
{ {
struct gb_connection *connection; struct gb_connection *connection;
unsigned long irqflags;
int ret; int ret;
mutex_lock(&gb_connection_mutex); mutex_lock(&gb_connection_mutex);
@@ -201,7 +200,7 @@ _gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
gb_connection_init_name(connection); gb_connection_init_name(connection);
spin_lock_irqsave(&gb_connections_lock, irqflags); spin_lock_irq(&gb_connections_lock);
list_add(&connection->hd_links, &hd->connections); list_add(&connection->hd_links, &hd->connections);
if (bundle) if (bundle)
@@ -209,7 +208,7 @@ _gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
else else
INIT_LIST_HEAD(&connection->bundle_links); INIT_LIST_HEAD(&connection->bundle_links);
spin_unlock_irqrestore(&gb_connections_lock, irqflags); spin_unlock_irq(&gb_connections_lock);
mutex_unlock(&gb_connection_mutex); mutex_unlock(&gb_connection_mutex);
@@ -572,7 +571,7 @@ static int gb_connection_ping(struct gb_connection *connection)
* DISCONNECTING. * DISCONNECTING.
*/ */
static void gb_connection_cancel_operations(struct gb_connection *connection, static void gb_connection_cancel_operations(struct gb_connection *connection,
int errno, unsigned long flags) int errno)
__must_hold(&connection->lock) __must_hold(&connection->lock)
{ {
struct gb_operation *operation; struct gb_operation *operation;
@@ -581,7 +580,7 @@ static void gb_connection_cancel_operations(struct gb_connection *connection,
operation = list_last_entry(&connection->operations, operation = list_last_entry(&connection->operations,
struct gb_operation, links); struct gb_operation, links);
gb_operation_get(operation); gb_operation_get(operation);
spin_unlock_irqrestore(&connection->lock, flags); spin_unlock_irq(&connection->lock);
if (gb_operation_is_incoming(operation)) if (gb_operation_is_incoming(operation))
gb_operation_cancel_incoming(operation, errno); gb_operation_cancel_incoming(operation, errno);
@@ -590,7 +589,7 @@ static void gb_connection_cancel_operations(struct gb_connection *connection,
gb_operation_put(operation); gb_operation_put(operation);
spin_lock_irqsave(&connection->lock, flags); spin_lock_irq(&connection->lock);
} }
} }
@@ -601,7 +600,7 @@ static void gb_connection_cancel_operations(struct gb_connection *connection,
*/ */
static void static void
gb_connection_flush_incoming_operations(struct gb_connection *connection, gb_connection_flush_incoming_operations(struct gb_connection *connection,
int errno, unsigned long flags) int errno)
__must_hold(&connection->lock) __must_hold(&connection->lock)
{ {
struct gb_operation *operation; struct gb_operation *operation;
@@ -621,13 +620,13 @@ gb_connection_flush_incoming_operations(struct gb_connection *connection,
if (!incoming) if (!incoming)
break; break;
spin_unlock_irqrestore(&connection->lock, flags); spin_unlock_irq(&connection->lock);
/* FIXME: flush, not cancel? */ /* FIXME: flush, not cancel? */
gb_operation_cancel_incoming(operation, errno); gb_operation_cancel_incoming(operation, errno);
gb_operation_put(operation); gb_operation_put(operation);
spin_lock_irqsave(&connection->lock, flags); spin_lock_irq(&connection->lock);
} }
} }
@@ -643,7 +642,6 @@ gb_connection_flush_incoming_operations(struct gb_connection *connection,
*/ */
static int _gb_connection_enable(struct gb_connection *connection, bool rx) static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{ {
unsigned long flags;
int ret; int ret;
/* Handle ENABLED_TX -> ENABLED transitions. */ /* Handle ENABLED_TX -> ENABLED transitions. */
@@ -651,9 +649,9 @@ static int _gb_connection_enable(struct gb_connection *connection, bool rx)
if (!(connection->handler && rx)) if (!(connection->handler && rx))
return 0; return 0;
spin_lock_irqsave(&connection->lock, flags); spin_lock_irq(&connection->lock);
connection->state = GB_CONNECTION_STATE_ENABLED; connection->state = GB_CONNECTION_STATE_ENABLED;
spin_unlock_irqrestore(&connection->lock, flags); spin_unlock_irq(&connection->lock);
return 0; return 0;
} }
@@ -670,12 +668,12 @@ static int _gb_connection_enable(struct gb_connection *connection, bool rx)
if (ret) if (ret)
goto err_svc_connection_destroy; goto err_svc_connection_destroy;
spin_lock_irqsave(&connection->lock, flags); spin_lock_irq(&connection->lock);
if (connection->handler && rx) if (connection->handler && rx)
connection->state = GB_CONNECTION_STATE_ENABLED; connection->state = GB_CONNECTION_STATE_ENABLED;
else else
connection->state = GB_CONNECTION_STATE_ENABLED_TX; connection->state = GB_CONNECTION_STATE_ENABLED_TX;
spin_unlock_irqrestore(&connection->lock, flags); spin_unlock_irq(&connection->lock);
ret = gb_connection_control_connected(connection); ret = gb_connection_control_connected(connection);
if (ret) if (ret)
@@ -686,10 +684,10 @@ static int _gb_connection_enable(struct gb_connection *connection, bool rx)
err_control_disconnecting: err_control_disconnecting:
gb_connection_control_disconnecting(connection); gb_connection_control_disconnecting(connection);
spin_lock_irqsave(&connection->lock, flags); spin_lock_irq(&connection->lock);
connection->state = GB_CONNECTION_STATE_DISCONNECTING; connection->state = GB_CONNECTION_STATE_DISCONNECTING;
gb_connection_cancel_operations(connection, -ESHUTDOWN, flags); gb_connection_cancel_operations(connection, -ESHUTDOWN);
spin_unlock_irqrestore(&connection->lock, flags); spin_unlock_irq(&connection->lock);
/* Transmit queue should already be empty. */ /* Transmit queue should already be empty. */
gb_connection_hd_cport_flush(connection); gb_connection_hd_cport_flush(connection);
@@ -755,18 +753,16 @@ EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
void gb_connection_disable_rx(struct gb_connection *connection) void gb_connection_disable_rx(struct gb_connection *connection)
{ {
unsigned long flags;
mutex_lock(&connection->mutex); mutex_lock(&connection->mutex);
spin_lock_irqsave(&connection->lock, flags); spin_lock_irq(&connection->lock);
if (connection->state != GB_CONNECTION_STATE_ENABLED) { if (connection->state != GB_CONNECTION_STATE_ENABLED) {
spin_unlock_irqrestore(&connection->lock, flags); spin_unlock_irq(&connection->lock);
goto out_unlock; goto out_unlock;
} }
connection->state = GB_CONNECTION_STATE_ENABLED_TX; connection->state = GB_CONNECTION_STATE_ENABLED_TX;
gb_connection_flush_incoming_operations(connection, -ESHUTDOWN, flags); gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
spin_unlock_irqrestore(&connection->lock, flags); spin_unlock_irq(&connection->lock);
trace_gb_connection_disable(connection); trace_gb_connection_disable(connection);
@@ -789,8 +785,6 @@ void gb_connection_mode_switch_complete(struct gb_connection *connection)
void gb_connection_disable(struct gb_connection *connection) void gb_connection_disable(struct gb_connection *connection)
{ {
unsigned long flags;
mutex_lock(&connection->mutex); mutex_lock(&connection->mutex);
if (connection->state == GB_CONNECTION_STATE_DISABLED) if (connection->state == GB_CONNECTION_STATE_DISABLED)
@@ -800,10 +794,10 @@ void gb_connection_disable(struct gb_connection *connection)
gb_connection_control_disconnecting(connection); gb_connection_control_disconnecting(connection);
spin_lock_irqsave(&connection->lock, flags); spin_lock_irq(&connection->lock);
connection->state = GB_CONNECTION_STATE_DISCONNECTING; connection->state = GB_CONNECTION_STATE_DISCONNECTING;
gb_connection_cancel_operations(connection, -ESHUTDOWN, flags); gb_connection_cancel_operations(connection, -ESHUTDOWN);
spin_unlock_irqrestore(&connection->lock, flags); spin_unlock_irq(&connection->lock);
gb_connection_hd_cport_flush(connection); gb_connection_hd_cport_flush(connection);
@@ -830,8 +824,6 @@ EXPORT_SYMBOL_GPL(gb_connection_disable);
/* Disable a connection without communicating with the remote end. */ /* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection) void gb_connection_disable_forced(struct gb_connection *connection)
{ {
unsigned long flags;
mutex_lock(&connection->mutex); mutex_lock(&connection->mutex);
if (connection->state == GB_CONNECTION_STATE_DISABLED) if (connection->state == GB_CONNECTION_STATE_DISABLED)
@@ -839,10 +831,10 @@ void gb_connection_disable_forced(struct gb_connection *connection)
trace_gb_connection_disable(connection); trace_gb_connection_disable(connection);
spin_lock_irqsave(&connection->lock, flags); spin_lock_irq(&connection->lock);
connection->state = GB_CONNECTION_STATE_DISABLED; connection->state = GB_CONNECTION_STATE_DISABLED;
gb_connection_cancel_operations(connection, -ESHUTDOWN, flags); gb_connection_cancel_operations(connection, -ESHUTDOWN);
spin_unlock_irqrestore(&connection->lock, flags); spin_unlock_irq(&connection->lock);
gb_connection_hd_cport_flush(connection); gb_connection_hd_cport_flush(connection);
gb_connection_hd_cport_features_disable(connection); gb_connection_hd_cport_features_disable(connection);
@@ -857,8 +849,6 @@ EXPORT_SYMBOL_GPL(gb_connection_disable_forced);
/* Caller must have disabled the connection before destroying it. */ /* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection) void gb_connection_destroy(struct gb_connection *connection)
{ {
unsigned long flags;
if (!connection) if (!connection)
return; return;
@@ -867,10 +857,10 @@ void gb_connection_destroy(struct gb_connection *connection)
mutex_lock(&gb_connection_mutex); mutex_lock(&gb_connection_mutex);
spin_lock_irqsave(&gb_connections_lock, flags); spin_lock_irq(&gb_connections_lock);
list_del(&connection->bundle_links); list_del(&connection->bundle_links);
list_del(&connection->hd_links); list_del(&connection->hd_links);
spin_unlock_irqrestore(&gb_connections_lock, flags); spin_unlock_irq(&gb_connections_lock);
destroy_workqueue(connection->wq); destroy_workqueue(connection->wq);

View File

@@ -496,12 +496,11 @@ static void message_cancel(struct gb_message *message)
struct gb_host_device *hd = message->operation->connection->hd; struct gb_host_device *hd = message->operation->connection->hd;
struct es2_ap_dev *es2 = hd_to_es2(hd); struct es2_ap_dev *es2 = hd_to_es2(hd);
struct urb *urb; struct urb *urb;
unsigned long flags;
int i; int i;
might_sleep(); might_sleep();
spin_lock_irqsave(&es2->cport_out_urb_lock, flags); spin_lock_irq(&es2->cport_out_urb_lock);
urb = message->hcpriv; urb = message->hcpriv;
/* Prevent dynamically allocated urb from being deallocated. */ /* Prevent dynamically allocated urb from being deallocated. */
@@ -514,14 +513,14 @@ static void message_cancel(struct gb_message *message)
break; break;
} }
} }
spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags); spin_unlock_irq(&es2->cport_out_urb_lock);
usb_kill_urb(urb); usb_kill_urb(urb);
if (i < NUM_CPORT_OUT_URB) { if (i < NUM_CPORT_OUT_URB) {
spin_lock_irqsave(&es2->cport_out_urb_lock, flags); spin_lock_irq(&es2->cport_out_urb_lock);
es2->cport_out_urb_cancelled[i] = false; es2->cport_out_urb_cancelled[i] = false;
spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags); spin_unlock_irq(&es2->cport_out_urb_lock);
} }
usb_free_urb(urb); usb_free_urb(urb);

View File

@@ -674,7 +674,6 @@ static int set_serial_info(struct gb_tty *gb_tty,
static int wait_serial_change(struct gb_tty *gb_tty, unsigned long arg) static int wait_serial_change(struct gb_tty *gb_tty, unsigned long arg)
{ {
int retval = 0; int retval = 0;
unsigned long flags;
DECLARE_WAITQUEUE(wait, current); DECLARE_WAITQUEUE(wait, current);
struct async_icount old; struct async_icount old;
struct async_icount new; struct async_icount new;
@@ -683,11 +682,11 @@ static int wait_serial_change(struct gb_tty *gb_tty, unsigned long arg)
return -EINVAL; return -EINVAL;
do { do {
spin_lock_irqsave(&gb_tty->read_lock, flags); spin_lock_irq(&gb_tty->read_lock);
old = gb_tty->oldcount; old = gb_tty->oldcount;
new = gb_tty->iocount; new = gb_tty->iocount;
gb_tty->oldcount = new; gb_tty->oldcount = new;
spin_unlock_irqrestore(&gb_tty->read_lock, flags); spin_unlock_irq(&gb_tty->read_lock);
if ((arg & TIOCM_DSR) && (old.dsr != new.dsr)) if ((arg & TIOCM_DSR) && (old.dsr != new.dsr))
break; break;