mirror of https://gitee.com/openkylin/linux.git
greybus: Revert "greybus: don't use spin_lock_irq()"
This reverts commit 469fbe5da0229edcb42aa08bef8e10feaa37e6d7. It isn't correct in places.

Reported-by: Gjorgji Rosikopulos <rosikopulos_gjorgji@projectara.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
parent 1211915127
commit 19cdabcf0b
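For context (an illustration added here, not part of the commit): the diff below swaps the spin_lock_irqsave()/spin_unlock_irqrestore() pairs introduced by the reverted commit back to plain spin_lock_irq()/spin_unlock_irq() and drops the now-unused flags variables, in what look like the connection core, the ES2 host driver and the TTY/UART code, judging by the identifiers. A minimal sketch of the difference between the two APIs, using a made-up example_lock and made-up functions:

/*
 * Illustration only -- not from the commit. The lock and both
 * functions are invented for the example.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/*
 * spin_lock_irq()/spin_unlock_irq() may only be used when the caller
 * knows local interrupts are enabled on entry: the unlock re-enables
 * them unconditionally.
 */
static void example_known_context(void)
{
        spin_lock_irq(&example_lock);
        /* ... critical section ... */
        spin_unlock_irq(&example_lock);         /* IRQs are always on after this */
}

/*
 * spin_lock_irqsave()/spin_unlock_irqrestore() save the previous
 * interrupt state in 'flags' and put it back on unlock, so they are
 * safe even if the caller might already run with interrupts disabled.
 */
static void example_any_context(void)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        /* ... critical section ... */
        spin_unlock_irqrestore(&example_lock, flags);   /* prior IRQ state restored */
}

The plain _irq variants are cheaper but assume interrupts are enabled on entry; the _irqsave variants are the defensive choice when the calling context is not known.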
@@ -150,7 +150,6 @@ _gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
                       unsigned long flags)
 {
        struct gb_connection *connection;
-       unsigned long irqflags;
        int ret;
 
        mutex_lock(&gb_connection_mutex);
@@ -201,7 +200,7 @@ _gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
 
        gb_connection_init_name(connection);
 
-       spin_lock_irqsave(&gb_connections_lock, irqflags);
+       spin_lock_irq(&gb_connections_lock);
        list_add(&connection->hd_links, &hd->connections);
 
        if (bundle)
@@ -209,7 +208,7 @@ _gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
        else
                INIT_LIST_HEAD(&connection->bundle_links);
 
-       spin_unlock_irqrestore(&gb_connections_lock, irqflags);
+       spin_unlock_irq(&gb_connections_lock);
 
        mutex_unlock(&gb_connection_mutex);
 
@@ -572,7 +571,7 @@ static int gb_connection_ping(struct gb_connection *connection)
  * DISCONNECTING.
  */
 static void gb_connection_cancel_operations(struct gb_connection *connection,
-                                            int errno, unsigned long flags)
+                                            int errno)
        __must_hold(&connection->lock)
 {
        struct gb_operation *operation;
@@ -581,7 +580,7 @@ static void gb_connection_cancel_operations(struct gb_connection *connection,
                operation = list_last_entry(&connection->operations,
                                            struct gb_operation, links);
                gb_operation_get(operation);
-               spin_unlock_irqrestore(&connection->lock, flags);
+               spin_unlock_irq(&connection->lock);
 
                if (gb_operation_is_incoming(operation))
                        gb_operation_cancel_incoming(operation, errno);
@@ -590,7 +589,7 @@ static void gb_connection_cancel_operations(struct gb_connection *connection,
 
                gb_operation_put(operation);
 
-               spin_lock_irqsave(&connection->lock, flags);
+               spin_lock_irq(&connection->lock);
        }
 }
 
@@ -601,7 +600,7 @@ static void gb_connection_cancel_operations(struct gb_connection *connection,
  */
 static void
 gb_connection_flush_incoming_operations(struct gb_connection *connection,
-                                        int errno, unsigned long flags)
+                                        int errno)
        __must_hold(&connection->lock)
 {
        struct gb_operation *operation;
@@ -621,13 +620,13 @@ gb_connection_flush_incoming_operations(struct gb_connection *connection,
                if (!incoming)
                        break;
 
-               spin_unlock_irqrestore(&connection->lock, flags);
+               spin_unlock_irq(&connection->lock);
 
                /* FIXME: flush, not cancel? */
                gb_operation_cancel_incoming(operation, errno);
                gb_operation_put(operation);
 
-               spin_lock_irqsave(&connection->lock, flags);
+               spin_lock_irq(&connection->lock);
        }
 }
 
@@ -643,7 +642,6 @@ gb_connection_flush_incoming_operations(struct gb_connection *connection,
  */
 static int _gb_connection_enable(struct gb_connection *connection, bool rx)
 {
-       unsigned long flags;
        int ret;
 
        /* Handle ENABLED_TX -> ENABLED transitions. */
@@ -651,9 +649,9 @@ static int _gb_connection_enable(struct gb_connection *connection, bool rx)
                if (!(connection->handler && rx))
                        return 0;
 
-               spin_lock_irqsave(&connection->lock, flags);
+               spin_lock_irq(&connection->lock);
                connection->state = GB_CONNECTION_STATE_ENABLED;
-               spin_unlock_irqrestore(&connection->lock, flags);
+               spin_unlock_irq(&connection->lock);
 
                return 0;
        }
@@ -670,12 +668,12 @@ static int _gb_connection_enable(struct gb_connection *connection, bool rx)
        if (ret)
                goto err_svc_connection_destroy;
 
-       spin_lock_irqsave(&connection->lock, flags);
+       spin_lock_irq(&connection->lock);
        if (connection->handler && rx)
                connection->state = GB_CONNECTION_STATE_ENABLED;
        else
                connection->state = GB_CONNECTION_STATE_ENABLED_TX;
-       spin_unlock_irqrestore(&connection->lock, flags);
+       spin_unlock_irq(&connection->lock);
 
        ret = gb_connection_control_connected(connection);
        if (ret)
@@ -686,10 +684,10 @@ static int _gb_connection_enable(struct gb_connection *connection, bool rx)
 err_control_disconnecting:
        gb_connection_control_disconnecting(connection);
 
-       spin_lock_irqsave(&connection->lock, flags);
+       spin_lock_irq(&connection->lock);
        connection->state = GB_CONNECTION_STATE_DISCONNECTING;
-       gb_connection_cancel_operations(connection, -ESHUTDOWN, flags);
-       spin_unlock_irqrestore(&connection->lock, flags);
+       gb_connection_cancel_operations(connection, -ESHUTDOWN);
+       spin_unlock_irq(&connection->lock);
 
        /* Transmit queue should already be empty. */
        gb_connection_hd_cport_flush(connection);
@@ -755,18 +753,16 @@ EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
 
 void gb_connection_disable_rx(struct gb_connection *connection)
 {
-       unsigned long flags;
-
        mutex_lock(&connection->mutex);
 
-       spin_lock_irqsave(&connection->lock, flags);
+       spin_lock_irq(&connection->lock);
        if (connection->state != GB_CONNECTION_STATE_ENABLED) {
-               spin_unlock_irqrestore(&connection->lock, flags);
+               spin_unlock_irq(&connection->lock);
                goto out_unlock;
        }
        connection->state = GB_CONNECTION_STATE_ENABLED_TX;
-       gb_connection_flush_incoming_operations(connection, -ESHUTDOWN, flags);
-       spin_unlock_irqrestore(&connection->lock, flags);
+       gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
+       spin_unlock_irq(&connection->lock);
 
        trace_gb_connection_disable(connection);
 
@@ -789,8 +785,6 @@ void gb_connection_mode_switch_complete(struct gb_connection *connection)
 
 void gb_connection_disable(struct gb_connection *connection)
 {
-       unsigned long flags;
-
        mutex_lock(&connection->mutex);
 
        if (connection->state == GB_CONNECTION_STATE_DISABLED)
@@ -800,10 +794,10 @@ void gb_connection_disable(struct gb_connection *connection)
 
        gb_connection_control_disconnecting(connection);
 
-       spin_lock_irqsave(&connection->lock, flags);
+       spin_lock_irq(&connection->lock);
        connection->state = GB_CONNECTION_STATE_DISCONNECTING;
-       gb_connection_cancel_operations(connection, -ESHUTDOWN, flags);
-       spin_unlock_irqrestore(&connection->lock, flags);
+       gb_connection_cancel_operations(connection, -ESHUTDOWN);
+       spin_unlock_irq(&connection->lock);
 
        gb_connection_hd_cport_flush(connection);
 
@@ -830,8 +824,6 @@ EXPORT_SYMBOL_GPL(gb_connection_disable);
 /* Disable a connection without communicating with the remote end. */
 void gb_connection_disable_forced(struct gb_connection *connection)
 {
-       unsigned long flags;
-
        mutex_lock(&connection->mutex);
 
        if (connection->state == GB_CONNECTION_STATE_DISABLED)
@@ -839,10 +831,10 @@ void gb_connection_disable_forced(struct gb_connection *connection)
 
        trace_gb_connection_disable(connection);
 
-       spin_lock_irqsave(&connection->lock, flags);
+       spin_lock_irq(&connection->lock);
        connection->state = GB_CONNECTION_STATE_DISABLED;
-       gb_connection_cancel_operations(connection, -ESHUTDOWN, flags);
-       spin_unlock_irqrestore(&connection->lock, flags);
+       gb_connection_cancel_operations(connection, -ESHUTDOWN);
+       spin_unlock_irq(&connection->lock);
 
        gb_connection_hd_cport_flush(connection);
        gb_connection_hd_cport_features_disable(connection);
@@ -857,8 +849,6 @@ EXPORT_SYMBOL_GPL(gb_connection_disable_forced);
 /* Caller must have disabled the connection before destroying it. */
 void gb_connection_destroy(struct gb_connection *connection)
 {
-       unsigned long flags;
-
        if (!connection)
                return;
 
@@ -867,10 +857,10 @@ void gb_connection_destroy(struct gb_connection *connection)
 
        mutex_lock(&gb_connection_mutex);
 
-       spin_lock_irqsave(&gb_connections_lock, flags);
+       spin_lock_irq(&gb_connections_lock);
        list_del(&connection->bundle_links);
        list_del(&connection->hd_links);
-       spin_unlock_irqrestore(&gb_connections_lock, flags);
+       spin_unlock_irq(&gb_connections_lock);
 
        destroy_workqueue(connection->wq);
 
@@ -496,12 +496,11 @@ static void message_cancel(struct gb_message *message)
        struct gb_host_device *hd = message->operation->connection->hd;
        struct es2_ap_dev *es2 = hd_to_es2(hd);
        struct urb *urb;
-       unsigned long flags;
        int i;
 
        might_sleep();
 
-       spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
+       spin_lock_irq(&es2->cport_out_urb_lock);
        urb = message->hcpriv;
 
        /* Prevent dynamically allocated urb from being deallocated. */
@@ -514,14 +513,14 @@ static void message_cancel(struct gb_message *message)
                        break;
                }
        }
-       spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
+       spin_unlock_irq(&es2->cport_out_urb_lock);
 
        usb_kill_urb(urb);
 
        if (i < NUM_CPORT_OUT_URB) {
-               spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
+               spin_lock_irq(&es2->cport_out_urb_lock);
                es2->cport_out_urb_cancelled[i] = false;
-               spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
+               spin_unlock_irq(&es2->cport_out_urb_lock);
        }
 
        usb_free_urb(urb);
@@ -674,7 +674,6 @@ static int set_serial_info(struct gb_tty *gb_tty,
 static int wait_serial_change(struct gb_tty *gb_tty, unsigned long arg)
 {
        int retval = 0;
-       unsigned long flags;
        DECLARE_WAITQUEUE(wait, current);
        struct async_icount old;
        struct async_icount new;
@@ -683,11 +682,11 @@ static int wait_serial_change(struct gb_tty *gb_tty, unsigned long arg)
                return -EINVAL;
 
        do {
-               spin_lock_irqsave(&gb_tty->read_lock, flags);
+               spin_lock_irq(&gb_tty->read_lock);
                old = gb_tty->oldcount;
                new = gb_tty->iocount;
                gb_tty->oldcount = new;
-               spin_unlock_irqrestore(&gb_tty->read_lock, flags);
+               spin_unlock_irq(&gb_tty->read_lock);
 
                if ((arg & TIOCM_DSR) && (old.dsr != new.dsr))
                        break;