summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJohan Hovold <johan@hovoldconsulting.com>2016-06-27 20:07:09 +0200
committerGreg Kroah-Hartman <gregkh@google.com>2016-06-29 16:37:17 -0700
commita29bac623f015045385411eb9b3eced95aa96686 (patch)
treebeff19e67205209118cdaf5d6ea8ab6fc5aa7a07
parent1beb1fae808d82a409b1c6ad4575f890b946f0ca (diff)
downloadop-kernel-dev-a29bac623f015045385411eb9b3eced95aa96686.zip
op-kernel-dev-a29bac623f015045385411eb9b3eced95aa96686.tar.gz
greybus: Revert "connection: switch to using spin_lock_irqsave/spin_lock_irqrestore excluisvely"
This reverts commit 426237c515b42b9f06d9a2b1021a6d2c4d440c51. Someone decided that all use of spin_lock_irq was to be considered a bug and went on a search-and-replace type "bug-fixing" spree last week. This is however just plain wrong. Using spin_lock_irq is perfectly fine in paths where interrupts have not been disabled, and this is in fact even preferred over the lazy approach of always using spin_lock_irqsave instead of understanding the code that is being written or modified. All current uses of spin_lock_irq have already been vetted in this respect. Also note that it is only used in functions that may sleep, that is, in functions that must not be called with interrupts disabled in the first place. Signed-off-by: Johan Hovold <johan@hovoldconsulting.com> Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
-rw-r--r--drivers/staging/greybus/connection.c53
1 file changed, 23 insertions, 30 deletions
diff --git a/drivers/staging/greybus/connection.c b/drivers/staging/greybus/connection.c
index 9eb177e..f261468 100644
--- a/drivers/staging/greybus/connection.c
+++ b/drivers/staging/greybus/connection.c
@@ -574,7 +574,7 @@ static int gb_connection_ping(struct gb_connection *connection)
* DISCONNECTING.
*/
static void gb_connection_cancel_operations(struct gb_connection *connection,
- int errno, unsigned long *flags)
+ int errno)
__must_hold(&connection->lock)
{
struct gb_operation *operation;
@@ -583,7 +583,7 @@ static void gb_connection_cancel_operations(struct gb_connection *connection,
operation = list_last_entry(&connection->operations,
struct gb_operation, links);
gb_operation_get(operation);
- spin_unlock_irqrestore(&connection->lock, *flags);
+ spin_unlock_irq(&connection->lock);
if (gb_operation_is_incoming(operation))
gb_operation_cancel_incoming(operation, errno);
@@ -592,7 +592,7 @@ static void gb_connection_cancel_operations(struct gb_connection *connection,
gb_operation_put(operation);
- spin_lock_irqsave(&connection->lock, *flags);
+ spin_lock_irq(&connection->lock);
}
}
@@ -603,7 +603,7 @@ static void gb_connection_cancel_operations(struct gb_connection *connection,
*/
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
- int errno, unsigned long *flags)
+ int errno)
__must_hold(&connection->lock)
{
struct gb_operation *operation;
@@ -623,13 +623,13 @@ gb_connection_flush_incoming_operations(struct gb_connection *connection,
if (!incoming)
break;
- spin_unlock_irqrestore(&connection->lock, *flags);
+ spin_unlock_irq(&connection->lock);
/* FIXME: flush, not cancel? */
gb_operation_cancel_incoming(operation, errno);
gb_operation_put(operation);
- spin_lock_irqsave(&connection->lock, *flags);
+ spin_lock_irq(&connection->lock);
}
}
@@ -646,16 +646,15 @@ gb_connection_flush_incoming_operations(struct gb_connection *connection,
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
int ret;
- unsigned long flags;
/* Handle ENABLED_TX -> ENABLED transitions. */
if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
if (!(connection->handler && rx))
return 0;
- spin_lock_irqsave(&connection->lock, flags);
+ spin_lock_irq(&connection->lock);
connection->state = GB_CONNECTION_STATE_ENABLED;
- spin_unlock_irqrestore(&connection->lock, flags);
+ spin_unlock_irq(&connection->lock);
return 0;
}
@@ -672,12 +671,12 @@ static int _gb_connection_enable(struct gb_connection *connection, bool rx)
if (ret)
goto err_svc_connection_destroy;
- spin_lock_irqsave(&connection->lock, flags);
+ spin_lock_irq(&connection->lock);
if (connection->handler && rx)
connection->state = GB_CONNECTION_STATE_ENABLED;
else
connection->state = GB_CONNECTION_STATE_ENABLED_TX;
- spin_unlock_irqrestore(&connection->lock, flags);
+ spin_unlock_irq(&connection->lock);
ret = gb_connection_control_connected(connection);
if (ret)
@@ -688,10 +687,10 @@ static int _gb_connection_enable(struct gb_connection *connection, bool rx)
err_control_disconnecting:
gb_connection_control_disconnecting(connection);
- spin_lock_irqsave(&connection->lock, flags);
+ spin_lock_irq(&connection->lock);
connection->state = GB_CONNECTION_STATE_DISCONNECTING;
- gb_connection_cancel_operations(connection, -ESHUTDOWN, &flags);
- spin_unlock_irqrestore(&connection->lock, flags);
+ gb_connection_cancel_operations(connection, -ESHUTDOWN);
+ spin_unlock_irq(&connection->lock);
/* Transmit queue should already be empty. */
gb_connection_hd_cport_flush(connection);
@@ -757,18 +756,16 @@ EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
void gb_connection_disable_rx(struct gb_connection *connection)
{
- unsigned long flags;
-
mutex_lock(&connection->mutex);
- spin_lock_irqsave(&connection->lock, flags);
+ spin_lock_irq(&connection->lock);
if (connection->state != GB_CONNECTION_STATE_ENABLED) {
- spin_unlock_irqrestore(&connection->lock, flags);
+ spin_unlock_irq(&connection->lock);
goto out_unlock;
}
connection->state = GB_CONNECTION_STATE_ENABLED_TX;
- gb_connection_flush_incoming_operations(connection, -ESHUTDOWN, &flags);
- spin_unlock_irqrestore(&connection->lock, flags);
+ gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
+ spin_unlock_irq(&connection->lock);
trace_gb_connection_disable(connection);
@@ -791,8 +788,6 @@ void gb_connection_mode_switch_complete(struct gb_connection *connection)
void gb_connection_disable(struct gb_connection *connection)
{
- unsigned long flags;
-
mutex_lock(&connection->mutex);
if (connection->state == GB_CONNECTION_STATE_DISABLED)
@@ -802,10 +797,10 @@ void gb_connection_disable(struct gb_connection *connection)
gb_connection_control_disconnecting(connection);
- spin_lock_irqsave(&connection->lock, flags);
+ spin_lock_irq(&connection->lock);
connection->state = GB_CONNECTION_STATE_DISCONNECTING;
- gb_connection_cancel_operations(connection, -ESHUTDOWN, &flags);
- spin_unlock_irqrestore(&connection->lock, flags);
+ gb_connection_cancel_operations(connection, -ESHUTDOWN);
+ spin_unlock_irq(&connection->lock);
gb_connection_hd_cport_flush(connection);
@@ -832,8 +827,6 @@ EXPORT_SYMBOL_GPL(gb_connection_disable);
/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
- unsigned long flags;
-
mutex_lock(&connection->mutex);
if (connection->state == GB_CONNECTION_STATE_DISABLED)
@@ -841,10 +834,10 @@ void gb_connection_disable_forced(struct gb_connection *connection)
trace_gb_connection_disable(connection);
- spin_lock_irqsave(&connection->lock, flags);
+ spin_lock_irq(&connection->lock);
connection->state = GB_CONNECTION_STATE_DISABLED;
- gb_connection_cancel_operations(connection, -ESHUTDOWN, &flags);
- spin_unlock_irqrestore(&connection->lock, flags);
+ gb_connection_cancel_operations(connection, -ESHUTDOWN);
+ spin_unlock_irq(&connection->lock);
gb_connection_hd_cport_flush(connection);
gb_connection_hd_cport_features_disable(connection);
OpenPOWER on IntegriCloud