@@ -572,7 +572,7 @@ static int gb_connection_ping(struct gb_connection *connection)
  * DISCONNECTING.
  */
 static void gb_connection_cancel_operations(struct gb_connection *connection,
-                                            int errno)
+                                            int errno, unsigned long *flags)
         __must_hold(&connection->lock)
 {
         struct gb_operation *operation;
@@ -581,7 +581,7 @@ static void gb_connection_cancel_operations(struct gb_connection *connection,
                 operation = list_last_entry(&connection->operations,
                                             struct gb_operation, links);
                 gb_operation_get(operation);
-                spin_unlock_irq(&connection->lock);
+                spin_unlock_irqrestore(&connection->lock, *flags);
 
                 if (gb_operation_is_incoming(operation))
                         gb_operation_cancel_incoming(operation, errno);
@@ -590,7 +590,7 @@ static void gb_connection_cancel_operations(struct gb_connection *connection,
 
                 gb_operation_put(operation);
 
-                spin_lock_irq(&connection->lock);
+                spin_lock_irqsave(&connection->lock, *flags);
         }
 }
 
@@ -601,7 +601,7 @@ static void gb_connection_cancel_operations(struct gb_connection *connection,
  */
 static void
 gb_connection_flush_incoming_operations(struct gb_connection *connection,
-                                        int errno)
+                                        int errno, unsigned long *flags)
         __must_hold(&connection->lock)
 {
         struct gb_operation *operation;
@@ -621,13 +621,13 @@ gb_connection_flush_incoming_operations(struct gb_connection *connection,
                 if (!incoming)
                         break;
 
-                spin_unlock_irq(&connection->lock);
+                spin_unlock_irqrestore(&connection->lock, *flags);
 
                 /* FIXME: flush, not cancel? */
                 gb_operation_cancel_incoming(operation, errno);
                 gb_operation_put(operation);
 
-                spin_lock_irq(&connection->lock);
+                spin_lock_irqsave(&connection->lock, *flags);
         }
 }
 
@@ -644,15 +644,16 @@ gb_connection_flush_incoming_operations(struct gb_connection *connection,
 static int _gb_connection_enable(struct gb_connection *connection, bool rx)
 {
         int ret;
+        unsigned long flags;
 
         /* Handle ENABLED_TX -> ENABLED transitions. */
         if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
                 if (!(connection->handler && rx))
                         return 0;
 
-                spin_lock_irq(&connection->lock);
+                spin_lock_irqsave(&connection->lock, flags);
                 connection->state = GB_CONNECTION_STATE_ENABLED;
-                spin_unlock_irq(&connection->lock);
+                spin_unlock_irqrestore(&connection->lock, flags);
 
                 return 0;
         }
@@ -669,12 +670,12 @@ static int _gb_connection_enable(struct gb_connection *connection, bool rx)
         if (ret)
                 goto err_svc_connection_destroy;
 
-        spin_lock_irq(&connection->lock);
+        spin_lock_irqsave(&connection->lock, flags);
         if (connection->handler && rx)
                 connection->state = GB_CONNECTION_STATE_ENABLED;
         else
                 connection->state = GB_CONNECTION_STATE_ENABLED_TX;
-        spin_unlock_irq(&connection->lock);
+        spin_unlock_irqrestore(&connection->lock, flags);
 
         ret = gb_connection_control_connected(connection);
         if (ret)
@@ -685,10 +686,10 @@ static int _gb_connection_enable(struct gb_connection *connection, bool rx)
 err_control_disconnecting:
         gb_connection_control_disconnecting(connection);
 
-        spin_lock_irq(&connection->lock);
+        spin_lock_irqsave(&connection->lock, flags);
         connection->state = GB_CONNECTION_STATE_DISCONNECTING;
-        gb_connection_cancel_operations(connection, -ESHUTDOWN);
-        spin_unlock_irq(&connection->lock);
+        gb_connection_cancel_operations(connection, -ESHUTDOWN, &flags);
+        spin_unlock_irqrestore(&connection->lock, flags);
 
         /* Transmit queue should already be empty. */
         gb_connection_hd_cport_flush(connection);
@@ -754,16 +755,18 @@ EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
 
 void gb_connection_disable_rx(struct gb_connection *connection)
 {
+        unsigned long flags;
+
         mutex_lock(&connection->mutex);
 
-        spin_lock_irq(&connection->lock);
+        spin_lock_irqsave(&connection->lock, flags);
         if (connection->state != GB_CONNECTION_STATE_ENABLED) {
-                spin_unlock_irq(&connection->lock);
+                spin_unlock_irqrestore(&connection->lock, flags);
                 goto out_unlock;
         }
         connection->state = GB_CONNECTION_STATE_ENABLED_TX;
-        gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
-        spin_unlock_irq(&connection->lock);
+        gb_connection_flush_incoming_operations(connection, -ESHUTDOWN, &flags);
+        spin_unlock_irqrestore(&connection->lock, flags);
 
         trace_gb_connection_disable(connection);
 
@@ -786,6 +789,8 @@ void gb_connection_mode_switch_complete(struct gb_connection *connection)
 
 void gb_connection_disable(struct gb_connection *connection)
 {
+        unsigned long flags;
+
         mutex_lock(&connection->mutex);
 
         if (connection->state == GB_CONNECTION_STATE_DISABLED)
@@ -795,10 +800,10 @@ void gb_connection_disable(struct gb_connection *connection)
 
         gb_connection_control_disconnecting(connection);
 
-        spin_lock_irq(&connection->lock);
+        spin_lock_irqsave(&connection->lock, flags);
         connection->state = GB_CONNECTION_STATE_DISCONNECTING;
-        gb_connection_cancel_operations(connection, -ESHUTDOWN);
-        spin_unlock_irq(&connection->lock);
+        gb_connection_cancel_operations(connection, -ESHUTDOWN, &flags);
+        spin_unlock_irqrestore(&connection->lock, flags);
 
         gb_connection_hd_cport_flush(connection);
 
@@ -825,6 +830,8 @@ EXPORT_SYMBOL_GPL(gb_connection_disable);
 /* Disable a connection without communicating with the remote end. */
 void gb_connection_disable_forced(struct gb_connection *connection)
 {
+        unsigned long flags;
+
         mutex_lock(&connection->mutex);
 
         if (connection->state == GB_CONNECTION_STATE_DISABLED)
@@ -832,10 +839,10 @@ void gb_connection_disable_forced(struct gb_connection *connection)
 
         trace_gb_connection_disable(connection);
 
-        spin_lock_irq(&connection->lock);
+        spin_lock_irqsave(&connection->lock, flags);
         connection->state = GB_CONNECTION_STATE_DISABLED;
-        gb_connection_cancel_operations(connection, -ESHUTDOWN);
-        spin_unlock_irq(&connection->lock);
+        gb_connection_cancel_operations(connection, -ESHUTDOWN, &flags);
+        spin_unlock_irqrestore(&connection->lock, flags);
 
         gb_connection_hd_cport_flush(connection);
         gb_connection_hd_cport_features_disable(connection);
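
Every hunk above applies the same conversion: spin_lock_irq()/spin_unlock_irq() become spin_lock_irqsave()/spin_unlock_irqrestore(), and the two helpers that temporarily drop connection->lock now receive the caller's saved flags by pointer, so the interrupt state recorded by the outermost irqsave is the one that is ultimately restored. The fragment below is a minimal, self-contained sketch of that pattern, not part of the patch and only compilable in a kernel build; struct my_ctx, my_cancel_all() and my_disable() are hypothetical names used for illustration.

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_item {
        struct list_head links;
};

struct my_ctx {
        spinlock_t lock;
        struct list_head items;
};

/*
 * Called with ctx->lock held; may drop and re-acquire it.  The caller's
 * saved flags are passed by pointer: unlike spin_unlock_irq(), the
 * _irqrestore variant does not unconditionally re-enable interrupts, it
 * restores whatever state the caller's spin_lock_irqsave() recorded.
 */
static void my_cancel_all(struct my_ctx *ctx, unsigned long *flags)
        __must_hold(&ctx->lock)
{
        struct my_item *item;

        while (!list_empty(&ctx->items)) {
                item = list_last_entry(&ctx->items, struct my_item, links);
                list_del(&item->links);

                /* Drop the lock, restoring the caller's interrupt state. */
                spin_unlock_irqrestore(&ctx->lock, *flags);

                /* ... cancel or flush the item without holding the lock ... */

                /* Re-take the lock and refresh *flags for the caller. */
                spin_lock_irqsave(&ctx->lock, *flags);
        }
}

static void my_disable(struct my_ctx *ctx)
{
        unsigned long flags;

        spin_lock_irqsave(&ctx->lock, flags);
        my_cancel_all(ctx, &flags);
        spin_unlock_irqrestore(&ctx->lock, flags);
}

The helper cannot simply keep its own local flags variable: it is entered with the lock already held, so the only valid saved interrupt state is the one produced by the caller's spin_lock_irqsave(), which is why the patch threads a single flags value through by pointer.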