author    Andy Grover <andy.grover@oracle.com>  2010-03-24 03:48:04 +0300
committer Andy Grover <andy.grover@oracle.com>  2010-09-09 05:12:13 +0400
commit    2ad8099b58f274dc23bc866ca259d7e5db87fa1a (patch)
tree      e659d7178d1e2729fc306d3b5dbb4e5e5f47544a /net/rds/send.c
parent    049ee3f500954176a87f22e6ee3e98aecb1b8958 (diff)
download  linux-2ad8099b58f274dc23bc866ca259d7e5db87fa1a.tar.xz
RDS: rds_send_xmit() locking/irq fixes
rds_message_put() cannot be called with irqs off, so move it to after irqs are re-enabled. The spinlocks taken throughout the function do not need to use _irqsave, because the lock of c_send_lock at the top has already disabled irqs.

Signed-off-by: Andy Grover <andy.grover@oracle.com>
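
The pattern behind the fix is worth spelling out. Once the outer spin_lock_irqsave() on c_send_lock has disabled interrupts, the inner locks taken inside that critical section can use plain spin_lock()/spin_unlock(), and any work that must not run with irqs off (here, the final rds_message_put() on dropped messages) is parked on a local list and only performed after the outer spin_unlock_irqrestore(). The following sketch is a simplified illustration of that structure only, using invented dummy_conn/dummy_msg/dummy_put() names rather than the real RDS types:

/*
 * Simplified, hypothetical sketch of the locking pattern above -- not the
 * real RDS code.  dummy_conn, dummy_msg and dummy_put() are invented names.
 */
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

struct dummy_msg {
	struct list_head m_item;
};

struct dummy_conn {
	spinlock_t c_send_lock;		/* outer lock, taken with _irqsave */
	spinlock_t c_lock;		/* inner lock, nests under c_send_lock */
	struct list_head c_retrans;
};

/* Stand-in for rds_message_put(): must not run with irqs disabled. */
static void dummy_put(struct dummy_msg *msg)
{
	kfree(msg);
}

static void dummy_xmit(struct dummy_conn *conn)
{
	LIST_HEAD(to_be_dropped);
	struct dummy_msg *msg, *tmp;
	unsigned long flags;

	/* The outer lock disables irqs for the whole critical section. */
	spin_lock_irqsave(&conn->c_send_lock, flags);

	/* Inner lock: plain spin_lock() is enough, irqs are already off. */
	spin_lock(&conn->c_lock);
	list_splice_init(&conn->c_retrans, &to_be_dropped);
	spin_unlock(&conn->c_lock);

	/* ... transmit work happens here ... */

	spin_unlock_irqrestore(&conn->c_send_lock, flags);

	/* irqs are back on, so it is now safe to drop the references. */
	list_for_each_entry_safe(msg, tmp, &to_be_dropped, m_item) {
		list_del(&msg->m_item);
		dummy_put(msg);
	}
}

The design point is ordering rather than locking granularity: everything that needs irqs enabled is queued under the lock and executed once the lock and irq state have been restored.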
Diffstat (limited to 'net/rds/send.c')
-rw-r--r--  net/rds/send.c | 21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/net/rds/send.c b/net/rds/send.c
index d4feec6ad09c..624a3dc7f060 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -168,7 +168,7 @@ int rds_send_xmit(struct rds_connection *conn)
if (!rm) {
unsigned int len;
- spin_lock_irqsave(&conn->c_lock, flags);
+ spin_lock(&conn->c_lock);
if (!list_empty(&conn->c_send_queue)) {
rm = list_entry(conn->c_send_queue.next,
@@ -183,7 +183,7 @@ int rds_send_xmit(struct rds_connection *conn)
list_move_tail(&rm->m_conn_item, &conn->c_retrans);
}
- spin_unlock_irqrestore(&conn->c_lock, flags);
+ spin_unlock(&conn->c_lock);
if (!rm) {
was_empty = 1;
@@ -199,11 +199,10 @@ int rds_send_xmit(struct rds_connection *conn)
*/
if (rm->rdma.op_active &&
test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
- spin_lock_irqsave(&conn->c_lock, flags);
+ spin_lock(&conn->c_lock);
if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
list_move(&rm->m_conn_item, &to_be_dropped);
- spin_unlock_irqrestore(&conn->c_lock, flags);
- rds_message_put(rm);
+ spin_unlock(&conn->c_lock);
continue;
}
@@ -326,10 +325,6 @@ int rds_send_xmit(struct rds_connection *conn)
}
}
- /* Nuke any messages we decided not to retransmit. */
- if (!list_empty(&to_be_dropped))
- rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
-
if (conn->c_trans->xmit_complete)
conn->c_trans->xmit_complete(conn);
@@ -347,6 +342,14 @@ int rds_send_xmit(struct rds_connection *conn)
*/
spin_unlock_irqrestore(&conn->c_send_lock, flags);
+ /* Nuke any messages we decided not to retransmit. */
+ if (!list_empty(&to_be_dropped)) {
+ /* irqs on here, so we can put(), unlike above */
+ list_for_each_entry(rm, &to_be_dropped, m_conn_item)
+ rds_message_put(rm);
+ rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
+ }
+
if (send_quota == 0 && !was_empty) {
/* We exhausted the send quota, but there's work left to
* do. Return and (re-)schedule the send worker.