@@ -236,7 +236,7 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
return SUCCESS;
}
/* get a task ref till FW processes the req for the ICD used */
- __iscsi_get_task(abrt_task);
+ iscsi_get_task(abrt_task);
abrt_io_task = abrt_task->dd_data;
conn = abrt_task->conn;
beiscsi_conn = conn->dd_data;
@@ -289,7 +289,7 @@ static bool beiscsi_dev_reset_sc_iter(struct scsi_cmnd *sc, void *data,
return false;
/* get a task ref till FW processes the req for the ICD used */
- __iscsi_get_task(task);
+ iscsi_get_task(task);
io_task = task->dd_data;
/* mark WRB invalid which have been not processed by FW yet */
if (is_chip_be2_be3r(phba)) {
@@ -1262,7 +1262,7 @@ static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
spin_lock_bh(&session->back_lock);
task = pwrb_handle->pio_handle;
if (task)
- __iscsi_put_task(task);
+ iscsi_put_task(task);
spin_unlock_bh(&session->back_lock);
}
@@ -1657,7 +1657,7 @@ static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
task = iscsi_itt_to_task(conn,
nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
if (task)
- __iscsi_put_task(task);
+ iscsi_put_task(task);
spin_unlock(&session->back_lock);
}
@@ -475,7 +475,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
* iscsi_free_task - free a task
* @task: iscsi cmd task
*
- * Must be called with session back_lock.
* This function returns the scsi command to scsi-ml or cleans
* up mgmt tasks then returns the task to the pool.
*/
@@ -509,7 +508,9 @@ static void iscsi_free_task(struct iscsi_task *task)
}
if (!sc) {
+ spin_lock_bh(&session->mgmt_lock);
kfifo_in(&session->mgmt_pool.queue, (void *)&task, sizeof(void *));
+ spin_unlock_bh(&session->mgmt_lock);
} else {
/* SCSI eh reuses commands to verify us */
sc->SCp.ptr = NULL;
@@ -522,28 +523,17 @@ static void iscsi_free_task(struct iscsi_task *task)
}
}
-void __iscsi_get_task(struct iscsi_task *task)
+void iscsi_get_task(struct iscsi_task *task)
{
refcount_inc(&task->refcount);
}
-EXPORT_SYMBOL_GPL(__iscsi_get_task);
+EXPORT_SYMBOL_GPL(iscsi_get_task);
-void __iscsi_put_task(struct iscsi_task *task)
+void iscsi_put_task(struct iscsi_task *task)
{
if (refcount_dec_and_test(&task->refcount))
iscsi_free_task(task);
}
-EXPORT_SYMBOL_GPL(__iscsi_put_task);
-
-void iscsi_put_task(struct iscsi_task *task)
-{
- struct iscsi_session *session = task->conn->session;
-
- /* regular RX path uses back_lock */
- spin_lock_bh(&session->back_lock);
- __iscsi_put_task(task);
- spin_unlock_bh(&session->back_lock);
-}
EXPORT_SYMBOL_GPL(iscsi_put_task);
/**
@@ -572,7 +562,7 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
WRITE_ONCE(conn->ping_task, NULL);
/* release get from queueing */
- __iscsi_put_task(task);
+ iscsi_put_task(task);
}
/**
@@ -619,12 +609,12 @@ static bool cleanup_queued_task(struct iscsi_task *task)
* list
*/
if (task->state == ISCSI_TASK_RUNNING)
- __iscsi_put_task(task);
+ iscsi_put_task(task);
}
if (conn->task == task) {
conn->task = NULL;
- __iscsi_put_task(task);
+ iscsi_put_task(task);
}
return early_complete;
@@ -749,9 +739,13 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
+ spin_lock_bh(&session->mgmt_lock);
if (!kfifo_out(&session->mgmt_pool.queue, (void *)&task,
- sizeof(void *)))
+ sizeof(void *))) {
+ spin_unlock_bh(&session->mgmt_lock);
return NULL;
+ }
+ spin_unlock_bh(&session->mgmt_lock);
}
/*
* released in complete pdu for task we expect a response for, and
@@ -807,10 +801,7 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
return task;
free_task:
- /* regular RX path uses back_lock */
- spin_lock(&session->back_lock);
- __iscsi_put_task(task);
- spin_unlock(&session->back_lock);
+ iscsi_put_task(task);
return NULL;
}
@@ -1457,16 +1448,15 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, bool has_extra_ref)
int rc;
/* Take a ref so we can access it after xmit_task() */
- __iscsi_get_task(task);
+ iscsi_get_task(task);
conn->task = NULL;
- spin_lock_bh(&conn->session->back_lock);
/*
* If this was a requeue for a R2T or we were in the middle of sending
* the task, we have an extra ref on the task in case a bad target
* sends a cmd rsp before we have handled the task.
*/
if (has_extra_ref)
- __iscsi_put_task(task);
+ iscsi_put_task(task);
/*
* Do this after dropping the extra ref because if this was a requeue
@@ -1476,8 +1466,6 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, bool has_extra_ref)
rc = -ENODATA;
goto put_task;
}
- spin_unlock_bh(&conn->session->back_lock);
-
spin_unlock_bh(&conn->session->frwd_lock);
rc = conn->session->tt->xmit_task(task);
spin_lock_bh(&conn->session->frwd_lock);
@@ -1492,12 +1480,13 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, bool has_extra_ref)
* get an extra ref that is released next time we access it
* as conn->task above.
*/
- __iscsi_get_task(task);
+ iscsi_get_task(task);
conn->task = task;
}
-put_task:
- __iscsi_put_task(task);
spin_unlock(&conn->session->back_lock);
+
+put_task:
+ iscsi_put_task(task);
return rc;
}
@@ -1568,10 +1557,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
running);
list_del_init(&task->running);
if (iscsi_prep_mgmt_task(conn, task)) {
- /* regular RX path uses back_lock */
- spin_lock_bh(&conn->session->back_lock);
- __iscsi_put_task(task);
- spin_unlock_bh(&conn->session->back_lock);
+ iscsi_put_task(task);
continue;
}
conn->task = task;
@@ -1909,7 +1895,7 @@ static bool fail_scsi_task_iter(struct scsi_cmnd *sc, void *data, bool rsvd)
ISCSI_DBG_SESSION(session, "failing sc %p itt 0x%x state %d\n",
task->sc, task->itt, task->state);
- __iscsi_get_task(task);
+ iscsi_get_task(task);
spin_unlock_bh(&session->back_lock);
fail_scsi_task(task, *(int *)iter_data->data);
@@ -2084,7 +2070,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
spin_unlock(&session->back_lock);
goto done;
}
- __iscsi_get_task(task);
+ iscsi_get_task(task);
spin_unlock(&session->back_lock);
if (session->state != ISCSI_STATE_LOGGED_IN) {
@@ -2303,7 +2289,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
return SUCCESS;
}
ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
- __iscsi_get_task(task);
+ iscsi_get_task(task);
spin_unlock(&session->back_lock);
if (task->state == ISCSI_TASK_PENDING) {
@@ -2937,6 +2923,7 @@ struct iscsi_cls_session *
session->dd_data = cls_session->dd_data + sizeof(*session);
mutex_init(&session->eh_mutex);
+ spin_lock_init(&session->mgmt_lock);
spin_lock_init(&session->frwd_lock);
spin_lock_init(&session->back_lock);
@@ -3064,13 +3051,13 @@ struct iscsi_cls_conn *
INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
/* allocate login_task used for the login/text sequences */
- spin_lock_bh(&session->frwd_lock);
+ spin_lock_bh(&session->mgmt_lock);
if (!kfifo_out(&session->mgmt_pool.queue, (void *)&conn->login_task,
sizeof(void *))) {
- spin_unlock_bh(&session->frwd_lock);
+ spin_unlock_bh(&session->mgmt_lock);
goto login_task_alloc_fail;
}
- spin_unlock_bh(&session->frwd_lock);
+ spin_unlock_bh(&session->mgmt_lock);
data = (char *) __get_free_pages(GFP_KERNEL,
get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
@@ -3084,8 +3071,10 @@ struct iscsi_cls_conn *
return cls_conn;
login_task_data_alloc_fail:
+ spin_lock_bh(&session->mgmt_lock);
kfifo_in(&session->mgmt_pool.queue, (void*)&conn->login_task,
sizeof(void *));
+ spin_unlock_bh(&session->mgmt_lock);
login_task_alloc_fail:
iscsi_destroy_conn(cls_conn);
return NULL;
@@ -3126,11 +3115,12 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
kfree(conn->persistent_address);
kfree(conn->local_ipaddr);
- /* regular RX path uses back_lock */
- spin_lock_bh(&session->back_lock);
+
+ spin_lock_bh(&session->mgmt_lock);
kfifo_in(&session->mgmt_pool.queue, (void *)&conn->login_task,
sizeof(void *));
- spin_unlock_bh(&session->back_lock);
+ spin_unlock_bh(&session->mgmt_lock);
+
if (session->leadconn == conn)
session->leadconn = NULL;
spin_unlock_bh(&session->frwd_lock);
@@ -558,7 +558,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
return 0;
}
task->last_xfer = jiffies;
- __iscsi_get_task(task);
+ iscsi_get_task(task);
tcp_conn = conn->dd_data;
rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
@@ -727,11 +727,8 @@ static void qedi_mtask_completion(struct qedi_ctx *qedi,
static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi,
struct iscsi_cqe_solicited *cqe,
- struct iscsi_task *task,
- struct qedi_conn *qedi_conn)
+ struct iscsi_task *task)
{
- struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
- struct iscsi_session *session = conn->session;
struct qedi_cmd *cmd = task->dd_data;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UNSOL,
@@ -741,9 +738,7 @@ static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi,
cmd->state = RESPONSE_RECEIVED;
qedi_clear_task_idx(qedi, cmd->task_id);
- spin_lock_bh(&session->back_lock);
- __iscsi_put_task(task);
- spin_unlock_bh(&session->back_lock);
+ iscsi_put_task(task);
}
static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
@@ -935,7 +930,7 @@ void qedi_fp_process_cqes(struct qedi_work *work)
if ((nopout_hdr->itt == RESERVED_ITT) &&
(cqe->cqe_solicited.itid != (u16)RESERVED_ITT)) {
qedi_process_nopin_local_cmpl(qedi, &cqe->cqe_solicited,
- task, q_conn);
+ task);
} else {
cqe->cqe_solicited.itid =
qedi_get_itt(cqe->cqe_solicited);
@@ -339,19 +339,19 @@ struct iscsi_session {
spinlock_t frwd_lock; /* protects session state *
* in the eh paths, cmdsn *
* suspend bit and session *
- * resources: *
- * - mgmt_pool, kfifo_out, *
- * - queues */
+ * queues */
spinlock_t back_lock; /* protects cmdsn_exp *
- * cmdsn_max, *
- * mgmt_pool kfifo_i */
+ * cmdsn_max, */
int state; /* session state */
int age; /* counts session re-opens */
int scsi_cmds_max; /* max scsi commands */
int cmds_max; /* size of cmds array */
+
struct iscsi_task **mgmt_cmds; /* mgmt task arr */
struct iscsi_pool mgmt_pool; /* mgmt task pool */
+ spinlock_t mgmt_lock; /* protects mgmt pool/arr */
+
void *dd_data; /* LLD private data */
};
@@ -464,8 +464,7 @@ extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
extern struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *, itt_t);
extern void iscsi_requeue_task(struct iscsi_task *task);
extern void iscsi_put_task(struct iscsi_task *task);
-extern void __iscsi_put_task(struct iscsi_task *task);
-extern void __iscsi_get_task(struct iscsi_task *task);
+extern void iscsi_get_task(struct iscsi_task *task);
extern void iscsi_complete_scsi_task(struct iscsi_task *task,
uint32_t exp_cmdsn, uint32_t max_cmdsn);
extern int iscsi_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
This patch adds a new lock around the mgmt pool, so we no longer need to take the back lock when calling iscsi_put_task. This helps in the xmit path, where we currently take the back lock only to drop the task refcount. The next patch will allow us to completely remove the back lock from the xmit path. Signed-off-by: Mike Christie <michael.christie@oracle.com> --- drivers/scsi/be2iscsi/be_main.c | 6 ++-- drivers/scsi/bnx2i/bnx2i_hwi.c | 2 +- drivers/scsi/libiscsi.c | 78 ++++++++++++++++++----------------------- drivers/scsi/libiscsi_tcp.c | 2 +- drivers/scsi/qedi/qedi_fw.c | 11 ++---- include/scsi/libiscsi.h | 13 ++++--- 6 files changed, 48 insertions(+), 64 deletions(-)