@@ -46,7 +46,7 @@ static int ublk_fault_inject_queue_io(struct ublk_queue *q, int tag)
 		.tv_nsec = (long long)q->dev->private_data,
 	};
 
-	ublk_queue_alloc_sqes(q, &sqe, 1);
+	ublk_io_alloc_sqes(ublk_get_io(q, tag), &sqe, 1);
 	io_uring_prep_timeout(sqe, &ts, 1, 0);
 	sqe->user_data = build_user_data(tag, ublksrv_get_op(iod), 0, q->q_id, 1);
 
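
Every converted call site looks the io up from its (queue, tag) pair. ublk_get_io() itself is outside this diff; a minimal sketch of it, assuming it simply indexes the queue's embedded per-tag array (the same layout the container_of() in ublk_io_to_queue() below depends on):

	/* Assumed helper, not part of this patch: map (q, tag) to its ublk_io.
	 * Presumes struct ublk_queue embeds a per-tag ios[] array. */
	static inline struct ublk_io *ublk_get_io(struct ublk_queue *q, unsigned tag)
	{
		return &q->ios[tag];
	}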
@@ -18,7 +18,7 @@ static int loop_queue_flush_io(struct ublk_queue *q, const struct ublksrv_io_des
 	unsigned ublk_op = ublksrv_get_op(iod);
 	struct io_uring_sqe *sqe[1];
 
-	ublk_queue_alloc_sqes(q, sqe, 1);
+	ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 1);
 	io_uring_prep_fsync(sqe[0], 1 /*fds[1]*/, IORING_FSYNC_DATASYNC);
 	io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
 	/* bit63 marks us as tgt io */
@@ -36,7 +36,7 @@ static int loop_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_de
 	void *addr = (zc | auto_zc) ? NULL : (void *)iod->addr;
 
 	if (!zc || auto_zc) {
-		ublk_queue_alloc_sqes(q, sqe, 1);
+		ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 1);
 		if (!sqe[0])
 			return -ENOMEM;
 
@@ -52,7 +52,7 @@ static int loop_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_de
 		return 1;
 	}
 
-	ublk_queue_alloc_sqes(q, sqe, 3);
+	ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 3);
 
 	io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag);
 	sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
@@ -599,7 +599,7 @@ int ublk_queue_io_cmd(struct ublk_queue *q, struct ublk_io *io, unsigned tag)
 	if (io_uring_sq_space_left(&q->ring) < 1)
 		io_uring_submit(&q->ring);
 
-	ublk_queue_alloc_sqes(q, sqe, 1);
+	ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 1);
 	if (!sqe[0]) {
 		ublk_err("%s: run out of sqe %d, tag %d\n",
 				__func__, q->q_id, tag);
@@ -124,6 +124,8 @@ struct ublk_io {
 	unsigned short flags;
 	unsigned short refs;	/* used by target code only */
 
+	int tag;
+
 	int result;
 
 	unsigned short tgt_ios;
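
The container_of() trick in ublk_io_to_queue() below is only sound if io->tag always equals the io's own index within the queue's ios[] array. That initialization is not part of this excerpt; a hedged sketch of what queue setup presumably does (field names assumed):

	/* Assumed init code: record each io's index at queue setup so that
	 * ios[io->tag] names exactly the element io points at. */
	for (i = 0; i < q->q_depth; i++)
		q->ios[i].tag = i;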
@@ -289,17 +291,23 @@ static inline void ublk_dbg(int level, const char *fmt, ...)
 	}
 }
 
-static inline int ublk_queue_alloc_sqes(struct ublk_queue *q,
+static inline struct ublk_queue *ublk_io_to_queue(const struct ublk_io *io)
+{
+	return container_of(io, struct ublk_queue, ios[io->tag]);
+}
+
+static inline int ublk_io_alloc_sqes(struct ublk_io *io,
 		struct io_uring_sqe *sqes[], int nr_sqes)
 {
-	unsigned left = io_uring_sq_space_left(&q->ring);
+	struct io_uring *ring = &ublk_io_to_queue(io)->ring;
+	unsigned left = io_uring_sq_space_left(ring);
 	int i;
 
 	if (left < nr_sqes)
-		io_uring_submit(&q->ring);
+		io_uring_submit(ring);
 
 	for (i = 0; i < nr_sqes; i++) {
-		sqes[i] = io_uring_get_sqe(&q->ring);
+		sqes[i] = io_uring_get_sqe(ring);
 		if (!sqes[i])
 			return i;
 	}
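
The new helper recovers the owning queue from the io alone: because ios[] is embedded in struct ublk_queue and io->tag is the element's own index, container_of() on ios[io->tag] subtracts that element's offset and lands on the enclosing queue, with no per-io back-pointer stored. A self-contained sketch of the same pattern with toy types (compiles with GCC/Clang; offsetof() with a runtime array index is a GNU extension, which the kernel-style container_of() relies on here):

	#include <assert.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct item { int tag; };

	struct owner {
		int id;
		struct item items[4];
	};

	static struct owner *item_to_owner(struct item *it)
	{
		/* items[it->tag] is the very element 'it' points at, so
		 * subtracting its offset yields the enclosing owner. */
		return container_of(it, struct owner, items[it->tag]);
	}

	int main(void)
	{
		struct owner o = { .id = 42 };
		int i;

		for (i = 0; i < 4; i++)
			o.items[i].tag = i;
		for (i = 0; i < 4; i++)
			assert(item_to_owner(&o.items[i]) == &o);
		return 0;
	}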
@@ -60,7 +60,7 @@ static int null_queue_zc_io(struct ublk_queue *q, int tag)
 	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
 	struct io_uring_sqe *sqe[3];
 
-	ublk_queue_alloc_sqes(q, sqe, 3);
+	ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 3);
 
 	io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag);
 	sqe[0]->user_data = build_user_data(tag,
@@ -82,7 +82,7 @@ static int null_queue_auto_zc_io(struct ublk_queue *q, int tag)
 	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
 	struct io_uring_sqe *sqe[1];
 
-	ublk_queue_alloc_sqes(q, sqe, 1);
+	ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 1);
 	__setup_nop_io(tag, iod, sqe[0], q->q_id);
 	return 1;
 }
@@ -138,7 +138,7 @@ static int stripe_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_
 	io->private_data = s;
 	calculate_stripe_array(conf, iod, s, base);
 
-	ublk_queue_alloc_sqes(q, sqe, s->nr + extra);
+	ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, s->nr + extra);
 
 	if (zc) {
 		io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag);
@@ -182,7 +182,7 @@ static int handle_flush(struct ublk_queue *q, const struct ublksrv_io_desc *iod,
 	struct io_uring_sqe *sqe[NR_STRIPE];
 	int i;
 
-	ublk_queue_alloc_sqes(q, sqe, conf->nr_files);
+	ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, conf->nr_files);
 	for (i = 0; i < conf->nr_files; i++) {
 		io_uring_prep_fsync(sqe[i], i + 1, IORING_FSYNC_DATASYNC);
 		io_uring_sqe_set_flags(sqe[i], IOSQE_FIXED_FILE);
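
Most converted call sites discard the helper's return value; loop_queue_tgt_rw_io() checks sqe[0] instead. Assuming the tail of ublk_io_alloc_sqes() (cut off by its hunk above) returns nr_sqes once every io_uring_get_sqe() call succeeds, a caller that needs the full batch could equally check the count, e.g.:

	/* Hedged sketch: assumes ublk_io_alloc_sqes() returns nr_sqes on full
	 * success and the shortfall index otherwise (its final return
	 * statement is outside the hunk shown). */
	if (ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, conf->nr_files) != conf->nr_files)
		return -ENOMEM;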