@@ -332,10 +332,10 @@ typedef struct NBDClient NBDClient;
 int nbd_export_create(BlockExport *exp, BlockExportOptions *exp_args,
                       Error **errp);
-int nbd_export_new(BlockExport *blk_exp, BlockDriverState *bs,
+int nbd_export_new(BlockExport *blk_exp,
                    const char *name, const char *desc,
                    const char *bitmap, bool readonly, bool shared,
-                   bool writethrough, Error **errp);
+                   Error **errp);
 void nbd_export_set_on_eject_blk(BlockExport *exp, BlockBackend *blk);
 AioContext *nbd_export_aio_context(NBDExport *exp);
@@ -58,7 +58,10 @@ static const BlockExportDriver *blk_exp_find_driver(BlockExportType type)
 BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
 {
     const BlockExportDriver *drv;
-    BlockExport *exp;
+    BlockExport *exp = NULL;
+    BlockDriverState *bs;
+    BlockBackend *blk;
+    AioContext *ctx;
     int ret;
 
     if (!id_wellformed(export->id)) {
@@ -76,6 +79,33 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
         return NULL;
     }
 
+    bs = bdrv_lookup_bs(NULL, export->node_name, errp);
+    if (!bs) {
+        return NULL;
+    }
+
+    ctx = bdrv_get_aio_context(bs);
+    aio_context_acquire(ctx);
+
+    /*
+     * Block exports are used for non-shared storage migration. Make sure
+     * that BDRV_O_INACTIVE is cleared and the image is ready for write
+     * access since the export could be available before migration handover.
+     * ctx was acquired above.
+     */
+    bdrv_invalidate_cache(bs, NULL);
+
+    blk = blk_new(ctx, BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
+    ret = blk_insert_bs(blk, bs, errp);
+    if (ret < 0) {
+        goto fail;
+    }
+
+    if (!export->has_writethrough) {
+        export->writethrough = false;
+    }
+    blk_set_enable_write_cache(blk, !export->writethrough);
+
     assert(drv->instance_size >= sizeof(BlockExport));
     exp = g_malloc0(drv->instance_size);
     *exp = (BlockExport) {
@@ -83,19 +113,30 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
         .refcount = 1,
         .user_owned = true,
         .id = g_strdup(export->id),
+        .ctx = ctx,
+        .blk = blk,
     };
 
     ret = drv->create(exp, export, errp);
     if (ret < 0) {
-        g_free(exp->id);
-        g_free(exp);
-        return NULL;
+        goto fail;
     }
 
     assert(exp->blk != NULL);
 
     QLIST_INSERT_HEAD(&block_exports, exp, next);
+
+    aio_context_release(ctx);
     return exp;
+
+fail:
+    blk_unref(blk);
+    aio_context_release(ctx);
+    if (exp) {
+        g_free(exp->id);
+        g_free(exp);
+    }
+    return NULL;
 }
 
 /* Callers must hold exp->ctx lock */
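With the blk_exp_add() hunks above, the generic export layer now looks up the node, creates the BlockBackend, applies the writethrough setting and holds the node's AioContext before invoking drv->create(), so every export driver finds exp->blk and exp->ctx already valid in its create callback. A rough, hypothetical sketch of what this buys a driver (dummy_export_create and its body are illustrative only, not part of this patch):

static int dummy_export_create(BlockExport *exp, BlockExportOptions *opts,
                               Error **errp)
{
    /* exp->blk was created by blk_exp_add() with BLK_PERM_CONSISTENT_READ
     * and inserted into the node; no blk_new()/blk_insert_bs() needed here. */
    int64_t size = blk_getlength(exp->blk);

    if (size < 0) {
        error_setg_errno(errp, -size, "Failed to determine image size");
        return size;
    }
    return 0;
}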
@@ -177,9 +177,6 @@ int nbd_export_create(BlockExport *exp, BlockExportOptions *exp_args,
                       Error **errp)
 {
     BlockExportOptionsNbd *arg = &exp_args->u.nbd;
-    BlockDriverState *bs = NULL;
-    AioContext *aio_context;
-    int ret;
 
     assert(exp_args->type == BLOCK_EXPORT_TYPE_NBD);
@@ -207,38 +204,16 @@ int nbd_export_create(BlockExport *exp, BlockExportOptions *exp_args,
         return -EEXIST;
     }
 
-    bs = bdrv_lookup_bs(NULL, exp_args->node_name, errp);
-    if (!bs) {
-        return -ENOENT;
-    }
-
-    aio_context = bdrv_get_aio_context(bs);
-    aio_context_acquire(aio_context);
-
     if (!arg->has_writable) {
         arg->writable = false;
     }
-    if (bdrv_is_read_only(bs) && arg->writable) {
-        ret = -EINVAL;
+    if (blk_is_read_only(exp->blk) && arg->writable) {
         error_setg(errp, "Cannot export read-only node as writable");
-        goto out;
-    }
-
-    if (!exp_args->has_writethrough) {
-        exp_args->writethrough = false;
-    }
-
-    ret = nbd_export_new(exp, bs, arg->name, arg->description, arg->bitmap,
-                         !arg->writable, !arg->writable,
-                         exp_args->writethrough, errp);
-    if (ret < 0) {
-        goto out;
+        return -EINVAL;
     }
 
-    ret = 0;
- out:
-    aio_context_release(aio_context);
-    return ret;
+    return nbd_export_new(exp, arg->name, arg->description, arg->bitmap,
+                          !arg->writable, !arg->writable, errp);
 }
 
 void qmp_nbd_server_add(NbdServerAddOptions *arg, Error **errp)
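After this hunk, nbd_export_create() no longer touches the block graph directly: node lookup, BlockBackend creation, write-cache setup and AioContext locking all happen in blk_exp_add(), and the NBD code only validates NBD-specific constraints before calling nbd_export_new(). A hedged sketch of the caller side, using only option fields that appear in the hunks above and assuming an NBD server has already been started (the function name and string values are examples):

static BlockExport *example_add_nbd_export(Error **errp)
{
    BlockExportOptions opts = {
        .type             = BLOCK_EXPORT_TYPE_NBD,
        .id               = (char *)"export0",   /* example export id */
        .node_name        = (char *)"disk0",     /* example node name */
        .has_writethrough = true,
        .writethrough     = false,
    };

    opts.u.nbd.has_writable = true;
    opts.u.nbd.writable = false;

    /* blk_exp_add() performs the node lookup and BlockBackend setup, then
     * calls the NBD driver's create callback, i.e. nbd_export_create(). */
    return blk_exp_add(&opts, errp);
}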
@@ -1507,56 +1507,42 @@ void nbd_export_set_on_eject_blk(BlockExport *exp, BlockBackend *blk)
     blk_add_remove_bs_notifier(blk, &nbd_exp->eject_notifier);
 }
 
-int nbd_export_new(BlockExport *blk_exp, BlockDriverState *bs,
+int nbd_export_new(BlockExport *blk_exp,
                    const char *name, const char *desc,
                    const char *bitmap, bool readonly, bool shared,
-                   bool writethrough, Error **errp)
+                   Error **errp)
 {
     NBDExport *exp = container_of(blk_exp, NBDExport, common);
-    AioContext *ctx;
-    BlockBackend *blk;
+    BlockBackend *blk = blk_exp->blk;
     int64_t size;
-    uint64_t perm;
+    uint64_t perm, shared_perm;
     int ret;
 
-    size = bdrv_getlength(bs);
+    size = blk_getlength(blk);
     if (size < 0) {
         error_setg_errno(errp, -size,
                          "Failed to determine the NBD export's length");
         return size;
     }
 
-    ctx = bdrv_get_aio_context(bs);
-    blk_exp->ctx = ctx;
-
-    /*
-     * NBD exports are used for non-shared storage migration. Make sure
-     * that BDRV_O_INACTIVE is cleared and the image is ready for write
-     * access since the export could be available before migration handover.
-     * ctx was acquired in the caller.
-     */
     assert(name && strlen(name) <= NBD_MAX_STRING_SIZE);
-    bdrv_invalidate_cache(bs, NULL);
-
     /* Don't allow resize while the NBD server is running, otherwise we don't
      * care what happens with the node. */
-    perm = BLK_PERM_CONSISTENT_READ;
+    blk_get_perm(blk, &perm, &shared_perm);
+
     if (!readonly) {
         perm |= BLK_PERM_WRITE;
     }
-    blk = blk_new(ctx, perm,
-                  BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
-                  BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD);
-    ret = blk_insert_bs(blk, bs, errp);
+
+    ret = blk_set_perm(blk, perm, shared_perm & ~BLK_PERM_RESIZE, errp);
     if (ret < 0) {
-        goto fail;
+        return ret;
    }
-    blk_set_enable_write_cache(blk, !writethrough);
+
     blk_set_allow_aio_context_change(blk, true);
 
     QTAILQ_INIT(&exp->clients);
-    exp->common.blk = blk;
     exp->name = g_strdup(name);
     assert(!desc || strlen(desc) <= NBD_MAX_STRING_SIZE);
     exp->description = g_strdup(desc);
@@ -1574,6 +1560,7 @@ int nbd_export_new(BlockExport *blk_exp, BlockDriverState *bs,
     exp->size = QEMU_ALIGN_DOWN(size, BDRV_SECTOR_SIZE);
 
     if (bitmap) {
+        BlockDriverState *bs = blk_bs(blk);
         BdrvDirtyBitmap *bm = NULL;
 
         while (bs) {
@@ -1620,7 +1607,6 @@ int nbd_export_new(BlockExport *blk_exp, BlockDriverState *bs,
     return 0;
 
 fail:
-    blk_unref(blk);
     g_free(exp->name);
     g_free(exp->description);
     return ret;
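A note on the permission handling above: the BlockBackend now arrives from blk_exp_add() with BLK_PERM_CONSISTENT_READ taken and BLK_PERM_ALL shared, so nbd_export_new() only widens its own permissions (adding BLK_PERM_WRITE for writable exports) and clears BLK_PERM_RESIZE from the shared mask, which is what enforces the "don't allow resize while the NBD server is running" comment. A hedged, stand-alone restatement of that step (the helper name is hypothetical):

static int example_restrict_perms(BlockBackend *blk, bool readonly,
                                  Error **errp)
{
    uint64_t perm, shared_perm;

    /* Start from whatever blk_exp_add() granted: typically
     * BLK_PERM_CONSISTENT_READ with BLK_PERM_ALL shared. */
    blk_get_perm(blk, &perm, &shared_perm);

    if (!readonly) {
        perm |= BLK_PERM_WRITE;   /* the export itself needs write access */
    }

    /* Other users may still read and write the node, but nobody may resize
     * it for as long as the export holds these permissions. */
    return blk_set_perm(blk, perm, shared_perm & ~BLK_PERM_RESIZE, errp);
}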