block/nvme.c | 48 ++++++++++++++++++++++++++++-------------------- 1 file changed, 28 insertions(+), 20 deletions(-)
nvme_get_free_req has very different semantics when called in
coroutine context (when it waits) and in non-coroutine context
(when it doesn't). Split the two cases to make it clear what
is being requested.
Cc: qemu-block@nongnu.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
block/nvme.c | 48 ++++++++++++++++++++++++++++--------------------
1 file changed, 28 insertions(+), 20 deletions(-)
diff --git a/block/nvme.c b/block/nvme.c
index 01fb28aa63..092c1f2f8e 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -293,34 +293,42 @@ static void nvme_kick(NVMeQueuePair *q)
q->need_kick = 0;
}
-/* Find a free request element if any, otherwise:
- * a) if in coroutine context, try to wait for one to become available;
- * b) if not in coroutine, return NULL;
- */
-static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
+static NVMeRequest *nvme_get_free_req_nofail_locked(NVMeQueuePair *q)
{
NVMeRequest *req;
- qemu_mutex_lock(&q->lock);
-
- while (q->free_req_head == -1) {
- if (qemu_in_coroutine()) {
- trace_nvme_free_req_queue_wait(q->s, q->index);
- qemu_co_queue_wait(&q->free_req_queue, &q->lock);
- } else {
- qemu_mutex_unlock(&q->lock);
- return NULL;
- }
- }
-
req = &q->reqs[q->free_req_head];
q->free_req_head = req->free_req_next;
req->free_req_next = -1;
-
- qemu_mutex_unlock(&q->lock);
return req;
}
+/* Return a free request element if any, otherwise return NULL. */
+static NVMeRequest *nvme_get_free_req_nowait(NVMeQueuePair *q)
+{
+ QEMU_LOCK_GUARD(&q->lock);
+ if (q->free_req_head == -1) {
+ return NULL;
+ }
+ return nvme_get_free_req_nofail_locked(q);
+}
+
+/*
+ * Return a free request element if any, otherwise wait
+ * for one to become available
+ */
+static coroutine_fn NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
+{
+ QEMU_LOCK_GUARD(&q->lock);
+
+ while (q->free_req_head == -1) {
+ trace_nvme_free_req_queue_wait(q->s, q->index);
+ qemu_co_queue_wait(&q->free_req_queue, &q->lock);
+ }
+
+ return nvme_get_free_req_nofail_locked(q);
+}
+
/* With q->lock */
static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
{
@@ -506,7 +514,7 @@ static int nvme_admin_cmd_sync(BlockDriverState *bs, NvmeCmd *cmd)
AioContext *aio_context = bdrv_get_aio_context(bs);
NVMeRequest *req;
int ret = -EINPROGRESS;
- req = nvme_get_free_req(q);
+ req = nvme_get_free_req_nowait(q);
if (!req) {
return -EBUSY;
}
--
2.36.0
On Sat, May 14, 2022 at 11:31 AM Paolo Bonzini <pbonzini@redhat.com> wrote:
>
> nvme_get_free_req has very different semantics when called in
> coroutine context (when it waits) and in non-coroutine context
> (when it doesn't). Split the two cases to make it clear what
> is being requested.
>
> Cc: qemu-block@nongnu.org
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> block/nvme.c | 48 ++++++++++++++++++++++++++++--------------------
> 1 file changed, 28 insertions(+), 20 deletions(-)
>
> diff --git a/block/nvme.c b/block/nvme.c
> index 01fb28aa63..092c1f2f8e 100644
> --- a/block/nvme.c
> +++ b/block/nvme.c
> @@ -293,34 +293,42 @@ static void nvme_kick(NVMeQueuePair *q)
> q->need_kick = 0;
> }
>
> -/* Find a free request element if any, otherwise:
> - * a) if in coroutine context, try to wait for one to become available;
> - * b) if not in coroutine, return NULL;
> - */
> -static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
> +static NVMeRequest *nvme_get_free_req_nofail_locked(NVMeQueuePair *q)
> {
> NVMeRequest *req;
>
> - qemu_mutex_lock(&q->lock);
> -
> - while (q->free_req_head == -1) {
> - if (qemu_in_coroutine()) {
> - trace_nvme_free_req_queue_wait(q->s, q->index);
> - qemu_co_queue_wait(&q->free_req_queue, &q->lock);
> - } else {
> - qemu_mutex_unlock(&q->lock);
> - return NULL;
> - }
> - }
> -
> req = &q->reqs[q->free_req_head];
> q->free_req_head = req->free_req_next;
> req->free_req_next = -1;
> -
> - qemu_mutex_unlock(&q->lock);
> return req;
> }
>
> +/* Return a free request element if any, otherwise return NULL. */
> +static NVMeRequest *nvme_get_free_req_nowait(NVMeQueuePair *q)
> +{
> + QEMU_LOCK_GUARD(&q->lock);
> + if (q->free_req_head == -1) {
> + return NULL;
> + }
> + return nvme_get_free_req_nofail_locked(q);
> +}
> +
> +/*
> + * Return a free request element if any, otherwise wait
> + * for one to become available
> + */
Might be a bit more precise as: "Wait for a free request to become
available and return it."
> +static coroutine_fn NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
> +{
> + QEMU_LOCK_GUARD(&q->lock);
> +
> + while (q->free_req_head == -1) {
> + trace_nvme_free_req_queue_wait(q->s, q->index);
> + qemu_co_queue_wait(&q->free_req_queue, &q->lock);
> + }
> +
> + return nvme_get_free_req_nofail_locked(q);
> +}
> +
> /* With q->lock */
> static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
> {
> @@ -506,7 +514,7 @@ static int nvme_admin_cmd_sync(BlockDriverState *bs, NvmeCmd *cmd)
> AioContext *aio_context = bdrv_get_aio_context(bs);
> NVMeRequest *req;
> int ret = -EINPROGRESS;
> - req = nvme_get_free_req(q);
> + req = nvme_get_free_req_nowait(q);
> if (!req) {
> return -EBUSY;
> }
> --
> 2.36.0
>
>
Reviewed-by: Alberto Faria <afaria@redhat.com>
© 2016 - 2025 Red Hat, Inc.