Based-on: <20221011215559.602584-1-peterx@redhat.com>

This whole set is based on the preempt-full mode series being applied first, here:

https://lore.kernel.org/r/20221011215559.602584-1-peterx@redhat.com

I also picked up the latest version from Manish fixing the multifd counterpart of the problem, here (these are the initial two patches):

https://lore.kernel.org/r/20221123172735.25181-1-manish.mishra@nutanix.com

One trivial change there is to fix RDMA builds for patch 1.

I verified that I can trigger disordered connections with preempt-mode postcopy (1 out of a few attempts), which can cause migration to hang during the precopy phase, e.g. if I set the NIC packet loss rate to 50%. So now, with all 5 patches applied, we should fix all the migration disorder issues on channel creation.

Patch 3 is IMHO a cleanup that is good to have even without patches 4/5. Patch 5 actually fixes the ordering issue.

For each patch, please refer to the commit message and the in-code comments. Any comments are welcome.

A side note to Juan: logically this should apply cleanly to your -next tree as long as Manish's patches are not there, because they're also included here with a fix for RDMA builds.

Peter Xu (3):
  migration: Rework multi-channel checks on URI
  migration: Add a semaphore to count PONGs
  migration: Postpone postcopy preempt channel to be after main

manish.mishra (2):
  io: Add support for MSG_PEEK for socket channel
  migration: check magic value for deciding the mapping of channels

 chardev/char-socket.c | 4 +-
 include/io/channel.h | 6 +
 io/channel-buffer.c | 1 +
 io/channel-command.c | 1 +
 io/channel-file.c | 1 +
 io/channel-null.c | 1 +
 io/channel-socket.c | 17 ++-
 io/channel-tls.c | 1 +
 io/channel-websock.c | 1 +
 io/channel.c | 16 ++-
 migration/channel-block.c | 1 +
 migration/channel.c | 45 ++++++++
 migration/channel.h | 5 +
 migration/migration.c | 164 +++++++++++++++++++---------
 migration/migration.h | 15 ++-
 migration/multifd.c | 24 +---
 migration/multifd.h | 2 +-
 migration/postcopy-ram.c | 36 +++---
 migration/postcopy-ram.h | 6 +-
 migration/rdma.c | 1 +
 migration/savevm.c | 6 +-
 scsi/qemu-pr-helper.c | 2 +-
 tests/qtest/tpm-emu.c | 2 +-
 tests/unit/test-io-channel-socket.c | 1 +
 util/vhost-user-server.c | 2 +-
 25 files changed, 254 insertions(+), 107 deletions(-)

-- 2.37.3
From: "manish.mishra" <manish.mishra@nutanix.com> MSG_PEEK reads from the peek of channel, The data is treated as unread and the next read shall still return this data. This support is currently added only for socket class. Extra parameter 'flags' is added to io_readv calls to pass extra read flags like MSG_PEEK. Reviewed-by: Daniel P. Berrangé <berrange@redhat.co Suggested-by: Daniel P. Berrangé <berrange@redhat.com Signed-off-by: manish.mishra <manish.mishra@nutanix.com> Signed-off-by: Peter Xu <peterx@redhat.com> --- chardev/char-socket.c | 4 ++-- include/io/channel.h | 6 ++++++ io/channel-buffer.c | 1 + io/channel-command.c | 1 + io/channel-file.c | 1 + io/channel-null.c | 1 + io/channel-socket.c | 17 ++++++++++++++++- io/channel-tls.c | 1 + io/channel-websock.c | 1 + io/channel.c | 16 ++++++++++++---- migration/channel-block.c | 1 + migration/rdma.c | 1 + scsi/qemu-pr-helper.c | 2 +- tests/qtest/tpm-emu.c | 2 +- tests/unit/test-io-channel-socket.c | 1 + util/vhost-user-server.c | 2 +- 16 files changed, 48 insertions(+), 10 deletions(-) diff --git a/chardev/char-socket.c b/chardev/char-socket.c index XXXXXXX..XXXXXXX 100644 --- a/chardev/char-socket.c +++ b/chardev/char-socket.c @@ -XXX,XX +XXX,XX @@ static ssize_t tcp_chr_recv(Chardev *chr, char *buf, size_t len) if (qio_channel_has_feature(s->ioc, QIO_CHANNEL_FEATURE_FD_PASS)) { ret = qio_channel_readv_full(s->ioc, &iov, 1, &msgfds, &msgfds_num, - NULL); + 0, NULL); } else { ret = qio_channel_readv_full(s->ioc, &iov, 1, NULL, NULL, - NULL); + 0, NULL); } if (msgfds_num) { diff --git a/include/io/channel.h b/include/io/channel.h index XXXXXXX..XXXXXXX 100644 --- a/include/io/channel.h +++ b/include/io/channel.h @@ -XXX,XX +XXX,XX @@ OBJECT_DECLARE_TYPE(QIOChannel, QIOChannelClass, #define QIO_CHANNEL_WRITE_FLAG_ZERO_COPY 0x1 +#define QIO_CHANNEL_READ_FLAG_MSG_PEEK 0x1 + typedef enum QIOChannelFeature QIOChannelFeature; enum QIOChannelFeature { @@ -XXX,XX +XXX,XX @@ enum QIOChannelFeature { QIO_CHANNEL_FEATURE_SHUTDOWN, QIO_CHANNEL_FEATURE_LISTEN, QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY, + QIO_CHANNEL_FEATURE_READ_MSG_PEEK, }; @@ -XXX,XX +XXX,XX @@ struct QIOChannelClass { size_t niov, int **fds, size_t *nfds, + int flags, Error **errp); int (*io_close)(QIOChannel *ioc, Error **errp); @@ -XXX,XX +XXX,XX @@ void qio_channel_set_name(QIOChannel *ioc, * @niov: the length of the @iov array * @fds: pointer to an array that will received file handles * @nfds: pointer filled with number of elements in @fds on return + * @flags: read flags (QIO_CHANNEL_READ_FLAG_*) * @errp: pointer to a NULL-initialized error object * * Read data from the IO channel, storing it in the @@ -XXX,XX +XXX,XX @@ ssize_t qio_channel_readv_full(QIOChannel *ioc, size_t niov, int **fds, size_t *nfds, + int flags, Error **errp); diff --git a/io/channel-buffer.c b/io/channel-buffer.c index XXXXXXX..XXXXXXX 100644 --- a/io/channel-buffer.c +++ b/io/channel-buffer.c @@ -XXX,XX +XXX,XX @@ static ssize_t qio_channel_buffer_readv(QIOChannel *ioc, size_t niov, int **fds, size_t *nfds, + int flags, Error **errp) { QIOChannelBuffer *bioc = QIO_CHANNEL_BUFFER(ioc); diff --git a/io/channel-command.c b/io/channel-command.c index XXXXXXX..XXXXXXX 100644 --- a/io/channel-command.c +++ b/io/channel-command.c @@ -XXX,XX +XXX,XX @@ static ssize_t qio_channel_command_readv(QIOChannel *ioc, size_t niov, int **fds, size_t *nfds, + int flags, Error **errp) { QIOChannelCommand *cioc = QIO_CHANNEL_COMMAND(ioc); diff --git a/io/channel-file.c b/io/channel-file.c index XXXXXXX..XXXXXXX 100644 --- 
a/io/channel-file.c +++ b/io/channel-file.c @@ -XXX,XX +XXX,XX @@ static ssize_t qio_channel_file_readv(QIOChannel *ioc, size_t niov, int **fds, size_t *nfds, + int flags, Error **errp) { QIOChannelFile *fioc = QIO_CHANNEL_FILE(ioc); diff --git a/io/channel-null.c b/io/channel-null.c index XXXXXXX..XXXXXXX 100644 --- a/io/channel-null.c +++ b/io/channel-null.c @@ -XXX,XX +XXX,XX @@ qio_channel_null_readv(QIOChannel *ioc, size_t niov, int **fds G_GNUC_UNUSED, size_t *nfds G_GNUC_UNUSED, + int flags, Error **errp) { QIOChannelNull *nioc = QIO_CHANNEL_NULL(ioc); diff --git a/io/channel-socket.c b/io/channel-socket.c index XXXXXXX..XXXXXXX 100644 --- a/io/channel-socket.c +++ b/io/channel-socket.c @@ -XXX,XX +XXX,XX @@ int qio_channel_socket_connect_sync(QIOChannelSocket *ioc, } #endif + qio_channel_set_feature(QIO_CHANNEL(ioc), QIO_CHANNEL_FEATURE_READ_MSG_PEEK); + return 0; } @@ -XXX,XX +XXX,XX @@ qio_channel_socket_accept(QIOChannelSocket *ioc, } #endif /* WIN32 */ + qio_channel_set_feature(QIO_CHANNEL(cioc), QIO_CHANNEL_FEATURE_READ_MSG_PEEK); + trace_qio_channel_socket_accept_complete(ioc, cioc, cioc->fd); return cioc; @@ -XXX,XX +XXX,XX @@ static ssize_t qio_channel_socket_readv(QIOChannel *ioc, size_t niov, int **fds, size_t *nfds, + int flags, Error **errp) { QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc); @@ -XXX,XX +XXX,XX @@ static ssize_t qio_channel_socket_readv(QIOChannel *ioc, } + if (flags & QIO_CHANNEL_READ_FLAG_MSG_PEEK) { + sflags |= MSG_PEEK; + } + retry: ret = recvmsg(sioc->fd, &msg, sflags); if (ret < 0) { @@ -XXX,XX +XXX,XX @@ static ssize_t qio_channel_socket_readv(QIOChannel *ioc, size_t niov, int **fds, size_t *nfds, + int flags, Error **errp) { QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc); ssize_t done = 0; ssize_t i; + int sflags = 0; + + if (flags & QIO_CHANNEL_READ_FLAG_MSG_PEEK) { + sflags |= MSG_PEEK; + } for (i = 0; i < niov; i++) { ssize_t ret; @@ -XXX,XX +XXX,XX @@ static ssize_t qio_channel_socket_readv(QIOChannel *ioc, ret = recv(sioc->fd, iov[i].iov_base, iov[i].iov_len, - 0); + sflags); if (ret < 0) { if (errno == EAGAIN) { if (done) { diff --git a/io/channel-tls.c b/io/channel-tls.c index XXXXXXX..XXXXXXX 100644 --- a/io/channel-tls.c +++ b/io/channel-tls.c @@ -XXX,XX +XXX,XX @@ static ssize_t qio_channel_tls_readv(QIOChannel *ioc, size_t niov, int **fds, size_t *nfds, + int flags, Error **errp) { QIOChannelTLS *tioc = QIO_CHANNEL_TLS(ioc); diff --git a/io/channel-websock.c b/io/channel-websock.c index XXXXXXX..XXXXXXX 100644 --- a/io/channel-websock.c +++ b/io/channel-websock.c @@ -XXX,XX +XXX,XX @@ static ssize_t qio_channel_websock_readv(QIOChannel *ioc, size_t niov, int **fds, size_t *nfds, + int flags, Error **errp) { QIOChannelWebsock *wioc = QIO_CHANNEL_WEBSOCK(ioc); diff --git a/io/channel.c b/io/channel.c index XXXXXXX..XXXXXXX 100644 --- a/io/channel.c +++ b/io/channel.c @@ -XXX,XX +XXX,XX @@ ssize_t qio_channel_readv_full(QIOChannel *ioc, size_t niov, int **fds, size_t *nfds, + int flags, Error **errp) { QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc); @@ -XXX,XX +XXX,XX @@ ssize_t qio_channel_readv_full(QIOChannel *ioc, return -1; } - return klass->io_readv(ioc, iov, niov, fds, nfds, errp); + if ((flags & QIO_CHANNEL_READ_FLAG_MSG_PEEK) && + !qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) { + error_setg_errno(errp, EINVAL, + "Channel does not support peek read"); + return -1; + } + + return klass->io_readv(ioc, iov, niov, fds, nfds, flags, errp); } @@ -XXX,XX +XXX,XX @@ int qio_channel_readv_full_all_eof(QIOChannel 
*ioc, while ((nlocal_iov > 0) || local_fds) { ssize_t len; len = qio_channel_readv_full(ioc, local_iov, nlocal_iov, local_fds, - local_nfds, errp); + local_nfds, 0, errp); if (len == QIO_CHANNEL_ERR_BLOCK) { if (qemu_in_coroutine()) { qio_channel_yield(ioc, G_IO_IN); @@ -XXX,XX +XXX,XX @@ ssize_t qio_channel_readv(QIOChannel *ioc, size_t niov, Error **errp) { - return qio_channel_readv_full(ioc, iov, niov, NULL, NULL, errp); + return qio_channel_readv_full(ioc, iov, niov, NULL, NULL, 0, errp); } @@ -XXX,XX +XXX,XX @@ ssize_t qio_channel_read(QIOChannel *ioc, Error **errp) { struct iovec iov = { .iov_base = buf, .iov_len = buflen }; - return qio_channel_readv_full(ioc, &iov, 1, NULL, NULL, errp); + return qio_channel_readv_full(ioc, &iov, 1, NULL, NULL, 0, errp); } diff --git a/migration/channel-block.c b/migration/channel-block.c index XXXXXXX..XXXXXXX 100644 --- a/migration/channel-block.c +++ b/migration/channel-block.c @@ -XXX,XX +XXX,XX @@ qio_channel_block_readv(QIOChannel *ioc, size_t niov, int **fds, size_t *nfds, + int flags, Error **errp) { QIOChannelBlock *bioc = QIO_CHANNEL_BLOCK(ioc); diff --git a/migration/rdma.c b/migration/rdma.c index XXXXXXX..XXXXXXX 100644 --- a/migration/rdma.c +++ b/migration/rdma.c @@ -XXX,XX +XXX,XX @@ static ssize_t qio_channel_rdma_readv(QIOChannel *ioc, size_t niov, int **fds, size_t *nfds, + int flags, Error **errp) { QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc); diff --git a/scsi/qemu-pr-helper.c b/scsi/qemu-pr-helper.c index XXXXXXX..XXXXXXX 100644 --- a/scsi/qemu-pr-helper.c +++ b/scsi/qemu-pr-helper.c @@ -XXX,XX +XXX,XX @@ static int coroutine_fn prh_read(PRHelperClient *client, void *buf, int sz, iov.iov_base = buf; iov.iov_len = sz; n_read = qio_channel_readv_full(QIO_CHANNEL(client->ioc), &iov, 1, - &fds, &nfds, errp); + &fds, &nfds, 0, errp); if (n_read == QIO_CHANNEL_ERR_BLOCK) { qio_channel_yield(QIO_CHANNEL(client->ioc), G_IO_IN); diff --git a/tests/qtest/tpm-emu.c b/tests/qtest/tpm-emu.c index XXXXXXX..XXXXXXX 100644 --- a/tests/qtest/tpm-emu.c +++ b/tests/qtest/tpm-emu.c @@ -XXX,XX +XXX,XX @@ void *tpm_emu_ctrl_thread(void *data) int *pfd = NULL; size_t nfd = 0; - qio_channel_readv_full(ioc, &iov, 1, &pfd, &nfd, &error_abort); + qio_channel_readv_full(ioc, &iov, 1, &pfd, &nfd, 0, &error_abort); cmd = be32_to_cpu(cmd); g_assert_cmpint(cmd, ==, CMD_SET_DATAFD); g_assert_cmpint(nfd, ==, 1); diff --git a/tests/unit/test-io-channel-socket.c b/tests/unit/test-io-channel-socket.c index XXXXXXX..XXXXXXX 100644 --- a/tests/unit/test-io-channel-socket.c +++ b/tests/unit/test-io-channel-socket.c @@ -XXX,XX +XXX,XX @@ static void test_io_channel_unix_fd_pass(void) G_N_ELEMENTS(iorecv), &fdrecv, &nfdrecv, + 0, &error_abort); g_assert(nfdrecv == G_N_ELEMENTS(fdsend)); diff --git a/util/vhost-user-server.c b/util/vhost-user-server.c index XXXXXXX..XXXXXXX 100644 --- a/util/vhost-user-server.c +++ b/util/vhost-user-server.c @@ -XXX,XX +XXX,XX @@ vu_message_read(VuDev *vu_dev, int conn_fd, VhostUserMsg *vmsg) * qio_channel_readv_full may have short reads, keeping calling it * until getting VHOST_USER_HDR_SIZE or 0 bytes in total */ - rc = qio_channel_readv_full(ioc, &iov, 1, &fds, &nfds, &local_err); + rc = qio_channel_readv_full(ioc, &iov, 1, &fds, &nfds, 0, &local_err); if (rc < 0) { if (rc == QIO_CHANNEL_ERR_BLOCK) { assert(local_err == NULL); -- 2.37.3
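For reference, here is a minimal usage sketch of the new peek support; peek_channel_u32() is a hypothetical helper (not part of this series), and retry handling for short or blocked reads is omitted:

/*
 * Peek at the first four bytes of a channel without consuming them,
 * assuming the flags-aware qio_channel_readv_full() added above.
 */
#include "qemu/osdep.h"
#include "io/channel.h"

static ssize_t peek_channel_u32(QIOChannel *ioc, uint32_t *val, Error **errp)
{
    struct iovec iov = { .iov_base = val, .iov_len = sizeof(*val) };

    if (!qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) {
        /* Only socket channels advertise peek support after this patch */
        return -1;
    }

    /* The peeked bytes stay queued; a later normal read returns them again */
    return qio_channel_readv_full(ioc, &iov, 1, NULL, NULL,
                                  QIO_CHANNEL_READ_FLAG_MSG_PEEK, errp);
}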
From: "manish.mishra" <manish.mishra@nutanix.com> Current logic assumes that channel connections on the destination side are always established in the same order as the source and the first one will always be the main channel followed by the multifid or post-copy preemption channel. This may not be always true, as even if a channel has a connection established on the source side it can be in the pending state on the destination side and a newer connection can be established first. Basically causing out of order mapping of channels on the destination side. Currently, all channels except post-copy preempt send a magic number, this patch uses that magic number to decide the type of channel. This logic is applicable only for precopy(multifd) live migration, as mentioned, the post-copy preempt channel does not send any magic number. Also, tls live migrations already does tls handshake before creating other channels, so this issue is not possible with tls, hence this logic is avoided for tls live migrations. This patch uses read peek to check the magic number of channels so that current data/control stream management remains un-effected. Reviewed-by: Peter Xu <peterx@redhat.com> Reviewed-by: Daniel P. Berrangé <berrange@redhat.co Suggested-by: Daniel P. Berrangé <berrange@redhat.com Signed-off-by: manish.mishra <manish.mishra@nutanix.com> Signed-off-by: Peter Xu <peterx@redhat.com> --- migration/channel.c | 45 ++++++++++++++++++++++++++++++++++++++++ migration/channel.h | 5 +++++ migration/migration.c | 45 +++++++++++++++++++++++++++++----------- migration/multifd.c | 12 ++++------- migration/multifd.h | 2 +- migration/postcopy-ram.c | 5 +---- migration/postcopy-ram.h | 2 +- 7 files changed, 90 insertions(+), 26 deletions(-) diff --git a/migration/channel.c b/migration/channel.c index XXXXXXX..XXXXXXX 100644 --- a/migration/channel.c +++ b/migration/channel.c @@ -XXX,XX +XXX,XX @@ void migration_channel_connect(MigrationState *s, migrate_fd_connect(s, error); error_free(error); } + + +/** + * @migration_channel_read_peek - Read from the peek of migration channel, + * without actually removing it from channel buffer. + * + * @ioc: the channel object + * @buf: the memory region to read data into + * @buflen: the number of bytes to read in @buf + * @errp: pointer to a NULL-initialized error object + * + * Returns 0 if successful, returns -1 and sets @errp if fails. + */ +int migration_channel_read_peek(QIOChannel *ioc, + const char *buf, + const size_t buflen, + Error **errp) +{ + ssize_t len = 0; + struct iovec iov = { .iov_base = (char *)buf, .iov_len = buflen }; + + while (true) { + len = qio_channel_readv_full(ioc, &iov, 1, NULL, + NULL, QIO_CHANNEL_READ_FLAG_MSG_PEEK, errp); + + if (len <= 0 && len != QIO_CHANNEL_ERR_BLOCK) { + error_setg(errp, + "Failed to read peek of channel"); + return -1; + } + + if (len == buflen) { + break; + } + + /* 1ms sleep. 
*/ + if (qemu_in_coroutine()) { + qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000); + } else { + g_usleep(1000); + } + } + + return 0; +} diff --git a/migration/channel.h b/migration/channel.h index XXXXXXX..XXXXXXX 100644 --- a/migration/channel.h +++ b/migration/channel.h @@ -XXX,XX +XXX,XX @@ void migration_channel_connect(MigrationState *s, QIOChannel *ioc, const char *hostname, Error *error_in); + +int migration_channel_read_peek(QIOChannel *ioc, + const char *buf, + const size_t buflen, + Error **errp); #endif diff --git a/migration/migration.c b/migration/migration.c index XXXXXXX..XXXXXXX 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -XXX,XX +XXX,XX @@ #include "migration.h" #include "savevm.h" #include "qemu-file.h" +#include "channel.h" #include "migration/vmstate.h" #include "block/block.h" #include "qapi/error.h" @@ -XXX,XX +XXX,XX @@ void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) { MigrationIncomingState *mis = migration_incoming_get_current(); Error *local_err = NULL; - bool start_migration; QEMUFile *f; + bool default_channel = true; + uint32_t channel_magic = 0; + int ret = 0; - if (!mis->from_src_file) { - /* The first connection (multifd may have multiple) */ + if (migrate_use_multifd() && !migrate_postcopy_ram() && + qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) { + /* + * With multiple channels, it is possible that we receive channels + * out of order on destination side, causing incorrect mapping of + * source channels on destination side. Check channel MAGIC to + * decide type of channel. Please note this is best effort, postcopy + * preempt channel does not send any magic number so avoid it for + * postcopy live migration. Also tls live migration already does + * tls handshake while initializing main channel so with tls this + * issue is not possible. + */ + ret = migration_channel_read_peek(ioc, (void *)&channel_magic, + sizeof(channel_magic), &local_err); + + if (ret != 0) { + error_propagate(errp, local_err); + return; + } + + default_channel = (channel_magic == cpu_to_be32(QEMU_VM_FILE_MAGIC)); + } else { + default_channel = !mis->from_src_file; + } + + if (default_channel) { f = qemu_file_new_input(ioc); if (!migration_incoming_setup(f, errp)) { return; } - - /* - * Common migration only needs one channel, so we can start - * right now. Some features need more than one channel, we wait. - */ - start_migration = !migration_needs_multiple_sockets(); } else { /* Multiple connections */ assert(migration_needs_multiple_sockets()); if (migrate_use_multifd()) { - start_migration = multifd_recv_new_channel(ioc, &local_err); + multifd_recv_new_channel(ioc, &local_err); } else { assert(migrate_postcopy_preempt()); f = qemu_file_new_input(ioc); - start_migration = postcopy_preempt_new_channel(mis, f); + postcopy_preempt_new_channel(mis, f); } if (local_err) { error_propagate(errp, local_err); @@ -XXX,XX +XXX,XX @@ void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) } } - if (start_migration) { + if (migration_has_all_channels()) { /* If it's a recovery, we're done */ if (postcopy_try_recover()) { return; diff --git a/migration/multifd.c b/migration/multifd.c index XXXXXXX..XXXXXXX 100644 --- a/migration/multifd.c +++ b/migration/multifd.c @@ -XXX,XX +XXX,XX @@ bool multifd_recv_all_channels_created(void) /* * Try to receive all multifd channels to get ready for the migration. 
- * - Return true and do not set @errp when correctly receiving all channels; - * - Return false and do not set @errp when correctly receiving the current one; - * - Return false and set @errp when failing to receive the current channel. + * Sets @errp when failing to receive the current channel. */ -bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp) +void multifd_recv_new_channel(QIOChannel *ioc, Error **errp) { MultiFDRecvParams *p; Error *local_err = NULL; @@ -XXX,XX +XXX,XX @@ bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp) "failed to receive packet" " via multifd channel %d: ", qatomic_read(&multifd_recv_state->count)); - return false; + return; } trace_multifd_recv_new_channel(id); @@ -XXX,XX +XXX,XX @@ bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp) id); multifd_recv_terminate_threads(local_err); error_propagate(errp, local_err); - return false; + return; } p->c = ioc; object_ref(OBJECT(ioc)); @@ -XXX,XX +XXX,XX @@ bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp) qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p, QEMU_THREAD_JOINABLE); qatomic_inc(&multifd_recv_state->count); - return qatomic_read(&multifd_recv_state->count) == - migrate_multifd_channels(); } diff --git a/migration/multifd.h b/migration/multifd.h index XXXXXXX..XXXXXXX 100644 --- a/migration/multifd.h +++ b/migration/multifd.h @@ -XXX,XX +XXX,XX @@ void multifd_save_cleanup(void); int multifd_load_setup(Error **errp); int multifd_load_cleanup(Error **errp); bool multifd_recv_all_channels_created(void); -bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp); +void multifd_recv_new_channel(QIOChannel *ioc, Error **errp); void multifd_recv_sync_main(void); int multifd_send_sync_main(QEMUFile *f); int multifd_queue_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset); diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c index XXXXXXX..XXXXXXX 100644 --- a/migration/postcopy-ram.c +++ b/migration/postcopy-ram.c @@ -XXX,XX +XXX,XX @@ void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd) } } -bool postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file) +void postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file) { /* * The new loading channel has its own threads, so it needs to be @@ -XXX,XX +XXX,XX @@ bool postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file) qemu_file_set_blocking(file, true); mis->postcopy_qemufile_dst = file; trace_postcopy_preempt_new_channel(); - - /* Start the migration immediately */ - return true; } /* diff --git a/migration/postcopy-ram.h b/migration/postcopy-ram.h index XXXXXXX..XXXXXXX 100644 --- a/migration/postcopy-ram.h +++ b/migration/postcopy-ram.h @@ -XXX,XX +XXX,XX @@ enum PostcopyChannels { RAM_CHANNEL_MAX, }; -bool postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file); +void postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file); int postcopy_preempt_setup(MigrationState *s, Error **errp); int postcopy_preempt_wait_channel(MigrationState *s); -- 2.37.3
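Since the flattened hunk above is not easy to read, here is a condensed, hypothetical restatement of the destination-side decision; channel_is_main() is an illustrative name, and the real logic lives in the migration_ioc_process_incoming() hunk above:

/* Assumes the migration_channel_read_peek() helper introduced above. */
static bool channel_is_main(QIOChannel *ioc, Error **errp)
{
    uint32_t magic = 0;

    /* Peek only: the bytes are not consumed, so later reads still see them */
    if (migration_channel_read_peek(ioc, (void *)&magic,
                                    sizeof(magic), errp)) {
        return false;
    }

    /* The main channel starts with QEMU_VM_FILE_MAGIC; multifd sends its own magic */
    return magic == cpu_to_be32(QEMU_VM_FILE_MAGIC);
}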
The whole idea of the multi-channel checks was not properly done, IMHO.

Currently we check multi-channel in a lot of places, but actually that's not needed, because we only need to check it right after we get the URI and that should be it. If the URI check succeeds, we should never need to check it again, because we must have it. If the check fails, we should fail immediately in either qmp_migrate or qmp_migrate_incoming, instead of failing later after the connection is established.

Neither should we fail any set capabilities like what we used to do here:

5ad15e8614 ("migration: allow enabling mutilfd for specific protocol only", 2021-10-19)

Because logically the URI will only be set later, after the capability is set, it doesn't make a lot of sense to check the URI type when setting the capability: we would be checking the capability against an old URI passed in, which may not even be the URI we're going to use later.

This patch mostly reverts all such checks from before, dropping the variable migrate_allow_multi_channels and its helpers. Instead, add a common helper to check the URI for multi-channel support for both qmp_migrate and qmp_migrate_incoming, and that should do all the proper checks. The failure will only trigger with the "migrate" or "migrate_incoming" command, or when the user specified "-incoming xxx" where "xxx" is not "defer".

With that, make postcopy_preempt_setup() as simple as creating the channel.

Signed-off-by: Peter Xu <peterx@redhat.com>
---
 migration/migration.c | 56 +++++++++++++++++++---------------------
 migration/migration.h | 3 ---
 migration/multifd.c | 12 ++-------
 migration/postcopy-ram.c | 14 +---------
 migration/postcopy-ram.h | 2 +-
 5 files changed, 31 insertions(+), 56 deletions(-)

diff --git a/migration/migration.c b/migration/migration.c index XXXXXXX..XXXXXXX 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -XXX,XX +XXX,XX @@ static int migration_maybe_pause(MigrationState *s, int new_state); static void migrate_fd_cancel(MigrationState *s); -static bool migrate_allow_multi_channels = true; +static bool migration_needs_multiple_sockets(void) +{ + return migrate_use_multifd() || migrate_postcopy_preempt(); +} -void migrate_protocol_allow_multi_channels(bool allow) +static bool uri_supports_multi_channels(const char *uri) { - migrate_allow_multi_channels = allow; + return strstart(uri, "tcp:", NULL) || strstart(uri, "unix:", NULL) || + strstart(uri, "vsock:", NULL); } -bool migrate_multi_channels_is_allowed(void) +static bool migration_uri_validate(const char *uri, Error **errp) { - return migrate_allow_multi_channels; + if (migration_needs_multiple_sockets() && + !uri_supports_multi_channels(uri)) { + error_setg(errp, "Migration requires multi-channel URIs (e.g. tcp)"); + return false; + } + + return true; } static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp) @@ -XXX,XX +XXX,XX @@ static void qemu_start_incoming_migration(const char *uri, Error **errp) { const char *p = NULL; - migrate_protocol_allow_multi_channels(false); /* reset it anyway */ + /* URI is not suitable for migration? */ + if (!migration_uri_validate(uri, errp)) { + return; + } + qapi_event_send_migration(MIGRATION_STATUS_SETUP); if (strstart(uri, "tcp:", &p) || strstart(uri, "unix:", NULL) || strstart(uri, "vsock:", NULL)) { - migrate_protocol_allow_multi_channels(true); socket_start_incoming_migration(p ?
p : uri, errp); #ifdef CONFIG_RDMA } else if (strstart(uri, "rdma:", &p)) { @@ -XXX,XX +XXX,XX @@ void migration_fd_process_incoming(QEMUFile *f, Error **errp) migration_incoming_process(); } -static bool migration_needs_multiple_sockets(void) -{ - return migrate_use_multifd() || migrate_postcopy_preempt(); -} - void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) { MigrationIncomingState *mis = migration_incoming_get_current(); @@ -XXX,XX +XXX,XX @@ static bool migrate_caps_check(bool *cap_list, } #endif - - /* incoming side only */ - if (runstate_check(RUN_STATE_INMIGRATE) && - !migrate_multi_channels_is_allowed() && - cap_list[MIGRATION_CAPABILITY_MULTIFD]) { - error_setg(errp, "multifd is not supported by current protocol"); - return false; - } - if (cap_list[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT]) { if (!cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM]) { error_setg(errp, "Postcopy preempt requires postcopy-ram"); @@ -XXX,XX +XXX,XX @@ void qmp_migrate(const char *uri, bool has_blk, bool blk, MigrationState *s = migrate_get_current(); const char *p = NULL; + /* URI is not suitable for migration? */ + if (!migration_uri_validate(uri, errp)) { + return; + } + if (!migrate_prepare(s, has_blk && blk, has_inc && inc, has_resume && resume, errp)) { /* Error detected, put into errp */ @@ -XXX,XX +XXX,XX @@ void qmp_migrate(const char *uri, bool has_blk, bool blk, } } - migrate_protocol_allow_multi_channels(false); if (strstart(uri, "tcp:", &p) || strstart(uri, "unix:", NULL) || strstart(uri, "vsock:", NULL)) { - migrate_protocol_allow_multi_channels(true); socket_start_outgoing_migration(s, p ? p : uri, &local_err); #ifdef CONFIG_RDMA } else if (strstart(uri, "rdma:", &p)) { @@ -XXX,XX +XXX,XX @@ void migrate_fd_connect(MigrationState *s, Error *error_in) } /* This needs to be done before resuming a postcopy */ - if (postcopy_preempt_setup(s, &local_err)) { - error_report_err(local_err); - migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, - MIGRATION_STATUS_FAILED); - migrate_fd_cleanup(s); - return; + if (migrate_postcopy_preempt()) { + postcopy_preempt_setup(s); } if (resume) { diff --git a/migration/migration.h b/migration/migration.h index XXXXXXX..XXXXXXX 100644 --- a/migration/migration.h +++ b/migration/migration.h @@ -XXX,XX +XXX,XX @@ void migration_cancel(const Error *error); void populate_vfio_info(MigrationInfo *info); void postcopy_temp_page_reset(PostcopyTmpPage *tmp_page); -bool migrate_multi_channels_is_allowed(void); -void migrate_protocol_allow_multi_channels(bool allow); - #endif diff --git a/migration/multifd.c b/migration/multifd.c index XXXXXXX..XXXXXXX 100644 --- a/migration/multifd.c +++ b/migration/multifd.c @@ -XXX,XX +XXX,XX @@ void multifd_save_cleanup(void) { int i; - if (!migrate_use_multifd() || !migrate_multi_channels_is_allowed()) { + if (!migrate_use_multifd()) { return; } multifd_send_terminate_threads(NULL); @@ -XXX,XX +XXX,XX @@ int multifd_save_setup(Error **errp) if (!migrate_use_multifd()) { return 0; } - if (!migrate_multi_channels_is_allowed()) { - error_setg(errp, "multifd is not supported by current protocol"); - return -1; - } thread_count = migrate_multifd_channels(); multifd_send_state = g_malloc0(sizeof(*multifd_send_state)); @@ -XXX,XX +XXX,XX @@ int multifd_load_cleanup(Error **errp) { int i; - if (!migrate_use_multifd() || !migrate_multi_channels_is_allowed()) { + if (!migrate_use_multifd()) { return 0; } multifd_recv_terminate_threads(NULL); @@ -XXX,XX +XXX,XX @@ int multifd_load_setup(Error **errp) if (!migrate_use_multifd()) { 
return 0; } - if (!migrate_multi_channels_is_allowed()) { - error_setg(errp, "multifd is not supported by current protocol"); - return -1; - } thread_count = migrate_multifd_channels(); multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state)); multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count); diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c index XXXXXXX..XXXXXXX 100644 --- a/migration/postcopy-ram.c +++ b/migration/postcopy-ram.c @@ -XXX,XX +XXX,XX @@ int postcopy_preempt_wait_channel(MigrationState *s) return s->postcopy_qemufile_src ? 0 : -1; } -int postcopy_preempt_setup(MigrationState *s, Error **errp) +void postcopy_preempt_setup(MigrationState *s) { - if (!migrate_postcopy_preempt()) { - return 0; - } - - if (!migrate_multi_channels_is_allowed()) { - error_setg(errp, "Postcopy preempt is not supported as current " - "migration stream does not support multi-channels."); - return -1; - } - /* Kick an async task to connect */ socket_send_channel_create(postcopy_preempt_send_channel_new, s); - - return 0; } static void postcopy_pause_ram_fast_load(MigrationIncomingState *mis) diff --git a/migration/postcopy-ram.h b/migration/postcopy-ram.h index XXXXXXX..XXXXXXX 100644 --- a/migration/postcopy-ram.h +++ b/migration/postcopy-ram.h @@ -XXX,XX +XXX,XX @@ enum PostcopyChannels { }; void postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file); -int postcopy_preempt_setup(MigrationState *s, Error **errp); +void postcopy_preempt_setup(MigrationState *s); int postcopy_preempt_wait_channel(MigrationState *s); #endif -- 2.37.3
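Because the diff is flattened here, the consolidated check it introduces is restated below as a sketch (a paraphrase of the patch, not additional code from the series):

/* Only socket-based transports can carry more than one channel. */
static bool uri_supports_multi_channels(const char *uri)
{
    return strstart(uri, "tcp:", NULL) || strstart(uri, "unix:", NULL) ||
           strstart(uri, "vsock:", NULL);
}

/* Called once from qmp_migrate() and qemu_start_incoming_migration(). */
static bool migration_uri_validate(const char *uri, Error **errp)
{
    if ((migrate_use_multifd() || migrate_postcopy_preempt()) &&
        !uri_supports_multi_channels(uri)) {
        error_setg(errp, "Migration requires multi-channel URIs (e.g. tcp)");
        return false;
    }
    return true;
}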
This is mostly useless, but useful for us to know whether the main channel is correctly established without changing the migration protocol. Signed-off-by: Peter Xu <peterx@redhat.com> --- migration/migration.c | 3 +++ migration/migration.h | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/migration/migration.c b/migration/migration.c index XXXXXXX..XXXXXXX 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -XXX,XX +XXX,XX @@ retry: case MIG_RP_MSG_PONG: tmp32 = ldl_be_p(buf); trace_source_return_path_thread_pong(tmp32); + qemu_sem_post(&ms->rp_state.rp_pong_acks); break; case MIG_RP_MSG_REQ_PAGES: @@ -XXX,XX +XXX,XX @@ static void migration_instance_finalize(Object *obj) qemu_sem_destroy(&ms->postcopy_pause_sem); qemu_sem_destroy(&ms->postcopy_pause_rp_sem); qemu_sem_destroy(&ms->rp_state.rp_sem); + qemu_sem_destroy(&ms->rp_state.rp_pong_acks); qemu_sem_destroy(&ms->postcopy_qemufile_src_sem); error_free(ms->error); } @@ -XXX,XX +XXX,XX @@ static void migration_instance_init(Object *obj) qemu_sem_init(&ms->postcopy_pause_sem, 0); qemu_sem_init(&ms->postcopy_pause_rp_sem, 0); qemu_sem_init(&ms->rp_state.rp_sem, 0); + qemu_sem_init(&ms->rp_state.rp_pong_acks, 0); qemu_sem_init(&ms->rate_limit_sem, 0); qemu_sem_init(&ms->wait_unplug_sem, 0); qemu_sem_init(&ms->postcopy_qemufile_src_sem, 0); diff --git a/migration/migration.h b/migration/migration.h index XXXXXXX..XXXXXXX 100644 --- a/migration/migration.h +++ b/migration/migration.h @@ -XXX,XX +XXX,XX @@ struct MigrationState { */ bool rp_thread_created; QemuSemaphore rp_sem; + /* + * We post to this when we got one PONG from dest. So far it's an + * easy way to know the main channel has successfully established + * on dest QEMU. + */ + QemuSemaphore rp_pong_acks; } rp_state; double mbps; -- 2.37.3
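As a usage sketch, a sender-side wait for the first PONG could look like the snippet below; wait_for_main_channel() is an illustrative name, and the next patch in the series adds essentially this as migration_wait_main_channel():

/*
 * Block until the destination has answered at least one PING with a PONG.
 * The return-path thread posts rp_pong_acks on MIG_RP_MSG_PONG, so
 * returning from here implies the main channel is fully functional.
 */
static void wait_for_main_channel(MigrationState *ms)
{
    qemu_sem_wait(&ms->rp_state.rp_pong_acks);
}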
Postcopy with preempt-mode enabled needs two channels to communicate. The order of channel establishment is not guaranteed. It can happen that the dest QEMU got the preempt channel connection request before the main channel is established, then the migration may make no progress even during precopy due to the wrong order. To fix it, create the preempt channel only if we know the main channel is established. For a general postcopy migration, we delay it until postcopy_start(), that's where we already went through some part of precopy on the main channel. To make sure dest QEMU has already established the channel, we wait until we got the first PONG received. That's something we do at the start of precopy when postcopy enabled so it's guaranteed to happen sooner or later. For a postcopy recovery, we delay it to qemu_savevm_state_resume_prepare() where we'll have round trips of data on bitmap synchronizations, which means the main channel must have been established. Signed-off-by: Peter Xu <peterx@redhat.com> --- migration/migration.c | 72 ++++++++++++++++++++++++++++++---------- migration/migration.h | 6 ++++ migration/postcopy-ram.c | 17 ++++++++-- migration/postcopy-ram.h | 2 +- migration/savevm.c | 6 +++- 5 files changed, 82 insertions(+), 21 deletions(-) diff --git a/migration/migration.c b/migration/migration.c index XXXXXXX..XXXXXXX 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -XXX,XX +XXX,XX @@ void migration_object_init(void) qemu_sem_init(¤t_incoming->postcopy_pause_sem_dst, 0); qemu_sem_init(¤t_incoming->postcopy_pause_sem_fault, 0); qemu_sem_init(¤t_incoming->postcopy_pause_sem_fast_load, 0); + qemu_sem_init(¤t_incoming->postcopy_qemufile_dst_done, 0); + qemu_mutex_init(¤t_incoming->page_request_mutex); current_incoming->page_requested = g_tree_new(page_request_addr_cmp); @@ -XXX,XX +XXX,XX @@ void migration_fd_process_incoming(QEMUFile *f, Error **errp) migration_incoming_process(); } +/* + * Returns true when we want to start a new incoming migration process, + * false otherwise. + */ +static bool migration_should_start_incoming(bool main_channel) +{ + /* Multifd doesn't start unless all channels are established */ + if (migrate_use_multifd()) { + return migration_has_all_channels(); + } + + /* Preempt channel only starts when the main channel is created */ + if (migrate_postcopy_preempt()) { + return main_channel; + } + + /* + * For all the rest types of migration, we should only reach here when + * it's the main channel that's being created, and we should always + * proceed with this channel. 
+ */ + assert(main_channel); + return true; +} + void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) { MigrationIncomingState *mis = migration_incoming_get_current(); @@ -XXX,XX +XXX,XX @@ void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) } } - if (migration_has_all_channels()) { + if (migration_should_start_incoming(default_channel)) { /* If it's a recovery, we're done */ if (postcopy_try_recover()) { return; @@ -XXX,XX +XXX,XX @@ static int await_return_path_close_on_source(MigrationState *ms) return ms->rp_state.error; } +static inline void +migration_wait_main_channel(MigrationState *ms) +{ + /* Wait until one PONG message received */ + qemu_sem_wait(&ms->rp_state.rp_pong_acks); +} + /* * Switch from normal iteration to postcopy * Returns non-0 on error @@ -XXX,XX +XXX,XX @@ static int postcopy_start(MigrationState *ms) bool restart_block = false; int cur_state = MIGRATION_STATUS_ACTIVE; - if (postcopy_preempt_wait_channel(ms)) { - migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED); - return -1; + if (migrate_postcopy_preempt()) { + migration_wait_main_channel(ms); + if (postcopy_preempt_establish_channel(ms)) { + migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED); + return -1; + } } if (!migrate_pause_before_switchover()) { @@ -XXX,XX +XXX,XX @@ static int postcopy_do_resume(MigrationState *s) return ret; } + /* + * If preempt is enabled, re-establish the preempt channel. Note that + * we do it after resume prepare to make sure the main channel will be + * created before the preempt channel. E.g. with weak network, the + * dest QEMU may get messed up with the preempt and main channels on + * the order of connection setup. This guarantees the correct order. + */ + ret = postcopy_preempt_establish_channel(s); + if (ret) { + error_report("%s: postcopy_preempt_establish_channel(): %d", + __func__, ret); + return ret; + } + /* * Last handshake with destination on the resume (destination will * switch to postcopy-active afterwards) @@ -XXX,XX +XXX,XX @@ static MigThrError postcopy_pause(MigrationState *s) if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) { /* Woken up by a recover procedure. Give it a shot */ - if (postcopy_preempt_wait_channel(s)) { - /* - * Preempt enabled, and new channel create failed; loop - * back to wait for another recovery. - */ - continue; - } - /* * Firstly, let's wake up the return path now, with a new * return path channel. @@ -XXX,XX +XXX,XX @@ void migrate_fd_connect(MigrationState *s, Error *error_in) } } - /* This needs to be done before resuming a postcopy */ - if (migrate_postcopy_preempt()) { - postcopy_preempt_setup(s); - } - if (resume) { /* Wakeup the main migration thread to do the recovery */ migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED, diff --git a/migration/migration.h b/migration/migration.h index XXXXXXX..XXXXXXX 100644 --- a/migration/migration.h +++ b/migration/migration.h @@ -XXX,XX +XXX,XX @@ struct MigrationIncomingState { unsigned int postcopy_channels; /* QEMUFile for postcopy only; it'll be handled by a separate thread */ QEMUFile *postcopy_qemufile_dst; + /* + * When postcopy_qemufile_dst is properly setup, this sem is posted. + * One can wait on this semaphore to wait until the preempt channel is + * properly setup. 
+ */ + QemuSemaphore postcopy_qemufile_dst_done; /* Postcopy priority thread is used to receive postcopy requested pages */ QemuThread postcopy_prio_thread; bool postcopy_prio_thread_created; diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c index XXXXXXX..XXXXXXX 100644 --- a/migration/postcopy-ram.c +++ b/migration/postcopy-ram.c @@ -XXX,XX +XXX,XX @@ int postcopy_ram_incoming_setup(MigrationIncomingState *mis) } if (migrate_postcopy_preempt()) { + /* + * The preempt channel is established in asynchronous way. Wait + * for its completion. + */ + qemu_sem_wait(&mis->postcopy_qemufile_dst_done); /* * This thread needs to be created after the temp pages because * it'll fetch RAM_CHANNEL_POSTCOPY PostcopyTmpPage immediately. @@ -XXX,XX +XXX,XX @@ void postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file) */ qemu_file_set_blocking(file, true); mis->postcopy_qemufile_dst = file; + qemu_sem_post(&mis->postcopy_qemufile_dst_done); trace_postcopy_preempt_new_channel(); } @@ -XXX,XX +XXX,XX @@ out: postcopy_preempt_send_channel_done(s, ioc, local_err); } -/* Returns 0 if channel established, -1 for error. */ -int postcopy_preempt_wait_channel(MigrationState *s) +/* + * This function will kick off an async task to establish the preempt + * channel, and wait until the connection setup completed. Returns 0 if + * channel established, -1 for error. + */ +int postcopy_preempt_establish_channel(MigrationState *s) { /* If preempt not enabled, no need to wait */ if (!migrate_postcopy_preempt()) { return 0; } + /* Kick off async task to establish preempt channel */ + postcopy_preempt_setup(s); + /* * We need the postcopy preempt channel to be established before * starting doing anything. diff --git a/migration/postcopy-ram.h b/migration/postcopy-ram.h index XXXXXXX..XXXXXXX 100644 --- a/migration/postcopy-ram.h +++ b/migration/postcopy-ram.h @@ -XXX,XX +XXX,XX @@ enum PostcopyChannels { void postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file); void postcopy_preempt_setup(MigrationState *s); -int postcopy_preempt_wait_channel(MigrationState *s); +int postcopy_preempt_establish_channel(MigrationState *s); #endif diff --git a/migration/savevm.c b/migration/savevm.c index XXXXXXX..XXXXXXX 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -XXX,XX +XXX,XX @@ static int loadvm_postcopy_handle_resume(MigrationIncomingState *mis) qemu_sem_post(&mis->postcopy_pause_sem_fault); if (migrate_postcopy_preempt()) { - /* The channel should already be setup again; make sure of it */ + /* + * The preempt channel will be created in async manner, now let's + * wait for it and make sure it's created. + */ + qemu_sem_wait(&mis->postcopy_qemufile_dst_done); assert(mis->postcopy_qemufile_dst); /* Kick the fast ram load thread too */ qemu_sem_post(&mis->postcopy_pause_sem_fast_load); -- 2.37.3
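Putting the pieces together, the source-side ordering for preempt-enabled postcopy now looks roughly like the sketch below (a condensed paraphrase of the postcopy_start() hunk above; postcopy_start_channels() is an illustrative name, not a function added by the patch):

static int postcopy_start_channels(MigrationState *ms)
{
    if (migrate_postcopy_preempt()) {
        /* 1) The first PONG means the main channel is up on the destination */
        qemu_sem_wait(&ms->rp_state.rp_pong_acks);
        /* 2) Only then kick off the async preempt connect and wait for it */
        if (postcopy_preempt_establish_channel(ms)) {
            return -1;           /* caller moves the migration to FAILED */
        }
    }
    return 0;
}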
v3:
- Fix indentation of uri_supports_multi_channels() [Juan]
- Rename migration_uri_validate() to migration_channels_and_uri_compatible() [Juan]
- Separate the changes to migrate_postcopy_preempt() into a separate patch [Juan]

I can trigger disordered connections with preempt-mode postcopy (1 out of a few attempts), which can cause migration to hang during the precopy phase, e.g. if I set the NIC packet loss rate to 50%. The last patch fixes the real ordering issue.

For each patch, please refer to the commit message and the in-code comments. Any comments are welcome, thanks.

Peter Xu (4):
  migration: Rework multi-channel checks on URI
  migration: Cleanup postcopy_preempt_setup()
  migration: Add a semaphore to count PONGs
  migration: Postpone postcopy preempt channel to be after main

 migration/migration.c | 122 ++++++++++++++++++++++++++-------------
 migration/migration.h | 15 ++++-
 migration/multifd.c | 12 +---
 migration/postcopy-ram.c | 31 +++++-----
 migration/postcopy-ram.h | 4 +-
 migration/savevm.c | 6 +-
 6 files changed, 118 insertions(+), 72 deletions(-)

-- 2.37.3
The whole idea of the multi-channel checks was not properly done, IMHO.

Currently we check multi-channel in a lot of places, but actually that's not needed, because we only need to check it right after we get the URI and that should be it. If the URI check succeeds, we should never need to check it again, because we must have it. If the check fails, we should fail immediately in either qmp_migrate or qmp_migrate_incoming, instead of failing later after the connection is established.

Neither should we fail any set capabilities like what we used to do here:

5ad15e8614 ("migration: allow enabling mutilfd for specific protocol only", 2021-10-19)

Because logically the URI will only be set later, after the capability is set, it doesn't make a lot of sense to check the URI type when setting the capability: we would be checking the capability against an old URI passed in, which may not even be the URI we're going to use later.

This patch mostly reverts all such checks from before, dropping the variable migrate_allow_multi_channels and its helpers. Instead, add a common helper to check the URI for multi-channel support for both qmp_migrate and qmp_migrate_incoming, and that should do all the proper checks. The failure will only trigger with the "migrate" or "migrate_incoming" command, or when the user specified "-incoming xxx" where "xxx" is not "defer".

Signed-off-by: Peter Xu <peterx@redhat.com>
---
 migration/migration.c | 49 +++++++++++++++++++-------------------
 migration/migration.h | 3 ---
 migration/multifd.c | 12 ++--------
 migration/postcopy-ram.c | 6 -----
 4 files changed, 28 insertions(+), 42 deletions(-)

diff --git a/migration/migration.c b/migration/migration.c index XXXXXXX..XXXXXXX 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -XXX,XX +XXX,XX @@ static int migration_maybe_pause(MigrationState *s, int new_state); static void migrate_fd_cancel(MigrationState *s); -static bool migrate_allow_multi_channels = true; +static bool migration_needs_multiple_sockets(void) +{ + return migrate_use_multifd() || migrate_postcopy_preempt(); +} -void migrate_protocol_allow_multi_channels(bool allow) +static bool uri_supports_multi_channels(const char *uri) { - migrate_allow_multi_channels = allow; + return strstart(uri, "tcp:", NULL) || strstart(uri, "unix:", NULL) || + strstart(uri, "vsock:", NULL); } -bool migrate_multi_channels_is_allowed(void) +static bool +migration_channels_and_uri_compatible(const char *uri, Error **errp) { - return migrate_allow_multi_channels; + if (migration_needs_multiple_sockets() && + !uri_supports_multi_channels(uri)) { + error_setg(errp, "Migration requires multi-channel URIs (e.g. tcp)"); + return false; + } + + return true; } static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp) @@ -XXX,XX +XXX,XX @@ static void qemu_start_incoming_migration(const char *uri, Error **errp) { const char *p = NULL; - migrate_protocol_allow_multi_channels(false); /* reset it anyway */ + /* URI is not suitable for migration? */ + if (!migration_channels_and_uri_compatible(uri, errp)) { + return; + } + qapi_event_send_migration(MIGRATION_STATUS_SETUP); if (strstart(uri, "tcp:", &p) || strstart(uri, "unix:", NULL) || strstart(uri, "vsock:", NULL)) { - migrate_protocol_allow_multi_channels(true); socket_start_incoming_migration(p ?
p : uri, errp); #ifdef CONFIG_RDMA } else if (strstart(uri, "rdma:", &p)) { @@ -XXX,XX +XXX,XX @@ void migration_fd_process_incoming(QEMUFile *f, Error **errp) migration_incoming_process(); } -static bool migration_needs_multiple_sockets(void) -{ - return migrate_use_multifd() || migrate_postcopy_preempt(); -} - void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) { MigrationIncomingState *mis = migration_incoming_get_current(); @@ -XXX,XX +XXX,XX @@ static bool migrate_caps_check(bool *cap_list, } #endif - - /* incoming side only */ - if (runstate_check(RUN_STATE_INMIGRATE) && - !migrate_multi_channels_is_allowed() && - cap_list[MIGRATION_CAPABILITY_MULTIFD]) { - error_setg(errp, "multifd is not supported by current protocol"); - return false; - } - if (cap_list[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT]) { if (!cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM]) { error_setg(errp, "Postcopy preempt requires postcopy-ram"); @@ -XXX,XX +XXX,XX @@ void qmp_migrate(const char *uri, bool has_blk, bool blk, MigrationState *s = migrate_get_current(); const char *p = NULL; + /* URI is not suitable for migration? */ + if (!migration_channels_and_uri_compatible(uri, errp)) { + return; + } + if (!migrate_prepare(s, has_blk && blk, has_inc && inc, has_resume && resume, errp)) { /* Error detected, put into errp */ @@ -XXX,XX +XXX,XX @@ void qmp_migrate(const char *uri, bool has_blk, bool blk, } } - migrate_protocol_allow_multi_channels(false); if (strstart(uri, "tcp:", &p) || strstart(uri, "unix:", NULL) || strstart(uri, "vsock:", NULL)) { - migrate_protocol_allow_multi_channels(true); socket_start_outgoing_migration(s, p ? p : uri, &local_err); #ifdef CONFIG_RDMA } else if (strstart(uri, "rdma:", &p)) { diff --git a/migration/migration.h b/migration/migration.h index XXXXXXX..XXXXXXX 100644 --- a/migration/migration.h +++ b/migration/migration.h @@ -XXX,XX +XXX,XX @@ void migration_cancel(const Error *error); void populate_vfio_info(MigrationInfo *info); void postcopy_temp_page_reset(PostcopyTmpPage *tmp_page); -bool migrate_multi_channels_is_allowed(void); -void migrate_protocol_allow_multi_channels(bool allow); - #endif diff --git a/migration/multifd.c b/migration/multifd.c index XXXXXXX..XXXXXXX 100644 --- a/migration/multifd.c +++ b/migration/multifd.c @@ -XXX,XX +XXX,XX @@ void multifd_save_cleanup(void) { int i; - if (!migrate_use_multifd() || !migrate_multi_channels_is_allowed()) { + if (!migrate_use_multifd()) { return; } multifd_send_terminate_threads(NULL); @@ -XXX,XX +XXX,XX @@ int multifd_save_setup(Error **errp) if (!migrate_use_multifd()) { return 0; } - if (!migrate_multi_channels_is_allowed()) { - error_setg(errp, "multifd is not supported by current protocol"); - return -1; - } thread_count = migrate_multifd_channels(); multifd_send_state = g_malloc0(sizeof(*multifd_send_state)); @@ -XXX,XX +XXX,XX @@ int multifd_load_cleanup(Error **errp) { int i; - if (!migrate_use_multifd() || !migrate_multi_channels_is_allowed()) { + if (!migrate_use_multifd()) { return 0; } multifd_recv_terminate_threads(NULL); @@ -XXX,XX +XXX,XX @@ int multifd_load_setup(Error **errp) return 0; } - if (!migrate_multi_channels_is_allowed()) { - error_setg(errp, "multifd is not supported by current protocol"); - return -1; - } thread_count = migrate_multifd_channels(); multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state)); multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count); diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c index XXXXXXX..XXXXXXX 100644 --- 
a/migration/postcopy-ram.c +++ b/migration/postcopy-ram.c @@ -XXX,XX +XXX,XX @@ int postcopy_preempt_setup(MigrationState *s, Error **errp) return 0; } - if (!migrate_multi_channels_is_allowed()) { - error_setg(errp, "Postcopy preempt is not supported as current " - "migration stream does not support multi-channels."); - return -1; - } - /* Kick an async task to connect */ socket_send_channel_create(postcopy_preempt_send_channel_new, s); -- 2.37.3
Since we just dropped the only case where postcopy_preempt_setup() can return an error, it doesn't need a retval anymore because it never fails. Move the preempt check to the caller, preparing it to be used elsewhere to do nothing but as simple as kicking the async connection. Signed-off-by: Peter Xu <peterx@redhat.com> --- migration/migration.c | 8 ++------ migration/postcopy-ram.c | 8 +------- migration/postcopy-ram.h | 2 +- 3 files changed, 4 insertions(+), 14 deletions(-) diff --git a/migration/migration.c b/migration/migration.c index XXXXXXX..XXXXXXX 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -XXX,XX +XXX,XX @@ void migrate_fd_connect(MigrationState *s, Error *error_in) } /* This needs to be done before resuming a postcopy */ - if (postcopy_preempt_setup(s, &local_err)) { - error_report_err(local_err); - migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, - MIGRATION_STATUS_FAILED); - migrate_fd_cleanup(s); - return; + if (migrate_postcopy_preempt()) { + postcopy_preempt_setup(s); } if (resume) { diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c index XXXXXXX..XXXXXXX 100644 --- a/migration/postcopy-ram.c +++ b/migration/postcopy-ram.c @@ -XXX,XX +XXX,XX @@ int postcopy_preempt_wait_channel(MigrationState *s) return s->postcopy_qemufile_src ? 0 : -1; } -int postcopy_preempt_setup(MigrationState *s, Error **errp) +void postcopy_preempt_setup(MigrationState *s) { - if (!migrate_postcopy_preempt()) { - return 0; - } - /* Kick an async task to connect */ socket_send_channel_create(postcopy_preempt_send_channel_new, s); - - return 0; } static void postcopy_pause_ram_fast_load(MigrationIncomingState *mis) diff --git a/migration/postcopy-ram.h b/migration/postcopy-ram.h index XXXXXXX..XXXXXXX 100644 --- a/migration/postcopy-ram.h +++ b/migration/postcopy-ram.h @@ -XXX,XX +XXX,XX @@ enum PostcopyChannels { }; void postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file); -int postcopy_preempt_setup(MigrationState *s, Error **errp); +void postcopy_preempt_setup(MigrationState *s); int postcopy_preempt_wait_channel(MigrationState *s); #endif -- 2.37.3
This is mostly useless, but useful for us to know whether the main channel is correctly established without changing the migration protocol. Signed-off-by: Peter Xu <peterx@redhat.com> --- migration/migration.c | 3 +++ migration/migration.h | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/migration/migration.c b/migration/migration.c index XXXXXXX..XXXXXXX 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -XXX,XX +XXX,XX @@ retry: case MIG_RP_MSG_PONG: tmp32 = ldl_be_p(buf); trace_source_return_path_thread_pong(tmp32); + qemu_sem_post(&ms->rp_state.rp_pong_acks); break; case MIG_RP_MSG_REQ_PAGES: @@ -XXX,XX +XXX,XX @@ static void migration_instance_finalize(Object *obj) qemu_sem_destroy(&ms->postcopy_pause_sem); qemu_sem_destroy(&ms->postcopy_pause_rp_sem); qemu_sem_destroy(&ms->rp_state.rp_sem); + qemu_sem_destroy(&ms->rp_state.rp_pong_acks); qemu_sem_destroy(&ms->postcopy_qemufile_src_sem); error_free(ms->error); } @@ -XXX,XX +XXX,XX @@ static void migration_instance_init(Object *obj) qemu_sem_init(&ms->postcopy_pause_sem, 0); qemu_sem_init(&ms->postcopy_pause_rp_sem, 0); qemu_sem_init(&ms->rp_state.rp_sem, 0); + qemu_sem_init(&ms->rp_state.rp_pong_acks, 0); qemu_sem_init(&ms->rate_limit_sem, 0); qemu_sem_init(&ms->wait_unplug_sem, 0); qemu_sem_init(&ms->postcopy_qemufile_src_sem, 0); diff --git a/migration/migration.h b/migration/migration.h index XXXXXXX..XXXXXXX 100644 --- a/migration/migration.h +++ b/migration/migration.h @@ -XXX,XX +XXX,XX @@ struct MigrationState { */ bool rp_thread_created; QemuSemaphore rp_sem; + /* + * We post to this when we got one PONG from dest. So far it's an + * easy way to know the main channel has successfully established + * on dest QEMU. + */ + QemuSemaphore rp_pong_acks; } rp_state; double mbps; -- 2.37.3
Postcopy with preempt-mode enabled needs two channels to communicate. The order of channel establishment is not guaranteed. It can happen that the dest QEMU got the preempt channel connection request before the main channel is established, then the migration may make no progress even during precopy due to the wrong order. To fix it, create the preempt channel only if we know the main channel is established. For a general postcopy migration, we delay it until postcopy_start(), that's where we already went through some part of precopy on the main channel. To make sure dest QEMU has already established the channel, we wait until we got the first PONG received. That's something we do at the start of precopy when postcopy enabled so it's guaranteed to happen sooner or later. For a postcopy recovery, we delay it to qemu_savevm_state_resume_prepare() where we'll have round trips of data on bitmap synchronizations, which means the main channel must have been established. Signed-off-by: Peter Xu <peterx@redhat.com> --- migration/migration.c | 72 ++++++++++++++++++++++++++++++---------- migration/migration.h | 6 ++++ migration/postcopy-ram.c | 17 ++++++++-- migration/postcopy-ram.h | 2 +- migration/savevm.c | 6 +++- 5 files changed, 82 insertions(+), 21 deletions(-) diff --git a/migration/migration.c b/migration/migration.c index XXXXXXX..XXXXXXX 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -XXX,XX +XXX,XX @@ void migration_object_init(void) qemu_sem_init(¤t_incoming->postcopy_pause_sem_dst, 0); qemu_sem_init(¤t_incoming->postcopy_pause_sem_fault, 0); qemu_sem_init(¤t_incoming->postcopy_pause_sem_fast_load, 0); + qemu_sem_init(¤t_incoming->postcopy_qemufile_dst_done, 0); + qemu_mutex_init(¤t_incoming->page_request_mutex); current_incoming->page_requested = g_tree_new(page_request_addr_cmp); @@ -XXX,XX +XXX,XX @@ void migration_fd_process_incoming(QEMUFile *f, Error **errp) migration_incoming_process(); } +/* + * Returns true when we want to start a new incoming migration process, + * false otherwise. + */ +static bool migration_should_start_incoming(bool main_channel) +{ + /* Multifd doesn't start unless all channels are established */ + if (migrate_use_multifd()) { + return migration_has_all_channels(); + } + + /* Preempt channel only starts when the main channel is created */ + if (migrate_postcopy_preempt()) { + return main_channel; + } + + /* + * For all the rest types of migration, we should only reach here when + * it's the main channel that's being created, and we should always + * proceed with this channel. 
+ */ + assert(main_channel); + return true; +} + void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) { MigrationIncomingState *mis = migration_incoming_get_current(); @@ -XXX,XX +XXX,XX @@ void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) } } - if (migration_has_all_channels()) { + if (migration_should_start_incoming(default_channel)) { /* If it's a recovery, we're done */ if (postcopy_try_recover()) { return; @@ -XXX,XX +XXX,XX @@ static int await_return_path_close_on_source(MigrationState *ms) return ms->rp_state.error; } +static inline void +migration_wait_main_channel(MigrationState *ms) +{ + /* Wait until one PONG message received */ + qemu_sem_wait(&ms->rp_state.rp_pong_acks); +} + /* * Switch from normal iteration to postcopy * Returns non-0 on error @@ -XXX,XX +XXX,XX @@ static int postcopy_start(MigrationState *ms) bool restart_block = false; int cur_state = MIGRATION_STATUS_ACTIVE; - if (postcopy_preempt_wait_channel(ms)) { - migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED); - return -1; + if (migrate_postcopy_preempt()) { + migration_wait_main_channel(ms); + if (postcopy_preempt_establish_channel(ms)) { + migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED); + return -1; + } } if (!migrate_pause_before_switchover()) { @@ -XXX,XX +XXX,XX @@ static int postcopy_do_resume(MigrationState *s) return ret; } + /* + * If preempt is enabled, re-establish the preempt channel. Note that + * we do it after resume prepare to make sure the main channel will be + * created before the preempt channel. E.g. with weak network, the + * dest QEMU may get messed up with the preempt and main channels on + * the order of connection setup. This guarantees the correct order. + */ + ret = postcopy_preempt_establish_channel(s); + if (ret) { + error_report("%s: postcopy_preempt_establish_channel(): %d", + __func__, ret); + return ret; + } + /* * Last handshake with destination on the resume (destination will * switch to postcopy-active afterwards) @@ -XXX,XX +XXX,XX @@ static MigThrError postcopy_pause(MigrationState *s) if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) { /* Woken up by a recover procedure. Give it a shot */ - if (postcopy_preempt_wait_channel(s)) { - /* - * Preempt enabled, and new channel create failed; loop - * back to wait for another recovery. - */ - continue; - } - /* * Firstly, let's wake up the return path now, with a new * return path channel. @@ -XXX,XX +XXX,XX @@ void migrate_fd_connect(MigrationState *s, Error *error_in) } } - /* This needs to be done before resuming a postcopy */ - if (migrate_postcopy_preempt()) { - postcopy_preempt_setup(s); - } - if (resume) { /* Wakeup the main migration thread to do the recovery */ migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED, diff --git a/migration/migration.h b/migration/migration.h index XXXXXXX..XXXXXXX 100644 --- a/migration/migration.h +++ b/migration/migration.h @@ -XXX,XX +XXX,XX @@ struct MigrationIncomingState { unsigned int postcopy_channels; /* QEMUFile for postcopy only; it'll be handled by a separate thread */ QEMUFile *postcopy_qemufile_dst; + /* + * When postcopy_qemufile_dst is properly setup, this sem is posted. + * One can wait on this semaphore to wait until the preempt channel is + * properly setup. 
+ */ + QemuSemaphore postcopy_qemufile_dst_done; /* Postcopy priority thread is used to receive postcopy requested pages */ QemuThread postcopy_prio_thread; bool postcopy_prio_thread_created; diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c index XXXXXXX..XXXXXXX 100644 --- a/migration/postcopy-ram.c +++ b/migration/postcopy-ram.c @@ -XXX,XX +XXX,XX @@ int postcopy_ram_incoming_setup(MigrationIncomingState *mis) } if (migrate_postcopy_preempt()) { + /* + * The preempt channel is established in asynchronous way. Wait + * for its completion. + */ + qemu_sem_wait(&mis->postcopy_qemufile_dst_done); /* * This thread needs to be created after the temp pages because * it'll fetch RAM_CHANNEL_POSTCOPY PostcopyTmpPage immediately. @@ -XXX,XX +XXX,XX @@ void postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file) */ qemu_file_set_blocking(file, true); mis->postcopy_qemufile_dst = file; + qemu_sem_post(&mis->postcopy_qemufile_dst_done); trace_postcopy_preempt_new_channel(); } @@ -XXX,XX +XXX,XX @@ out: postcopy_preempt_send_channel_done(s, ioc, local_err); } -/* Returns 0 if channel established, -1 for error. */ -int postcopy_preempt_wait_channel(MigrationState *s) +/* + * This function will kick off an async task to establish the preempt + * channel, and wait until the connection setup completed. Returns 0 if + * channel established, -1 for error. + */ +int postcopy_preempt_establish_channel(MigrationState *s) { /* If preempt not enabled, no need to wait */ if (!migrate_postcopy_preempt()) { return 0; } + /* Kick off async task to establish preempt channel */ + postcopy_preempt_setup(s); + /* * We need the postcopy preempt channel to be established before * starting doing anything. diff --git a/migration/postcopy-ram.h b/migration/postcopy-ram.h index XXXXXXX..XXXXXXX 100644 --- a/migration/postcopy-ram.h +++ b/migration/postcopy-ram.h @@ -XXX,XX +XXX,XX @@ enum PostcopyChannels { void postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file); void postcopy_preempt_setup(MigrationState *s); -int postcopy_preempt_wait_channel(MigrationState *s); +int postcopy_preempt_establish_channel(MigrationState *s); #endif diff --git a/migration/savevm.c b/migration/savevm.c index XXXXXXX..XXXXXXX 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -XXX,XX +XXX,XX @@ static int loadvm_postcopy_handle_resume(MigrationIncomingState *mis) qemu_sem_post(&mis->postcopy_pause_sem_fault); if (migrate_postcopy_preempt()) { - /* The channel should already be setup again; make sure of it */ + /* + * The preempt channel will be created in async manner, now let's + * wait for it and make sure it's created. + */ + qemu_sem_wait(&mis->postcopy_qemufile_dst_done); assert(mis->postcopy_qemufile_dst); /* Kick the fast ram load thread too */ qemu_sem_post(&mis->postcopy_pause_sem_fast_load); -- 2.37.3