Now that soft-cancelling a READY mirror is handled in qmp_block_job_cancel(), no other functions need to care about it: cancel is always forced.
So drop unused code paths. Signed-off-by: Vladimir Sementsov-Ogievskiy <vsement...@virtuozzo.com> --- include/qemu/job.h | 14 ++++---------- block/backup.c | 2 +- block/mirror.c | 13 ++++--------- blockdev.c | 4 ++-- job-qmp.c | 2 +- job.c | 18 ++++++++---------- tests/unit/test-blockjob-txn.c | 8 ++++---- 7 files changed, 24 insertions(+), 37 deletions(-) diff --git a/include/qemu/job.h b/include/qemu/job.h index 3dfb79cee6..0e30665fed 100644 --- a/include/qemu/job.h +++ b/include/qemu/job.h @@ -103,12 +103,6 @@ typedef struct Job { */ bool cancelled; - /** - * Set to true if the job should abort immediately without waiting - * for data to be in sync. - */ - bool force_cancel; - /** Set to true when the job has deferred work to the main loop. */ bool deferred_to_main_loop; @@ -254,7 +248,7 @@ struct JobDriver { /** * If the callback is not NULL, it will be invoked in job_cancel_async */ - void (*cancel)(Job *job, bool force); + void (*cancel)(Job *job); /** Called when the job is freed */ @@ -496,16 +490,16 @@ void job_complete(Job *job, Error **errp); void job_complete_ex(Job *job, bool do_graph_change, Error **errp); /** - * Asynchronously cancel the specified @job. If @force is true, the job should + * Asynchronously cancel the specified @job. The job will * be cancelled immediately without waiting for a consistent state. */ -void job_cancel(Job *job, bool force); +void job_cancel(Job *job); /** * Cancels the specified job like job_cancel(), but may refuse to do so if the * operation isn't meaningful in the current state of the job. */ -void job_user_cancel(Job *job, bool force, Error **errp); +void job_user_cancel(Job *job, Error **errp); /** * Synchronously cancel the @job. 
The completion callback is called diff --git a/block/backup.c b/block/backup.c index bd3614ce70..6cf2f974aa 100644 --- a/block/backup.c +++ b/block/backup.c @@ -331,7 +331,7 @@ static void coroutine_fn backup_set_speed(BlockJob *job, int64_t speed) } } -static void backup_cancel(Job *job, bool force) +static void backup_cancel(Job *job) { BackupBlockJob *s = container_of(job, BackupBlockJob, common.job); diff --git a/block/mirror.c b/block/mirror.c index ad9736eb5e..06a07baf46 100644 --- a/block/mirror.c +++ b/block/mirror.c @@ -1095,9 +1095,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) } trace_mirror_before_sleep(s, cnt, s->synced, delay_ns); job_sleep_ns(&s->common.job, delay_ns); - if (job_is_cancelled(&s->common.job) && - (!s->synced || s->common.job.force_cancel)) - { + if (job_is_cancelled(&s->common.job)) { break; } s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); @@ -1109,8 +1107,7 @@ immediate_exit: * or it was cancelled prematurely so that we do not guarantee that * the target is a copy of the source. 
*/ - assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) && - job_is_cancelled(&s->common.job))); + assert(ret < 0 || job_is_cancelled(&s->common.job)); assert(need_drain); mirror_wait_for_all_io(s); } @@ -1197,14 +1194,12 @@ static bool mirror_drained_poll(BlockJob *job) return !!s->in_flight; } -static void mirror_cancel(Job *job, bool force) +static void mirror_cancel(Job *job) { MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); BlockDriverState *target = blk_bs(s->target); - if (force || !job_is_ready(job)) { - bdrv_cancel_in_flight(target); - } + bdrv_cancel_in_flight(target); } static const BlockJobDriver mirror_job_driver = { diff --git a/blockdev.c b/blockdev.c index c4ee5f02f4..cc424a451f 100644 --- a/blockdev.c +++ b/blockdev.c @@ -150,7 +150,7 @@ void blockdev_mark_auto_del(BlockBackend *blk) AioContext *aio_context = job->job.aio_context; aio_context_acquire(aio_context); - job_cancel(&job->job, false); + job_cancel(&job->job); aio_context_release(aio_context); } @@ -3374,7 +3374,7 @@ void qmp_block_job_cancel(const char *device, */ job_complete_ex(&job->job, false, errp); } else { - job_user_cancel(&job->job, force, errp); + job_user_cancel(&job->job, errp); } out: aio_context_release(aio_context); diff --git a/job-qmp.c b/job-qmp.c index 829a28aa70..272837bd1f 100644 --- a/job-qmp.c +++ b/job-qmp.c @@ -58,7 +58,7 @@ void qmp_job_cancel(const char *id, Error **errp) } trace_qmp_job_cancel(job); - job_user_cancel(job, true, errp); + job_user_cancel(job, errp); aio_context_release(aio_context); } diff --git a/job.c b/job.c index 52127dd6bd..04437ce438 100644 --- a/job.c +++ b/job.c @@ -716,10 +716,10 @@ static int job_finalize_single(Job *job) return 0; } -static void job_cancel_async(Job *job, bool force) +static void job_cancel_async(Job *job) { if (job->driver->cancel) { - job->driver->cancel(job, force); + job->driver->cancel(job); } if (job->user_paused) { /* Do not call job_enter here, the caller will handle it. 
*/ @@ -731,8 +731,6 @@ static void job_cancel_async(Job *job, bool force) job->pause_count--; } job->cancelled = true; - /* To prevent 'force == false' overriding a previous 'force == true' */ - job->force_cancel |= force; } static void job_completed_txn_abort(Job *job) @@ -763,7 +761,7 @@ static void job_completed_txn_abort(Job *job) if (other_job != job) { ctx = other_job->aio_context; aio_context_acquire(ctx); - job_cancel_async(other_job, false); + job_cancel_async(other_job); aio_context_release(ctx); } } @@ -932,13 +930,13 @@ void job_start(Job *job) aio_co_enter(job->aio_context, job->co); } -void job_cancel(Job *job, bool force) +void job_cancel(Job *job) { if (job->status == JOB_STATUS_CONCLUDED) { job_do_dismiss(job); return; } - job_cancel_async(job, force); + job_cancel_async(job); if (!job_started(job)) { job_completed(job); } else if (job->deferred_to_main_loop) { @@ -948,12 +946,12 @@ void job_cancel(Job *job, bool force) } } -void job_user_cancel(Job *job, bool force, Error **errp) +void job_user_cancel(Job *job, Error **errp) { if (job_apply_verb(job, JOB_VERB_CANCEL, errp)) { return; } - job_cancel(job, force); + job_cancel(job); } /* A wrapper around job_cancel() taking an Error ** parameter so it may be @@ -961,7 +959,7 @@ void job_user_cancel(Job *job, bool force, Error **errp) * pointer casts there. 
*/ static void job_cancel_err(Job *job, Error **errp) { - job_cancel(job, false); + job_cancel(job); } int job_cancel_sync(Job *job) diff --git a/tests/unit/test-blockjob-txn.c b/tests/unit/test-blockjob-txn.c index 8bd13b9949..5a995c59c7 100644 --- a/tests/unit/test-blockjob-txn.c +++ b/tests/unit/test-blockjob-txn.c @@ -125,7 +125,7 @@ static void test_single_job(int expected) job_start(&job->job); if (expected == -ECANCELED) { - job_cancel(&job->job, false); + job_cancel(&job->job); } while (result == -EINPROGRESS) { @@ -171,10 +171,10 @@ static void test_pair_jobs(int expected1, int expected2) job_txn_unref(txn); if (expected1 == -ECANCELED) { - job_cancel(&job1->job, false); + job_cancel(&job1->job); } if (expected2 == -ECANCELED) { - job_cancel(&job2->job, false); + job_cancel(&job2->job); } while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) { @@ -227,7 +227,7 @@ static void test_pair_jobs_fail_cancel_race(void) job_start(&job1->job); job_start(&job2->job); - job_cancel(&job1->job, false); + job_cancel(&job1->job); /* Now make job2 finish before the main loop kicks jobs. This simulates * the race between a pending kick and another job completing. -- 2.29.2