/* qemu: job.c — background job (long-running operation) infrastructure */
1 /*
2 * Background jobs (long-running operations)
3 *
4 * Copyright (c) 2011 IBM Corp.
5 * Copyright (c) 2012, 2018 Red Hat, Inc.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
24 */
25
26 #include "qemu/osdep.h"
27 #include "qapi/error.h"
28 #include "qemu/job.h"
29 #include "qemu/id.h"
30 #include "qemu/main-loop.h"
31 #include "block/aio-wait.h"
32 #include "trace-root.h"
33 #include "qapi/qapi-events-job.h"
34
/* Global list of all jobs; job_create() inserts at the head, job_next()
 * iterates it. */
static QLIST_HEAD(, Job) jobs = QLIST_HEAD_INITIALIZER(jobs);
36
37 /* Job State Transition Table */
/* Job State Transition Table: JobSTT[s0][s1] is true iff a job may move from
 * status s0 (row) to status s1 (column).  Enforced by an assertion in
 * job_state_transition(). */
bool JobSTT[JOB_STATUS__MAX][JOB_STATUS__MAX] = {
                                    /* U, C, R, P, Y, S, W, D, X, E, N */
    /* U: */ [JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0},
    /* C: */ [JOB_STATUS_CREATED]   = {0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1},
    /* R: */ [JOB_STATUS_RUNNING]   = {0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0},
    /* P: */ [JOB_STATUS_PAUSED]    = {0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
    /* Y: */ [JOB_STATUS_READY]     = {0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0},
    /* S: */ [JOB_STATUS_STANDBY]   = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
    /* W: */ [JOB_STATUS_WAITING]   = {0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0},
    /* D: */ [JOB_STATUS_PENDING]   = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
    /* X: */ [JOB_STATUS_ABORTING]  = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
    /* E: */ [JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
    /* N: */ [JOB_STATUS_NULL]      = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
};
52
/* JobVerbTable[verb][status] is true iff the user-issued @verb is permitted
 * while the job is in @status.  Checked by job_apply_verb(). */
bool JobVerbTable[JOB_VERB__MAX][JOB_STATUS__MAX] = {
                                    /* U, C, R, P, Y, S, W, D, X, E, N */
    [JOB_VERB_CANCEL]               = {0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0},
    [JOB_VERB_PAUSE]                = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
    [JOB_VERB_RESUME]               = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
    [JOB_VERB_SET_SPEED]            = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
    [JOB_VERB_COMPLETE]             = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
    [JOB_VERB_FINALIZE]             = {0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0},
    [JOB_VERB_DISMISS]              = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0},
};
63
/* Transactional group of jobs: all members complete or abort together.
 * Reference-counted; each member job holds one reference (see
 * job_txn_add_job()/job_txn_del_job()). */
struct JobTxn {

    /* Is this txn being cancelled? */
    bool aborting;

    /* List of jobs */
    QLIST_HEAD(, Job) jobs;

    /* Reference count */
    int refcnt;
};
76
/* Right now, this mutex is only needed to synchronize accesses to job->busy
 * and job->sleep_timer, such as concurrent calls to job_do_yield and
 * job_enter. */
static QemuMutex job_mutex;
81
/* Acquire job_mutex (protects job->busy and job->sleep_timer). */
static void job_lock(void)
{
    qemu_mutex_lock(&job_mutex);
}
86
/* Release job_mutex. */
static void job_unlock(void)
{
    qemu_mutex_unlock(&job_mutex);
}
91
/* Constructor: initialize job_mutex before main() runs. */
static void __attribute__((__constructor__)) job_init(void)
{
    qemu_mutex_init(&job_mutex);
}
96
97 JobTxn *job_txn_new(void)
98 {
99 JobTxn *txn = g_new0(JobTxn, 1);
100 QLIST_INIT(&txn->jobs);
101 txn->refcnt = 1;
102 return txn;
103 }
104
/* Take an additional reference on @txn (paired with job_txn_unref()). */
static void job_txn_ref(JobTxn *txn)
{
    txn->refcnt++;
}
109
110 void job_txn_unref(JobTxn *txn)
111 {
112 if (txn && --txn->refcnt == 0) {
113 g_free(txn);
114 }
115 }
116
117 void job_txn_add_job(JobTxn *txn, Job *job)
118 {
119 if (!txn) {
120 return;
121 }
122
123 assert(!job->txn);
124 job->txn = txn;
125
126 QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
127 job_txn_ref(txn);
128 }
129
130 static void job_txn_del_job(Job *job)
131 {
132 if (job->txn) {
133 QLIST_REMOVE(job, txn_list);
134 job_txn_unref(job->txn);
135 job->txn = NULL;
136 }
137 }
138
139 static int job_txn_apply(JobTxn *txn, int fn(Job *))
140 {
141 Job *job, *next;
142 int rc = 0;
143
144 QLIST_FOREACH_SAFE(job, &txn->jobs, txn_list, next) {
145 rc = fn(job);
146 if (rc) {
147 break;
148 }
149 }
150 return rc;
151 }
152
153 bool job_is_internal(Job *job)
154 {
155 return (job->id == NULL);
156 }
157
/* Move @job to status @s1, asserting that JobSTT allows the transition, and
 * emit a QMP JOB_STATUS_CHANGE event for user-visible jobs when the status
 * actually changes. */
static void job_state_transition(Job *job, JobStatus s1)
{
    JobStatus s0 = job->status;
    assert(s1 >= 0 && s1 < JOB_STATUS__MAX);
    trace_job_state_transition(job, job->ret,
                               JobSTT[s0][s1] ? "allowed" : "disallowed",
                               JobStatus_str(s0), JobStatus_str(s1));
    assert(JobSTT[s0][s1]);
    job->status = s1;

    if (!job_is_internal(job) && s1 != s0) {
        qapi_event_send_job_status_change(job->id, job->status);
    }
}
172
173 int job_apply_verb(Job *job, JobVerb verb, Error **errp)
174 {
175 JobStatus s0 = job->status;
176 assert(verb >= 0 && verb < JOB_VERB__MAX);
177 trace_job_apply_verb(job, JobStatus_str(s0), JobVerb_str(verb),
178 JobVerbTable[verb][s0] ? "allowed" : "prohibited");
179 if (JobVerbTable[verb][s0]) {
180 return 0;
181 }
182 error_setg(errp, "Job '%s' in state '%s' cannot accept command verb '%s'",
183 job->id, JobStatus_str(s0), JobVerb_str(verb));
184 return -EPERM;
185 }
186
/* Return the JobType declared by the job's driver. */
JobType job_type(const Job *job)
{
    return job->driver->job_type;
}
191
/* Return the string representation of the job's type. */
const char *job_type_str(const Job *job)
{
    return JobType_str(job_type(job));
}
196
/* True once cancellation has been requested (see job_cancel_async()). */
bool job_is_cancelled(Job *job)
{
    return job->cancelled;
}
201
202 bool job_is_ready(Job *job)
203 {
204 switch (job->status) {
205 case JOB_STATUS_UNDEFINED:
206 case JOB_STATUS_CREATED:
207 case JOB_STATUS_RUNNING:
208 case JOB_STATUS_PAUSED:
209 case JOB_STATUS_WAITING:
210 case JOB_STATUS_PENDING:
211 case JOB_STATUS_ABORTING:
212 case JOB_STATUS_CONCLUDED:
213 case JOB_STATUS_NULL:
214 return false;
215 case JOB_STATUS_READY:
216 case JOB_STATUS_STANDBY:
217 return true;
218 default:
219 g_assert_not_reached();
220 }
221 return false;
222 }
223
224 bool job_is_completed(Job *job)
225 {
226 switch (job->status) {
227 case JOB_STATUS_UNDEFINED:
228 case JOB_STATUS_CREATED:
229 case JOB_STATUS_RUNNING:
230 case JOB_STATUS_PAUSED:
231 case JOB_STATUS_READY:
232 case JOB_STATUS_STANDBY:
233 return false;
234 case JOB_STATUS_WAITING:
235 case JOB_STATUS_PENDING:
236 case JOB_STATUS_ABORTING:
237 case JOB_STATUS_CONCLUDED:
238 case JOB_STATUS_NULL:
239 return true;
240 default:
241 g_assert_not_reached();
242 }
243 return false;
244 }
245
/* A job counts as started once its coroutine exists (set in job_start()). */
static bool job_started(Job *job)
{
    return job->co;
}
250
/* True while at least one pause request is outstanding. */
static bool job_should_pause(Job *job)
{
    return job->pause_count > 0;
}
255
256 Job *job_next(Job *job)
257 {
258 if (!job) {
259 return QLIST_FIRST(&jobs);
260 }
261 return QLIST_NEXT(job, job_list);
262 }
263
264 Job *job_get(const char *id)
265 {
266 Job *job;
267
268 QLIST_FOREACH(job, &jobs, job_list) {
269 if (job->id && !strcmp(id, job->id)) {
270 return job;
271 }
272 }
273
274 return NULL;
275 }
276
/* Timer callback for job->sleep_timer: wake the job coroutine. */
static void job_sleep_timer_cb(void *opaque)
{
    Job *job = opaque;

    job_enter(job);
}
283
/*
 * Allocate and register a new job of @driver's type.
 *
 * @job_id must be non-NULL, well-formed and unique for user-visible jobs,
 * and NULL for internal jobs (flags & JOB_INTERNAL); violations set @errp
 * and return NULL.  The job starts in CREATED state, paused
 * (pause_count == 1) until job_start() is called, and joins @txn — or a
 * private single-job transaction when @txn is NULL.  @cb is invoked with
 * @opaque and the job's return code during finalization.
 */
void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn,
                 AioContext *ctx, int flags, BlockCompletionFunc *cb,
                 void *opaque, Error **errp)
{
    Job *job;

    if (job_id) {
        if (flags & JOB_INTERNAL) {
            error_setg(errp, "Cannot specify job ID for internal job");
            return NULL;
        }
        if (!id_wellformed(job_id)) {
            error_setg(errp, "Invalid job ID '%s'", job_id);
            return NULL;
        }
        if (job_get(job_id)) {
            error_setg(errp, "Job ID '%s' already in use", job_id);
            return NULL;
        }
    } else if (!(flags & JOB_INTERNAL)) {
        error_setg(errp, "An explicit job ID is required");
        return NULL;
    }

    /* instance_size lets drivers embed Job as the first member of a larger
     * driver-specific struct. */
    job = g_malloc0(driver->instance_size);
    job->driver        = driver;
    job->id            = g_strdup(job_id);
    job->refcnt        = 1;
    job->aio_context   = ctx;
    job->busy          = false;
    /* Created paused; job_start() drops this initial pause. */
    job->paused        = true;
    job->pause_count   = 1;
    job->auto_finalize = !(flags & JOB_MANUAL_FINALIZE);
    job->auto_dismiss  = !(flags & JOB_MANUAL_DISMISS);
    job->cb            = cb;
    job->opaque        = opaque;

    notifier_list_init(&job->on_finalize_cancelled);
    notifier_list_init(&job->on_finalize_completed);
    notifier_list_init(&job->on_pending);
    notifier_list_init(&job->on_ready);

    job_state_transition(job, JOB_STATUS_CREATED);
    aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
                   QEMU_CLOCK_REALTIME, SCALE_NS,
                   job_sleep_timer_cb, job);

    QLIST_INSERT_HEAD(&jobs, job, job_list);

    /* Single jobs are modeled as single-job transactions for sake of
     * consolidating the job management logic */
    if (!txn) {
        txn = job_txn_new();
        job_txn_add_job(txn, job);
        job_txn_unref(txn);
    } else {
        job_txn_add_job(txn, job);
    }

    return job;
}
345
/* Take an additional reference on @job (paired with job_unref()). */
void job_ref(Job *job)
{
    ++job->refcnt;
}
350
/* Drop one reference on @job; free it (driver state first, then the Job
 * itself) when the last reference goes away.  The job must already be in
 * NULL state, detached from its transaction and with no pending timer. */
void job_unref(Job *job)
{
    if (--job->refcnt == 0) {
        assert(job->status == JOB_STATUS_NULL);
        assert(!timer_pending(&job->sleep_timer));
        assert(!job->txn);

        if (job->driver->free) {
            job->driver->free(job);
        }

        QLIST_REMOVE(job, job_list);

        error_free(job->err);
        g_free(job->id);
        g_free(job);
    }
}
369
/* Account @done additional units of completed work. */
void job_progress_update(Job *job, uint64_t done)
{
    progress_work_done(&job->progress, done);
}
374
/* Set the amount of work still left to do to @remaining units. */
void job_progress_set_remaining(Job *job, uint64_t remaining)
{
    progress_set_remaining(&job->progress, remaining);
}
379
/* Add @delta units to the remaining-work estimate. */
void job_progress_increase_remaining(Job *job, uint64_t delta)
{
    progress_increase_remaining(&job->progress, delta);
}
384
/* Fire the on_finalize_cancelled notifiers. */
void job_event_cancelled(Job *job)
{
    notifier_list_notify(&job->on_finalize_cancelled, job);
}
389
/* Fire the on_finalize_completed notifiers. */
void job_event_completed(Job *job)
{
    notifier_list_notify(&job->on_finalize_completed, job);
}
394
/* Fire the on_pending notifiers. */
static void job_event_pending(Job *job)
{
    notifier_list_notify(&job->on_pending, job);
}
399
/* Fire the on_ready notifiers. */
static void job_event_ready(Job *job)
{
    notifier_list_notify(&job->on_ready, job);
}
404
/* Fire the on_idle notifiers. */
static void job_event_idle(Job *job)
{
    notifier_list_notify(&job->on_idle, job);
}
409
/* Wake the job coroutine if @fn (optional) approves.  Does nothing for jobs
 * that have not started, have deferred to the main loop, or are currently
 * busy.  Cancels a pending sleep timer before entering.  job_mutex guards
 * the busy/timer checks against a concurrent job_do_yield(). */
void job_enter_cond(Job *job, bool(*fn)(Job *job))
{
    if (!job_started(job)) {
        return;
    }
    if (job->deferred_to_main_loop) {
        return;
    }

    job_lock();
    if (job->busy) {
        job_unlock();
        return;
    }

    if (fn && !fn(job)) {
        job_unlock();
        return;
    }

    assert(!job->deferred_to_main_loop);
    timer_del(&job->sleep_timer);
    job->busy = true;
    /* Unlock before entering: the coroutine may itself call job_do_yield(),
     * which takes job_mutex. */
    job_unlock();
    aio_co_enter(job->aio_context, job->co);
}
436
/* Unconditionally wake the job coroutine (see job_enter_cond()). */
void job_enter(Job *job)
{
    job_enter_cond(job, NULL);
}
441
/* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds.
 * Reentering the job coroutine with job_enter() before the timer has expired
 * is allowed and cancels the timer.
 *
 * If @ns is (uint64_t) -1, no timer is scheduled and job_enter() must be
 * called explicitly. */
static void coroutine_fn job_do_yield(Job *job, uint64_t ns)
{
    /* busy and sleep_timer are modified under job_mutex so job_enter_cond()
     * observes a consistent pair. */
    job_lock();
    if (ns != -1) {
        timer_mod(&job->sleep_timer, ns);
    }
    job->busy = false;
    job_event_idle(job);
    job_unlock();
    qemu_coroutine_yield();

    /* Set by job_enter_cond() before re-entering the coroutine. */
    assert(job->busy);
}
462
/* Cooperative pause point, called from the job coroutine.  If a pause has
 * been requested and the job is not cancelled, notify the driver
 * (pause/resume hooks), transition to PAUSED (or STANDBY when READY), and
 * yield until resumed. */
void coroutine_fn job_pause_point(Job *job)
{
    assert(job && job_started(job));

    if (!job_should_pause(job)) {
        return;
    }
    /* Cancelled jobs must keep running so they can notice the cancellation. */
    if (job_is_cancelled(job)) {
        return;
    }

    if (job->driver->pause) {
        job->driver->pause(job);
    }

    /* Re-check: the driver's pause callback may have changed the picture. */
    if (job_should_pause(job) && !job_is_cancelled(job)) {
        JobStatus status = job->status;
        job_state_transition(job, status == JOB_STATUS_READY
                                  ? JOB_STATUS_STANDBY
                                  : JOB_STATUS_PAUSED);
        job->paused = true;
        job_do_yield(job, -1);
        job->paused = false;
        /* Restore the status we had before pausing. */
        job_state_transition(job, status);
    }

    if (job->driver->resume) {
        job->driver->resume(job);
    }
}
493
/* Yield the job coroutine with no wakeup timer; a pause request takes
 * priority over the plain yield, and cancellation returns immediately. */
void job_yield(Job *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (job_is_cancelled(job)) {
        return;
    }

    if (!job_should_pause(job)) {
        job_do_yield(job, -1);
    }

    job_pause_point(job);
}
509
/* Sleep for @ns nanoseconds (QEMU_CLOCK_REALTIME); a pause request takes
 * priority over sleeping, and cancellation returns immediately. */
void coroutine_fn job_sleep_ns(Job *job, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (job_is_cancelled(job)) {
        return;
    }

    if (!job_should_pause(job)) {
        job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
    }

    job_pause_point(job);
}
525
/* Assumes the job_mutex is held (used as the condition for
 * job_enter_cond() in job_resume(), which takes the lock). */
static bool job_timer_not_pending(Job *job)
{
    return !timer_pending(&job->sleep_timer);
}
531
/* Request a pause; the job honors it at its next job_pause_point(). */
void job_pause(Job *job)
{
    job->pause_count++;
}
536
/* Withdraw one pause request; when the count drops to zero, wake the job —
 * but only if no sleep timer is pending (it will wake the job itself). */
void job_resume(Job *job)
{
    assert(job->pause_count > 0);
    job->pause_count--;
    if (job->pause_count) {
        return;
    }

    /* kick only if no timer is pending */
    job_enter_cond(job, job_timer_not_pending);
}
548
/* User-initiated pause (QMP job-pause): checks the verb table, rejects a
 * double pause, then requests the pause. */
void job_user_pause(Job *job, Error **errp)
{
    if (job_apply_verb(job, JOB_VERB_PAUSE, errp)) {
        return;
    }
    if (job->user_paused) {
        error_setg(errp, "Job is already paused");
        return;
    }
    job->user_paused = true;
    job_pause(job);
}
561
/* True while a user-initiated pause is in effect. */
bool job_user_paused(Job *job)
{
    return job->user_paused;
}
566
/* User-initiated resume (QMP job-resume): only valid for a user-paused job;
 * notifies the driver's user_resume hook before withdrawing the pause. */
void job_user_resume(Job *job, Error **errp)
{
    assert(job);
    if (!job->user_paused || job->pause_count <= 0) {
        error_setg(errp, "Can't resume a job that was not paused");
        return;
    }
    if (job_apply_verb(job, JOB_VERB_RESUME, errp)) {
        return;
    }
    if (job->driver->user_resume) {
        job->driver->user_resume(job);
    }
    job->user_paused = false;
    job_resume(job);
}
583
/* Transition @job to NULL state and drop the list's reference; this may
 * free the job (see job_unref()). */
static void job_do_dismiss(Job *job)
{
    assert(job);
    job->busy = false;
    job->paused = false;
    job->deferred_to_main_loop = true;

    job_txn_del_job(job);

    job_state_transition(job, JOB_STATUS_NULL);
    job_unref(job);
}
596
/* User-initiated dismiss (QMP job-dismiss) of a CONCLUDED job; clears the
 * caller's pointer because the job may be freed. */
void job_dismiss(Job **jobptr, Error **errp)
{
    Job *job = *jobptr;
    /* similarly to _complete, this is QMP-interface only. */
    assert(job->id);
    if (job_apply_verb(job, JOB_VERB_DISMISS, errp)) {
        return;
    }

    job_do_dismiss(job);
    *jobptr = NULL;
}
609
/* Tear down a job that failed before it was ever started (still CREATED). */
void job_early_fail(Job *job)
{
    assert(job->status == JOB_STATUS_CREATED);
    job_do_dismiss(job);
}
615
/* Move the job to CONCLUDED; auto-dismiss it unless the user asked to keep
 * it around (JOB_MANUAL_DISMISS), or if it never started. */
static void job_conclude(Job *job)
{
    job_state_transition(job, JOB_STATUS_CONCLUDED);
    if (job->auto_dismiss || !job_started(job)) {
        job_do_dismiss(job);
    }
}
623
/* Fold cancellation into job->ret (-ECANCELED) and, on any error, make sure
 * job->err is set and move the job to ABORTING. */
static void job_update_rc(Job *job)
{
    if (!job->ret && job_is_cancelled(job)) {
        job->ret = -ECANCELED;
    }
    if (job->ret) {
        if (!job->err) {
            error_setg(&job->err, "%s", strerror(-job->ret));
        }
        job_state_transition(job, JOB_STATUS_ABORTING);
    }
}
636
/* Invoke the driver's commit hook for a successful job (ret must be 0). */
static void job_commit(Job *job)
{
    assert(!job->ret);
    if (job->driver->commit) {
        job->driver->commit(job);
    }
}
644
/* Invoke the driver's abort hook for a failed job (ret must be non-zero). */
static void job_abort(Job *job)
{
    assert(job->ret);
    if (job->driver->abort) {
        job->driver->abort(job);
    }
}
652
/* Invoke the driver's clean hook (runs after either commit or abort). */
static void job_clean(Job *job)
{
    if (job->driver->clean) {
        job->driver->clean(job);
    }
}
659
/* Finalize one completed job: commit or abort per job->ret, run the clean
 * hook, invoke the completion callback, emit notifier events, detach from
 * the transaction, and conclude.  Always returns 0 (so job_txn_apply()
 * visits every member). */
static int job_finalize_single(Job *job)
{
    assert(job_is_completed(job));

    /* Ensure abort is called for late-transactional failures */
    job_update_rc(job);

    if (!job->ret) {
        job_commit(job);
    } else {
        job_abort(job);
    }
    job_clean(job);

    if (job->cb) {
        job->cb(job->opaque, job->ret);
    }

    /* Emit events only if we actually started */
    if (job_started(job)) {
        if (job_is_cancelled(job)) {
            job_event_cancelled(job);
        } else {
            job_event_completed(job);
        }
    }

    job_txn_del_job(job);
    job_conclude(job);
    return 0;
}
691
/* Mark @job cancelled without entering it.  A user pause is undone first so
 * the job can run and observe the cancellation. */
static void job_cancel_async(Job *job, bool force)
{
    if (job->user_paused) {
        /* Do not call job_enter here, the caller will handle it. */
        if (job->driver->user_resume) {
            job->driver->user_resume(job);
        }
        job->user_paused = false;
        assert(job->pause_count > 0);
        job->pause_count--;
    }
    job->cancelled = true;
    /* To prevent 'force == false' overriding a previous 'force == true' */
    job->force_cancel |= force;
}
707
/* Abort the whole transaction after @job failed (or was cancelled): cancel
 * every other member, wait for each to complete, and finalize them all.
 * Re-entrant aborts (txn->aborting already set) return immediately. */
static void job_completed_txn_abort(Job *job)
{
    AioContext *outer_ctx = job->aio_context;
    AioContext *ctx;
    JobTxn *txn = job->txn;
    Job *other_job;

    if (txn->aborting) {
        /*
         * We are cancelled by another job, which will handle everything.
         */
        return;
    }
    txn->aborting = true;
    /* Keep the txn alive while we iterate; finalizing members drops their
     * references. */
    job_txn_ref(txn);

    /* We can only hold the single job's AioContext lock while calling
     * job_finalize_single() because the finalization callbacks can involve
     * calls of AIO_WAIT_WHILE(), which could deadlock otherwise. */
    aio_context_release(outer_ctx);

    /* Other jobs are effectively cancelled by us, set the status for
     * them; this job, however, may or may not be cancelled, depending
     * on the caller, so leave it. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (other_job != job) {
            ctx = other_job->aio_context;
            aio_context_acquire(ctx);
            job_cancel_async(other_job, false);
            aio_context_release(ctx);
        }
    }
    /* job_finalize_single() removes each member from txn->jobs, so drain
     * the list rather than iterating it. */
    while (!QLIST_EMPTY(&txn->jobs)) {
        other_job = QLIST_FIRST(&txn->jobs);
        ctx = other_job->aio_context;
        aio_context_acquire(ctx);
        if (!job_is_completed(other_job)) {
            assert(job_is_cancelled(other_job));
            job_finish_sync(other_job, NULL, NULL);
        }
        job_finalize_single(other_job);
        aio_context_release(ctx);
    }

    aio_context_acquire(outer_ctx);

    job_txn_unref(txn);
}
756
/* Run the driver's prepare hook for a so-far-successful job; any failure is
 * folded into job->ret/status via job_update_rc().  Returns job->ret. */
static int job_prepare(Job *job)
{
    if (job->ret == 0 && job->driver->prepare) {
        job->ret = job->driver->prepare(job);
        job_update_rc(job);
    }
    return job->ret;
}
765
/* Non-zero when the user opted into manual finalization
 * (JOB_MANUAL_FINALIZE); used as a job_txn_apply() predicate. */
static int job_needs_finalize(Job *job)
{
    return !job->auto_finalize;
}
770
/* Finalize @job's whole transaction: prepare every member, then either
 * abort the txn (if any prepare failed) or finalize all members. */
static void job_do_finalize(Job *job)
{
    int rc;
    assert(job && job->txn);

    /* prepare the transaction to complete */
    rc = job_txn_apply(job->txn, job_prepare);
    if (rc) {
        job_completed_txn_abort(job);
    } else {
        job_txn_apply(job->txn, job_finalize_single);
    }
}
784
/* User-initiated finalize (QMP job-finalize); only reachable for
 * user-visible jobs in PENDING state (per the verb table). */
void job_finalize(Job *job, Error **errp)
{
    assert(job && job->id);
    if (job_apply_verb(job, JOB_VERB_FINALIZE, errp)) {
        return;
    }
    job_do_finalize(job);
}
793
/* Move @job to PENDING and, for manually-finalized jobs, fire the
 * on_pending notifiers.  Always returns 0 (job_txn_apply() helper). */
static int job_transition_to_pending(Job *job)
{
    job_state_transition(job, JOB_STATUS_PENDING);
    if (!job->auto_finalize) {
        job_event_pending(job);
    }
    return 0;
}
802
/* Move @job to READY and fire the on_ready notifiers. */
void job_transition_to_ready(Job *job)
{
    job_state_transition(job, JOB_STATUS_READY);
    job_event_ready(job);
}
808
/* Handle successful completion of @job: move it to WAITING, and once every
 * member of the transaction has completed, move them all to PENDING and —
 * if none requires manual finalization — finalize the transaction. */
static void job_completed_txn_success(Job *job)
{
    JobTxn *txn = job->txn;
    Job *other_job;

    job_state_transition(job, JOB_STATUS_WAITING);

    /*
     * Successful completion, see if there are other running jobs in this
     * txn.
     */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (!job_is_completed(other_job)) {
            return;
        }
        assert(other_job->ret == 0);
    }

    job_txn_apply(txn, job_transition_to_pending);

    /* If no jobs need manual finalization, automatically do so */
    if (job_txn_apply(txn, job_needs_finalize) == 0) {
        job_do_finalize(job);
    }
}
834
/* Dispatch completion of @job: fold cancellation/errors into job->ret, then
 * take the success or abort path for its transaction. */
static void job_completed(Job *job)
{
    assert(job && job->txn && !job_is_completed(job));

    job_update_rc(job);
    trace_job_completed(job, job->ret);
    if (job->ret) {
        job_completed_txn_abort(job);
    } else {
        job_completed_txn_success(job);
    }
}
847
848 /** Useful only as a type shim for aio_bh_schedule_oneshot. */
849 static void job_exit(void *opaque)
850 {
851 Job *job = (Job *)opaque;
852 AioContext *ctx = job->aio_context;
853
854 aio_context_acquire(ctx);
855
856 /* This is a lie, we're not quiescent, but still doing the completion
857 * callbacks. However, completion callbacks tend to involve operations that
858 * drain block nodes, and if .drained_poll still returned true, we would
859 * deadlock. */
860 job->busy = false;
861 job_event_idle(job);
862
863 job_completed(job);
864
865 aio_context_release(ctx);
866 }
867
/**
 * All jobs must allow a pause point before entering their job proper. This
 * ensures that jobs can be paused prior to being started, then resumed later.
 */
static void coroutine_fn job_co_entry(void *opaque)
{
    Job *job = opaque;

    assert(job && job->driver && job->driver->run);
    job_pause_point(job);
    /* The driver's run callback does the actual work; its result and error
     * are recorded on the job for finalization. */
    job->ret = job->driver->run(job, &job->err);
    /* Completion runs in the main loop via a bottom half (job_exit). */
    job->deferred_to_main_loop = true;
    job->busy = true;
    aio_bh_schedule_oneshot(qemu_get_aio_context(), job_exit, job);
}
883
/* Start a CREATED job: create its coroutine, drop the initial pause from
 * job_create(), move to RUNNING and enter the coroutine. */
void job_start(Job *job)
{
    assert(job && !job_started(job) && job->paused &&
           job->driver && job->driver->run);
    job->co = qemu_coroutine_create(job_co_entry, job);
    job->pause_count--;
    job->busy = true;
    job->paused = false;
    job_state_transition(job, JOB_STATUS_RUNNING);
    aio_co_enter(job->aio_context, job->co);
}
895
/* Cancel @job.  A CONCLUDED job is simply dismissed; otherwise mark it
 * cancelled and either complete it directly (never started), abort its
 * transaction (already deferred to the main loop), or wake it so it can
 * observe the cancellation. */
void job_cancel(Job *job, bool force)
{
    if (job->status == JOB_STATUS_CONCLUDED) {
        job_do_dismiss(job);
        return;
    }
    job_cancel_async(job, force);
    if (!job_started(job)) {
        job_completed(job);
    } else if (job->deferred_to_main_loop) {
        job_completed_txn_abort(job);
    } else {
        job_enter(job);
    }
}
911
/* User-initiated cancel (QMP job-cancel): verb-checked wrapper around
 * job_cancel(). */
void job_user_cancel(Job *job, bool force, Error **errp)
{
    if (job_apply_verb(job, JOB_VERB_CANCEL, errp)) {
        return;
    }
    job_cancel(job, force);
}
919
/* A wrapper around job_cancel() taking an Error ** parameter so it may be
 * used with job_finish_sync() without the need for (rather nasty) function
 * pointer casts there. */
static void job_cancel_err(Job *job, Error **errp)
{
    job_cancel(job, false);
}
927
/* Cancel @job and wait for it to finish; returns the job's final ret. */
int job_cancel_sync(Job *job)
{
    return job_finish_sync(job, &job_cancel_err, NULL);
}
932
/* Synchronously cancel every job on the global list.  Each cancellation
 * removes the job from the list, so repeatedly take the head until empty. */
void job_cancel_sync_all(void)
{
    Job *job;
    AioContext *aio_context;

    while ((job = job_next(NULL))) {
        aio_context = job->aio_context;
        aio_context_acquire(aio_context);
        job_cancel_sync(job);
        aio_context_release(aio_context);
    }
}
945
/* Ask @job to complete and wait for it to finish; returns its final ret. */
int job_complete_sync(Job *job, Error **errp)
{
    return job_finish_sync(job, job_complete, errp);
}
950
/* User-initiated complete (QMP job-complete) of a READY job: verb-checked,
 * then delegated to the driver's complete callback. */
void job_complete(Job *job, Error **errp)
{
    /* Should not be reachable via external interface for internal jobs */
    assert(job->id);
    if (job_apply_verb(job, JOB_VERB_COMPLETE, errp)) {
        return;
    }
    if (job->pause_count || job_is_cancelled(job) || !job->driver->complete) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    job->driver->complete(job, errp);
}
966
/* Run @finish (optional) on @job and block until the job completes.
 * Returns -EBUSY if @finish fails, otherwise the job's final return code
 * (-ECANCELED when the job was cancelled with ret == 0).  Holds a job
 * reference across the wait so the job cannot be freed underneath us. */
int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
{
    Error *local_err = NULL;
    int ret;

    job_ref(job);

    if (finish) {
        finish(job, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        job_unref(job);
        return -EBUSY;
    }

    /* Keep kicking the job so it makes progress while we poll. */
    AIO_WAIT_WHILE(job->aio_context,
                   (job_enter(job), !job_is_completed(job)));

    ret = (job_is_cancelled(job) && job->ret == 0) ? -ECANCELED : job->ret;
    job_unref(job);
    return ret;
}