/* include/migration/migration.h (from qemu.git) */
/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
13
14 #ifndef QEMU_MIGRATION_H
15 #define QEMU_MIGRATION_H
16
17 #include "qapi/qmp/qdict.h"
18 #include "qemu-common.h"
19 #include "qemu/thread.h"
20 #include "qemu/notify.h"
21 #include "migration/vmstate.h"
22 #include "qapi-types.h"
23 #include "exec/cpu-common.h"
24
/* Magic and version numbers identifying a migration stream */
#define QEMU_VM_FILE_MAGIC           0x5145564d   /* ASCII "QEVM" */
#define QEMU_VM_FILE_VERSION_COMPAT  0x00000002
#define QEMU_VM_FILE_VERSION         0x00000003

/* Section-type bytes appearing in the migration stream */
#define QEMU_VM_EOF                  0x00   /* end of the whole stream */
#define QEMU_VM_SECTION_START        0x01   /* first part of an iterating section */
#define QEMU_VM_SECTION_PART         0x02   /* middle part of an iterating section */
#define QEMU_VM_SECTION_END          0x03   /* final part of an iterating section */
#define QEMU_VM_SECTION_FULL         0x04   /* complete section in one piece */
#define QEMU_VM_SUBSECTION           0x05   /* optional subsection within a section */
#define QEMU_VM_VMDESCRIPTION        0x06   /* description of the VM state */
#define QEMU_VM_CONFIGURATION        0x07   /* configuration section (see savevm_skip_configuration) */
#define QEMU_VM_COMMAND              0x08   /* command for the destination */
#define QEMU_VM_SECTION_FOOTER       0x7e   /* trailing marker (see savevm_skip_section_footers) */
39
/*
 * Options supplied by the user when starting a migration
 * (consumed by migrate_init()).
 */
struct MigrationParams {
    bool blk;       /* also migrate block devices (block migration) */
    bool shared;    /* with blk: destination storage is shared
                     * -- NOTE(review): confirm exact semantics vs -i flag */
};
44
/* Messages sent on the return path from destination to source */
enum mig_rp_message_type {
    MIG_RP_MSG_INVALID = 0,  /* Must be 0 */
    MIG_RP_MSG_SHUT,         /* sibling will not send any more RP messages */
    MIG_RP_MSG_PONG,         /* Response to a PING; data (seq: be32 ) */

    MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */
    MIG_RP_MSG_REQ_PAGES,    /* data (start: be64, len: be32) */

    MIG_RP_MSG_MAX           /* number of message types; never sent on the wire */
};
56
57 typedef QLIST_HEAD(, LoadStateEntry) LoadStateEntry_Head;
58
/* The current postcopy state is read/set by postcopy_state_get/set
 * which update it atomically.
 * The state is updated as postcopy messages are received, and
 * in general only one thread should be writing to the state at any one
 * time, initially the main thread and then the listen thread;
 * Corner cases are where either thread finishes early and/or errors.
 * The state is checked as messages are received to ensure that
 * the source is sending us messages in the correct order.
 * The state is also used by the RAM reception code to know if it
 * has to place pages atomically, and the cleanup code at the end of
 * the main thread to know if it has to delay cleanup until the end
 * of postcopy.
 */
typedef enum {
    POSTCOPY_INCOMING_NONE = 0,  /* Initial state - no postcopy */
    POSTCOPY_INCOMING_ADVISE,    /* source advised that postcopy may be used */
    POSTCOPY_INCOMING_DISCARD,   /* receiving discard ranges (see ram_discard_range) */
    POSTCOPY_INCOMING_LISTENING, /* listen thread has taken over reception */
    POSTCOPY_INCOMING_RUNNING,   /* destination VM is running during postcopy */
    POSTCOPY_INCOMING_END        /* postcopy finished; cleanup may proceed */
} PostcopyState;
80
/* State for the incoming migration */
struct MigrationIncomingState {
    QEMUFile *from_src_file;        /* stream the migration data arrives on */

    /*
     * Free at the start of the main state load, set as the main thread finishes
     * loading state.
     */
    QemuEvent main_thread_load_event;

    /* Postcopy fault thread: services userfaults (see userfault_fd below) */
    bool have_fault_thread;         /* fault_thread running, needs joining */
    QemuThread fault_thread;
    QemuSemaphore fault_thread_sem; /* startup handshake with fault_thread
                                     * -- TODO(review): confirm direction */

    /* Postcopy listen thread (see PostcopyState comment above) */
    bool have_listen_thread;        /* listen_thread running, needs joining */
    QemuThread listen_thread;
    QemuSemaphore listen_thread_sem; /* startup handshake with listen_thread
                                      * -- TODO(review): confirm direction */

    /* For the kernel to send us notifications */
    int userfault_fd;
    /* To tell the fault_thread to quit */
    int userfault_quit_fd;
    QEMUFile *to_src_file;          /* return path back to the source */
    QemuMutex rp_mutex;             /* We send replies from multiple threads */
    void *postcopy_tmp_page;        /* staging buffer for incoming pages
                                     * -- presumably placed atomically; verify */

    QEMUBH *bh;                     /* bottom half used during completion/cleanup
                                     * -- TODO(review): confirm scheduling site */

    int state;                      /* MigrationStatus value -- TODO confirm */
    /* See savevm.c */
    LoadStateEntry_Head loadvm_handlers;
};
113
/* Accessor for the single, global incoming-migration state */
MigrationIncomingState *migration_incoming_get_current(void);
/* Allocate the incoming state, reading the migration stream from @f */
MigrationIncomingState *migration_incoming_state_new(QEMUFile *f);
/* Tear down the current incoming state */
void migration_incoming_state_destroy(void);
117
/*
 * An outstanding page request, on the source, having been received
 * and queued
 */
struct MigrationSrcPageRequest {
    RAMBlock *rb;       /* block containing the requested pages */
    hwaddr offset;      /* start of the requested range within the block */
    hwaddr len;         /* length of the requested range in bytes */

    /* Link in MigrationState.src_page_requests */
    QSIMPLEQ_ENTRY(MigrationSrcPageRequest) next_req;
};
129
/* State of an outgoing migration (source side); see migrate_get_current() */
struct MigrationState
{
    int64_t bandwidth_limit;   /* configured transfer-rate limit
                                * -- units presumably bytes/s; verify */
    size_t bytes_xfer;         /* bytes sent in the current rate-limit slice
                                * -- TODO(review): confirm against migration.c */
    size_t xfer_limit;         /* bytes allowed per slice -- TODO confirm */
    QemuThread thread;         /* the migration worker thread */
    QEMUBH *cleanup_bh;        /* bottom half that cleans up after the thread */
    QEMUFile *to_dst_file;     /* stream carrying the data to the destination */
    int parameters[MIGRATION_PARAMETER__MAX]; /* indexed by MigrationParameter */

    int state;                 /* MigrationStatus value -- TODO confirm */
    MigrationParams params;    /* options given when the migration started */

    /* State related to return path */
    struct {
        QEMUFile *from_dst_file; /* messages from the destination
                                  * (enum mig_rp_message_type) */
        QemuThread rp_thread;    /* thread draining the return path */
        bool error;              /* an error occurred on the return path */
    } rp_state;

    double mbps;               /* recent throughput estimate */
    int64_t total_time;        /* duration of the migration so far
                                * -- units presumably ms; verify */
    int64_t downtime;          /* measured guest downtime */
    int64_t expected_downtime; /* estimated downtime if completed now */
    int64_t dirty_pages_rate;  /* guest dirtying rate, pages -- units TODO */
    int64_t dirty_bytes_rate;  /* guest dirtying rate, bytes -- units TODO */
    bool enabled_capabilities[MIGRATION_CAPABILITY__MAX]; /* per-capability flags */
    int64_t xbzrle_cache_size; /* XBZRLE page-cache size (see xbzrle_cache_resize) */
    int64_t setup_time;        /* time spent in the setup phase */
    int64_t dirty_sync_count;  /* number of dirty-bitmap sync passes */

    /* Flag set once the migration has been asked to enter postcopy */
    bool start_postcopy;
    /* Flag set after postcopy has sent the device state */
    bool postcopy_after_devices;

    /* Flag set once the migration thread is running (and needs joining) */
    bool migration_thread_running;

    /* Queue of outstanding page requests from the destination */
    QemuMutex src_page_req_mutex;
    QSIMPLEQ_HEAD(src_page_requests, MigrationSrcPageRequest) src_page_requests;
    /* The RAMBlock used in the last src_page_request */
    RAMBlock *last_req_rb;
};
175
/* Move @state from @old_state to @new_state
 * -- NOTE(review): presumably an atomic compare-and-swap; verify in migration.c */
void migrate_set_state(int *state, int old_state, int new_state);

/* Destination side: consume the migration stream @f */
void process_incoming_migration(QEMUFile *f);

/* Parse @uri and start listening for an incoming migration */
void qemu_start_incoming_migration(const char *uri, Error **errp);

uint64_t migrate_max_downtime(void);

/*
 * Transport back-ends: each pair starts the destination (incoming) or
 * source (outgoing) side of a migration over that transport.
 */
void exec_start_incoming_migration(const char *host_port, Error **errp);
void exec_start_outgoing_migration(MigrationState *s, const char *host_port, Error **errp);

void tcp_start_incoming_migration(const char *host_port, Error **errp);
void tcp_start_outgoing_migration(MigrationState *s, const char *host_port, Error **errp);

void unix_start_incoming_migration(const char *path, Error **errp);
void unix_start_outgoing_migration(MigrationState *s, const char *path, Error **errp);

void fd_start_incoming_migration(const char *path, Error **errp);
void fd_start_outgoing_migration(MigrationState *s, const char *fdname, Error **errp);

void rdma_start_outgoing_migration(void *opaque, const char *host_port, Error **errp);
void rdma_start_incoming_migration(const char *host_port, Error **errp);

/* Report a transport failure on the outgoing migration */
void migrate_fd_error(MigrationState *s);

/* Transport is connected: start the migration thread */
void migrate_fd_connect(MigrationState *s);

int migrate_fd_close(MigrationState *s);

/* Notifiers fired on migration state changes */
void add_migration_state_change_notifier(Notifier *notify);
void remove_migration_state_change_notifier(Notifier *notify);

/* (Re)initialise the global MigrationState for a new migration */
MigrationState *migrate_init(const MigrationParams *params);

/* Queries on the outgoing migration's state machine */
bool migration_in_setup(MigrationState *);
bool migration_has_finished(MigrationState *);
bool migration_has_failed(MigrationState *);
/* True if outgoing migration has entered postcopy phase */
bool migration_in_postcopy(MigrationState *);
/* ...and after the device transmission */
bool migration_in_postcopy_after_devices(MigrationState *);

/* The global (source-side) migration state */
MigrationState *migrate_get_current(void);

/* Worker-thread pools for compressed RAM migration */
void migrate_compress_threads_create(void);
void migrate_compress_threads_join(void);
void migrate_decompress_threads_create(void);
void migrate_decompress_threads_join(void);

/* RAM transfer statistics */
uint64_t ram_bytes_remaining(void);
uint64_t ram_bytes_transferred(void);
uint64_t ram_bytes_total(void);

void free_xbzrle_decoded_buf(void);
230
/* Account @size bytes at @f's position to the transfer counters; @zero
 * selects the zero/duplicate class -- TODO(review): confirm against ram.c */
void acct_update_position(QEMUFile *f, size_t size, bool zero);

/* Counters for the dup / skipped / norm / xbzrle page classes */
uint64_t dup_mig_bytes_transferred(void);
uint64_t dup_mig_pages_transferred(void);
uint64_t skipped_mig_bytes_transferred(void);
uint64_t skipped_mig_pages_transferred(void);
uint64_t norm_mig_bytes_transferred(void);
uint64_t norm_mig_pages_transferred(void);
uint64_t xbzrle_mig_bytes_transferred(void);
uint64_t xbzrle_mig_pages_transferred(void);
uint64_t xbzrle_mig_pages_overflow(void);
uint64_t xbzrle_mig_pages_cache_miss(void);
double xbzrle_mig_cache_miss_rate(void);

/* Expand a page that was sent as a single repeated byte: fill @size bytes
 * at @host with @ch -- TODO(review): confirm semantics in ram.c */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size);
void ram_debug_dump_bitmap(unsigned long *todump, bool expected);
/* For outgoing discard bitmap */
int ram_postcopy_send_discard_bitmap(MigrationState *ms);
/* For incoming postcopy discard */
int ram_discard_range(MigrationIncomingState *mis, const char *block_name,
                      uint64_t start, size_t length);
int ram_postcopy_incoming_init(MigrationIncomingState *mis);
253
/**
 * @migrate_add_blocker - prevent migration from proceeding
 *
 * @reason - an error to be returned whenever migration is attempted
 */
void migrate_add_blocker(Error *reason);

/**
 * @migrate_del_blocker - remove a blocking error from migration
 *
 * @reason - the error blocking migration
 */
void migrate_del_blocker(Error *reason);

/* Capability/parameter queries -- presumably read
 * enabled_capabilities[] / parameters[] of the current MigrationState */
bool migrate_postcopy_ram(void);
bool migrate_zero_blocks(void);

bool migrate_auto_converge(void);

/* XBZRLE delta codec for repeatedly-sent RAM pages.
 * -- NOTE(review): presumably return the output length or a negative
 * value on error/overflow; confirm exact contract in xbzrle.c */
int xbzrle_encode_buffer(uint8_t *old_buf, uint8_t *new_buf, int slen,
                         uint8_t *dst, int dlen);
int xbzrle_decode_buffer(uint8_t *src, int slen, uint8_t *dst, int dlen);

int migrate_use_xbzrle(void);
int64_t migrate_xbzrle_cache_size(void);

/* Resize the XBZRLE page cache to @new_size bytes */
int64_t xbzrle_cache_resize(int64_t new_size);

bool migrate_use_compression(void);
int migrate_compress_level(void);
int migrate_compress_threads(void);
int migrate_decompress_threads(void);
bool migrate_use_events(void);

/* Sending on the return path - generic and then for each message type */
void migrate_send_rp_message(MigrationIncomingState *mis,
                             enum mig_rp_message_type message_type,
                             uint16_t len, void *data);
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value);
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value);
void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char* rbname,
                               ram_addr_t start, size_t len);
298
/* Hooks letting a transport (e.g. RDMA) participate in RAM transfer */
void ram_control_before_iterate(QEMUFile *f, uint64_t flags);
void ram_control_after_iterate(QEMUFile *f, uint64_t flags);
void ram_control_load_hook(QEMUFile *f, uint64_t flags, void *data);

/* Whenever this is found in the data stream, the flags
 * will be passed to ram_control_load_hook in the incoming-migration
 * side. This lets before_ram_iterate/after_ram_iterate add
 * transport-specific sections to the RAM migration data.
 */
#define RAM_SAVE_FLAG_HOOK 0x80

/* Results from ram_control_save_page(): the transport does not handle
 * the page, or accepted it but will transmit it later */
#define RAM_SAVE_CONTROL_NOT_SUPP -1000
#define RAM_SAVE_CONTROL_DELAYED  -2000

/* Offer a page to the transport; may yield RAM_SAVE_CONTROL_NOT_SUPP /
 * RAM_SAVE_CONTROL_DELAYED -- TODO(review): confirm exact return contract */
size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
                             ram_addr_t offset, size_t size,
                             uint64_t *bytes_sent);

/* One-time registration of the RAM migration handlers */
void ram_mig_init(void);

/* Stream-format tweaks (suppress footer / configuration sections) */
void savevm_skip_section_footers(void);
void savevm_skip_configuration(void);

/* "Global state" section handling */
void register_global_state(void);
void global_state_set_optional(void);
int global_state_store(void);
void global_state_store_running(void);

/* Drop any queued destination page requests on the source */
void flush_page_queue(MigrationState *ms);
/* Queue a page request received from the destination
 * -- NOTE(review): rbname may be NULL meaning "same block as last
 * request" (cf. last_req_rb); verify */
int ram_save_queue_pages(MigrationState *ms, const char *rbname,
                         ram_addr_t start, ram_addr_t len);

PostcopyState postcopy_state_get(void);
/* Set the state and return the old state */
PostcopyState postcopy_state_set(PostcopyState new_state);
#endif