cirrus: fix off-by-one in cirrus_bitblt_rop_bkwd_transp_*_16
[qemu.git] / tests / qemu-iotests / 124
1 #!/usr/bin/env python
2 #
3 # Tests for incremental drive-backup
4 #
5 # Copyright (C) 2015 John Snow for Red Hat, Inc.
6 #
7 # Based on 056.
8 #
9 # This program is free software; you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation; either version 2 of the License, or
12 # (at your option) any later version.
13 #
14 # This program is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the GNU General Public License
20 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
21 #
22
23 import os
24 import iotests
25
26
def io_write_patterns(img, patterns):
    """Write each (pattern, offset, size) triple into img via qemu-io."""
    for triple in patterns:
        cmd = 'write -P%s %s %s' % triple
        iotests.qemu_io('-c', cmd, img)
30
31
def try_remove(img):
    """Best-effort removal of img; a missing file is not an error."""
    try:
        os.remove(img)
    except OSError:
        # The image may never have been created, or was already deleted.
        pass
37
38
def transaction_action(action, **kwargs):
    """Build a QMP transaction action dict of the given type.

    Keyword-argument names have their underscores converted to hyphens
    so that Python identifiers map onto QMP member names.
    """
    return {
        'type': action,
        # items() instead of the Python-2-only iteritems(): works on
        # both Python 2 and Python 3.
        'data': dict((k.replace('_', '-'), v) for k, v in kwargs.items())
    }
44
45
def transaction_bitmap_clear(node, name, **kwargs):
    """Shorthand for a 'block-dirty-bitmap-clear' transaction action."""
    return transaction_action('block-dirty-bitmap-clear',
                              node=node, name=name, **kwargs)
49
50
def transaction_drive_backup(device, target, **kwargs):
    """Shorthand for a 'drive-backup' transaction action.

    The job id is always the device name itself.
    """
    return transaction_action('drive-backup', job_id=device, device=device,
                              target=target, **kwargs)
54
55
class Bitmap:
    """Track a dirty bitmap on a drive and the backup chain built from it."""

    def __init__(self, name, drive):
        self.name = name
        self.drive = drive
        self.num = 0           # index used for the next backup target
        self.backups = list()  # list of (incremental, reference) image pairs

    def base_target(self):
        """Return the full ("anchor") backup; it has no reference image."""
        return (self.drive['backup'], None)

    def new_target(self, num=None):
        """Allocate and record the next (incremental, reference) pair."""
        if num is None:
            num = self.num
        self.num = num + 1
        prefix = os.path.join(iotests.test_dir,
                              "%s.%s." % (self.drive['id'], self.name))
        suffix = "%i.%s" % (num, self.drive['fmt'])
        pair = (prefix + "inc" + suffix, prefix + "ref" + suffix)
        self.backups.append(pair)
        return pair

    def last_target(self):
        """Return the most recent pair, or the anchor if none was made."""
        return self.backups[-1] if self.backups else self.base_target()

    def del_target(self):
        """Forget the most recent pair and delete its images from disk."""
        for image in self.backups.pop():
            try_remove(image)
        self.num -= 1

    def cleanup(self):
        """Delete every backup image this bitmap has recorded."""
        for pair in self.backups:
            for image in pair:
                try_remove(image)
92
93
class TestIncrementalBackupBase(iotests.QMPTestCase):
    """Common plumbing for the incremental backup tests.

    Tracks the drives, bitmaps, and image files each test creates so
    that tearDown() can shut the VM down and clean everything up.
    """

    def __init__(self, *args):
        super(TestIncrementalBackupBase, self).__init__(*args)
        self.bitmaps = list()
        self.files = list()
        self.drives = list()
        self.vm = iotests.VM()
        self.err_img = os.path.join(iotests.test_dir, 'err.%s' % iotests.imgfmt)


    def setUp(self):
        # Create a base image with a distinctive patterning
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.vm.add_drive(drive0['file'])
        self.write_default_pattern(drive0['file'])
        self.vm.launch()


    def write_default_pattern(self, target):
        """Write the standard three-extent data pattern into target."""
        io_write_patterns(target, (('0x41', 0, 512),
                                   ('0xd5', '1M', '32k'),
                                   ('0xdc', '32M', '124k')))


    def add_node(self, node_id, fmt=iotests.imgfmt, path=None, backup=None):
        """Record a drive dict (id/file/backup/fmt) and return it.

        Does not create any images; paths default into iotests.test_dir.
        """
        if path is None:
            path = os.path.join(iotests.test_dir, '%s.%s' % (node_id, fmt))
        if backup is None:
            backup = os.path.join(iotests.test_dir,
                                  '%s.full.backup.%s' % (node_id, fmt))

        self.drives.append({
            'id': node_id,
            'file': path,
            'backup': backup,
            'fmt': fmt })
        return self.drives[-1]


    def img_create(self, img, fmt=iotests.imgfmt, size='64M',
                   parent=None, parentFormat=None, **kwargs):
        """Run qemu-img create and remember the image for cleanup.

        Extra keyword arguments are passed through as '-o key=value'
        creation options; parent/parentFormat select a backing file.
        """
        optargs = []
        # items() instead of the Python-2-only iteritems(): works on
        # both Python 2 and Python 3.
        for k, v in kwargs.items():
            optargs = optargs + ['-o', '%s=%s' % (k, v)]
        args = ['create', '-f', fmt] + optargs + [img, size]
        if parent:
            if parentFormat is None:
                parentFormat = fmt
            args = args + ['-b', parent, '-F', parentFormat]
        iotests.qemu_img(*args)
        self.files.append(img)


    def do_qmp_backup(self, error='Input/output error', **kwargs):
        """Issue a drive-backup QMP command and wait for completion.

        Returns True if the job succeeded, False if it failed with the
        expected error message.
        """
        res = self.vm.qmp('drive-backup', **kwargs)
        self.assert_qmp(res, 'return', {})
        return self.wait_qmp_backup(kwargs['device'], error)


    def wait_qmp_backup(self, device, error='Input/output error'):
        """Wait for BLOCK_JOB_COMPLETED on device; check success/error."""
        event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)

        try:
            failure = self.dictpath(event, 'data/error')
        except AssertionError:
            # Backup succeeded: the job must have transferred everything.
            self.assert_qmp(event, 'data/offset', event['data']['len'])
            return True
        else:
            # Backup failed: the error must be the one we expected.
            self.assert_qmp(event, 'data/error', error)
            return False


    def wait_qmp_backup_cancelled(self, device):
        """Wait for a BLOCK_JOB_CANCELLED event for device."""
        event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED',
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)


    def create_anchor_backup(self, drive=None):
        """Create the full backup that anchors the incremental chain."""
        if drive is None:
            drive = self.drives[-1]
        res = self.do_qmp_backup(job_id=drive['id'],
                                 device=drive['id'], sync='full',
                                 format=drive['fmt'], target=drive['backup'])
        self.assertTrue(res)
        self.files.append(drive['backup'])
        return drive['backup']


    def make_reference_backup(self, bitmap=None):
        """Make a full backup to compare against the last incremental."""
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        _, reference = bitmap.last_target()
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'], sync='full',
                                 format=bitmap.drive['fmt'], target=reference)
        self.assertTrue(res)


    def add_bitmap(self, name, drive, **kwargs):
        """Add a dirty bitmap to drive via QMP and track it locally."""
        bitmap = Bitmap(name, drive)
        self.bitmaps.append(bitmap)
        result = self.vm.qmp('block-dirty-bitmap-add', node=drive['id'],
                             name=bitmap.name, **kwargs)
        self.assert_qmp(result, 'return', {})
        return bitmap


    def prepare_backup(self, bitmap=None, parent=None):
        """Create the target image for the bitmap's next incremental."""
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        target, _ = bitmap.new_target()
        self.img_create(target, bitmap.drive['fmt'], parent=parent)
        return target


    def create_incremental(self, bitmap=None, parent=None,
                           parentFormat=None, validate=True):
        """Run an incremental backup for bitmap; return True on success.

        On failure the freshly-allocated target images are discarded;
        with validate=True a failure is treated as a test error.
        """
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        target = self.prepare_backup(bitmap, parent)
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'],
                                 sync='incremental', bitmap=bitmap.name,
                                 format=bitmap.drive['fmt'], target=target,
                                 mode='existing')
        if not res:
            bitmap.del_target()
            self.assertFalse(validate)
        else:
            self.make_reference_backup(bitmap)
        return res


    def check_backups(self):
        """Compare every incremental against its full reference backup,
        and the chain tip against the live drive image."""
        for bitmap in self.bitmaps:
            for incremental, reference in bitmap.backups:
                self.assertTrue(iotests.compare_images(incremental, reference))
            last = bitmap.last_target()[0]
            self.assertTrue(iotests.compare_images(last, bitmap.drive['file']))


    def hmp_io_writes(self, drive, patterns):
        """Issue guest-side qemu-io writes, then flush the drive."""
        for pattern in patterns:
            self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern)
        self.vm.hmp_qemu_io(drive, 'flush')


    def do_incremental_simple(self, **kwargs):
        """Create and verify three incremental backups on drive0.

        kwargs are forwarded to block-dirty-bitmap-add (e.g. granularity).
        """
        self.create_anchor_backup()
        self.add_bitmap('bitmap0', self.drives[0], **kwargs)

        # Sanity: Create a "hollow" incremental backup
        self.create_incremental()
        # Three writes: One complete overwrite, one new segment,
        # and one partial overlap.
        self.hmp_io_writes(self.drives[0]['id'], (('0xab', 0, 512),
                                                  ('0xfe', '16M', '256k'),
                                                  ('0x64', '32736k', '64k')))
        self.create_incremental()
        # Three more writes, one of each kind, like above
        self.hmp_io_writes(self.drives[0]['id'], (('0x9a', 0, 512),
                                                  ('0x55', '8M', '352k'),
                                                  ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()


    def tearDown(self):
        """Shut the VM down and remove every image the test created."""
        self.vm.shutdown()
        for bitmap in self.bitmaps:
            bitmap.cleanup()
        for filename in self.files:
            try_remove(filename)
280
281
282
class TestIncrementalBackup(TestIncrementalBackupBase):
    '''Incremental backup tests run against the plain drive0 from setUp().'''

    def test_incremental_simple(self):
        '''
        Test: Create and verify three incremental backups.

        Create a bitmap and a full backup before VM execution begins,
        then create a series of three incremental backups "during execution,"
        i.e.; after IO requests begin modifying the drive.
        '''
        return self.do_incremental_simple()


    def test_small_granularity(self):
        '''
        Test: Create and verify backups made with a small granularity bitmap.

        Perform the same test as test_incremental_simple, but with a granularity
        of only 32KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=32768)


    def test_large_granularity(self):
        '''
        Test: Create and verify backups made with a large granularity bitmap.

        Perform the same test as test_incremental_simple, but with a granularity
        of 128KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=131072)


    def test_larger_cluster_target(self):
        '''
        Test: Create and verify backups made to a larger cluster size target.

        With a default granularity of 64KiB, verify that backups made to a
        larger cluster size target of 128KiB without a backing file works.
        '''
        drive0 = self.drives[0]

        # Create a cluster_size=128k full backup / "anchor" backup
        self.img_create(drive0['backup'], cluster_size='128k')
        self.assertTrue(self.do_qmp_backup(device=drive0['id'], sync='full',
                                           format=drive0['fmt'],
                                           target=drive0['backup'],
                                           mode='existing'))

        # Create bitmap and dirty it with some new writes.
        # overwrite [32736, 32799] which will dirty bitmap clusters at
        # 32M-64K and 32M. 32M+64K will be left undirtied.
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'],
                           (('0xab', 0, 512),
                            ('0xfe', '16M', '256k'),
                            ('0x64', '32736k', '64k')))


        # Prepare a cluster_size=128k backup target without a backing file.
        (target, _) = bitmap0.new_target()
        self.img_create(target, bitmap0.drive['fmt'], cluster_size='128k')

        # Perform Incremental Backup
        self.assertTrue(self.do_qmp_backup(device=bitmap0.drive['id'],
                                           sync='incremental',
                                           bitmap=bitmap0.name,
                                           format=bitmap0.drive['fmt'],
                                           target=target,
                                           mode='existing'))
        self.make_reference_backup(bitmap0)

        # Add the backing file, then compare and exit.
        iotests.qemu_img('rebase', '-f', drive0['fmt'], '-u', '-b',
                         drive0['backup'], '-F', drive0['fmt'], target)
        self.vm.shutdown()
        self.check_backups()


    def test_incremental_transaction(self):
        '''Test: Verify backups made from transactionally created bitmaps.

        Create a bitmap "before" VM execution begins, then create a second
        bitmap AFTER writes have already occurred. Use transactions to create
        a full backup and synchronize both bitmaps to this backup.
        Create an incremental backup through both bitmaps and verify that
        both backups match the current drive0 image.
        '''

        drive0 = self.drives[0]
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        bitmap1 = self.add_bitmap('bitmap1', drive0)

        # Clearing both bitmaps in the same transaction as the full backup
        # resynchronizes them with the backup's contents atomically.
        result = self.vm.qmp('transaction', actions=[
            transaction_bitmap_clear(bitmap0.drive['id'], bitmap0.name),
            transaction_bitmap_clear(bitmap1.drive['id'], bitmap1.name),
            transaction_drive_backup(drive0['id'], drive0['backup'],
                                     sync='full', format=drive0['fmt'])
        ])
        self.assert_qmp(result, 'return', {})
        self.wait_until_completed(drive0['id'])
        self.files.append(drive0['backup'])

        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        # Both bitmaps should be correctly in sync.
        self.create_incremental(bitmap0)
        self.create_incremental(bitmap1)
        self.vm.shutdown()
        self.check_backups()


    def do_transaction_failure_test(self, race=False):
        '''Shared body for the transaction-failure tests.

        With race=True, drive0 receives no writes, so its backup job has
        no data to transfer when drive1's failing job cancels the group.
        '''
        # Create a second drive, with pattern:
        drive1 = self.add_node('drive1')
        self.img_create(drive1['file'], drive1['fmt'])
        io_write_patterns(drive1['file'], (('0x14', 0, 512),
                                           ('0x5d', '1M', '32k'),
                                           ('0xcd', '32M', '124k')))

        # Create a blkdebug interface to this img as 'drive1'
        # After the first flush (state 1 -> 2), the next read_aio fails
        # exactly once with errno 5 (EIO).
        result = self.vm.qmp('blockdev-add',
            node_name=drive1['id'],
            driver=drive1['fmt'],
            file={
                'driver': 'blkdebug',
                'image': {
                    'driver': 'file',
                    'filename': drive1['file']
                },
                'set-state': [{
                    'event': 'flush_to_disk',
                    'state': 1,
                    'new_state': 2
                }],
                'inject-error': [{
                    'event': 'read_aio',
                    'errno': 5,
                    'state': 2,
                    'immediately': False,
                    'once': True
                }],
            }
        )
        self.assert_qmp(result, 'return', {})

        # Create bitmaps and full backups for both drives
        drive0 = self.drives[0]
        dr0bm0 = self.add_bitmap('bitmap0', drive0)
        dr1bm0 = self.add_bitmap('bitmap0', drive1)
        self.create_anchor_backup(drive0)
        self.create_anchor_backup(drive1)
        self.assert_no_active_block_jobs()
        self.assertFalse(self.vm.get_qmp_events(wait=False))

        # Emulate some writes
        if not race:
            self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                              ('0xfe', '16M', '256k'),
                                              ('0x64', '32736k', '64k')))
        self.hmp_io_writes(drive1['id'], (('0xba', 0, 512),
                                          ('0xef', '16M', '256k'),
                                          ('0x46', '32736k', '64k')))

        # Create incremental backup targets
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Ask for a new incremental backup per-each drive,
        # expecting drive1's backup to fail. In the 'race' test,
        # we expect drive1 to attempt to cancel the empty drive0 job.
        transaction = [
            transaction_drive_backup(drive0['id'], target0, sync='incremental',
                                     format=drive0['fmt'], mode='existing',
                                     bitmap=dr0bm0.name),
            transaction_drive_backup(drive1['id'], target1, sync='incremental',
                                     format=drive1['fmt'], mode='existing',
                                     bitmap=dr1bm0.name)
        ]
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'} )
        self.assert_qmp(result, 'return', {})

        # Observe that drive0's backup is cancelled and drive1 completes with
        # an error.
        self.wait_qmp_backup_cancelled(drive0['id'])
        self.assertFalse(self.wait_qmp_backup(drive1['id']))
        error = self.vm.event_wait('BLOCK_JOB_ERROR')
        self.assert_qmp(error, 'data', {'device': drive1['id'],
                                        'action': 'report',
                                        'operation': 'read'})
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # Delete drive0's successful target and eliminate our record of the
        # unsuccessful drive1 target.
        dr0bm0.del_target()
        dr1bm0.del_target()
        if race:
            # Don't re-run the transaction, we only wanted to test the race.
            self.vm.shutdown()
            return

        # Re-run the same transaction:
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Re-run the exact same transaction.
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode':'grouped'})
        self.assert_qmp(result, 'return', {})

        # Both should complete successfully this time.
        self.assertTrue(self.wait_qmp_backup(drive0['id']))
        self.assertTrue(self.wait_qmp_backup(drive1['id']))
        self.make_reference_backup(dr0bm0)
        self.make_reference_backup(dr1bm0)
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # And the images should of course validate.
        self.vm.shutdown()
        self.check_backups()

    def test_transaction_failure(self):
        '''Test: Verify backups made from a transaction that partially fails.

        Add a second drive with its own unique pattern, and add a bitmap to each
        drive. Use blkdebug to interfere with the backup on just one drive and
        attempt to create a coherent incremental backup across both drives.

        verify a failure in one but not both, then delete the failed stubs and
        re-run the same transaction.

        verify that both incrementals are created successfully.
        '''
        self.do_transaction_failure_test()

    def test_transaction_failure_race(self):
        '''Test: Verify that transactions with jobs that have no data to
        transfer do not cause race conditions in the cancellation of the entire
        transaction job group.
        '''
        self.do_transaction_failure_test(race=True)


    def test_sync_dirty_bitmap_missing(self):
        '''Test: sync=incremental without a bitmap argument is rejected.'''
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', format=self.drives[0]['fmt'],
                             target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')


    def test_sync_dirty_bitmap_not_found(self):
        '''Test: sync=incremental with a nonexistent bitmap is rejected.'''
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', bitmap='unknown',
                             format=self.drives[0]['fmt'], target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')


    def test_sync_dirty_bitmap_bad_granularity(self):
        '''
        Test: Test what happens if we provide an improper granularity.

        The granularity must always be a power of 2.
        '''
        self.assert_no_active_block_jobs()
        # add_bitmap asserts the QMP command returns {}; a rejected
        # granularity therefore surfaces as an AssertionError.
        self.assertRaises(AssertionError, self.add_bitmap,
                          'bitmap0', self.drives[0],
                          granularity=64000)
560
561
class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
    '''Incremental backup tests that utilize a BlkDebug filter on drive0.'''

    def setUp(self):
        # Unlike the base class, drive0 is not attached via add_drive here;
        # test_incremental_failure attaches it through blockdev-add with a
        # blkdebug filter instead.
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.write_default_pattern(drive0['file'])
        self.vm.launch()

    def test_incremental_failure(self):
        '''Test: Verify backups made after a failure are correct.

        Simulate a failure during an incremental backup block job,
        emulate additional writes, then create another incremental backup
        afterwards and verify that the backup created is correct.
        '''

        drive0 = self.drives[0]
        # Attach drive0 behind a blkdebug filter: after the first flush
        # (state 1 -> 2), the next read_aio fails exactly once with
        # errno 5 (EIO), failing the first incremental backup job.
        result = self.vm.qmp('blockdev-add',
            node_name=drive0['id'],
            driver=drive0['fmt'],
            file={
                'driver': 'blkdebug',
                'image': {
                    'driver': 'file',
                    'filename': drive0['file']
                },
                'set-state': [{
                    'event': 'flush_to_disk',
                    'state': 1,
                    'new_state': 2
                }],
                'inject-error': [{
                    'event': 'read_aio',
                    'errno': 5,
                    'state': 2,
                    'immediately': False,
                    'once': True
                }],
            }
        )
        self.assert_qmp(result, 'return', {})

        self.create_anchor_backup(drive0)
        self.add_bitmap('bitmap0', drive0)
        # Note: at this point, during a normal execution,
        # Assume that the VM resumes and begins issuing IO requests here.

        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))

        # This incremental is expected to fail due to the injected error.
        result = self.create_incremental(validate=False)
        self.assertFalse(result)
        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        # The retry after the one-shot error must succeed.
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()
622
623
if __name__ == '__main__':
    # Restricted to qcow2: the tests build backing-file chains for the
    # incremental targets (img_create with parent=...).
    iotests.main(supported_fmts=['qcow2'])