#!/usr/bin/env python3
# group: rw
#
# Test nbd reconnect
#
# Copyright (c) 2019 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import time
import os
import iotests
from iotests import qemu_img_create, file_path, qemu_nbd_popen

disk_a, disk_b, nbd_sock = file_path('disk_a', 'disk_b', 'nbd-sock')
nbd_uri = 'nbd+unix:///?socket=' + nbd_sock
wait_limit = 3.0
wait_step = 0.2


class TestNbdReconnect(iotests.QMPTestCase):
def init_vm(self, disk_size):
qemu_img_create('-f', iotests.imgfmt, disk_a, str(disk_size))
qemu_img_create('-f', iotests.imgfmt, disk_b, str(disk_size))
self.vm = iotests.VM().add_drive(disk_a)
self.vm.launch()
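        # Write the whole source disk so the copy job has data to process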
self.vm.hmp_qemu_io('drive0', 'write 0 {}'.format(disk_size))

    def tearDown(self):
self.vm.shutdown()
os.remove(disk_a)
os.remove(disk_b)

    def start_job(self, job):
        """Start a job with an NBD target and kill the NBD server"""
assert job in ('blockdev-backup', 'blockdev-mirror')
with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
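            # Attach the NBD export as raw node 'backup0'.  reconnect-delay=10
            # makes the NBD client keep requests waiting for up to 10 seconds
            # after a disconnect instead of failing them immediately.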
result = self.vm.qmp('blockdev-add',
**{'node_name': 'backup0',
'driver': 'raw',
'file': {'driver': 'nbd',
'server': {'type': 'unix',
'path': nbd_sock},
'reconnect-delay': 10}})
self.assert_qmp(result, 'return', {})
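            # Start the job capped at 1 MiB/s so it cannot complete before the
            # NBD server goes away.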
result = self.vm.qmp(job, device='drive0',
sync='full', target='backup0',
speed=(1 * 1024 * 1024))
self.assert_qmp(result, 'return', {})
# Wait for some progress
t = 0.0
while t < wait_limit:
jobs = self.vm.qmp('query-block-jobs')['return']
if jobs and jobs[0]['offset'] > 0:
break
time.sleep(wait_step)
t += wait_step
self.assertTrue(jobs and jobs[0]['offset'] > 0) # job started
jobs = self.vm.qmp('query-block-jobs')['return']
# Check that job is still in progress
self.assertTrue(jobs)
self.assertTrue(jobs[0]['offset'] < jobs[0]['len'])
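        # Lift the 1 MiB/s limit so the job can finish once the server is back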
result = self.vm.qmp('block-job-set-speed', device='drive0', speed=0)
self.assert_qmp(result, 'return', {})
        # Emulate server downtime for 1 second
time.sleep(1)

    def test_backup(self):
size = 5 * 1024 * 1024
self.init_vm(size)
self.start_job('blockdev-backup')
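        # Restart the NBD server: queued requests should be retried and the
        # job should run to completion.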
with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
e = self.vm.event_wait('BLOCK_JOB_COMPLETED')
self.assertEqual(e['data']['offset'], size)
result = self.vm.qmp('blockdev-del', node_name='backup0')
self.assert_qmp(result, 'return', {})

    def cancel_job(self):
result = self.vm.qmp('block-job-cancel', device='drive0', force=True)
self.assert_qmp(result, 'return', {})
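        # A forced cancel must return promptly instead of waiting out the
        # reconnect delay.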
start_t = time.time()
self.vm.event_wait('BLOCK_JOB_CANCELLED')
delta_t = time.time() - start_t
self.assertTrue(delta_t < 5.0)

    def test_mirror_cancel(self):
        # The mirror speed limit alone is not enough: mirror still issues many
        # parallel requests.  MAX_IN_FLIGHT is 16 and MAX_IO_BYTES is 1M in
        # mirror.c, so use a 20M disk.
self.init_vm(20 * 1024 * 1024)
self.start_job('blockdev-mirror')
self.cancel_job()

    def test_backup_cancel(self):
self.init_vm(5 * 1024 * 1024)
self.start_job('blockdev-backup')
self.cancel_job()


if __name__ == '__main__':
iotests.main(supported_fmts=['qcow2'])