Tue, 19 April 2016, 15:39

kvm live backup

Live backup of your VM running in KVM under Ubuntu using qcow2 disk images

 

# First of all, put the AppArmor profiles into complain mode so libvirt/qemu are allowed to access the snapshot files

aa-status

aa-complain /usr/sbin/libvirtd

aa-complain /etc/apparmor.d/libvirt/libvirt-xxxxxxxxxxxxxxxxxx
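
# When the backup is finished, the profiles can be put back into enforce mode the same way (a sketch, assuming the stock apparmor-utils tools and the same profile names)

aa-enforce /usr/sbin/libvirtd

aa-enforce /etc/apparmor.d/libvirt/libvirt-xxxxxxxxxxxxxxxxxx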

 

# View disks

virsh domblklist test-backup

 

# Suspend the domain (optional; I do this so that the state of the virtual machine stays unchanged while the backup is taken)

virsh suspend test-backup

 

# Create snapshot

virsh snapshot-create-as --domain test-backup test-snap1 \
  --diskspec vda,file=/home/virtual/test_backup/test-c.img \
  --diskspec vdb,file=/home/virtual/test_backup/test-d.img \
  --disk-only --atomic

# or

virsh snapshot-create-as --domain test-backup test-backup-snap1 --disk-only --atomic
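
# Optionally check the external snapshot: the overlay should list the original image as its backing file (path as in the --diskspec variant above)

qemu-img info --backing-chain /home/virtual/test_backup/test-c.img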

 

# View snapshots

virsh snapshot-list test-backup

 

# View disks (the domain should now be running on the snapshot overlay files)

virsh domblklist test-backup

 

# Copy the original base images to the backup dir (they are read-only now; new writes go to the overlays)

rsync -avh --progress drive-c.qcow2 export/drive-c.qcow2-copy

rsync -avh --progress drive-d.qcow2 export/drive-d.qcow2-copy
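
# Alternatively, qemu-img can produce a compressed, standalone copy of each base image (output names are just examples)

qemu-img convert -O qcow2 -c drive-c.qcow2 export/drive-c-compressed.qcow2

qemu-img convert -O qcow2 -c drive-d.qcow2 export/drive-d-compressed.qcow2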

 

# Perform an active blockcommit, live-merging the overlay contents back into the base images

virsh blockcommit test-backup vda --active --verbose --pivot

virsh blockcommit test-backup vdb --active --verbose --pivot
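
# If blockcommit is started without --verbose and --pivot, the job can be watched and pivoted manually, e.g.:

virsh blockjob test-backup vda --info

virsh blockjob test-backup vda --pivot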

 

# View disks (the domain should be back on the original images)

virsh domblklist test-backup

 

# Resume the domain

virsh resume test-backup

 

# View snapshots again

virsh snapshot-list test-backup

 

# Delete the snapshot metadata (the overlay files themselves are removed manually below)

virsh snapshot-delete test-backup test-backup-snap1 --metadata

 

# View snapshots to check 

virsh snapshot-list test-backup

 

# Delete snapshot files

 

rm -f /home/virtual/test_backup/drive-c.test-backup-snap1

rm -f /home/virtual/test_backup/drive-d.test-backup-snap1
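
# The whole procedure can be wrapped in a small script. A minimal sketch for a single-disk domain, following the steps above; DOMAIN, DISK, BACKUP_DIR and SNAP are examples to adjust:

#!/bin/bash
set -e

DOMAIN=test-backup
DISK=vda
BACKUP_DIR=/home/virtual/test_backup/export
SNAP=backup-snap

# Remember the original image path before the snapshot switches the domain to an overlay
BASE=$(virsh domblklist "$DOMAIN" | awk -v d="$DISK" '$1 == d {print $2}')

virsh suspend "$DOMAIN"

# External, disk-only snapshot: new writes go to the overlay, the base image becomes read-only
virsh snapshot-create-as --domain "$DOMAIN" "$SNAP" --disk-only --atomic
OVERLAY=$(virsh domblklist "$DOMAIN" | awk -v d="$DISK" '$1 == d {print $2}')

# Copy the read-only base image
rsync -avh --progress "$BASE" "$BACKUP_DIR/"

# Merge the overlay back and switch the domain to the original image
virsh blockcommit "$DOMAIN" "$DISK" --active --verbose --pivot

virsh resume "$DOMAIN"

# Drop the snapshot metadata and the no longer used overlay file
virsh snapshot-delete "$DOMAIN" "$SNAP" --metadata
rm -f "$OVERLAY"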

 

 

 

KVM (migrating a KVM VM between SmartOS nodes):

On the Source SmartOS Node:

zfs snapshot zones/d8944236-9d33-40ce-a112-5445cc40c412@migration
zfs snapshot zones/d8944236-9d33-40ce-a112-5445cc40c412-disk0@migration
zfs send zones/d8944236-9d33-40ce-a112-5445cc40c412@migration  | ssh root@10.0.0.3 zfs recv zones/d8944236-9d33-40ce-a112-5445cc40c412
zfs send zones/d8944236-9d33-40ce-a112-5445cc40c412-disk0@migration  | ssh root@10.0.0.3 zfs recv zones/d8944236-9d33-40ce-a112-5445cc40c412-disk0
scp /etc/zones/d8944236-9d33-40ce-a112-5445cc40c412.xml root@10.0.0.3:/etc/zones/

On the Target SmartOS Node:

echo 'd8944236-9d33-40ce-a112-5445cc40c412:installed:/zones/d8944236-9d33-40ce-a112-5445cc40c412:d8944236-9d33-40ce-a112-5445cc40c412' >> /etc/zones/index
vmadm boot d8944236-9d33-40ce-a112-5445cc40c412
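
To shorten downtime, the full @migration send of the disk dataset above can be done while the VM is still running; after stopping it, only the delta needs to go over. A sketch reusing the dataset names above (the @sync snapshot name is an example):

vmadm stop d8944236-9d33-40ce-a112-5445cc40c412
zfs snapshot zones/d8944236-9d33-40ce-a112-5445cc40c412-disk0@sync
zfs send -i @migration zones/d8944236-9d33-40ce-a112-5445cc40c412-disk0@sync | ssh root@10.0.0.3 zfs recv zones/d8944236-9d33-40ce-a112-5445cc40c412-disk0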

 

Non-Global Zone:

It's simple:

[root@10.0.0.2]# vmadm send 1a666e33-f4ca-4c4e-a4c6-a47dd3d78096 | ssh 10.0.0.3 vmadm receive

Successfully sent VM 1a666e33-f4ca-4c4e-a4c6-a47dd3d78096
Successfully received VM 1a666e33-f4ca-4c4e-a4c6-a47dd3d78096
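
To confirm the zone landed on the target, it can be looked up there, e.g.:

[root@10.0.0.3]# vmadm list | grep 1a666e33-f4ca-4c4e-a4c6-a47dd3d78096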