cloner: Big API rework
* Centralize lots of disk building
* Open code virt-clone specific behavior at the source
* Drop a lot of properties
* Move most testing to test_cli.py
* Generally a ton of cleanup

The virt-manager clone wizard has not been converted yet, so it is totally broken after this commit.

Signed-off-by: Cole Robinson <crobinso@redhat.com>
parent c0d1e76941
commit f23a27639f
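For orientation, the reworked Cloner flow, as exercised by the new test_clone.py code further down in this diff, looks roughly like the sketch below. This is a reconstruction assembled from the added test lines, not documented API: the fixture path, the test-driver connection helper, and the meaning of the boolean arguments to set_clone_path() are assumptions taken from the test.

```python
# Minimal sketch of the reworked Cloner flow, pieced together from the new
# test_clone.py code in this commit. The fixture path and the booleans passed
# to set_clone_path() are assumptions, not authoritative API documentation.
from virtinst import Cloner
from tests import utils

conn = utils.URIs.open_testdriver_cached()   # libvirt test-driver connection (test helper)
xml = open("clone-disk.xml").read()          # XML of the guest to clone (assumed fixture path)

cloner = Cloner(conn, src_xml=xml)           # source guest is now passed at construction time

# Per-disk clone decisions now live on "diskinfo" objects returned by the Cloner
diskinfos = cloner.get_diskinfos_to_clone()  # one entry per disk that needs copying
diskinfos[0].set_clone_path("/tmp/newclone1.img", True, False)
diskinfos[1].set_clone_path("/tmp/newclone2.img", True, False)

cloner.prepare()                             # build the clone XML and storage plan
cloner.start_duplicate(None)                 # define the clone and copy disk contents
```

Compare this with the property-driven flow being removed later in the diff (original_xml, clone_name, clone_paths, setup_original()/setup_clone()).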
@ -1,5 +1,5 @@
|
|||
<domain type='test'>
|
||||
<name>clone-empty</name>
|
||||
<name>empty-clone5</name>
|
||||
<uuid>4a64cc71-7272-2fd0-2323-3050941ea3c3</uuid>
|
||||
<memory>8388608</memory>
|
||||
<currentMemory>2097152</currentMemory>
|
|
@ -1,6 +1,6 @@
|
|||
<domain type="kvm">
|
||||
<name>clone-new</name>
|
||||
<uuid>12345678-1234-1234-1234-123456789012</uuid>
|
||||
<name>clone-orig-clone</name>
|
||||
<uuid>00000000-1111-2222-3333-444444444444</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
|
@ -8,7 +8,7 @@
|
|||
<type arch="i686" machine="pc">hvm</type>
|
||||
<boot dev="cdrom"/>
|
||||
<loader readonly="yes" type="pflash">/usr/share/ovmf/ovmf-efi.fd</loader>
|
||||
<nvram>/nvram/clone-new_VARS.fd</nvram>
|
||||
<nvram>/nvram/my-custom-path</nvram>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
|
@ -1,5 +1,5 @@
|
|||
<domain type="test">
|
||||
<name>clone-empty-clone</name>
|
||||
<name>empty-clone6</name>
|
||||
<uuid>00000000-1111-2222-3333-444444444444</uuid>
|
||||
<memory>8388608</memory>
|
||||
<currentMemory>2097152</currentMemory>
|
@ -0,0 +1,83 @@
|
|||
<domain type="test">
|
||||
<name>test</name>
|
||||
<uuid>00000000-1111-2222-3333-444444444444</uuid>
|
||||
<memory unit="KiB">409600</memory>
|
||||
<currentMemory unit="KiB">204800</currentMemory>
|
||||
<vcpu placement="static">5</vcpu>
|
||||
<os>
|
||||
<type arch="i686">hvm</type>
|
||||
<loader type="rom">/usr/lib/xen/boot/hvmloader</loader>
|
||||
<boot dev="hd"/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
<apic/>
|
||||
</features>
|
||||
<clock offset="utc"/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/lib/xen/bin/qemu-dm</emulator>
|
||||
<disk type="block" device="floppy">
|
||||
<driver type="vmdk"/>
|
||||
<source dev="/dev/disk-pool/diskvol1-clone"/>
|
||||
<target dev="fda" bus="fdc"/>
|
||||
<address type="drive" controller="0" bus="0" target="0" unit="0"/>
|
||||
</disk>
|
||||
<disk type="block" device="disk">
|
||||
<source dev="/dev/disk-pool/diskvol2"/>
|
||||
<target dev="sda" bus="scsi"/>
|
||||
<readonly/>
|
||||
<address type="drive" controller="0" bus="0" target="0" unit="0"/>
|
||||
</disk>
|
||||
<disk type="file" device="cdrom">
|
||||
<target dev="sdb" bus="scsi"/>
|
||||
<readonly/>
|
||||
<address type="drive" controller="0" bus="0" target="0" unit="1"/>
|
||||
</disk>
|
||||
<disk type="block" device="disk">
|
||||
<driver type="qcow2"/>
|
||||
<source dev="/dev/default-pool/collidevol1.img"/>
|
||||
<target dev="sdc" bus="scsi"/>
|
||||
<shareable/>
|
||||
<address type="drive" controller="0" bus="0" target="0" unit="2"/>
|
||||
</disk>
|
||||
<disk type="file" device="disk">
|
||||
<source file="/dev/default-pool/default-vol-clone"/>
|
||||
<target dev="hda" bus="ide"/>
|
||||
<address type="drive" controller="0" bus="0" target="0" unit="0"/>
|
||||
</disk>
|
||||
<disk type="file" device="disk">
|
||||
<source file="/dev/default-pool/testvol9-clone.img"/>
|
||||
<target dev="hdb" bus="ide"/>
|
||||
<address type="drive" controller="0" bus="0" target="0" unit="1"/>
|
||||
</disk>
|
||||
<controller type="scsi" index="0"/>
|
||||
<controller type="fdc" index="0"/>
|
||||
<controller type="ide" index="0"/>
|
||||
<controller type="virtio-serial" index="0"/>
|
||||
<interface type="network">
|
||||
<mac address="00:11:22:33:44:55"/>
|
||||
<source network="default"/>
|
||||
<model type="e1000"/>
|
||||
</interface>
|
||||
<interface type="user">
|
||||
<mac address="00:11:22:33:44:55"/>
|
||||
</interface>
|
||||
<parallel type="file">
|
||||
<source path="/tmp/foo.log"/>
|
||||
<target port="0"/>
|
||||
</parallel>
|
||||
<channel type="unix">
|
||||
<source mode="bind"/>
|
||||
<target type="virtio" name="org.qemu.guest_agent.0"/>
|
||||
</channel>
|
||||
<graphics type="vnc" port="-1" listen="127.0.0.1">
|
||||
<listen type="address" address="127.0.0.1"/>
|
||||
</graphics>
|
||||
<video>
|
||||
<model type="cirrus" vram="16384" heads="1" primary="yes"/>
|
||||
</video>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,35 +0,0 @@
|
|||
<domain type='kvm'>
|
||||
<name>clone-orig</name>
|
||||
<uuid>aaa3ae22-fed2-bfbd-ac02-3bea3bcfad82</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch='i686' machine='pc'>hvm</type>
|
||||
<boot dev='cdrom'/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset='utc'/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
<channel type='unix'>
|
||||
<source mode='bind' path='/tmp/guestfwd'/>
|
||||
<target type='guestfwd' address='10.0.0.1' port='1234'/>
|
||||
</channel>
|
||||
<channel type='unix'>
|
||||
<source mode='bind' path='/var/lib/libvirt/qemu/channel/target/domain-2-generic/org.qemu.guest_agent.0'/>
|
||||
<target type='virtio' name='org.qemu.guest_agent.0'/>
|
||||
</channel>
|
||||
<channel type='unix'>
|
||||
<target type='virtio' name='org.qemu.guest_agent.0'/>
|
||||
</channel>
|
||||
<channel type='unix'>
|
||||
<target type='virtio'/>
|
||||
</channel>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,35 +0,0 @@
|
|||
<domain type="kvm">
|
||||
<name>clone-new</name>
|
||||
<uuid>12345678-1234-1234-1234-123456789012</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch="i686" machine="pc">hvm</type>
|
||||
<boot dev="cdrom"/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset="utc"/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
<channel type="unix">
|
||||
<source mode="bind" path="/tmp/guestfwd"/>
|
||||
<target type="guestfwd" address="10.0.0.1" port="1234"/>
|
||||
</channel>
|
||||
<channel type="unix">
|
||||
<source mode="bind"/>
|
||||
<target type="virtio" name="org.qemu.guest_agent.0"/>
|
||||
</channel>
|
||||
<channel type="unix">
|
||||
<target type="virtio" name="org.qemu.guest_agent.0"/>
|
||||
</channel>
|
||||
<channel type="unix">
|
||||
<target type="virtio"/>
|
||||
</channel>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,22 +0,0 @@
|
|||
<volume>
|
||||
<name>new1.img</name>
|
||||
<capacity>1000000</capacity>
|
||||
<allocation>50000</allocation>
|
||||
<target>
|
||||
<format type="qcow2"/>
|
||||
<features>
|
||||
<lazy_refcounts/>
|
||||
</features>
|
||||
</target>
|
||||
</volume>
|
||||
<volume>
|
||||
<name>new2.img</name>
|
||||
<capacity>1000000</capacity>
|
||||
<allocation>50000</allocation>
|
||||
<target>
|
||||
<format type="qcow2"/>
|
||||
<features>
|
||||
<lazy_refcounts/>
|
||||
</features>
|
||||
</target>
|
||||
</volume>
|
|
@ -1,39 +0,0 @@
|
|||
<domain type='kvm'>
|
||||
<name>clone-orig</name>
|
||||
<uuid>aaa3ae22-fed2-bfbd-ac02-3bea3bcfad82</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch='i686' machine='pc'>hvm</type>
|
||||
<boot dev='cdrom'/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset='utc'/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
<disk type='file' device='disk'>
|
||||
<source file='/dev/default-pool/testvol1.img'/>
|
||||
<target dev='hda' bus='ide'/>
|
||||
</disk>
|
||||
<disk type='file' device='disk'>
|
||||
<source file='/dev/default-pool/testvol2.img'/>
|
||||
<target dev='hdb' bus='ide'/>
|
||||
</disk>
|
||||
<interface type='network'>
|
||||
<mac address='52:54:00:6c:a0:cb'/>
|
||||
<source network='test1'/>
|
||||
</interface>
|
||||
<interface type='network'>
|
||||
<mac address='52:54:00:6c:bb:ca'/>
|
||||
<source network='test2'/>
|
||||
</interface>
|
||||
<input type='mouse' bus='ps2'/>
|
||||
<graphics type='vnc' port='-1' autoport='yes' listen='127.0.0.1'/>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,39 +0,0 @@
|
|||
<domain type="kvm">
|
||||
<name>clone-new</name>
|
||||
<uuid>12345678-1234-1234-1234-123456789012</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch="i686" machine="pc">hvm</type>
|
||||
<boot dev="cdrom"/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset="utc"/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
<disk type="file" device="disk">
|
||||
<source file="/dev/cross-pool/new1.img"/>
|
||||
<target dev="hda" bus="ide"/>
|
||||
</disk>
|
||||
<disk type="file" device="disk">
|
||||
<source file="/dev/default-pool/new2.img"/>
|
||||
<target dev="hdb" bus="ide"/>
|
||||
</disk>
|
||||
<interface type="network">
|
||||
<mac address="22:23:45:67:89:00"/>
|
||||
<source network="test1"/>
|
||||
</interface>
|
||||
<interface type="network">
|
||||
<mac address="22:23:45:67:89:01"/>
|
||||
<source network="test2"/>
|
||||
</interface>
|
||||
<input type="mouse" bus="ps2"/>
|
||||
<graphics type="vnc" port="-1" autoport="yes" listen="127.0.0.1"/>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,46 +0,0 @@
|
|||
<domain type='kvm'>
|
||||
<name>clone-orig</name>
|
||||
<uuid>aaa3ae22-fed2-bfbd-ac02-3bea3bcfad82</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch='i686' machine='pc'>hvm</type>
|
||||
<boot dev='cdrom'/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset='utc'/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
<disk type='file' device='disk'>
|
||||
<source file='/tmp/virtinst-test1.img'/>
|
||||
<target dev='hda' bus='ide'/>
|
||||
</disk>
|
||||
<disk type='file' device='floppy'>
|
||||
<target dev='fdb' bus='fdc'/>
|
||||
</disk>
|
||||
<disk type='block' device='cdrom'>
|
||||
<target dev='sda' bus='scsi'/>
|
||||
<readonly/>
|
||||
</disk>
|
||||
<disk type='file' device='disk'>
|
||||
<source file='/tmp/virtinst-test2.img'/>
|
||||
<target dev='sdb' bus='scsi'/>
|
||||
</disk>
|
||||
<interface type='network'>
|
||||
<mac address='52:54:00:6c:a0:cb'/>
|
||||
<source network='test1'/>
|
||||
</interface>
|
||||
<interface type='network'>
|
||||
<mac address='52:54:00:6c:bb:ca'/>
|
||||
<source network='test2'/>
|
||||
</interface>
|
||||
<input type='mouse' bus='ps2'/>
|
||||
<graphics type='vnc' port='-1' autoport='yes' listen='127.0.0.1'/>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,46 +0,0 @@
|
|||
<domain type="kvm">
|
||||
<name>clone-new</name>
|
||||
<uuid>12345678-1234-1234-1234-123456789012</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch="i686" machine="pc">hvm</type>
|
||||
<boot dev="cdrom"/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset="utc"/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
<disk type="block" device="disk">
|
||||
<target dev="hda" bus="ide"/>
|
||||
<source dev="/dev/disk-pool/disk-vol1"/>
|
||||
</disk>
|
||||
<disk type="file" device="floppy">
|
||||
<target dev="fdb" bus="fdc"/>
|
||||
</disk>
|
||||
<disk type="block" device="cdrom">
|
||||
<target dev="sda" bus="scsi"/>
|
||||
<readonly/>
|
||||
</disk>
|
||||
<disk type="file" device="disk">
|
||||
<source file="/tmp/clone2.img"/>
|
||||
<target dev="sdb" bus="scsi"/>
|
||||
</disk>
|
||||
<interface type="network">
|
||||
<mac address="22:23:45:67:89:00"/>
|
||||
<source network="test1"/>
|
||||
</interface>
|
||||
<interface type="network">
|
||||
<mac address="22:23:45:67:89:01"/>
|
||||
<source network="test2"/>
|
||||
</interface>
|
||||
<input type="mouse" bus="ps2"/>
|
||||
<graphics type="vnc" port="-1" autoport="yes" listen="127.0.0.1"/>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,46 +0,0 @@
|
|||
<domain type='kvm'>
|
||||
<name>clone-orig</name>
|
||||
<uuid>aaa3ae22-fed2-bfbd-ac02-3bea3bcfad82</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch='i686' machine='pc'>hvm</type>
|
||||
<boot dev='cdrom'/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset='utc'/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
<disk type='file' device='disk'>
|
||||
<source file='/dev/default-pool/default-vol'/>
|
||||
<target dev='hda' bus='ide'/>
|
||||
</disk>
|
||||
<disk type='file' device='floppy'>
|
||||
<target dev='fdb' bus='fdc'/>
|
||||
</disk>
|
||||
<disk type='block' device='cdrom'>
|
||||
<target dev='sda' bus='scsi'/>
|
||||
<readonly/>
|
||||
</disk>
|
||||
<disk type='file' device='disk'>
|
||||
<source file='/tmp/virtinst-test2.img'/>
|
||||
<target dev='sdb' bus='scsi'/>
|
||||
</disk>
|
||||
<interface type='network'>
|
||||
<mac address='52:54:00:6c:a0:cb'/>
|
||||
<source network='test1'/>
|
||||
</interface>
|
||||
<interface type='network'>
|
||||
<mac address='52:54:00:6c:bb:ca'/>
|
||||
<source network='test2'/>
|
||||
</interface>
|
||||
<input type='mouse' bus='ps2'/>
|
||||
<graphics type='vnc' port='-1' autoport='yes' listen='127.0.0.1'/>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,46 +0,0 @@
|
|||
<domain type="kvm">
|
||||
<name>clone-new</name>
|
||||
<uuid>12345678-1234-1234-1234-123456789012</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch="i686" machine="pc">hvm</type>
|
||||
<boot dev="cdrom"/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset="utc"/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
<disk type="file" device="disk">
|
||||
<source file="/dev/default-pool/1234.img"/>
|
||||
<target dev="hda" bus="ide"/>
|
||||
</disk>
|
||||
<disk type="file" device="floppy">
|
||||
<target dev="fdb" bus="fdc"/>
|
||||
</disk>
|
||||
<disk type="block" device="cdrom">
|
||||
<target dev="sda" bus="scsi"/>
|
||||
<readonly/>
|
||||
</disk>
|
||||
<disk type="file" device="disk">
|
||||
<source file="/clone2.img"/>
|
||||
<target dev="sdb" bus="scsi"/>
|
||||
</disk>
|
||||
<interface type="network">
|
||||
<mac address="22:23:45:67:89:00"/>
|
||||
<source network="test1"/>
|
||||
</interface>
|
||||
<interface type="network">
|
||||
<mac address="22:23:45:67:89:01"/>
|
||||
<source network="test2"/>
|
||||
</interface>
|
||||
<input type="mouse" bus="ps2"/>
|
||||
<graphics type="vnc" port="-1" autoport="yes" listen="127.0.0.1"/>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,29 +0,0 @@
|
|||
<domain type='xen'>
|
||||
<name>test-full-clone</name>
|
||||
<currentMemory>204800</currentMemory>
|
||||
<memory>409600</memory>
|
||||
<uuid>abcd5678-aaaa-1234-1234-12345678FFFF</uuid>
|
||||
<os>
|
||||
<type arch='i686'>hvm</type>
|
||||
<loader>/usr/lib/xen/boot/hvmloader</loader>
|
||||
<boot dev='hd'/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/><apic/>
|
||||
</features>
|
||||
<clock offset="utc"/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<vcpu>5</vcpu>
|
||||
<devices>
|
||||
<emulator>/usr/lib/xen/bin/qemu-dm</emulator>
|
||||
<disk type='block' device='disk'>
|
||||
<source dev='/full-pool/testvol1.img'/>
|
||||
<target dev='hda' bus='ide'/>
|
||||
</disk>
|
||||
<interface type='user'>
|
||||
<mac address='22:11:11:11:11:11'/>
|
||||
</interface>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,43 +0,0 @@
|
|||
<domain type='kvm'>
|
||||
<name>clone-orig</name>
|
||||
<uuid>aaa3ae22-fed2-bfbd-ac02-3bea3bcfad82</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<title>footitle</title>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch='i686' machine='pc'>hvm</type>
|
||||
<boot dev='cdrom'/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset='utc'/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
<disk type='file' device='disk'>
|
||||
<source file='/tmp/virtinst-test1.img'/>
|
||||
<target dev='hda' bus='ide'/>
|
||||
</disk>
|
||||
<disk type='block' device='disk'>
|
||||
<source dev='/dev/null'/>
|
||||
<target dev='hdb' bus='ide'/>
|
||||
</disk>
|
||||
<interface type='network'>
|
||||
<mac address='52:54:00:6c:a0:cb'/>
|
||||
<source network='test1'/>
|
||||
<target dev='vnet0'/>
|
||||
</interface>
|
||||
<interface type='bridge'>
|
||||
<mac address='52:54:00:6c:bb:ca'/>
|
||||
<source bridge='br0'/>
|
||||
<target dev='my_manual_dev'/>
|
||||
</interface>
|
||||
<input type='mouse' bus='ps2'/>
|
||||
<graphics type='vnc' port='-1' autoport='yes' listen='127.0.0.1'/>
|
||||
<graphics type='vnc' port='5905'/>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,40 +0,0 @@
|
|||
<domain type="kvm">
|
||||
<name>clone-new</name>
|
||||
<uuid>12345678-1234-1234-1234-123456789012</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch="i686" machine="pc">hvm</type>
|
||||
<boot dev="cdrom"/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset="utc"/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
<disk type="block" device="disk">
|
||||
<target dev="hda" bus="ide"/>
|
||||
<source dev="/dev/disk-pool/disk-vol1"/>
|
||||
</disk>
|
||||
<disk type="file" device="disk">
|
||||
<source file="/tmp/clone2.img"/>
|
||||
<target dev="hdb" bus="ide"/>
|
||||
</disk>
|
||||
<interface type="network">
|
||||
<mac address="22:23:45:67:89:00"/>
|
||||
<source network="test1"/>
|
||||
</interface>
|
||||
<interface type="bridge">
|
||||
<mac address="22:23:45:67:89:01"/>
|
||||
<source bridge="br0"/>
|
||||
</interface>
|
||||
<input type="mouse" bus="ps2"/>
|
||||
<graphics type="vnc" port="-1" autoport="yes" listen="127.0.0.1"/>
|
||||
<graphics type="vnc" port="-1"/>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,23 +0,0 @@
|
|||
<domain type='kvm'>
|
||||
<name>clone-orig</name>
|
||||
<uuid>aaa3ae22-fed2-bfbd-ac02-3bea3bcfad82</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch='i686' machine='pc'>hvm</type>
|
||||
<boot dev='cdrom'/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset='utc'/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
<input type='mouse' bus='ps2'/>
|
||||
<graphics type='vnc' port='-1' autoport='yes' listen='127.0.0.1' passwd='foo'/>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,23 +0,0 @@
|
|||
<domain type="kvm">
|
||||
<name>clone-new</name>
|
||||
<uuid>12345678-1234-1234-1234-123456789012</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch="i686" machine="pc">hvm</type>
|
||||
<boot dev="cdrom"/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset="utc"/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
<input type="mouse" bus="ps2"/>
|
||||
<graphics type="vnc" port="-1" autoport="yes" listen="127.0.0.1" passwd="foo"/>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,40 +0,0 @@
|
|||
<domain type='kvm'>
|
||||
<name>clone-orig</name>
|
||||
<uuid>aaa3ae22-fed2-bfbd-ac02-3bea3bcfad82</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch='i686' machine='pc'>hvm</type>
|
||||
<boot dev='cdrom'/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset='utc'/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
<disk type='file' device='disk'>
|
||||
<driver name="qemu" type="vmdk"/>
|
||||
<source file='/dev/default-pool/testvol1.img'/>
|
||||
<target dev='hda' bus='ide'/>
|
||||
</disk>
|
||||
<disk type='block' device='disk'>
|
||||
<source dev='/dev/disk-pool/diskvol1'/>
|
||||
<target dev='hdb' bus='ide'/>
|
||||
</disk>
|
||||
<interface type='network'>
|
||||
<mac address='52:54:00:6c:a0:cb'/>
|
||||
<source network='test1'/>
|
||||
</interface>
|
||||
<interface type='network'>
|
||||
<mac address='52:54:00:6c:bb:ca'/>
|
||||
<source network='test2'/>
|
||||
</interface>
|
||||
<input type='mouse' bus='ps2'/>
|
||||
<graphics type='vnc' port='-1' autoport='yes' listen='127.0.0.1'/>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,40 +0,0 @@
|
|||
<domain type="kvm">
|
||||
<name>clone-new</name>
|
||||
<uuid>12345678-1234-1234-1234-123456789012</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch="i686" machine="pc">hvm</type>
|
||||
<boot dev="cdrom"/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset="utc"/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
<disk type="file" device="disk">
|
||||
<driver name="qemu" type="vmdk"/>
|
||||
<source file="/dev/default-pool/new1.img"/>
|
||||
<target dev="hda" bus="ide"/>
|
||||
</disk>
|
||||
<disk type="block" device="disk">
|
||||
<source dev="/dev/disk-pool/new2.img"/>
|
||||
<target dev="hdb" bus="ide"/>
|
||||
</disk>
|
||||
<interface type="network">
|
||||
<mac address="22:23:45:67:89:00"/>
|
||||
<source network="test1"/>
|
||||
</interface>
|
||||
<interface type="network">
|
||||
<mac address="22:23:45:67:89:01"/>
|
||||
<source network="test2"/>
|
||||
</interface>
|
||||
<input type="mouse" bus="ps2"/>
|
||||
<graphics type="vnc" port="-1" autoport="yes" listen="127.0.0.1"/>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,43 +0,0 @@
|
|||
<domain type='kvm'>
|
||||
<name>clone-orig</name>
|
||||
<uuid>aaa3ae22-fed2-bfbd-ac02-3bea3bcfad82</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch='i686' machine='pc'>hvm</type>
|
||||
<boot dev='cdrom'/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset='utc'/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
<disk type='file' device='disk'>
|
||||
<source file='/tmp/virtinst-test1.img'/>
|
||||
<target dev='hda' bus='ide'/>
|
||||
<readonly/>
|
||||
</disk>
|
||||
<disk type='file' device='floppy'>
|
||||
<target dev='fdb' bus='fdc'/>
|
||||
</disk>
|
||||
<disk type='block' device='cdrom'>
|
||||
<target dev='sda' bus='scsi'/>
|
||||
<readonly/>
|
||||
</disk>
|
||||
<interface type='network'>
|
||||
<mac address='52:54:00:6c:a0:cb'/>
|
||||
<source network='test1'/>
|
||||
</interface>
|
||||
<interface type='network'>
|
||||
<mac address='52:54:00:6c:bb:ca'/>
|
||||
<source network='test2'/>
|
||||
</interface>
|
||||
<input type='mouse' bus='ps2'/>
|
||||
<graphics type='vnc' port='-1' autoport='yes' listen='127.0.0.1'/>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,43 +0,0 @@
|
|||
<domain type="kvm">
|
||||
<name>clone-new</name>
|
||||
<uuid>12345678-1234-1234-1234-123456789012</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch="i686" machine="pc">hvm</type>
|
||||
<boot dev="cdrom"/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset="utc"/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
<disk type="file" device="disk">
|
||||
<source file="/tmp/virtinst-test1.img"/>
|
||||
<target dev="hda" bus="ide"/>
|
||||
<readonly/>
|
||||
</disk>
|
||||
<disk type="file" device="floppy">
|
||||
<target dev="fdb" bus="fdc"/>
|
||||
</disk>
|
||||
<disk type="block" device="cdrom">
|
||||
<target dev="sda" bus="scsi"/>
|
||||
<readonly/>
|
||||
</disk>
|
||||
<interface type="network">
|
||||
<mac address="22:23:45:67:89:00"/>
|
||||
<source network="test1"/>
|
||||
</interface>
|
||||
<interface type="network">
|
||||
<mac address="22:23:45:67:89:01"/>
|
||||
<source network="test2"/>
|
||||
</interface>
|
||||
<input type="mouse" bus="ps2"/>
|
||||
<graphics type="vnc" port="-1" autoport="yes" listen="127.0.0.1"/>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,18 +0,0 @@
|
|||
<domain type='test'>
|
||||
<name>clone-orig</name>
|
||||
<uuid>19618dc6-7895-956d-6056-8ebcd8061234</uuid>
|
||||
<memory>8388608</memory>
|
||||
<currentMemory>2097152</currentMemory>
|
||||
<vcpu>2</vcpu>
|
||||
<os>
|
||||
<type arch='i686'>hvm</type>
|
||||
<boot dev='hd'/>
|
||||
</os>
|
||||
<clock offset='utc'/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
</devices>
|
||||
</domain>
@ -1,17 +0,0 @@
|
|||
<domain type="test">
|
||||
<name>clone-new</name>
|
||||
<uuid>12345678-1234-1234-1234-123456789012</uuid>
|
||||
<memory>8388608</memory>
|
||||
<currentMemory>2097152</currentMemory>
|
||||
<vcpu>2</vcpu>
|
||||
<os>
|
||||
<type arch="i686">hvm</type>
|
||||
<boot dev="hd"/>
|
||||
</os>
|
||||
<clock offset="utc"/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,23 +0,0 @@
|
|||
<domain type='kvm'>
|
||||
<name>clone-orig</name>
|
||||
<uuid>aaa3ae22-fed2-bfbd-ac02-3bea3bcfad82</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch='i686' machine='pc'>hvm</type>
|
||||
<boot dev='cdrom'/>
|
||||
<loader readonly='yes' type='pflash'>/usr/share/ovmf/ovmf-efi.fd</loader>
|
||||
<nvram>/nvram/clone-orig_VARS.fd</nvram>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset='utc'/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,23 +0,0 @@
|
|||
<domain type="kvm">
|
||||
<name>clone-new</name>
|
||||
<uuid>12345678-1234-1234-1234-123456789012</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch="i686" machine="pc">hvm</type>
|
||||
<boot dev="cdrom"/>
|
||||
<loader readonly="yes" type="pflash">/usr/share/ovmf/ovmf-efi.fd</loader>
|
||||
<nvram>/nvram/clone-new_VARS.fd</nvram>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset="utc"/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,23 +0,0 @@
|
|||
<domain type='kvm'>
|
||||
<name>clone-orig</name>
|
||||
<uuid>aaa3ae22-fed2-bfbd-ac02-3bea3bcfad82</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch='i686' machine='pc'>hvm</type>
|
||||
<boot dev='cdrom'/>
|
||||
<loader readonly='yes' type='pflash'>/usr/share/ovmf/ovmf-efi.fd</loader>
|
||||
<nvram>/nvram/clone-orig-missing_VARS.fd</nvram>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset='utc'/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,23 +0,0 @@
|
|||
<domain type='kvm'>
|
||||
<name>clone-orig</name>
|
||||
<uuid>aaa3ae22-fed2-bfbd-ac02-3bea3bcfad82</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch='i686' machine='pc'>hvm</type>
|
||||
<boot dev='cdrom'/>
|
||||
<loader readonly='yes' type='pflash'>/usr/share/ovmf/ovmf-efi.fd</loader>
|
||||
<nvram>/nvram-newpool/clone-orig-vars.fd</nvram>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset='utc'/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,23 +0,0 @@
|
|||
<domain type="kvm">
|
||||
<name>clone-new</name>
|
||||
<uuid>12345678-1234-1234-1234-123456789012</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch="i686" machine="pc">hvm</type>
|
||||
<boot dev="cdrom"/>
|
||||
<loader readonly="yes" type="pflash">/usr/share/ovmf/ovmf-efi.fd</loader>
|
||||
<nvram>/nvram-newpool/clone-new_VARS.fd</nvram>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset="utc"/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,54 +0,0 @@
|
|||
<domain type='kvm'>
|
||||
<name>clone-orig</name>
|
||||
<uuid>aaa3ae22-fed2-bfbd-ac02-3bea3bcfad82</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch='i686' machine='pc'>hvm</type>
|
||||
<boot dev='cdrom'/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset='utc'/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
<disk type='file' device='disk'>
|
||||
<source file='/tmp/virtinst-test1.img'/>
|
||||
<target dev='hda' bus='ide'/>
|
||||
</disk>
|
||||
<disk type='file' device='disk'>
|
||||
<source file='/tmp/virtinst-test1.img'/>
|
||||
<target dev='hdb' bus='ide'/>
|
||||
<readonly/>
|
||||
</disk>
|
||||
<disk type='file' device='disk'>
|
||||
<source file='/tmp/virtinst-test2.img'/>
|
||||
<target dev='sda' bus='scsi'/>
|
||||
</disk>
|
||||
<disk type='block' device='cdrom'>
|
||||
<target dev='sdb' bus='scsi'/>
|
||||
<readonly/>
|
||||
</disk>
|
||||
<disk type='file' device='cdrom'>
|
||||
<source file='/tmp/virtinst-test2.img'/>
|
||||
<target dev='sdc' bus='scsi'/>
|
||||
<readonly/>
|
||||
<shareable/>
|
||||
</disk>
|
||||
<interface type='network'>
|
||||
<mac address='52:54:00:6c:a0:cb'/>
|
||||
<source network='test1'/>
|
||||
</interface>
|
||||
<interface type='network'>
|
||||
<mac address='52:54:00:6c:bb:ca'/>
|
||||
<source network='test2'/>
|
||||
</interface>
|
||||
<input type='mouse' bus='ps2'/>
|
||||
<graphics type='vnc' port='-1' autoport='yes' listen='127.0.0.1'/>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,54 +0,0 @@
|
|||
<domain type="kvm">
|
||||
<name>clone-new</name>
|
||||
<uuid>12345678-1234-1234-1234-123456789012</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch="i686" machine="pc">hvm</type>
|
||||
<boot dev="cdrom"/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset="utc"/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
<disk type="block" device="disk">
|
||||
<target dev="hda" bus="ide"/>
|
||||
<source dev="/dev/disk-pool/disk-vol1"/>
|
||||
</disk>
|
||||
<disk type="file" device="disk">
|
||||
<source file="/tmp/virtinst-test1.img"/>
|
||||
<target dev="hdb" bus="ide"/>
|
||||
<readonly/>
|
||||
</disk>
|
||||
<disk type="file" device="disk">
|
||||
<source file="/tmp/clone2.img"/>
|
||||
<target dev="sda" bus="scsi"/>
|
||||
</disk>
|
||||
<disk type="block" device="cdrom">
|
||||
<target dev="sdb" bus="scsi"/>
|
||||
<readonly/>
|
||||
</disk>
|
||||
<disk type="file" device="cdrom">
|
||||
<source file="/tmp/virtinst-test2.img"/>
|
||||
<target dev="sdc" bus="scsi"/>
|
||||
<readonly/>
|
||||
<shareable/>
|
||||
</disk>
|
||||
<interface type="network">
|
||||
<mac address="22:23:45:67:89:00"/>
|
||||
<source network="test1"/>
|
||||
</interface>
|
||||
<interface type="network">
|
||||
<mac address="22:23:45:67:89:01"/>
|
||||
<source network="test2"/>
|
||||
</interface>
|
||||
<input type="mouse" bus="ps2"/>
|
||||
<graphics type="vnc" port="-1" autoport="yes" listen="127.0.0.1"/>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,46 +0,0 @@
|
|||
<domain type='kvm'>
|
||||
<name>clone-orig</name>
|
||||
<uuid>aaa3ae22-fed2-bfbd-ac02-3bea3bcfad82</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch='i686' machine='pc'>hvm</type>
|
||||
<boot dev='cdrom'/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset='utc'/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
<disk type='file' device='disk'>
|
||||
<source file='/tmp/virtinst-test1.img'/>
|
||||
<target dev='hda' bus='ide'/>
|
||||
</disk>
|
||||
<disk type='file' device='floppy'>
|
||||
<target dev='fdb' bus='fdc'/>
|
||||
</disk>
|
||||
<disk type='block' device='cdrom'>
|
||||
<target dev='sda' bus='scsi'/>
|
||||
<readonly/>
|
||||
</disk>
|
||||
<disk type='file' device='disk'>
|
||||
<source file='/dev/default-pool/default-vol'/>
|
||||
<target dev='sdb' bus='scsi'/>
|
||||
</disk>
|
||||
<interface type='network'>
|
||||
<mac address='52:54:00:6c:a0:cb'/>
|
||||
<source network='test1'/>
|
||||
</interface>
|
||||
<interface type='network'>
|
||||
<mac address='52:54:00:6c:bb:ca'/>
|
||||
<source network='test2'/>
|
||||
</interface>
|
||||
<input type='mouse' bus='ps2'/>
|
||||
<graphics type='vnc' port='-1' autoport='yes' listen='127.0.0.1'/>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1,46 +0,0 @@
|
|||
<domain type="kvm">
|
||||
<name>clone-new</name>
|
||||
<uuid>12345678-1234-1234-1234-123456789012</uuid>
|
||||
<memory>262144</memory>
|
||||
<currentMemory>262144</currentMemory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch="i686" machine="pc">hvm</type>
|
||||
<boot dev="cdrom"/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset="utc"/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-kvm</emulator>
|
||||
<disk type="file" device="disk">
|
||||
<source file="/tmp/virtinst-test1.img"/>
|
||||
<target dev="hda" bus="ide"/>
|
||||
</disk>
|
||||
<disk type="file" device="floppy">
|
||||
<target dev="fdb" bus="fdc"/>
|
||||
</disk>
|
||||
<disk type="block" device="cdrom">
|
||||
<target dev="sda" bus="scsi"/>
|
||||
<readonly/>
|
||||
</disk>
|
||||
<disk type="file" device="disk">
|
||||
<source file="/dev/default-pool/1234.img"/>
|
||||
<target dev="sdb" bus="scsi"/>
|
||||
</disk>
|
||||
<interface type="network">
|
||||
<mac address="22:23:45:67:89:00"/>
|
||||
<source network="test1"/>
|
||||
</interface>
|
||||
<interface type="network">
|
||||
<mac address="22:23:45:67:89:01"/>
|
||||
<source network="test2"/>
|
||||
</interface>
|
||||
<input type="mouse" bus="ps2"/>
|
||||
<graphics type="vnc" port="-1" autoport="yes" listen="127.0.0.1"/>
|
||||
</devices>
|
||||
</domain>
|
|
@ -1298,13 +1298,14 @@ c.add_compare("--add-device --network default --os-variant http://fedoraproject.
|
|||
# virt-clone tests #
|
||||
####################
|
||||
|
||||
_CLONE_UNMANAGED = "--original-xml %s/clone-disk.xml" % XMLDIR
|
||||
_CLONE_MANAGED = "--original-xml %s/clone-disk-managed.xml" % XMLDIR
|
||||
_CLONE_NOEXIST = "--original-xml %s/clone-disk-noexist.xml" % XMLDIR
|
||||
_CLONE_NVRAM = "--original-xml %s/clone-nvram-auto.xml" % XMLDIR
|
||||
_CLONE_NVRAM_NEWPOOL = "--original-xml %s/clone-nvram-newpool.xml" % XMLDIR
|
||||
_CLONE_NVRAM_MISSING = "--original-xml %s/clone-nvram-missing.xml" % XMLDIR
|
||||
_CLONE_EMPTY = "--original-xml %s/clone-empty.xml" % XMLDIR
|
||||
_CLONEXMLDIR = XMLDIR + "/clone"
|
||||
_CLONE_UNMANAGED = "--original-xml %s/clone-disk.xml" % _CLONEXMLDIR
|
||||
_CLONE_MANAGED = "--original-xml %s/clone-disk-managed.xml" % _CLONEXMLDIR
|
||||
_CLONE_NOEXIST = "--original-xml %s/clone-disk-noexist.xml" % _CLONEXMLDIR
|
||||
_CLONE_NVRAM = "--original-xml %s/clone-nvram-auto.xml" % _CLONEXMLDIR
|
||||
_CLONE_NVRAM_NEWPOOL = "--original-xml %s/clone-nvram-newpool.xml" % _CLONEXMLDIR
|
||||
_CLONE_NVRAM_MISSING = "--original-xml %s/clone-nvram-missing.xml" % _CLONEXMLDIR
|
||||
_CLONE_EMPTY = "--original-xml %s/clone-empty.xml" % _CLONEXMLDIR
|
||||
|
||||
vclon = App("virt-clone")
|
||||
c = vclon.add_category("remote", "--connect %(URI-TEST-REMOTE)s")
|
||||
|
@ -1318,6 +1319,7 @@ c = vclon.add_category("misc", "")
|
|||
c.add_compare("--connect %(URI-KVM)s -o test-clone --auto-clone", "clone-auto1")
|
||||
c.add_compare("--connect %(URI-TEST-FULL)s -o test-clone-simple --name newvm --auto-clone", "clone-auto2")
|
||||
c.add_compare("--connect %(URI-KVM)s " + _CLONE_NVRAM + " --auto-clone", "clone-nvram") # hits a particular nvram code path
|
||||
c.add_compare("--connect %(URI-KVM)s " + _CLONE_NVRAM + " --auto-clone --nvram /nvram/my-custom-path", "clone-nvram-path") # hits a particular nvram code path
|
||||
c.add_compare("--connect %(URI-KVM)s " + _CLONE_NVRAM_NEWPOOL + " --auto-clone", "nvram-newpool") # hits a particular nvram code path
|
||||
c.add_compare("--connect %(URI-KVM)s " + _CLONE_NVRAM_MISSING + " --auto-clone", "nvram-missing") # hits a particular nvram code path
|
||||
c.add_compare("--connect %(URI-KVM)s -o test-clone -n test-newclone --mac 12:34:56:1A:B2:C3 --mac 12:34:56:1A:B7:C3 --uuid 12345678-12F4-1234-1234-123456789AFA --file /dev/disk-pool/newclone1.img --file /dev/default-pool/newclone2.img --skip-copy=hdb --force-copy=sdb --file /dev/default-pool/newclone3.img", "clone-manual")
|
||||
|
@ -1333,14 +1335,14 @@ c.add_invalid("-n clonetest " + _CLONE_UNMANAGED + " --auto-clone --mac 22:11:11
|
|||
c.add_invalid("--auto-clone") # Just the auto flag
|
||||
c.add_invalid(_CLONE_EMPTY + " --file foo") # Didn't specify new name
|
||||
c.add_invalid(_CLONE_EMPTY + " --auto-clone -n test") # new name raises error
|
||||
c.add_invalid("-o test --auto-clone", grep="shutoff") # VM is running, but --clone-running isn't passed
|
||||
c.add_invalid("-o test --auto-clone", grep="shutoff") # VM is running
|
||||
c.add_invalid("--connect %(URI-TEST-FULL)s -o test-clone-simple -n newvm --file %(EXISTIMG1)s") # Should complain about overwriting existing file
|
||||
c.add_invalid("--connect %(URI-TEST-REMOTE)s -o test-clone-simple --auto-clone --file /dev/default-pool/testvol9.img --check all=off", grep="Clone onto existing storage volume") # hit a specific error message
|
||||
c.add_invalid("--connect %(URI-TEST-FULL)s -o test-clone-full --auto-clone", grep="not enough free space") # catch failure of clone path setting
|
||||
|
||||
|
||||
c = vclon.add_category("general", "-n clonetest")
|
||||
c.add_valid("-o test --clone-running --auto-clone --replace") # Auto flag, no storage, --replace is redundant
|
||||
c.add_valid(_CLONE_EMPTY + " --auto-clone --replace") # --replace but it doesn't matter, should be safely ignored
|
||||
c.add_valid(_CLONE_EMPTY + " --file %(NEWCLONEIMG1)s --file %(NEWCLONEIMG2)s") # Nodisk, but with spurious files passed
|
||||
c.add_valid(_CLONE_EMPTY + " --file %(NEWCLONEIMG1)s --file %(NEWCLONEIMG2)s --prompt") # Working scenario w/ prompt shouldn't ask anything
|
||||
c.add_valid(_CLONE_UNMANAGED + " --file %(NEWCLONEIMG1)s --file %(NEWCLONEIMG2)s") # XML File with 2 disks
|
||||
|
@ -1350,18 +1352,17 @@ c.add_valid(_CLONE_UNMANAGED + " --file %(NEWCLONEIMG1)s --file %(NEWCLONEIMG2)s
|
|||
c.add_valid(_CLONE_UNMANAGED + " --file %(NEWCLONEIMG1)s --file %(NEWCLONEIMG2)s --force-copy=fda") # XML w/ disks, force copy a target with no media
|
||||
c.add_valid(_CLONE_MANAGED + " --file %(NEWIMG1)s") # XML w/ managed storage, specify managed path
|
||||
c.add_valid(_CLONE_MANAGED + " --file %(NEWIMG1)s --reflink") # XML w/ managed storage, specify managed path, use --reflink option
|
||||
c.add_valid(_CLONE_NOEXIST + " --file %(EXISTIMG1)s --preserve") # XML w/ managed storage, specify managed path across pools # Libvirt test driver doesn't support cloning across pools # XML w/ non-existent storage, with --preserve
|
||||
c.add_valid("--connect %(URI-TEST-FULL)s -o test-clone -n test --auto-clone --replace") # Overwriting existing running VM
|
||||
c.add_valid(_CLONE_NOEXIST + " --file %(EXISTIMG1)s --preserve") # XML w/ managed storage, specify managed path across pools
|
||||
c.add_compare("--connect %(URI-TEST-FULL)s -o test-clone -n test --auto-clone --replace", "replace") # Overwriting existing running VM
|
||||
c.add_valid(_CLONE_MANAGED + " --auto-clone --force-copy fda") # force copy empty floppy drive
|
||||
c.add_invalid(_CLONE_EMPTY + " foobar") # Positional arguments error
|
||||
c.add_invalid("-o idontexist") # Non-existent vm name
|
||||
c.add_invalid("-o idontexist --auto-clone") # Non-existent vm name with auto flag,
|
||||
c.add_invalid(_CLONE_EMPTY + " -n test") # Colliding new name
|
||||
c.add_invalid(_CLONE_UNMANAGED + "") # XML file with several disks, but none specified
|
||||
c.add_invalid(_CLONE_UNMANAGED + " --file virt-install --file %(EXISTIMG1)s") # XML w/ disks, overwriting existing files with no --preserve
|
||||
c.add_invalid(_CLONE_UNMANAGED + " --file %(NEWCLONEIMG1)s --file %(NEWCLONEIMG2)s --force-copy=hdc") # XML w/ disks, force copy but not enough disks passed
|
||||
c.add_invalid(_CLONE_MANAGED + " --file /tmp/clonevol") # XML w/ managed storage, specify unmanaged path (should fail)
|
||||
c.add_invalid(_CLONE_NOEXIST + " --file %(EXISTIMG1)s") # XML w/ non-existent storage, WITHOUT --preserve
|
||||
c.add_valid(_CLONE_MANAGED + " --auto-clone --force-copy fda") # force copy empty floppy drive
|
||||
c.add_invalid(_CLONE_UNMANAGED + " --file virt-install", grep="overwrite the existing path 'virt-install'") # XML w/ disks, overwriting existing files with no --preserve
|
||||
c.add_invalid(_CLONE_MANAGED + " --file /tmp/clonevol", grep="matching name 'default-vol'") # will attempt to clone across pools, which test driver doesn't support
|
||||
c.add_invalid(_CLONE_NOEXIST + " --auto-clone", grep="'/i/really/dont/exist' does not exist.") # XML w/ non-existent storage, WITHOUT --preserve
|
@ -3,209 +3,42 @@
|
|||
# This work is licensed under the GNU GPLv2 or later.
|
||||
# See the COPYING file in the top-level directory.
|
||||
|
||||
import unittest
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
from tests import utils
|
||||
|
||||
from virtinst import Cloner
|
||||
from virtinst import log
|
||||
|
||||
ORIG_NAME = "clone-orig"
|
||||
CLONE_NAME = "clone-new"
|
||||
|
||||
# Create some files to use as test images
|
||||
FILE1 = "/tmp/virtinst-test1.img"
|
||||
FILE2 = "/tmp/virtinst-test2.img"
|
||||
P1_VOL1 = "/dev/default-pool/testvol1.img"
|
||||
P1_VOL2 = "/dev/default-pool/testvol2.img"
|
||||
P2_VOL1 = "/dev/cross-pool/testvol1.img"
|
||||
P2_VOL2 = "/dev/cross-pool/testvol2.img"
|
||||
|
||||
POOL1 = "/dev/default-pool"
|
||||
POOL2 = "/dev/cross-pool"
|
||||
DISKPOOL = "/dev/disk-pool"
|
||||
|
||||
local_files = [FILE1, FILE2]
|
||||
|
||||
CLONEXML_DIR = os.path.join(utils.DATADIR, "clone")
|
||||
|
||||
|
||||
class TestClone(unittest.TestCase):
|
||||
CLI_XMLDIR = utils.DATADIR + "/cli/clone/"
|
||||
|
||||
def setUp(self):
|
||||
for f in local_files:
|
||||
open(f, "w").write("")
|
||||
|
||||
def tearDown(self):
|
||||
for f in local_files:
|
||||
os.unlink(f)
|
||||
|
||||
def _clone(self, filebase, disks=None, force_list=None,
|
||||
skip_list=None, compare=True, conn=None,
|
||||
clone_disks_file=None):
|
||||
"""Helper for comparing clone input/output from 2 xml files"""
|
||||
infile = os.path.join(CLONEXML_DIR, filebase + "-in.xml")
|
||||
in_content = open(infile).read()
|
||||
|
||||
if not conn:
|
||||
conn = utils.URIs.open_testdriver_cached()
|
||||
cloneobj = Cloner(conn)
|
||||
cloneobj.original_xml = in_content
|
||||
|
||||
force_list = force_list or []
|
||||
for force in force_list:
|
||||
cloneobj.force_target = force
|
||||
self.assertEqual(cloneobj.force_target, force_list)
|
||||
cloneobj.force_target = force_list
|
||||
self.assertEqual(cloneobj.force_target, force_list)
|
||||
|
||||
skip_list = skip_list or []
|
||||
for skip in skip_list:
|
||||
cloneobj.skip_target = skip
|
||||
self.assertEqual(cloneobj.skip_target, skip_list)
|
||||
cloneobj.skip_target = skip_list
|
||||
self.assertEqual(cloneobj.skip_target, skip_list)
|
||||
|
||||
cloneobj = self._default_clone_values(cloneobj, disks)
|
||||
|
||||
if compare:
|
||||
self._clone_compare(cloneobj, filebase,
|
||||
clone_disks_file=clone_disks_file)
|
||||
self._clone_define(filebase)
|
||||
else:
|
||||
cloneobj.setup_original()
|
||||
cloneobj.setup_clone()
|
||||
|
||||
def _default_clone_values(self, cloneobj, disks=None):
|
||||
"""Sets default values for the cloned VM."""
|
||||
cloneobj.clone_name = "clone-new"
|
||||
|
||||
uuid = "12345678-1234-1234-1234-123456789012"
|
||||
cloneobj.clone_uuid = uuid
|
||||
self.assertEqual(cloneobj.clone_uuid, uuid)
|
||||
|
||||
macs = ["22:23:45:67:89:00", "22:23:45:67:89:01"]
|
||||
cloneobj.clone_macs = macs
|
||||
self.assertEqual(cloneobj.clone_macs, macs)
|
||||
|
||||
if disks is None:
|
||||
disks = ["/dev/disk-pool/disk-vol1", "/tmp/clone2.img",
|
||||
"/clone3", "/tmp/clone4.img",
|
||||
"/tmp/clone5.img", None]
|
||||
|
||||
cloneobj.clone_paths = disks
|
||||
self.assertEqual(cloneobj.clone_paths, disks)
|
||||
return cloneobj
|
||||
|
||||
def _clone_compare(self, cloneobj, outbase, clone_disks_file=None):
|
||||
"""Helps compare output from passed clone instance with an xml file"""
|
||||
outfile = os.path.join(CLONEXML_DIR, outbase + "-out.xml")
|
||||
|
||||
cloneobj.setup_original()
|
||||
cloneobj.setup_clone()
|
||||
|
||||
utils.diff_compare(cloneobj.clone_xml, outfile)
|
||||
if clone_disks_file:
|
||||
xml_clone_disks = ""
|
||||
for i in cloneobj.clone_disks:
|
||||
xml_clone_disks += i.get_vol_install().get_xml()
|
||||
utils.diff_compare(xml_clone_disks, clone_disks_file)
|
||||
|
||||
def _clone_define(self, filebase):
|
||||
"""Take the valid output xml and attempt to define it on the
|
||||
connection to ensure we don't get any errors"""
|
||||
outfile = os.path.join(CLONEXML_DIR, filebase + "-out.xml")
|
||||
outxml = open(outfile).read()
|
||||
conn = utils.URIs.open_testdriver_cached()
|
||||
utils.test_create(conn, outxml)
|
||||
|
||||
def testRemoteNoStorage(self):
|
||||
"""Test remote clone where VM has no storage that needs cloning"""
|
||||
conn = utils.URIs.open_test_remote()
|
||||
self._clone("nostorage", conn=conn)
|
||||
self._clone("noclone-storage", conn=conn)
|
||||
|
||||
def testRemoteWithStorage(self):
|
||||
def test_clone_unmanaged():
|
||||
"""
|
||||
Test remote clone with storage needing cloning. Should fail,
|
||||
since libvirt has no storage clone api.
|
||||
Test that unmanaged storage duplication via the clone wizard
|
||||
actually copies data
|
||||
"""
|
||||
conn = utils.URIs.open_test_remote()
|
||||
disks = ["%s/1.img" % POOL1, "%s/2.img" % POOL1]
|
||||
try:
|
||||
self._clone("general-cfg", disks=disks, conn=conn)
|
||||
# We shouldn't succeed, so test fails
|
||||
raise AssertionError("Remote clone with storage passed "
|
||||
"when it shouldn't.")
|
||||
except (ValueError, RuntimeError) as e:
|
||||
# Exception expected
|
||||
log.debug("Received expected exception: %s", str(e))
|
||||
|
||||
def testCloneStorageManaged(self):
|
||||
disks = ["%s/new1.img" % POOL1, "%s/new2.img" % DISKPOOL]
|
||||
self._clone("managed-storage", disks=disks)
|
||||
|
||||
def testCloneStorageCrossPool(self):
|
||||
conn = utils.URIs.open_test_remote()
|
||||
clone_disks_file = os.path.join(
|
||||
CLONEXML_DIR, "cross-pool-disks-out.xml")
|
||||
disks = ["%s/new1.img" % POOL2, "%s/new2.img" % POOL1]
|
||||
self._clone("cross-pool", disks=disks,
|
||||
clone_disks_file=clone_disks_file, conn=conn)
|
||||
|
||||
def testCloneStorageForce(self):
|
||||
disks = ["/dev/default-pool/1234.img", None, "/clone2.img"]
|
||||
self._clone("force", disks=disks, force_list=["hda", "fdb", "sdb"])
|
||||
|
||||
def testCloneStorageSkip(self):
|
||||
disks = ["/dev/default-pool/1234.img", None, "/tmp/clone2.img"]
|
||||
skip_list = ["hda", "fdb"]
|
||||
self._clone("skip", disks=disks, skip_list=skip_list)
|
||||
|
||||
def testCloneFullPool(self):
|
||||
with self.assertRaises(Exception):
|
||||
self._clone("fullpool",
|
||||
disks=["/full-pool/test.img"], compare=False)
|
||||
|
||||
def testCloneNvramAuto(self):
|
||||
self._clone("nvram-auto")
|
||||
|
||||
def testCloneNvramNewpool(self):
|
||||
self._clone("nvram-newpool")
|
||||
|
||||
def testCloneNvramMissing(self):
|
||||
self._clone("nvram-missing")
|
||||
|
||||
def testCloneGraphicsPassword(self):
|
||||
self._clone("graphics-password")
|
||||
|
||||
def testCloneChannelSource(self):
|
||||
self._clone("channel-source")
|
||||
|
||||
def testCloneMisc(self):
|
||||
xmlpath = CLI_XMLDIR + "clone-disk.xml"
|
||||
conn = utils.URIs.open_testdriver_cached()
|
||||
xml = open(xmlpath).read()
|
||||
|
||||
with self.assertRaises(RuntimeError) as err:
|
||||
cloner = Cloner(conn)
|
||||
# Add this bit here for coverage testing
|
||||
cloner.clone_xml = None
|
||||
cloner.setup_original()
|
||||
self.assertTrue("Original guest name or XML" in str(err.exception))
|
||||
tmp1 = tempfile.NamedTemporaryFile()
|
||||
tmp2 = tempfile.NamedTemporaryFile()
|
||||
inp1 = os.path.abspath(__file__)
|
||||
inp2 = xmlpath
|
||||
|
||||
with self.assertRaises(RuntimeError) as err:
|
||||
cloner = Cloner(conn)
|
||||
cloner.original_guest = "test-snapshots"
|
||||
cloner.setup_original()
|
||||
self.assertTrue("must be shutoff" in str(err.exception))
|
||||
xml = xml.replace("/tmp/__virtinst_cli_exist1.img", inp1)
|
||||
xml = xml.replace("/tmp/__virtinst_cli_exist2.img", inp2)
|
||||
cloner = Cloner(conn, src_xml=xml)
|
||||
|
||||
with self.assertRaises(ValueError) as err:
|
||||
cloner = Cloner(conn)
|
||||
cloner.original_guest = "test-clone-simple"
|
||||
cloner.setup_original()
|
||||
cloner.setup_clone()
|
||||
self.assertTrue("More disks to clone" in str(err.exception))
|
||||
diskinfos = cloner.get_diskinfos_to_clone()
|
||||
assert len(diskinfos) == 2
|
||||
diskinfos[0].set_clone_path(tmp1.name, True, False)
|
||||
diskinfos[1].set_clone_path(tmp2.name, True, False)
|
||||
|
||||
cloner = Cloner(conn)
|
||||
self.assertEqual(
|
||||
cloner.generate_clone_name("test-clone5"), "test-clone6")
|
||||
cloner.prepare()
|
||||
cloner.start_duplicate(None)
|
||||
|
||||
assert open(tmp1.name).read() == open(inp1).read()
|
||||
assert open(tmp2.name).read() == open(inp2).read()
|
||||
@ -315,3 +315,15 @@ class TestXMLMisc(unittest.TestCase):
|
|||
newdisk.path = newdisk.path
|
||||
newdisk.set_local_disk_to_clone(srcdisk, True)
|
||||
newdisk.build_storage(None)
|
||||
|
||||
newdisk = virtinst.DeviceDisk(conn)
|
||||
newdisk.type = "block"
|
||||
newdisk.path = "/dev/foo/idontexist"
|
||||
assert newdisk.get_size() == 0
|
||||
|
||||
conn = utils.URIs.open_testdriver_cached()
|
||||
volpath = "/dev/default-pool/test-clone-simple.img"
|
||||
assert virtinst.DeviceDisk.path_definitely_exists(conn, volpath)
|
||||
disk = virtinst.DeviceDisk(conn)
|
||||
disk.path = volpath
|
||||
assert disk.get_size()
|
||||
|
|
|
@ -14,7 +14,6 @@ import libvirt
|
|||
|
||||
from . import generatename
|
||||
from . import progress
|
||||
from . import xmlutil
|
||||
from .guest import Guest
|
||||
from .devices import DeviceInterface
|
||||
from .devices import DeviceDisk
|
||||
|
@ -49,449 +48,37 @@ def _replace_vm(conn, name):
|
|||
})
|
||||
|
||||
|
||||
class Cloner(object):
|
||||
|
||||
# Reasons why we don't default to cloning.
|
||||
CLONE_POLICY_NO_READONLY = 1
|
||||
CLONE_POLICY_NO_SHAREABLE = 2
|
||||
CLONE_POLICY_NO_EMPTYMEDIA = 3
|
||||
|
||||
def __init__(self, conn):
|
||||
self.conn = conn
|
||||
|
||||
# original guest name or uuid
|
||||
self._original_guest = None
|
||||
self.original_dom = None
|
||||
self._original_disks = []
|
||||
self._original_xml = None
|
||||
self._guest = None
|
||||
|
||||
# clone guest
|
||||
self._clone_name = None
|
||||
self._clone_disks = []
|
||||
self._clone_macs = []
|
||||
self._clone_uuid = None
|
||||
self._clone_sparse = True
|
||||
self._clone_xml = None
|
||||
self.clone_nvram = None
|
||||
self._nvram_disk = None
|
||||
|
||||
self._force_target = []
|
||||
self._skip_target = []
|
||||
self._preserve = True
|
||||
self._clone_running = False
|
||||
self._replace = False
|
||||
self._reflink = False
|
||||
|
||||
# Default clone policy for back compat: don't clone readonly,
|
||||
# shareable, or empty disks
|
||||
self._clone_policy = []
|
||||
self.clone_policy = [self.CLONE_POLICY_NO_READONLY,
|
||||
self.CLONE_POLICY_NO_SHAREABLE,
|
||||
self.CLONE_POLICY_NO_EMPTYMEDIA]
|
||||
|
||||
# Generate a random UUID at the start
|
||||
self.clone_uuid = Guest.generate_uuid(conn)
|
||||
|
||||
|
||||
##############
|
||||
# Properties #
|
||||
##############
|
||||
|
||||
# Original guest name
|
||||
def get_original_guest(self):
|
||||
return self._original_guest
|
||||
def set_original_guest(self, original_guest):
|
||||
if self._lookup_vm(original_guest):
|
||||
self._original_guest = original_guest
|
||||
original_guest = property(get_original_guest, set_original_guest)
|
||||
|
||||
# XML of the original guest
|
||||
def set_original_xml(self, val):
|
||||
self._original_xml = val
|
||||
self._original_guest = Guest(self.conn,
|
||||
parsexml=self._original_xml).name
|
||||
def get_original_xml(self):
|
||||
return self._original_xml
|
||||
original_xml = property(get_original_xml, set_original_xml)
|
||||
|
||||
# Name to use for the new guest clone
|
||||
def get_clone_name(self):
|
||||
return self._clone_name
|
||||
def set_clone_name(self, name):
|
||||
try:
|
||||
Guest.validate_name(self.conn, name,
|
||||
check_collision=not self.replace,
|
||||
validate=False)
|
||||
except ValueError as e:
|
||||
raise ValueError(_("Invalid name for new guest: %s") % e)
|
||||
|
||||
self._clone_name = name
|
||||
clone_name = property(get_clone_name, set_clone_name)
|
||||
|
||||
# UUID to use for the new guest clone
|
||||
def set_clone_uuid(self, uuid):
|
||||
self._clone_uuid = uuid
|
||||
def get_clone_uuid(self):
|
||||
return self._clone_uuid
|
||||
clone_uuid = property(get_clone_uuid, set_clone_uuid)
|
||||
|
||||
# Paths to use for the new disk locations
|
||||
def set_clone_paths(self, paths):
|
||||
disklist = []
|
||||
for path in xmlutil.listify(paths):
|
||||
try:
|
||||
device = DeviceDisk.DEVICE_DISK
|
||||
if not path:
|
||||
device = DeviceDisk.DEVICE_CDROM
|
||||
|
||||
disk = DeviceDisk(self.conn)
|
||||
disk.path = path
|
||||
disk.device = device
|
||||
|
||||
if (not self.preserve_dest_disks and
|
||||
disk.wants_storage_creation()):
|
||||
vol_install = DeviceDisk.build_vol_install(
|
||||
self.conn, os.path.basename(disk.path),
|
||||
disk.get_parent_pool(), .000001, False)
|
||||
disk.set_vol_install(vol_install)
|
||||
disk.validate()
|
||||
disklist.append(disk)
|
||||
except Exception as e:
|
||||
log.debug("Error setting clone path.", exc_info=True)
|
||||
raise ValueError(
|
||||
_("Could not use path '%(path)s' for cloning: %(error)s") % {
|
||||
"path": path,
|
||||
"error": str(e),
|
||||
})
|
||||
|
||||
self._clone_disks = disklist
|
||||
def get_clone_paths(self):
|
||||
return [d.path for d in self.clone_disks]
|
||||
clone_paths = property(get_clone_paths, set_clone_paths)
|
||||
|
||||
# DeviceDisk instances for the new disk paths
|
||||
@property
|
||||
def clone_disks(self):
|
||||
return self._clone_disks
|
||||
|
||||
# MAC address for the new guest clone
|
||||
def set_clone_macs(self, mac):
|
||||
self._clone_macs = xmlutil.listify(mac)
|
||||
def get_clone_macs(self):
|
||||
return self._clone_macs
|
||||
clone_macs = property(get_clone_macs, set_clone_macs)
|
||||
|
||||
# DeviceDisk instances of the original disks being cloned
|
||||
@property
|
||||
def original_disks(self):
|
||||
return self._original_disks
|
||||
|
||||
# Generated XML for the guest clone
|
||||
def get_clone_xml(self):
|
||||
return self._clone_xml
|
||||
def set_clone_xml(self, clone_xml):
|
||||
self._clone_xml = clone_xml
|
||||
clone_xml = property(get_clone_xml, set_clone_xml)
|
||||
|
||||
# Whether to attempt sparse allocation during cloning
|
||||
def get_clone_sparse(self):
|
||||
return self._clone_sparse
|
||||
def set_clone_sparse(self, flg):
|
||||
self._clone_sparse = flg
|
||||
clone_sparse = property(get_clone_sparse, set_clone_sparse)
|
||||
|
||||
# If true, preserve ALL original disk devices
|
||||
def get_preserve(self):
|
||||
return self._preserve
|
||||
def set_preserve(self, flg):
|
||||
self._preserve = flg
|
||||
preserve = property(get_preserve, set_preserve)
|
||||
|
||||
# If true, preserve ALL disk devices for the NEW guest.
|
||||
# This means no storage cloning.
|
||||
# This is a convenience accessor for 'not Cloner.preserve'
|
||||
@property
|
||||
def preserve_dest_disks(self):
|
||||
return not self.preserve
|
||||
|
||||
# List of disk targets that we force cloning despite
|
||||
# Cloner's recommendation
|
||||
def set_force_target(self, dev):
|
||||
if isinstance(dev, list):
|
||||
self._force_target = dev[:]
|
||||
else:
|
||||
self._force_target.append(dev)
|
||||
def get_force_target(self):
|
||||
return self._force_target
|
||||
force_target = property(get_force_target, set_force_target)
|
||||
|
||||
# List of disk targets that we skip cloning despite Cloner's
|
||||
# recommendation. This takes precedence over force_target.
|
||||
def set_skip_target(self, dev):
|
||||
if isinstance(dev, list):
|
||||
self._skip_target = dev[:]
|
||||
else:
|
||||
self._skip_target.append(dev)
|
||||
def get_skip_target(self):
|
||||
return self._skip_target
|
||||
skip_target = property(get_skip_target, set_skip_target)
|
||||
|
||||
# List of policy rules for determining which vm disks to clone.
|
||||
# See CLONE_POLICY_*
|
||||
def set_clone_policy(self, policy_list):
|
||||
self._clone_policy = policy_list
|
||||
def get_clone_policy(self):
|
||||
return self._clone_policy
|
||||
clone_policy = property(get_clone_policy, set_clone_policy)
|
||||
|
||||
# Allow cloning a running VM. If enabled, domain state is not
|
||||
# checked before cloning.
|
||||
def get_clone_running(self):
|
||||
return self._clone_running
|
||||
def set_clone_running(self, val):
|
||||
self._clone_running = bool(val)
|
||||
clone_running = property(get_clone_running, set_clone_running)
|
||||
|
||||
# If enabled, don't check for clone name collision, simply undefine
|
||||
# any conflicting guest.
|
||||
def _get_replace(self):
|
||||
return self._replace
|
||||
def _set_replace(self, val):
|
||||
self._replace = bool(val)
|
||||
replace = property(_get_replace, _set_replace)
|
||||
|
||||
# If true, use COW lightweight copy
|
||||
def _get_reflink(self):
|
||||
return self._reflink
|
||||
def _set_reflink(self, reflink):
|
||||
self._reflink = reflink
|
||||
reflink = property(_get_reflink, _set_reflink)
|
||||
|
||||
|
||||
######################
|
||||
# Functional methods #
|
||||
######################
|
||||
|
||||
def setup_original(self):
|
||||
def _generate_clone_name(conn, basename):
|
||||
"""
|
||||
Validate and setup all parameters needed for the original (cloned) VM
|
||||
If the orig name is "foo-clone", we don't want the clone to be
|
||||
"foo-clone-clone", we want "foo-clone1"
|
||||
"""
|
||||
log.debug("Validating original guest parameters")
|
||||
match = re.search("-clone[1-9]*$", basename)
|
||||
start_num = 1
|
||||
force_num = False
|
||||
if match:
|
||||
num_match = re.search("[1-9]+$", match.group())
|
||||
if num_match:
|
||||
start_num = int(str(num_match.group())) + 1
|
||||
force_num = True
|
||||
basename = basename.replace(match.group(), "")
|
||||
|
||||
if self.original_guest is None and self.original_xml is None:
|
||||
raise RuntimeError(_("Original guest name or XML is required."))
|
||||
def cb(n):
|
||||
return generatename.check_libvirt_collision(
|
||||
conn.lookupByName, n)
|
||||
basename = basename + "-clone"
|
||||
return generatename.generate_name(basename, cb,
|
||||
sep="", start_num=start_num, force_num=force_num)
|
||||
|
||||
if self.original_guest is not None and not self.original_xml:
|
||||
self.original_dom = self._lookup_vm(self.original_guest)
|
||||
flags = libvirt.VIR_DOMAIN_XML_SECURE
|
||||
self.original_xml = self.original_dom.XMLDesc(flags)
|
||||
|
||||
log.debug("Original XML:\n%s", self.original_xml)
|
||||
|
||||
self._guest = Guest(self.conn, parsexml=self.original_xml)
|
||||
self._guest.id = None
|
||||
|
||||
# Pull clonable storage info from the original xml
|
||||
self._original_disks = self._get_original_disks_info()
|
||||
|
||||
log.debug("Original paths: %s",
|
||||
[d.path for d in self.original_disks])
|
||||
log.debug("Original sizes: %s",
|
||||
[d.get_size() for d in self.original_disks])
|
||||
|
||||
if not self.clone_running and self.original_dom:
|
||||
status = self.original_dom.info()[0]
|
||||
if status not in [libvirt.VIR_DOMAIN_SHUTOFF]:
|
||||
raise RuntimeError(_("Domain to clone must be shutoff."))
|
||||
|
||||
def _setup_disk_clone_destination(self, orig_disk, clone_disk):
|
||||
def _generate_clone_disk_path(conn, origname, newname, origpath):
|
||||
"""
|
||||
Helper that validates the new path location
|
||||
Generate desired cloned disk path name, derived from the
|
||||
original path, original VM name, and proposed new VM name
|
||||
"""
|
||||
if self.preserve_dest_disks:
|
||||
return
|
||||
if origpath is None:
|
||||
return None
|
||||
|
||||
if clone_disk.get_vol_object():
|
||||
# Special case: non remote cloning of a guest using
|
||||
# managed block devices: fall back to local cloning if
|
||||
# we have permissions to do so. This validation check
|
||||
# caused a few bug reports in a short period of time,
|
||||
# so must be a common case.
|
||||
if (self.conn.is_remote() or
|
||||
clone_disk.type != clone_disk.TYPE_BLOCK or
|
||||
not orig_disk.path or
|
||||
not os.access(orig_disk.path, os.R_OK) or
|
||||
not clone_disk.path or
|
||||
not os.access(clone_disk.path, os.W_OK)):
|
||||
raise RuntimeError(
|
||||
_("Clone onto existing storage volume is not "
|
||||
"currently supported: '%s'") % clone_disk.path)
|
||||
|
||||
# Setup proper cloning inputs for the new virtual disks
|
||||
if (orig_disk.get_vol_object() and
|
||||
clone_disk.get_vol_install()):
|
||||
clone_vol_install = clone_disk.get_vol_install()
|
||||
|
||||
# Source and dest are managed. If they share the same pool,
|
||||
# replace vol_install with a CloneVolume instance, otherwise
|
||||
# simply set input_vol on the dest vol_install
|
||||
if (clone_vol_install.pool.name() ==
|
||||
orig_disk.get_parent_pool().name()):
|
||||
vol_install = StorageVolume(self.conn)
|
||||
vol_install.input_vol = orig_disk.get_vol_object()
|
||||
vol_install.sync_input_vol()
|
||||
vol_install.name = clone_vol_install.name
|
||||
else:
|
||||
# Cross pool cloning
|
||||
# Sync only the format of the image.
|
||||
clone_vol_install.input_vol = orig_disk.get_vol_object()
|
||||
vol_install = clone_vol_install
|
||||
vol_install.input_vol = orig_disk.get_vol_object()
|
||||
vol_install.sync_input_vol(only_format=True)
|
||||
|
||||
if not self.clone_sparse:
|
||||
vol_install.allocation = vol_install.capacity
|
||||
vol_install.reflink = self.reflink
|
||||
clone_disk.set_vol_install(vol_install)
|
||||
elif orig_disk.path:
|
||||
clone_disk.set_local_disk_to_clone(orig_disk, self.clone_sparse)
|
||||
|
||||
clone_disk.validate()
|
||||
|
||||
|
||||
def _prepare_nvram(self):
|
||||
if self.clone_nvram is None:
|
||||
nvram_dir = os.path.dirname(self._guest.os.nvram)
|
||||
self.clone_nvram = os.path.join(nvram_dir,
|
||||
"%s_VARS.fd" % self._clone_name)
|
||||
|
||||
old_nvram = DeviceDisk(self.conn)
|
||||
old_nvram.path = self._guest.os.nvram
|
||||
|
||||
nvram = DeviceDisk(self.conn)
|
||||
nvram.path = self.clone_nvram
|
||||
|
||||
if (not self.preserve_dest_disks and
|
||||
nvram.wants_storage_creation() and
|
||||
old_nvram.get_vol_object()):
|
||||
|
||||
nvram_install = DeviceDisk.build_vol_install(
|
||||
self.conn, os.path.basename(nvram.path),
|
||||
nvram.get_parent_pool(), nvram.get_size(), False)
|
||||
nvram_install.input_vol = old_nvram.get_vol_object()
|
||||
nvram_install.sync_input_vol(only_format=True)
|
||||
nvram_install.reflink = self.reflink
|
||||
nvram.set_vol_install(nvram_install)
|
||||
|
||||
nvram.validate()
|
||||
self._nvram_disk = nvram
|
||||
|
||||
self._guest.os.nvram = nvram.path
|
||||
|
||||
|
||||
def setup_clone(self):
|
||||
"""
|
||||
Validate and set up all parameters needed for the new (clone) VM
|
||||
"""
|
||||
log.debug("Validating clone parameters.")
|
||||
|
||||
self._clone_xml = self.original_xml
|
||||
|
||||
if len(self.clone_disks) < len(self.original_disks):
|
||||
raise ValueError(_("More disks to clone than new paths specified. "
|
||||
"(%(passed)d specified, %(need)d needed") %
|
||||
{"passed": len(self.clone_disks),
|
||||
"need": len(self.original_disks)})
|
||||
|
||||
log.debug("Clone paths: %s", [d.path for d in self.clone_disks])
|
||||
|
||||
self._guest.name = self._clone_name
|
||||
self._guest.uuid = self._clone_uuid
|
||||
self._guest.title = None
|
||||
|
||||
self._clone_macs.reverse()
|
||||
for dev in self._guest.devices.graphics:
|
||||
if dev.port and dev.port != -1:
|
||||
log.warning(_("Setting the graphics device port to autoport, "
|
||||
"in order to avoid conflicting."))
|
||||
dev.port = -1
|
||||
|
||||
clone_macs = self._clone_macs[:]
|
||||
for iface in self._guest.devices.interface:
|
||||
iface.target_dev = None
|
||||
|
||||
if clone_macs:
|
||||
mac = clone_macs.pop()
|
||||
else:
|
||||
mac = DeviceInterface.generate_mac(self.conn)
|
||||
iface.macaddr = mac
|
||||
|
||||
# Changing storage XML
|
||||
for i, orig_disk in enumerate(self._original_disks):
|
||||
clone_disk = self._clone_disks[i]
|
||||
|
||||
for disk in self._guest.devices.disk:
|
||||
if disk.target == orig_disk.target:
|
||||
xmldisk = disk
|
||||
|
||||
self._setup_disk_clone_destination(orig_disk, clone_disk)
|
||||
|
||||
# Change the XML
|
||||
xmldisk.path = None
|
||||
xmldisk.type = clone_disk.type
|
||||
xmldisk.driver_name = orig_disk.driver_name
|
||||
xmldisk.driver_type = orig_disk.driver_type
|
||||
xmldisk.path = clone_disk.path
|
||||
|
||||
# For guest agent channel, remove a path to generate a new one with
|
||||
# new guest name
|
||||
for channel in self._guest.devices.channel:
|
||||
if (channel.type == DeviceChannel.TYPE_UNIX and
|
||||
channel.target_name and channel.source.path and
|
||||
channel.target_name in channel.source.path):
|
||||
channel.source.path = None
|
||||
|
||||
if self._guest.os.nvram:
|
||||
self._prepare_nvram()
|
||||
|
||||
# Save altered clone xml
|
||||
self._clone_xml = self._guest.get_xml()
|
||||
log.debug("Clone guest xml is\n%s", self._clone_xml)
|
||||
|
||||
def start_duplicate(self, meter=None):
|
||||
"""
|
||||
Actually perform the duplication: cloning disks if needed and defining
|
||||
the new clone xml.
|
||||
"""
|
||||
log.debug("Starting duplicate.")
|
||||
meter = progress.ensure_meter(meter)
|
||||
|
||||
dom = None
|
||||
try:
|
||||
# Replace orig VM if required
|
||||
if self.replace:
|
||||
_replace_vm(self.conn, self.clone_name)
|
||||
|
||||
# Define domain early to catch any xml errors before duping storage
|
||||
dom = self.conn.defineXML(self.clone_xml)
|
||||
|
||||
if self.preserve:
|
||||
for dst_dev in self.clone_disks:
|
||||
dst_dev.build_storage(meter)
|
||||
if self._nvram_disk:
|
||||
self._nvram_disk.build_storage(meter)
|
||||
except Exception as e:
|
||||
log.debug("Duplicate failed: %s", str(e))
|
||||
if dom:
|
||||
dom.undefine()
|
||||
raise
|
||||
|
||||
log.debug("Duplicating finished.")
|
||||
|
||||
def generate_clone_disk_path(self, origpath, newname=None):
|
||||
origname = self.original_guest
|
||||
newname = newname or self.clone_name
|
||||
path = origpath
|
||||
suffix = ""
|
||||
|
||||
|
@@ -514,108 +101,403 @@ class Cloner(object):
|
|||
|
||||
clonebase = os.path.join(dirname, clonebase)
|
||||
def cb(p):
|
||||
return DeviceDisk.path_definitely_exists(self.conn, p)
|
||||
return DeviceDisk.path_definitely_exists(conn, p)
|
||||
return generatename.generate_name(clonebase, cb, suffix=suffix)
|
||||
|
||||
def generate_clone_name(self, basename=None):
|
||||
# If the orig name is "foo-clone", we don't want the clone to be
|
||||
# "foo-clone-clone", we want "foo-clone1"
|
||||
if not basename:
|
||||
basename = self.original_guest
|
||||
|
||||
match = re.search("-clone[1-9]*$", basename)
|
||||
start_num = 1
|
||||
force_num = False
|
||||
if match:
|
||||
num_match = re.search("[1-9]+$", match.group())
|
||||
if num_match:
|
||||
start_num = int(str(num_match.group())) + 1
|
||||
force_num = True
|
||||
basename = basename.replace(match.group(), "")
|
||||
|
||||
def cb(n):
|
||||
return generatename.check_libvirt_collision(
|
||||
self.conn.lookupByName, n)
|
||||
basename = basename + "-clone"
|
||||
return generatename.generate_name(basename, cb,
|
||||
sep="", start_num=start_num, force_num=force_num)
|
||||
|
||||
|
||||
############################
|
||||
# Private helper functions #
|
||||
############################
|
||||
|
||||
# Parse disk paths that need to be cloned from the original guest's xml
|
||||
# Return a list of DeviceDisk instances pointing to the original
|
||||
# storage
|
||||
def _get_original_disks_info(self):
|
||||
clonelist = []
|
||||
retdisks = []
|
||||
|
||||
for disk in self._guest.devices.disk:
|
||||
if self._do_we_clone_device(disk):
|
||||
clonelist.append(disk)
|
||||
continue
|
||||
|
||||
# Set up virtual disk to encapsulate all relevant path info
|
||||
for disk in clonelist:
|
||||
validate = not self.preserve_dest_disks
|
||||
|
||||
def _lookup_vm(conn, name):
|
||||
try:
|
||||
return conn.lookupByName(name)
|
||||
except libvirt.libvirtError:
|
||||
e = ValueError(_("Domain '%s' was not found.") % str(name))
|
||||
raise e from None
|
||||
|
||||
|
||||
def _build_clone_vol_install(orig_disk, clone_disk):
|
||||
vol_install = DeviceDisk.build_vol_install(
|
||||
orig_disk.conn, os.path.basename(clone_disk.path),
|
||||
clone_disk.get_parent_pool(), .000001, False)
|
||||
vol_install.input_vol = orig_disk.get_vol_object()
|
||||
|
||||
# Source and dest are managed. If they share the same pool,
|
||||
# replace vol_install with a CloneVolume instance, otherwise
|
||||
# simply set input_vol on the dest vol_install
|
||||
if (vol_install.pool.name() ==
|
||||
orig_disk.get_parent_pool().name()):
|
||||
vol_install.sync_input_vol()
|
||||
else:
|
||||
# Cross pool cloning
|
||||
# Sync only the format of the image.
|
||||
vol_install.sync_input_vol(only_format=True)
|
||||
|
||||
return vol_install
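The pool comparison above reduces to a small decision; a hedged sketch over plain pool names (the real code compares libvirt pool objects via .name()):

def _sync_mode(orig_pool_name, clone_pool_name):
    # Same pool: full CloneVolume-style sync; cross pool: sync format only
    if orig_pool_name == clone_pool_name:
        return "full-sync"
    return "format-only"

assert _sync_mode("default", "default") == "full-sync"
assert _sync_mode("default", "ssd-pool") == "format-only"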
|
||||
|
||||
|
||||
def _build_clone_disk(orig_disk, clonepath, allow_create, sparse):
|
||||
conn = orig_disk.conn
|
||||
device = DeviceDisk.DEVICE_DISK
|
||||
if not disk.path:
|
||||
# Tell DeviceDisk we are a cdrom to allow empty media
|
||||
if not clonepath:
|
||||
device = DeviceDisk.DEVICE_CDROM
|
||||
|
||||
newd = DeviceDisk(self.conn)
|
||||
newd.path = disk.path
|
||||
newd.device = device
|
||||
newd.driver_name = disk.driver_name
|
||||
newd.driver_type = disk.driver_type
|
||||
newd.target = disk.target
|
||||
if validate:
|
||||
if newd.wants_storage_creation():
|
||||
raise ValueError(_("Disk path '%s' does not exist.") %
|
||||
newd.path)
|
||||
except Exception as e:
|
||||
log.debug("Exception creating clone disk objects",
|
||||
exc_info=True)
|
||||
raise ValueError(_("Could not determine original disk "
|
||||
"information: %s" % str(e)))
|
||||
retdisks.append(newd)
|
||||
clone_disk = DeviceDisk(conn)
|
||||
clone_disk.path = clonepath
|
||||
clone_disk.device = device
|
||||
|
||||
return retdisks
|
||||
if not allow_create:
|
||||
clone_disk.validate()
|
||||
return clone_disk
|
||||
|
||||
# Pull disk #i from the original guest xml, return its source path
|
||||
# if it should be cloned
|
||||
# Cloning policy based on 'clone_policy', 'force_target' and 'skip_target'
|
||||
def _do_we_clone_device(self, disk):
|
||||
if disk.target in self.skip_target:
|
||||
if clone_disk.get_vol_object():
|
||||
# Special case: non remote cloning of a guest using
|
||||
# managed block devices: fall back to local cloning if
|
||||
# we have permissions to do so. This validation check
|
||||
# caused a few bug reports in a short period of time,
|
||||
# so must be a common case.
|
||||
if (conn.is_remote() or
|
||||
clone_disk.type != clone_disk.TYPE_BLOCK or
|
||||
not orig_disk.path or
|
||||
not os.access(orig_disk.path, os.R_OK) or
|
||||
not clone_disk.path or
|
||||
not os.access(clone_disk.path, os.W_OK)):
|
||||
raise RuntimeError(
|
||||
_("Clone onto existing storage volume is not "
|
||||
"currently supported: '%s'") % clone_disk.path)
|
||||
|
||||
if (orig_disk.get_vol_object() and
|
||||
clone_disk.wants_storage_creation()):
|
||||
vol_install = _build_clone_vol_install(orig_disk, clone_disk)
|
||||
if not sparse:
|
||||
vol_install.allocation = vol_install.capacity
|
||||
clone_disk.set_vol_install(vol_install)
|
||||
elif orig_disk.path:
|
||||
clone_disk.set_local_disk_to_clone(orig_disk, sparse)
|
||||
|
||||
clone_disk.validate()
|
||||
return clone_disk
|
||||
|
||||
|
||||
class _CloneDiskInfo:
|
||||
"""
|
||||
Class that tracks some additional information about how we want
|
||||
to default handle each disk of the source VM
|
||||
"""
|
||||
def __init__(self, srcdisk):
|
||||
self.disk = DeviceDisk(srcdisk.conn, parsexml=srcdisk.get_xml())
|
||||
self._do_clone = self._do_we_clone_default()
|
||||
self.clone_disk = None
|
||||
|
||||
def is_clone_requested(self):
|
||||
return self._do_clone
|
||||
def set_clone_requested(self, val):
|
||||
self._do_clone = val
|
||||
|
||||
def _do_we_clone_default(self):
|
||||
if not self.disk.path:
|
||||
return False
|
||||
if self.disk.read_only:
|
||||
return False
|
||||
if self.disk.shareable:
|
||||
return False
|
||||
|
||||
if disk.target in self.force_target:
|
||||
return True
|
||||
|
||||
# No media path
|
||||
if (not disk.path and
|
||||
self.CLONE_POLICY_NO_EMPTYMEDIA in self.clone_policy):
|
||||
return False
|
||||
|
||||
# Readonly disks
|
||||
if (disk.read_only and
|
||||
self.CLONE_POLICY_NO_READONLY in self.clone_policy):
|
||||
return False
|
||||
|
||||
# Shareable disks
|
||||
if (disk.shareable and
|
||||
self.CLONE_POLICY_NO_SHAREABLE in self.clone_policy):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
# Simple wrapper for checking a vm exists and returning the domain
|
||||
def _lookup_vm(self, name):
|
||||
def check_clonable(self):
|
||||
try:
|
||||
return self.conn.lookupByName(name)
|
||||
except libvirt.libvirtError:
|
||||
raise ValueError(_("Domain '%s' was not found.") % str(name))
|
||||
# This forces DeviceDisk to resolve the storage backend
|
||||
self.disk.path = self.disk.path
|
||||
if self.disk.wants_storage_creation():
|
||||
raise ValueError(
|
||||
_("Disk path '%s' does not exist.") % self.disk.path)
|
||||
except Exception as e:
|
||||
log.debug("Exception processing clone original path", exc_info=True)
|
||||
err = _("Could not determine original disk information: %s" % str(e))
|
||||
raise ValueError(err) from None
|
||||
|
||||
def set_clone_path(self, path, allow_create, sparse):
|
||||
if allow_create:
|
||||
self.check_clonable()
|
||||
|
||||
try:
|
||||
self.clone_disk = _build_clone_disk(
|
||||
self.disk, path, allow_create, sparse)
|
||||
except Exception as e:
|
||||
log.debug("Error setting clone path.", exc_info=True)
|
||||
raise ValueError(
|
||||
_("Could not use path '%(path)s' for cloning: %(error)s") % {
|
||||
"path": path,
|
||||
"error": str(e),
|
||||
})
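The default clone decision that _CloneDiskInfo applies can be summarized as a pure function; a hedged sketch over plain values (the real code inspects DeviceDisk attributes):

def _default_clone_requested(path, read_only, shareable):
    # Mirrors _do_we_clone_default: skip empty media, readonly and
    # shareable disks; everything else is cloned by default.
    if not path:
        return False
    if read_only:
        return False
    if shareable:
        return False
    return True

assert _default_clone_requested("/var/lib/libvirt/images/a.img", False, False)
assert not _default_clone_requested(None, False, False)
assert not _default_clone_requested("/path/to/shared.img", False, True)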
|
||||
|
||||
|
||||
class Cloner(object):
|
||||
@staticmethod
|
||||
def generate_clone_name(conn, basename):
|
||||
return _generate_clone_name(conn, basename)
|
||||
|
||||
@staticmethod
|
||||
def generate_clone_disk_path(conn, origname, newname, origpath):
|
||||
return _generate_clone_disk_path(conn, origname, newname, origpath)
|
||||
|
||||
def __init__(self, conn, src_name=None, src_xml=None):
|
||||
self.conn = conn
|
||||
|
||||
self._src_guest = None
|
||||
self._new_guest = None
|
||||
self._diskinfos = []
|
||||
self._init_src(src_name, src_xml)
|
||||
|
||||
self._new_nvram_path = None
|
||||
self._nvram_disk = None
|
||||
|
||||
self._sparse = True
|
||||
self._overwrite = True
|
||||
self._replace = False
|
||||
self._reflink = False
|
||||
|
||||
|
||||
#################
|
||||
# Init routines #
|
||||
#################
|
||||
|
||||
def _init_src(self, src_name, src_xml):
|
||||
"""
|
||||
Set up the source VM info we are cloning, from passed in VM name
|
||||
or full XML
|
||||
"""
|
||||
if not src_xml:
|
||||
dom = _lookup_vm(self.conn, src_name)
|
||||
status = dom.info()[0]
|
||||
if status not in [libvirt.VIR_DOMAIN_SHUTOFF]:
|
||||
raise RuntimeError(_("Domain to clone must be shutoff."))
|
||||
flags = libvirt.VIR_DOMAIN_XML_SECURE
|
||||
src_xml = dom.XMLDesc(flags)
|
||||
|
||||
log.debug("Original XML:\n%s", src_xml)
|
||||
|
||||
self._src_guest = Guest(self.conn, parsexml=src_xml)
|
||||
self._new_guest = Guest(self.conn, parsexml=src_xml)
|
||||
self._init_new_guest()
|
||||
|
||||
# Collect disk info for every disk to determine if we will
|
||||
# default to cloning or not
|
||||
for disk in self._src_guest.devices.disk:
|
||||
self._diskinfos.append(_CloneDiskInfo(disk))
|
||||
for diskinfo in [d for d in self._diskinfos if d.is_clone_requested()]:
|
||||
disk = diskinfo.disk
|
||||
log.debug("Wants cloning: size=%s path=%s",
|
||||
disk.get_size(), disk.path)
|
||||
|
||||
def _init_new_guest(self):
|
||||
"""
|
||||
Perform the series of unconditional new VM changes we always make
|
||||
"""
|
||||
self._new_guest.id = None
|
||||
self._new_guest.title = None
|
||||
self._new_guest.uuid = None
|
||||
self._new_guest.uuid = Guest.generate_uuid(self.conn)
|
||||
|
||||
for dev in self._new_guest.devices.graphics:
|
||||
if dev.port and dev.port != -1:
|
||||
log.warning(_("Setting the graphics device port to autoport, "
|
||||
"in order to avoid conflicting."))
|
||||
dev.port = -1
|
||||
|
||||
for iface in self._new_guest.devices.interface:
|
||||
iface.target_dev = None
|
||||
iface.macaddr = DeviceInterface.generate_mac(self.conn)
|
||||
|
||||
# For guest agent channel, remove a path to generate a new one with
|
||||
# new guest name
|
||||
for channel in self._new_guest.devices.channel:
|
||||
if (channel.type == DeviceChannel.TYPE_UNIX and
|
||||
channel.target_name and channel.source.path and
|
||||
channel.target_name in channel.source.path):
|
||||
channel.source.path = None
|
||||
|
||||
new_name = Cloner.generate_clone_name(self.conn, self.src_name)
|
||||
log.debug("Auto-generated clone name '%s'", new_name)
|
||||
self.set_clone_name(new_name)
|
||||
|
||||
|
||||
##############
|
||||
# Properties #
|
||||
##############
|
||||
|
||||
@property
|
||||
def src_name(self):
|
||||
"""
|
||||
The name of the original VM we are cloning
|
||||
"""
|
||||
return self._src_guest.name
|
||||
|
||||
@property
|
||||
def new_guest(self):
|
||||
"""
|
||||
The Guest instance of the new XML we will create
|
||||
"""
|
||||
return self._new_guest
|
||||
|
||||
def set_clone_name(self, name):
|
||||
self._new_guest.name = name
|
||||
|
||||
def set_clone_uuid(self, uuid):
|
||||
"""
|
||||
Override the new VM's generated UUID
|
||||
"""
|
||||
self._new_guest.uuid = uuid
|
||||
|
||||
def set_replace(self, val):
|
||||
"""
|
||||
If True, don't check for clone name collision, simply undefine
|
||||
any conflicting guest.
|
||||
"""
|
||||
self._replace = bool(val)
|
||||
|
||||
def set_reflink(self, reflink):
|
||||
"""
|
||||
If true, use COW lightweight copy
|
||||
"""
|
||||
self._reflink = reflink
|
||||
|
||||
def set_sparse(self, flg):
|
||||
"""
|
||||
If True, attempt sparse allocation during cloning
|
||||
"""
|
||||
self._sparse = flg
|
||||
|
||||
def get_diskinfos(self):
|
||||
"""
|
||||
Return the list of _CloneDiskInfo instances
|
||||
"""
|
||||
return self._diskinfos[:]
|
||||
|
||||
def get_diskinfos_to_clone(self):
|
||||
"""
|
||||
Return a list of _CloneDiskInfo that are tagged for cloning
|
||||
"""
|
||||
return [di for di in self.get_diskinfos() if di.is_clone_requested()]
|
||||
|
||||
def set_nvram_path(self, val):
|
||||
"""
|
||||
If the VM needs to have nvram content cloned, this overrides the
|
||||
destination path
|
||||
"""
|
||||
self._new_nvram_path = val
|
||||
|
||||
def set_overwrite(self, flg):
|
||||
"""
|
||||
If False, no data is copied to the destination disks by default.
|
||||
Storage may be created, but it is empty.
|
||||
"""
|
||||
self._overwrite = flg
|
||||
|
||||
|
||||
######################
|
||||
# Functional methods #
|
||||
######################
|
||||
|
||||
def _prepare_nvram(self):
|
||||
new_nvram_path = self._new_nvram_path
|
||||
if new_nvram_path is None:
|
||||
nvram_dir = os.path.dirname(self._new_guest.os.nvram)
|
||||
new_nvram_path = os.path.join(
|
||||
nvram_dir, "%s_VARS.fd" % self._new_guest.name)
|
||||
|
||||
old_nvram = DeviceDisk(self.conn)
|
||||
old_nvram.path = self._new_guest.os.nvram
|
||||
nvram = DeviceDisk(self.conn)
|
||||
nvram.path = new_nvram_path
|
||||
diskinfo = _CloneDiskInfo(old_nvram)
|
||||
allow_create = self._overwrite
|
||||
|
||||
if (allow_create and
|
||||
nvram.wants_storage_creation() and
|
||||
old_nvram.get_vol_object()):
|
||||
# We only run validation if there's some existing nvram we
|
||||
# can copy. It's valid for nvram to not exist at VM define
|
||||
# time, libvirt will create it for us
|
||||
diskinfo.set_clone_path(new_nvram_path, allow_create, self._sparse)
|
||||
self._nvram_disk = diskinfo.clone_disk
|
||||
self._nvram_disk.get_vol_install().reflink = self._reflink
|
||||
|
||||
self._new_guest.os.nvram = nvram.path
|
||||
|
||||
|
||||
def prepare(self):
|
||||
"""
|
||||
Validate and set up all parameters needed for the new (clone) VM
|
||||
"""
|
||||
try:
|
||||
Guest.validate_name(self.conn, self._new_guest.name,
|
||||
check_collision=not self._replace,
|
||||
validate=False)
|
||||
except ValueError as e:
|
||||
raise ValueError(_("Invalid name for new guest: %s") % e)
|
||||
|
||||
for diskinfo in self.get_diskinfos_to_clone():
|
||||
orig_disk = diskinfo.disk
|
||||
|
||||
if not diskinfo.clone_disk:
|
||||
# User didn't set a path, generate one
|
||||
newpath = Cloner.generate_clone_disk_path(
|
||||
self.conn, self.src_name,
|
||||
self.new_guest.name,
|
||||
orig_disk.path)
|
||||
diskinfo.set_clone_path(newpath,
|
||||
self._overwrite, self._sparse)
|
||||
|
||||
clone_disk = diskinfo.clone_disk
|
||||
assert clone_disk
|
||||
log.debug("Cloning srcpath=%s dstpath=%s",
|
||||
orig_disk.path, clone_disk.path)
|
||||
|
||||
if self._reflink:
|
||||
vol_install = clone_disk.get_vol_install()
|
||||
vol_install.reflink = self._reflink
|
||||
|
||||
for disk in self._new_guest.devices.disk:
|
||||
if disk.target == orig_disk.target:
|
||||
xmldisk = disk
|
||||
|
||||
# Change the XML
|
||||
xmldisk.path = None
|
||||
xmldisk.type = clone_disk.type
|
||||
xmldisk.driver_name = orig_disk.driver_name
|
||||
xmldisk.driver_type = orig_disk.driver_type
|
||||
xmldisk.path = clone_disk.path
|
||||
|
||||
if self._new_guest.os.nvram:
|
||||
self._prepare_nvram()
|
||||
|
||||
# Save altered clone xml
|
||||
log.debug("Clone guest xml is\n%s", self._new_guest.get_xml())
|
||||
|
||||
def start_duplicate(self, meter=None):
|
||||
"""
|
||||
Actually perform the duplication: cloning disks if needed and defining
|
||||
the new clone xml.
|
||||
"""
|
||||
log.debug("Starting duplicate.")
|
||||
meter = progress.ensure_meter(meter)
|
||||
|
||||
dom = None
|
||||
try:
|
||||
# Replace orig VM if required
|
||||
if self._replace:
|
||||
_replace_vm(self.conn, self._new_guest.name)
|
||||
|
||||
# Define domain early to catch any xml errors before duping storage
|
||||
dom = self.conn.defineXML(self._new_guest.get_xml())
|
||||
|
||||
if self._overwrite:
|
||||
diskinfos = self.get_diskinfos_to_clone()
|
||||
for dst_dev in [d.clone_disk for d in diskinfos]:
|
||||
dst_dev.build_storage(meter)
|
||||
if self._nvram_disk:
|
||||
self._nvram_disk.build_storage(meter)
|
||||
except Exception as e:
|
||||
log.debug("Duplicate failed: %s", str(e))
|
||||
if dom:
|
||||
dom.undefine()
|
||||
raise
|
||||
|
||||
log.debug("Duplicating finished.")
|
||||
|
|
|
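Taken together, driving the reworked API looks roughly like the sketch below, mirroring what virtclone.py does later in this commit. Connection setup is elided, "demo-vm" is a hypothetical shutoff guest, and the import assumes Cloner is exported from the virtinst package as virtclone.py's "from .cloner import Cloner" suggests.

import virtinst

def clone_vm(conn, src_name="demo-vm", new_name="demo-vm-clone"):
    # Hypothetical wrapper; mirrors virtclone.py's flow in this commit
    cloner = virtinst.Cloner(conn, src_name)
    cloner.set_clone_name(new_name)

    # Pick a destination path for every disk that defaults to cloning
    for diskinfo in cloner.get_diskinfos_to_clone():
        newpath = virtinst.Cloner.generate_clone_disk_path(
            conn, src_name, new_name, diskinfo.disk.path)
        diskinfo.set_clone_path(newpath, True, True)

    cloner.prepare()          # validate name/disks/nvram, build new XML
    cloner.start_duplicate()  # define the clone and copy storage
    return cloner.new_guest.name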
@@ -517,7 +517,7 @@ class CloneStorageCreator(_StorageCreator):
|
|||
if self.get_dev_type() == "block":
|
||||
avail = _get_size(self._path) # pragma: no cover
|
||||
else:
|
||||
vfs = os.statvfs(os.path.dirname(self._path))
|
||||
vfs = os.statvfs(os.path.dirname(os.path.abspath(self._path)))
|
||||
avail = vfs.f_frsize * vfs.f_bavail
|
||||
need = int(self._size) * 1024 * 1024 * 1024
|
||||
if need > avail: # pragma: no cover
|
||||
|
|
|
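The free-space check above can be exercised standalone; a hedged sketch (the path and size here are made up):

import os

def _avail_bytes(path):
    # Free bytes on the filesystem that would hold 'path'
    vfs = os.statvfs(os.path.dirname(os.path.abspath(path)))
    return vfs.f_frsize * vfs.f_bavail

need = 4 * 1024 * 1024 * 1024  # e.g. a 4 GiB non-sparse clone target
if need > _avail_bytes("./clone-target.img"):
    print("Not enough free space to fully allocate the clone")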
@@ -14,71 +14,54 @@ from .cloner import Cloner
|
|||
from .logger import log
|
||||
|
||||
|
||||
# General input gathering functions
|
||||
def get_clone_name(new_name, auto_clone, design):
|
||||
if not new_name and auto_clone:
|
||||
# Generate a name to use
|
||||
new_name = design.generate_clone_name()
|
||||
log.debug("Auto-generated clone name '%s'", new_name)
|
||||
|
||||
if not new_name:
|
||||
fail(_("A name is required for the new virtual machine,"
|
||||
" use '--name NEW_VM_NAME' to specify one."))
|
||||
design.clone_name = new_name
|
||||
|
||||
|
||||
def get_original_guest(guest_name, origfile, design):
|
||||
origxml = None
|
||||
if origfile:
|
||||
f = open(origfile, "r")
|
||||
origxml = f.read()
|
||||
f.close()
|
||||
|
||||
try:
|
||||
design.original_xml = origxml
|
||||
return
|
||||
except (ValueError, RuntimeError) as e: # pragma: no cover
|
||||
fail(e)
|
||||
|
||||
if not guest_name:
|
||||
def _process_src(options):
|
||||
src_name = options.src_name
|
||||
src_xml = None
|
||||
if options.original_xml:
|
||||
src_xml = open(options.original_xml).read()
|
||||
elif not src_name:
|
||||
fail(_("An original machine name is required,"
|
||||
" use '--original ORIGINAL_GUEST' and try again."))
|
||||
design.original_guest = guest_name
|
||||
" use '--original src_name' and try again."))
|
||||
return src_name, src_xml
|
||||
|
||||
|
||||
def get_clone_macaddr(new_mac, design):
|
||||
if new_mac is None or new_mac[0] == "RANDOM":
|
||||
def _process_macs(options, cloner):
|
||||
new_macs = options.new_mac
|
||||
if not new_macs or new_macs[0] == "RANDOM":
|
||||
return
|
||||
design.clone_macs = new_mac
|
||||
|
||||
for mac in design.clone_macs:
|
||||
cli.validate_mac(design.conn, mac)
|
||||
for mac in new_macs:
|
||||
cli.validate_mac(cloner.conn, mac)
|
||||
|
||||
for iface in cloner.new_guest.devices.interface[:]:
|
||||
iface.macaddr = new_macs.pop(0)
|
||||
|
||||
|
||||
def get_clone_diskfile(new_diskfiles, design, preserve, auto_clone):
|
||||
if new_diskfiles is None:
|
||||
new_diskfiles = [None]
|
||||
def _process_disks(options, cloner):
|
||||
newpaths = (options.new_diskfile or [])[:]
|
||||
|
||||
newidx = 0
|
||||
clonepaths = []
|
||||
for origpath in [d.path for d in design.original_disks]:
|
||||
if len(new_diskfiles) <= newidx:
|
||||
# Extend the new/passed paths list with None if it's not
|
||||
# long enough
|
||||
new_diskfiles.append(None)
|
||||
newpath = new_diskfiles[newidx]
|
||||
diskinfos = cloner.get_diskinfos_to_clone()
|
||||
for diskinfo in diskinfos:
|
||||
origpath = diskinfo.disk.path
|
||||
newpath = None
|
||||
if newpaths:
|
||||
newpath = newpaths.pop(0)
|
||||
elif options.auto_clone:
|
||||
break
|
||||
|
||||
if origpath is None:
|
||||
newpath = None
|
||||
elif newpath is None and auto_clone:
|
||||
newpath = design.generate_clone_disk_path(origpath)
|
||||
allow_create = options.overwrite
|
||||
diskinfo.set_clone_path(newpath, allow_create, options.sparse)
|
||||
|
||||
clonepaths.append(newpath)
|
||||
newidx += 1
|
||||
design.clone_paths = clonepaths
|
||||
|
||||
for disk in design.clone_disks:
|
||||
cli.validate_disk(disk, warn_overwrite=not preserve)
|
||||
def _validate_disks(options, cloner):
|
||||
# Extra CLI validation for specified disks
|
||||
warn_overwrite = options.overwrite
|
||||
for diskinfo in cloner.get_diskinfos():
|
||||
if not diskinfo.clone_disk:
|
||||
continue
|
||||
cli.validate_disk(diskinfo.clone_disk, warn_overwrite=warn_overwrite)
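For reference, _process_disks above pairs --file arguments with clonable disks in order; a hedged sketch over plain target names (with --auto-clone any leftover disks get generated paths later in prepare(), and without it a None path is passed through and fails validation):

def _pair_paths(clonable_targets, new_paths, auto_clone):
    pairs = []
    paths = list(new_paths)
    for target in clonable_targets:
        if paths:
            pairs.append((target, paths.pop(0)))
        elif auto_clone:
            break  # remaining disks get auto-generated paths later
        else:
            pairs.append((target, None))  # will fail validation later
    return pairs

assert _pair_paths(["hda", "hdb"], ["/tmp/new.img"], True) == [("hda", "/tmp/new.img")]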
|
||||
|
||||
|
||||
def parse_args():
|
||||
|
@@ -93,7 +76,7 @@ def parse_args():
|
|||
cli.add_connect_option(parser)
|
||||
|
||||
geng = parser.add_argument_group(_("General Options"))
|
||||
geng.add_argument("-o", "--original", dest="original_guest",
|
||||
geng.add_argument("-o", "--original", dest="src_name",
|
||||
help=_("Name of the original guest to clone."))
|
||||
geng.add_argument("--original-xml",
|
||||
help=_("XML file to use as the original guest."))
|
||||
|
@@ -121,10 +104,12 @@ def parse_args():
|
|||
default=True,
|
||||
help=_("Do not use a sparse file for the clone's "
|
||||
"disk image"))
|
||||
stog.add_argument("--preserve-data", action="store_false",
|
||||
dest="preserve", default=True,
|
||||
help=_("Do not clone storage, new disk images specified "
|
||||
"via --file are preserved unchanged"))
|
||||
stog.add_argument("--preserve-data", dest="overwrite",
|
||||
action="store_false", default=True,
|
||||
help=_("Do not clone storage contents to specified file paths, "
|
||||
"their contents will be left untouched. "
|
||||
"This requires specifying existing paths for "
|
||||
"every clonable disk image."))
|
||||
stog.add_argument("--nvram", dest="new_nvram",
|
||||
help=_("New file to use as storage for nvram VARS"))
|
||||
|
||||
|
@@ -136,9 +121,7 @@ def parse_args():
|
|||
misc = parser.add_argument_group(_("Miscellaneous Options"))
|
||||
|
||||
# Just used for clone tests
|
||||
misc.add_argument("--clone-running", action="store_true",
|
||||
default=False, help=argparse.SUPPRESS)
|
||||
misc.add_argument("--__test-nodry", action="store_true",
|
||||
misc.add_argument("--__test-nodry", action="store_true", dest="test_nodry",
|
||||
default=False, help=argparse.SUPPRESS)
|
||||
|
||||
cli.add_misc_options(misc, prompt=True, replace=True, printxml=True)
|
||||
|
@@ -148,7 +131,6 @@ def parse_args():
|
|||
return parser.parse_args()
|
||||
|
||||
|
||||
|
||||
def main(conn=None):
|
||||
cli.earlyLogging()
|
||||
options = parse_args()
|
||||
|
@@ -167,47 +149,49 @@ def main(conn=None):
|
|||
fail(_("Either --auto-clone or --file is required,"
|
||||
" use '--auto-clone or --file' and try again."))
|
||||
|
||||
design = Cloner(conn)
|
||||
src_name, src_xml = _process_src(options)
|
||||
cloner = Cloner(conn, src_name, src_xml)
|
||||
|
||||
design.clone_running = options.clone_running
|
||||
design.replace = bool(options.replace)
|
||||
get_original_guest(options.original_guest, options.original_xml,
|
||||
design)
|
||||
get_clone_name(options.new_name, options.auto_clone, design)
|
||||
cloner.set_replace(bool(options.replace))
|
||||
cloner.set_reflink(bool(options.reflink))
|
||||
cloner.set_sparse(bool(options.sparse))
|
||||
cloner.set_overwrite(bool(options.overwrite))
|
||||
|
||||
get_clone_macaddr(options.new_mac, design)
|
||||
if options.new_uuid is not None:
|
||||
design.clone_uuid = options.new_uuid
|
||||
if options.reflink is True:
|
||||
design.reflink = True
|
||||
for i in options.target or []:
|
||||
design.force_target = i
|
||||
for i in options.skip_copy or []:
|
||||
design.skip_target = i
|
||||
design.clone_sparse = options.sparse
|
||||
design.preserve = options.preserve
|
||||
cloner.set_clone_uuid(options.new_uuid)
|
||||
if options.new_nvram:
|
||||
cloner.set_nvram_path(options.new_nvram)
|
||||
|
||||
design.clone_nvram = options.new_nvram
|
||||
force_targets = options.target or []
|
||||
skip_targets = options.skip_copy or []
|
||||
for diskinfo in cloner.get_diskinfos():
|
||||
if diskinfo.disk.target in force_targets:
|
||||
diskinfo.set_clone_requested(True)
|
||||
if diskinfo.disk.target in skip_targets:
|
||||
diskinfo.set_clone_requested(False)
|
||||
|
||||
# This determines the devices that need to be cloned, so that
|
||||
# get_clone_diskfile knows how many new disk paths it needs
|
||||
design.setup_original()
|
||||
if options.new_name:
|
||||
cloner.set_clone_name(options.new_name)
|
||||
elif not options.auto_clone:
|
||||
fail(_("A name is required for the new virtual machine,"
|
||||
" use '--name NEW_VM_NAME' to specify one."))
|
||||
|
||||
get_clone_diskfile(options.new_diskfile, design,
|
||||
not options.preserve, options.auto_clone)
|
||||
_process_macs(options, cloner)
|
||||
_process_disks(options, cloner)
|
||||
|
||||
# setup design object
|
||||
design.setup_clone()
|
||||
cloner.prepare()
|
||||
|
||||
_validate_disks(options, cloner)
|
||||
|
||||
run = True
|
||||
if options.xmlonly:
|
||||
run = options.__test_nodry
|
||||
print_stdout(design.clone_xml, do_force=True)
|
||||
run = options.test_nodry
|
||||
print_stdout(cloner.new_guest.get_xml(), do_force=True)
|
||||
if run:
|
||||
design.start_duplicate(cli.get_meter())
|
||||
cloner.start_duplicate(cli.get_meter())
|
||||
|
||||
print_stdout("")
|
||||
print_stdout(_("Clone '%s' created successfully.") % design.clone_name)
|
||||
print_stdout(_("Clone '%s' created successfully.") % cloner.new_guest.name)
|
||||
log.debug("end clone")
|
||||
return 0