create: Make sure we restart new VMs the correct number of times
This means booting live CDs after initial creation, doing a post-install boot for most other installs, and booting Windows twice for the two-stage install.
parent 4db7c4e48d
commit c6e9c4befe
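Before the diff, a quick orientation: the boot counts described in the message above can be condensed into a small, self-contained sketch. InstallSim, finish_install and the boot counter are invented purely for illustration; only the decision rules mirror the vmmCreate changes in the hunks below.

# Toy model of the restart rules (Python 2 style, like the code below).
# A live CD has no install phase, so it gets exactly one boot after
# creation; a normal install gets one post-install boot; a two-stage
# (Windows-style) install is continued once and then booted, i.e. two boots.

class InstallSim(object):
    def __init__(self, has_install_phase, two_stage):
        self.has_install_phase = has_install_phase
        self.two_stage = two_stage
        self.boots = 0

    def startup(self):
        self.boots += 1

def finish_install(sim):
    if not sim.has_install_phase:
        sim.startup()          # live CD: domain is already shut off, boot it
        return
    if sim.two_stage:
        sim.startup()          # stage two of the install runs in its own boot
    sim.startup()              # final post-install boot

for name, sim in [("live cd", InstallSim(False, False)),
                  ("normal install", InstallSim(True, False)),
                  ("windows (2 stage)", InstallSim(True, True))]:
    finish_install(sim)
    print "%-18s -> %d boot(s)" % (name, sim.boots)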
@@ -1531,13 +1531,10 @@ class vmmCreate(gobject.GObject):
             self.failed_guest = self.guest
             return
 
-        # Ensure new VM is loaded
-        # FIXME: Hmm, shouldn't we emit a signal here rather than do this?
-        self.conn.tick(noStatsUpdate=True)
-        vm = self.conn.get_vm(guest.uuid)
 
         if self.config.get_console_popup() == 1:
             # user has requested console on new created vms only
+            vm = self.conn.get_vm(guest.uuid)
             gtype = vm.get_graphics_console()[0]
             if gtype == "vnc":
                 self.emit("action-show-console", self.conn.get_uri(),
@@ -1566,6 +1563,22 @@ class vmmCreate(gobject.GObject):
                 logging.error("Guest install did not return a domain")
             else:
                 logging.debug("Install completed")
+
+            # Make sure we pick up the domain object
+            self.conn.tick(noStatsUpdate=True)
+            vm = self.conn.get_vm(guest.uuid)
+
+            if vm.is_shutoff():
+                # Domain is already shutdown, but no error was raised.
+                # Probably means guest had no 'install' phase, as in
+                # for live cds. Try to restart the domain.
+                vm.startup()
+            else:
+                # Register a status listener, which will restart the
+                # guest after the install has finished
+                util.connect_opt_out(vm, "status-changed",
+                                     self.check_install_status, guest)
+
         except:
             (_type, value, stacktrace) = sys.exc_info ()
 
@@ -1578,6 +1591,36 @@ class vmmCreate(gobject.GObject):
         if error:
             asyncjob.set_error(error, details)
 
+    def check_install_status(self, vm, ignore1, ignore2, virtinst_guest=None):
+        if vm.is_crashed():
+            logging.debug("VM crashed, cancelling install plans.")
+            return True
+
+        if not vm.is_shutoff():
+            return
+
+        try:
+            if virtinst_guest:
+                continue_inst = virtinst_guest.get_continue_inst()
+
+                if continue_inst:
+                    logging.debug("VM needs a 2 stage install, continuing.")
+                    # Continue the install, then reconnect this opt
+                    # out handler, removing the virtinst_guest which
+                    # will force one final restart.
+                    virtinst_guest.continue_install()
+                    util.connect_opt_out(vm, "status-changed",
+                                         self.check_install_status, None)
+                    return True
+
+            logging.debug("Install should be completed, starting VM.")
+            vm.startup()
+        except Exception, e:
+            self.err.show_err(_("Error continue install: %s") % str(e),
+                              "".join(traceback.format_exc()))
+
+        return True
+
     def pretty_storage(self, size):
         return "%.1f Gb" % float(size)
 
@@ -1051,6 +1051,12 @@ class vmmDomainBase(vmmLibvirtObject):
     def disk_io_vector_limit(self, limit):
         return self.in_out_vector_limit(self.disk_io_vector(), limit)
 
+    def is_shutoff(self):
+        return self.status() == libvirt.VIR_DOMAIN_SHUTOFF
+
+    def is_crashed(self):
+        return self.status() == libvirt.VIR_DOMAIN_CRASHED
+
     def is_stoppable(self):
         return self.status() in [libvirt.VIR_DOMAIN_RUNNING,
                                  libvirt.VIR_DOMAIN_PAUSED]
@@ -1159,8 +1165,10 @@ class vmmDomain(vmmDomainBase):
 
         self._update_status()
 
-        self.config.on_stats_enable_net_poll_changed(self.toggle_sample_network_traffic)
-        self.config.on_stats_enable_disk_poll_changed(self.toggle_sample_disk_io)
+        self.config.on_stats_enable_net_poll_changed(
+            self.toggle_sample_network_traffic)
+        self.config.on_stats_enable_disk_poll_changed(
+            self.toggle_sample_disk_io)
 
         self.getvcpus_supported = support.check_domain_support(self._backend,
                                             support.SUPPORT_DOMAIN_GETVCPUS)
@@ -1176,7 +1184,6 @@ class vmmDomain(vmmDomainBase):
 
         # Hook up our own status listeners
         self.connect("status-changed", self._update_start_vcpus)
-        self.connect("status-changed", self._check_install_status)
 
         ##########################
         # Internal virDomain API #
@@ -1337,21 +1344,10 @@ class vmmDomain(vmmDomainBase):
         if maxmem != self.maximum_memory():
             self._backend.setMaxMemory(maxmem)
 
 
     ####################
     # End internal API #
     ####################
-
-    #########################
-    # XML fetching routines #
-    #########################
-
-
-
-    #############################
-    # End XML fetching routines #
-    #############################
-
     ###########################
     # XML/Config Altering API #
     ###########################
@@ -1774,9 +1770,6 @@ class vmmDomain(vmmDomainBase):
         self._startup_vcpus = None
         self.vcpu_max_count()
 
-    def _check_install_status(self, ignore, status, oldstatus):
-        pass
-
     def _update_status(self, status=None):
         if status == None:
             info = self.get_info()
@@ -240,6 +240,33 @@ def pretty_hv(gtype, domtype):
 
     return label
 
+def connect_once(obj, signal, func, *args):
+    id_list = []
+
+    def wrap_func(*wrapargs):
+        if id_list:
+            obj.disconnect(id_list[0])
+
+        return func(*wrapargs)
+
+    conn_id = obj.connect(signal, wrap_func, *args)
+    id_list.append(conn_id)
+
+    return conn_id
+
+def connect_opt_out(obj, signal, func, *args):
+    id_list = []
+
+    def wrap_func(*wrapargs):
+        ret = func(*wrapargs)
+        if ret and id_list:
+            obj.disconnect(id_list[0])
+
+    conn_id = obj.connect(signal, wrap_func, *args)
+    id_list.append(conn_id)
+
+    return conn_id
+
 def idle_emit(self, signal, *args):
     """
     Safe wrapper for using 'self.emit' with gobject.idle_add
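The restart bookkeeping above hinges on connect_opt_out: the handler stays connected across status changes until it returns a true value, at which point the wrapper disconnects it. A minimal toy illustration of that contract follows; the Toy class, on_status and the finished flag are made up for this example, and connect_opt_out is the helper added in the hunk above, assumed to be in scope.

import gobject

# Toy class invented for this example; connect_opt_out is the real helper above.
class Toy(gobject.GObject):
    __gsignals__ = {"status-changed": (gobject.SIGNAL_RUN_FIRST,
                                       gobject.TYPE_NONE, [])}
gobject.type_register(Toy)

calls = []

def on_status(obj, finished):
    calls.append("handled")
    # A true return value asks connect_opt_out to drop this handler;
    # anything falsy keeps it attached for the next emission.
    return finished[0]

toy = Toy()
finished = [False]
connect_opt_out(toy, "status-changed", on_status, finished)

toy.emit("status-changed")    # handler runs, stays connected
finished[0] = True
toy.emit("status-changed")    # handler runs and opts out
toy.emit("status-changed")    # nothing fires any more
print calls                   # ['handled', 'handled']

The id_list indirection in both helpers exists because the handler id is only known after obj.connect() returns, so the wrapper closes over a mutable list rather than a plain variable.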