Moved vmmStats() object code into vmmConnection() and a new vmmDomain() class, providing a stateful model around libvirt objects
This commit is contained in:
parent 34e653b7b7
commit aaad065c35
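The practical effect of the refactor: per-VM statistics move off the shared, UUID-keyed vmmStats object and onto stateful per-VM vmmDomain objects obtained from the connection. A minimal sketch of the call-site change (assuming conn is an already-open vmmConnection and uuid a known VM UUID; the helper function is illustrative, not part of the commit):

# Old pattern: stats were looked up by UUID on the shared vmmStats object.
#   status = conn.get_stats().run_status(uuid)
#   cpu    = conn.get_stats().cpu_time_percentage(uuid)

# New pattern: each VM is a stateful vmmDomain reachable from the connection.
def print_vm_summary(conn, uuid):
    vm = conn.get_vm(uuid)            # returns a vmmDomain instance
    print "%s: %s, %d%% CPU, %d MB" % (vm.get_name(),
                                       vm.run_status(),
                                       vm.cpu_time_percentage(),
                                       vm.current_memory() / 1024)

The diff below makes exactly this swap throughout vmmConnection, vmmConsole, vmmDetails, vmmEngine and vmmManager, and removes the old vmmStats class entirely.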
@@ -1,17 +1,18 @@
import gobject
import libvirt
from time import time

from virtManager.stats import vmmStats
from virtManager.domain import vmmDomain

class vmmConnection(gobject.GObject):
    __gsignals__ = {
        "vm-added": (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE,
                     (str, str, str,)),
                     [str, str]),
        "vm-removed": (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE,
                       (str, str)),
                       [str, str]),
        "vm-updated": (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE,
                       (str, str)),
                       [str, str]),
        "disconnected": (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, [str])
        }

@@ -29,15 +30,11 @@ class vmmConnection(gobject.GObject):
        self.windowDetails = {}
        self.windowConsole = {}
        self.vms = {}

        self.stats = vmmStats(config, self)
        self.tick()

    def get_uri(self):
        return self.uri

    def get_stats(self):
        return self.stats

    def get_vm(self, uuid):
        return self.vms[uuid]

@@ -60,14 +57,24 @@ class vmmConnection(gobject.GObject):
        self.emit("disconnected", self.uri)

    def get_host_info(self):
        return self.vmm.getInfo()
        return self.hostinfo

    def connect(self, name, callback):
        gobject.GObject.connect(self, name, callback)
        print "Cnnect " + name + " to " + str(callback)
        if name == "vm-added":
            for uuid in self.vms.keys():
                self.emit("vm-added", self.uri, uuid, self.vms[uuid].name())
                self.emit("vm-added", self.uri, uuid)

    def host_memory_size(self):
        return self.hostinfo[1]*1024

    def host_active_processor_count(self):
        return self.hostinfo[2]

    def host_maximum_processor_count(self):
        return self.hostinfo[4] * self.hostinfo[5] * self.hostinfo[6] * self.hostinfo[7]

    def tick(self):
        if self.vmm == None:

@@ -78,7 +85,8 @@ class vmmConnection(gobject.GObject):
        if doms != None:
            for id in doms:
                vm = self.vmm.lookupByID(id)
                newVms[self.uuidstr(vm.UUID())] = vm
                uuid = self.uuidstr(vm.UUID())
                newVms[uuid] = vmmDomain(self.config, self, vm, uuid)

        for uuid in self.vms.keys():
            if not(newVms.has_key(uuid)):

@@ -88,11 +96,12 @@ class vmmConnection(gobject.GObject):
        for uuid in newVms.keys():
            if not(self.vms.has_key(uuid)):
                self.vms[uuid] = newVms[uuid]
                print "Trying to emit"
                self.emit("vm-added", self.uri, uuid, newVms[uuid].name())
                self.emit("vm-added", self.uri, uuid)

        now = time()
        self.hostinfo = self.vmm.getInfo()
        for uuid in self.vms.keys():
            self.stats.update(uuid, self.vms[uuid])
            self.vms[uuid].tick(now)
            self.emit("vm-updated", self.uri, uuid)

        return 1
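Note that the "vm-added" signal now carries only (uri, uuid); interested code asks the vmmDomain for its name instead. A sketch of a handler written against the new two-argument signature (hypothetical callback, mirroring vmmManager.vm_added further down):

def on_vm_added(connection, uri, uuid):
    # The name is no longer delivered with the signal; fetch it from the domain object.
    print "VM added on %s: %s (%s)" % (uri, connection.get_vm(uuid).get_name(), uuid)

# conn.connect("vm-added", on_vm_added)    # assuming conn is a vmmConnection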
@@ -12,19 +12,16 @@ class vmmConsole(gobject.GObject):
        "action-take-snapshot": (gobject.SIGNAL_RUN_FIRST,
                                 gobject.TYPE_NONE, (str,str))
        }
    def __init__(self, config, hvuri, stats, vm, vmuuid):
    def __init__(self, config, vm):
        self.__gobject_init__()
        self.window = gtk.glade.XML(config.get_glade_file(), "vmm-console")
        self.config = config
        self.hvuri = hvuri
        self.stats = stats
        self.vm = vm
        self.vmuuid = vmuuid
        self.lastStatus = None

        topwin = self.window.get_widget("vmm-console")
        topwin.hide()
        topwin.set_title(vm.name() + " " + topwin.get_title())
        topwin.set_title(vm.get_name() + " " + topwin.get_title())

        self.window.get_widget("control-run").set_icon_widget(gtk.Image())
        self.window.get_widget("control-run").get_icon_widget().set_from_file(config.get_icon_dir() + "/icon_run.png")

@@ -76,11 +73,11 @@ class vmmConsole(gobject.GObject):
            print "Shutdown requested, but machine is already shutting down / shutoff"

    def control_vm_pause(self, src):
        info = self.vm.info()
        if info[0] in [ libvirt.VIR_DOMAIN_SHUTDOWN, libvirt.VIR_DOMAIN_SHUTOFF, libvirt.VIR_DOMAIN_CRASHED ]:
        status = self.vm.status()
        if status in [ libvirt.VIR_DOMAIN_SHUTDOWN, libvirt.VIR_DOMAIN_SHUTOFF, libvirt.VIR_DOMAIN_CRASHED ]:
            print "Pause/resume requested, but machine is shutdown / shutoff"
        else:
            if info[0] in [ libvirt.VIR_DOMAIN_PAUSED ]:
            if status in [ libvirt.VIR_DOMAIN_PAUSED ]:
                if not src.get_active():
                    self.vm.resume()
                else:

@@ -93,19 +90,17 @@ class vmmConsole(gobject.GObject):
    def control_vm_terminal(self, src):
        self.emit("action-launch-terminal", self.hvuri, self.vmuuid)
        self.emit("action-launch-terminal", self.vm.get_connection().get_uri(), self.vm.get_uuid())

    def control_vm_snapshot(self, src):
        self.emit("action-take-snapshot", self.hvuri, self.vmuuid)
        self.emit("action-take-snapshot", self.vm.get_connection().get_uri(), self.vm.get_uuid())

    def control_vm_details(self, src):
        self.emit("action-show-details", self.hvuri, self.vmuuid)
        self.emit("action-show-details", self.vm.get_connection().get_uri(), self.vm.get_uuid())

    def refresh(self):
        print "In console refresh"
        info = self.vm.info()
        status = info[0]

        print "Hell " + str(self) + " " + str(self.vm)
        status = self.vm.status()
        if self.lastStatus == status:
            return
@@ -21,22 +21,19 @@ class vmmDetails(gobject.GObject):
        "action-take-snapshot": (gobject.SIGNAL_RUN_FIRST,
                                 gobject.TYPE_NONE, (str,str))
        }
    def __init__(self, config, hvuri, stats, vm, vmuuid):
    def __init__(self, config, vm):
        self.__gobject_init__()
        self.window = gtk.glade.XML(config.get_glade_file(), "vmm-details")
        self.config = config
        self.hvuri = hvuri
        self.stats = stats
        self.vm = vm
        self.vmuuid = vmuuid
        self.lastStatus = None

        topwin = self.window.get_widget("vmm-details")
        topwin.hide()
        topwin.set_title(vm.name() + " " + topwin.get_title())
        topwin.set_title(self.vm.get_name() + " " + topwin.get_title())

        self.window.get_widget("overview-name").set_text(vm.name())
        self.window.get_widget("overview-uuid").set_text(vmuuid)
        self.window.get_widget("overview-name").set_text(self.vm.get_name())
        self.window.get_widget("overview-uuid").set_text(self.vm.get_uuid())

        self.window.get_widget("control-run").set_icon_widget(gtk.Image())
        self.window.get_widget("control-run").get_icon_widget().set_from_file(config.get_icon_dir() + "/icon_run.png")

@@ -145,16 +142,16 @@ class vmmDetails(gobject.GObject):
        return 0

    def control_vm_shutdown(self, src):
        if not(self.stats.run_status(self.vmuuid) in [ "shutdown", "shutoff" ]):
        if not(self.vm.run_status() in [ "shutdown", "shutoff" ]):
            self.vm.shutdown()
        else:
            print "Shutdown requested, but machine is already shutting down / shutoff"

    def control_vm_pause(self, src):
        if self.stats.run_status(self.vmuuid) in [ "shutdown", "shutoff" ]:
        if self.vm.run_status() in [ "shutdown", "shutoff" ]:
            print "Pause/resume requested, but machine is shutdown / shutoff"
        else:
            if self.stats.run_status(self.vmuuid) in [ "paused" ]:
            if self.vm.run_status() in [ "paused" ]:
                if not src.get_active():
                    self.vm.resume()
                else:

@@ -167,10 +164,10 @@ class vmmDetails(gobject.GObject):
    def control_vm_terminal(self, src):
        self.emit("action-launch-terminal", self.hvuri, self.vmuuid)
        self.emit("action-launch-terminal", self.vm.get_connection().get_uri(), self.vm.get_uuid())

    def control_vm_snapshot(self, src):
        self.emit("action-take-snapshot", self.hvuri, self.vmuuid)
        self.emit("action-take-snapshot", self.vm.get_connection().get_uri(), self.vm.get_uuid())

    def change_graph_ranges(self, ignore1=None,ignore2=None,ignore3=None,ignore4=None):
        self.cpu_usage_graph.clear()

@@ -219,18 +216,18 @@ class vmmDetails(gobject.GObject):
    def refresh(self):
        print "In details refresh"
        status = self.stats.run_status(self.vmuuid)
        status = self.vm.run_status()
        self.update_widget_states(status)

        self.window.get_widget("overview-status-text").set_text(status)
        self.window.get_widget("overview-status-icon").set_from_pixbuf(self.stats.run_status_icon(self.vmuuid))
        self.window.get_widget("overview-cpu-usage-text").set_text("%d %%" % self.stats.cpu_time_percentage(self.vmuuid))
        self.window.get_widget("overview-memory-usage-text").set_text("%d MB of %d MB" % (self.stats.current_memory(self.vmuuid)/1024, self.stats.host_memory_size()/1024))
        self.window.get_widget("overview-status-icon").set_from_pixbuf(self.vm.run_status_icon())
        self.window.get_widget("overview-cpu-usage-text").set_text("%d %%" % self.vm.cpu_time_percentage())
        self.window.get_widget("overview-memory-usage-text").set_text("%d MB of %d MB" % (self.vm.current_memory()/1024, self.vm.get_connection().host_memory_size()/1024))

        history_len = self.config.get_stats_history_length()
        cpu_vector = self.stats.cpu_time_vector(self.vmuuid)
        cpu_vector = self.vm.cpu_time_vector()
        cpu_vector.reverse()
        cpu_vector_avg = self.stats.cpu_time_moving_avg_vector(self.vmuuid)
        cpu_vector_avg = self.vm.cpu_time_moving_avg_vector()
        cpu_vector_avg.reverse()
        if self.cpu_usage_line == None:
            self.cpu_usage_line = self.cpu_usage_graph.plot(cpu_vector)

@@ -247,7 +244,7 @@ class vmmDetails(gobject.GObject):
            self.cpu_usage_canvas.draw()

        history_len = self.config.get_stats_history_length()
        memory_vector = self.stats.current_memory_vector(self.vmuuid)
        memory_vector = self.vm.current_memory_vector()
        memory_vector.reverse()
        if self.memory_usage_line == None:
            self.memory_usage_line = self.memory_usage_graph.plot(memory_vector)

@@ -263,9 +260,9 @@ class vmmDetails(gobject.GObject):
        history_len = self.config.get_stats_history_length()
        #if self.network_traffic_line == None:
            #self.network_traffic_line = self.network_traffic_graph.plot(self.stats.network_traffic_vector(self.vmuuid))
            #self.network_traffic_line = self.network_traffic_graph.plot(self.vm.network_traffic_vector())
        #else:
            #self.network_traffic_line[0].set_ydata(self.stats.network_traffic_vector(self.vmuuid))
            #self.network_traffic_line[0].set_ydata(self.vm.network_traffic_vector())
        self.network_traffic_graph.set_xlim(0, history_len)
        self.network_traffic_graph.set_ylim(0, 100)
        self.network_traffic_graph.set_yticklabels(["0","","","","","100"])
@@ -0,0 +1,215 @@
import gobject
import libvirt

class vmmDomain(gobject.GObject):
    __gsignals__ = {
        "status-changed": (gobject.SIGNAL_RUN_FIRST,
                           gobject.TYPE_NONE,
                           [str]),
        }

    def __init__(self, config, connection, vm, uuid):
        self.__gobject_init__()
        self.config = config
        self.connection = connection
        self.vm = vm
        self.uuid = uuid
        self.lastStatus = None
        self.record = []

    def get_connection(self):
        return self.connection

    def get_name(self):
        return self.vm.name()

    def get_uuid(self):
        return self.uuid

    def _normalize_status(self, status):
        if self.lastStatus == libvirt.VIR_DOMAIN_NOSTATE:
            return libvirt.VIR_DOMAIN_RUNNING
        elif self.lastStatus == libvirt.VIR_DOMAIN_BLOCKED:
            return libvirt.VIR_DOMAIN_RUNNING
        return status

    def _update_status(self, status=None):
        if status == None:
            info = self.vm.info()
            status = info[0]
        status = self._normalize_status(status)
        if status != self.lastStatus:
            self.lastStatus = status
            self.emit("status-changed", status)

    def tick(self, now):
        hostInfo = self.connection.get_host_info()
        info = self.vm.info()
        expected = self.config.get_stats_history_length()
        current = len(self.record)
        if current > expected:
            del self.record[expected:current]

        prevCpuTime = 0
        prevTimestamp = 0
        if len(self.record) > 0:
            prevTimestamp = self.record[0]["timestamp"]
            prevCpuTime = self.record[0]["cpuTimeAbs"]

        pcentCpuTime = (info[4]-prevCpuTime) * 100 / ((now - prevTimestamp)*1000*1000*1000*self.connection.host_active_processor_count())

        pcentCurrMem = info[2] * 100 / self.connection.host_memory_size()
        pcentMaxMem = info[1] * 100 / self.connection.host_memory_size()

        newStats = { "timestamp": now,
                     "cpuTime": (info[4]-prevCpuTime),
                     "cpuTimeAbs": info[4],
                     "cpuTimePercent": pcentCpuTime,
                     "currMem": info[2],
                     "currMemPercent": pcentCurrMem,
                     "maxMem": info[1],
                     "maxMemPercent": pcentMaxMem,
                     }

        self.record.insert(0, newStats)

        nSamples = 5
        #nSamples = len(self.record)
        if nSamples > len(self.record):
            nSamples = len(self.record)

        startCpuTime = self.record[nSamples-1]["cpuTimeAbs"]
        startTimestamp = self.record[nSamples-1]["timestamp"]

        if startTimestamp == now:
            self.record[0]["cpuTimeMovingAvg"] = self.record[0]["cpuTimeAbs"]
            self.record[0]["cpuTimeMovingAvgPercent"] = 0
        else:
            self.record[0]["cpuTimeMovingAvg"] = (self.record[0]["cpuTimeAbs"]-startCpuTime) / nSamples
            self.record[0]["cpuTimeMovingAvgPercent"] = (self.record[0]["cpuTimeAbs"]-startCpuTime) * 100 / ((now-startTimestamp)*1000*1000*1000 * self.connection.host_active_processor_count())

        self._update_status(info[0])


    def current_memory(self):
        if len(self.record) == 0:
            return 0
        return self.record[0]["currMem"]

    def current_memory_percentage(self):
        if len(self.record) == 0:
            return 0
        return self.record[0]["currMemPercent"]

    def maximum_memory(self):
        if len(self.record) == 0:
            return 0
        return self.record[0]["maxMem"]

    def maximum_memory_percentage(self):
        if len(self.record) == 0:
            return 0
        return self.record[0]["maxMemPercent"]

    def cpu_time(self):
        if len(self.record) == 0:
            return 0
        return self.record[0]["cpuTime"]

    def cpu_time_percentage(self):
        if len(self.record) == 0:
            return 0
        return self.record[0]["cpuTimePercent"]

    def network_traffic(self):
        return 1

    def network_traffic_percentage(self):
        return 1

    def disk_usage(self):
        return 1

    def disk_usage_percentage(self):
        return 1

    def cpu_time_vector(self):
        vector = []
        stats = self.record
        for i in range(self.config.get_stats_history_length()+1):
            if i < len(stats):
                vector.append(stats[i]["cpuTimePercent"])
            else:
                vector.append(0)
        return vector

    def cpu_time_moving_avg_vector(self):
        vector = []
        stats = self.record
        for i in range(self.config.get_stats_history_length()+1):
            if i < len(stats):
                vector.append(stats[i]["cpuTimeMovingAvgPercent"])
            else:
                vector.append(0)
        return vector

    def current_memory_vector(self):
        vector = []
        stats = self.record
        for i in range(self.config.get_stats_history_length()+1):
            if i < len(stats):
                vector.append(stats[i]["currMemPercent"])
            else:
                vector.append(0)
        return vector

    def network_traffic_vector(self):
        vector = []
        stats = self.record
        for i in range(self.config.get_stats_history_length()+1):
            vector.append(1)
        return vector

    def disk_usage_vector(self):
        vector = []
        stats = self.record
        for i in range(self.config.get_stats_history_length()+1):
            vector.append(1)
        return vector

    def shutdown(self):
        self.vm.shutdown()
        self._update_status()

    def suspend(self):
        self.vm.suspend()
        self._update_status()

    def resume(self):
        self.vm.resume()
        self._update_status()

    def status(self):
        return self.lastStatus

    def run_status(self):
        if self.lastStatus == libvirt.VIR_DOMAIN_RUNNING:
            return "Running"
        elif self.lastStatus == libvirt.VIR_DOMAIN_PAUSED:
            return "Paused"
        elif self.lastStatus == libvirt.VIR_DOMAIN_SHUTDOWN:
            return "Shutdown"
        elif self.lastStatus == libvirt.VIR_DOMAIN_SHUTOFF:
            return "Shutoff"
        elif self.lastStatus == libvirt.VIR_DOMAIN_CRASHED:
            return "Crashed"
        else:
            raise "Unknown status code"

    def run_status_icon(self):
        status = self.run_status()
        return self.config.get_vm_status_icon(status.lower())


gobject.type_register(vmmDomain)
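For reference, the cpuTimePercent value stored by tick() above is the delta in the domain's absolute CPU time (info[4], in nanoseconds) over the elapsed wall-clock interval, normalised by the number of active host processors. The same arithmetic restated as a standalone helper (hypothetical name, not part of the commit):

def cpu_percent(cpu_ns_now, cpu_ns_prev, t_now, t_prev, host_cpus):
    # (CPU nanoseconds consumed) * 100 / (elapsed seconds * 1e9 ns/s * host CPU count)
    return (cpu_ns_now - cpu_ns_prev) * 100 / ((t_now - t_prev) * 1000 * 1000 * 1000 * host_cpus)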
@@ -127,10 +127,7 @@ class vmmEngine:
        if not(self.connections[uri]["windowConsole"].has_key(uuid)):
            console = vmmConsole(self.get_config(),
                                 uri,
                                 con.get_stats(),
                                 con.get_vm(uuid),
                                 uuid)
                                 con.get_vm(uuid))
            console.connect("action-show-details", self._do_show_details)
            self.connections[uri]["windowConsole"][uuid] = console
            self.connections[uri]["windowConsole"][uuid].show()

@@ -140,10 +137,7 @@ class vmmEngine:
        if not(self.connections[uri]["windowDetails"].has_key(uuid)):
            details = vmmDetails(self.get_config(),
                                 uri,
                                 con.get_stats(),
                                 con.get_vm(uuid),
                                 uuid)
                                 con.get_vm(uuid))
            details.connect("action-show-console", self._do_show_console)
            self.connections[uri]["windowDetails"][uuid] = details
            self.connections[uri]["windowDetails"][uuid].show()

@@ -153,8 +147,7 @@ class vmmEngine:
        if self.connections[uri]["windowManager"] == None:
            manager = vmmManager(self.get_config(),
                                 con,
                                 uri)
                                 con)
            manager.connect("action-show-console", self._do_show_console)
            manager.connect("action-show-details", self._do_show_details)
            manager.connect("action-show-preferences", self._do_show_preferences)
@@ -22,12 +22,11 @@ class vmmManager(gobject.GObject):
        "action-show-preferences": (gobject.SIGNAL_RUN_FIRST,
                                    gobject.TYPE_NONE, []),
        }
    def __init__(self, config, connection, hvuri):
    def __init__(self, config, connection):
        self.__gobject_init__()
        self.window = gtk.glade.XML(config.get_glade_file(), "vmm-manager")
        self.config = config
        self.connection = connection
        self.hvuri = hvuri
        self.prepare_vmlist()

        self.connection.connect("vm-added", self.vm_added)

@@ -111,7 +110,7 @@ class vmmManager(gobject.GObject):
    def open_connection(self, src=None):
        self.emit("action-show-connect");

    def vm_added(self, connection, uri, vmuuid, name):
    def vm_added(self, connection, uri, vmuuid):
        vmlist = self.window.get_widget("vm-list")
        model = vmlist.get_model()
        print "Added\n"

@@ -121,6 +120,8 @@ class vmmManager(gobject.GObject):
            if vm == vmuuid:
                dup = 1

        name = self.connection.get_vm(vmuuid).get_name()

        if dup != 1:
            model.append([vmuuid, name])

@@ -154,12 +155,10 @@ class vmmManager(gobject.GObject):
        return None

    def show_vm_details(self,ignore):
        print "Show detail"
        self.emit("action-show-details", self.hvuri, self.current_vm())
        self.emit("action-show-details", self.connection.get_uri(), self.current_vm())

    def open_vm_console(self,ignore,ignore2=None,ignore3=None):
        print "Show console"
        self.emit("action-show-console", self.hvuri, self.current_vm())
        self.emit("action-show-console", self.connection.get_uri(), self.current_vm())


    def vm_selected(self, selection):

@@ -283,16 +282,16 @@ class vmmManager(gobject.GObject):
        return self.sort_op(model.get_value(iter1, 0), model.get_value(iter2, 0))

    def vmlist_cpu_usage_sorter(self, model, iter1, iter2):
        return self.sort_op(self.connection.get_stats().cpu_time(model.get_value(iter1, 0)), self.connection.get_stats().cpu_time(model.get_value(iter2, 0)))
        return self.sort_op(self.connection.get_vm(model.get_value(iter1, 0)).cpu_time(), self.connection.get_vm(model.get_value(iter2, 0)).cpu_time())

    def vmlist_memory_usage_sorter(self, model, iter1, iter2):
        return self.sort_op(self.connection.get_stats().current_memory(model.get_value(iter1, 0)), self.connection.get_stats().current_memory(model.get_value(iter2, 0)))
        return self.sort_op(self.connection.get_vm(model.get_value(iter1, 0)).current_memory(), self.connection.get_vm(model.get_value(iter2, 0)).current_memory())

    def vmlist_disk_usage_sorter(self, model, iter1, iter2):
        return self.sort_op(self.connection.get_stats().disk_usage(model.get_value(iter1, 0)), self.connection.get_stats().disk_usage(model.get_value(iter2, 0)))
        return self.sort_op(self.connection.get_vm(model.get_value(iter1, 0)).disk_usage(), self.connection.get_vm(model.get_value(iter2, 0)).disk_usage())

    def vmlist_network_usage_sorter(self, model, iter1, iter2):
        return self.sort_op(self.connection.get_stats().network_traffic(model.get_value(iter1, 0)), self.connection.get_stats().network_traffic(model.get_value(iter2, 0)))
        return self.sort_op(self.connection.get_vm(model.get_value(iter1, 0)).network_traffic(), self.connection.get_vm(model.get_value(iter2, 0)).network_traffic())

    def toggle_status_visible_conf(self, menu):
        self.config.set_vmlist_status_visible(menu.get_active())

@@ -341,55 +340,55 @@ class vmmManager(gobject.GObject):
    def status_text(self, column, cell, model, iter, data):
        name = model.get_value(iter, 0)
        cell.set_property('text', self.connection.get_stats().run_status(name))
        uuid = model.get_value(iter, 0)
        cell.set_property('text', self.connection.get_vm(uuid).run_status())

    def status_icon(self, column, cell, model, iter, data):
        name = model.get_value(iter, 0)
        cell.set_property('pixbuf', self.connection.get_stats().run_status_icon(name))
        uuid = model.get_value(iter, 0)
        cell.set_property('pixbuf', self.connection.get_vm(uuid).run_status_icon())

    def cpu_usage_text(self, column, cell, model, iter, data):
        name = model.get_value(iter, 0)
        cell.set_property('text', "%2.2f %%" % self.connection.get_stats().cpu_time_percentage(name))
        uuid = model.get_value(iter, 0)
        cell.set_property('text', "%2.2f %%" % self.connection.get_vm(uuid).cpu_time_percentage())

    def cpu_usage_img(self, column, cell, model, iter, data):
        name = model.get_value(iter, 0)
        uuid = model.get_value(iter, 0)
        cell.set_property('text', '')
        cell.set_property('value', self.connection.get_stats().cpu_time_percentage(name))
        cell.set_property('value', self.connection.get_vm(uuid).cpu_time_percentage())

    def memory_usage_text(self, column, cell, model, iter, data):
        name = model.get_value(iter, 0)
        current = self.connection.get_stats().current_memory(name)
        currentPercent = self.connection.get_stats().current_memory_percentage(name)
        uuid = model.get_value(iter, 0)
        current = self.connection.get_vm(uuid).current_memory()
        currentPercent = self.connection.get_vm(uuid).current_memory_percentage()
        cell.set_property('text', "%s (%2.2f%%)" % (self.pretty_mem(current) , currentPercent))

    def memory_usage_img(self, column, cell, model, iter, data):
        name = model.get_value(iter, 0)
        currentPercent = self.connection.get_stats().current_memory_percentage(name)
        uuid = model.get_value(iter, 0)
        currentPercent = self.connection.get_vm(uuid).current_memory_percentage()
        cell.set_property('text', '')
        cell.set_property('value', currentPercent)

    def disk_usage_text(self, column, cell, model, iter, data):
        name = model.get_value(iter, 0)
        current = self.connection.get_stats().disk_usage(name)
        currentPercent = self.connection.get_stats().disk_usage_percentage(name)
        uuid = model.get_value(iter, 0)
        current = self.connection.get_vm(uuid).disk_usage()
        currentPercent = self.connection.get_vm(uuid).disk_usage_percentage()
        cell.set_property('text', "%s (%2.2f%%)" % (self.pretty_mem(current) , currentPercent))

    def disk_usage_img(self, column, cell, model, iter, data):
        name = model.get_value(iter, 0)
        currentPercent = self.connection.get_stats().disk_usage_percentage(name)
        uuid = model.get_value(iter, 0)
        currentPercent = self.connection.get_vm(uuid).disk_usage_percentage()
        cell.set_property('text', '')
        cell.set_property('value', currentPercent)

    def network_traffic_text(self, column, cell, model, iter, data):
        name = model.get_value(iter, 0)
        current = self.connection.get_stats().network_traffic(name)
        currentPercent = self.connection.get_stats().network_traffic_percentage(name)
        uuid = model.get_value(iter, 0)
        current = self.connection.get_vm(uuid).network_traffic()
        currentPercent = self.connection.get_vm(uuid).network_traffic_percentage()
        cell.set_property('text', "%s (%2.2f%%)" % (self.pretty_mem(current) , currentPercent))

    def network_traffic_img(self, column, cell, model, iter, data):
        name = model.get_value(iter, 0)
        currentPercent = self.connection.get_stats().network_traffic_percentage(name)
        uuid = model.get_value(iter, 0)
        currentPercent = self.connection.get_vm(uuid).network_traffic_percentage()
        cell.set_property('text', '')
        cell.set_property('value', currentPercent)
@@ -1,192 +0,0 @@
import libvirt
import gtk.gdk
from time import time

class vmmStats:
    def __init__(self, config, connection):
        self.config = config
        self.connection = connection
        self.record = {}

        self.hostinfo = self.connection.get_host_info()
        self.connection.connect("vm-added", self._vm_added)
        self.connection.connect("vm-removed", self._vm_removed)


    def _vm_added(self, connection, uri, vmuuid, name):
        self.record[vmuuid] = []

    def _vm_removed(self, connection, uri, vmuuid):
        del self.record[vmuuid]

    def update(self, vmuuid, vm):
        now = time()

        self.hostinfo = self.connection.get_host_info()
        info = vm.info()

        expected = self.config.get_stats_history_length()
        current = len(self.record[vmuuid])
        if current > expected:
            del self.record[vmuuid][expected:current]

        prevCpuTime = 0
        prevTimestamp = 0
        if len(self.record[vmuuid]) > 0:
            prevTimestamp = self.record[vmuuid][0]["timestamp"]
            prevCpuTime = self.record[vmuuid][0]["cpuTimeAbs"]

        pcentCpuTime = (info[4]-prevCpuTime) * 100 / ((now - prevTimestamp)*1000*1000*1000*self.host_active_processor_count())

        pcentCurrMem = info[2] * 100 / self.host_memory_size()
        pcentMaxMem = info[1] * 100 / self.host_memory_size()

        newStats = { "timestamp": now,
                     "status": info[0],
                     "cpuTime": (info[4]-prevCpuTime),
                     "cpuTimeAbs": info[4],
                     "cpuTimePercent": pcentCpuTime,
                     "currMem": info[2],
                     "currMemPercent": pcentCurrMem,
                     "maxMem": info[1],
                     "maxMemPercent": pcentMaxMem,
                     }

        self.record[vmuuid].insert(0, newStats)

        nSamples = 5
        #nSamples = len(self.record[vmuuid])
        if nSamples > len(self.record[vmuuid]):
            nSamples = len(self.record[vmuuid])

        startCpuTime = self.record[vmuuid][nSamples-1]["cpuTimeAbs"]
        startTimestamp = self.record[vmuuid][nSamples-1]["timestamp"]

        if startTimestamp == now:
            self.record[vmuuid][0]["cpuTimeMovingAvg"] = self.record[vmuuid][0]["cpuTimeAbs"]
            self.record[vmuuid][0]["cpuTimeMovingAvgPercent"] = 0
        else:
            self.record[vmuuid][0]["cpuTimeMovingAvg"] = (self.record[vmuuid][0]["cpuTimeAbs"]-startCpuTime) / nSamples
            self.record[vmuuid][0]["cpuTimeMovingAvgPercent"] = (self.record[vmuuid][0]["cpuTimeAbs"]-startCpuTime) * 100 / ((now-startTimestamp)*1000*1000*1000 * self.host_active_processor_count())


    def current_memory(self, vmuuid):
        if len(self.record[vmuuid]) == 0:
            return 0
        return self.record[vmuuid][0]["currMem"]

    def current_memory_percentage(self, vmuuid):
        if len(self.record[vmuuid]) == 0:
            return 0
        return self.record[vmuuid][0]["currMemPercent"]

    def maximum_memory(self, vmuuid):
        if len(self.record[vmuuid]) == 0:
            return 0
        return self.record[vmuuid][0]["maxMem"]

    def maximum_memory_percentage(self, vmuuid):
        if len(self.record[vmuuid]) == 0:
            return 0
        return self.record[vmuuid][0]["maxMemPercent"]

    def cpu_time(self, vmuuid):
        if len(self.record[vmuuid]) == 0:
            return 0
        return self.record[vmuuid][0]["cpuTime"]

    def cpu_time_percentage(self, vmuuid):
        if len(self.record[vmuuid]) == 0:
            return 0
        return self.record[vmuuid][0]["cpuTimePercent"]

    def network_traffic(self, vmuuid):
        return 1

    def network_traffic_percentage(self, vmuuid):
        return 1

    def disk_usage(self, vmuuid):
        return 1

    def disk_usage_percentage(self, vmuuid):
        return 1

    def cpu_time_vector(self, vmuuid):
        vector = []
        stats = self.record[vmuuid]
        for i in range(self.config.get_stats_history_length()+1):
            if i < len(stats):
                vector.append(stats[i]["cpuTimePercent"])
            else:
                vector.append(0)
        return vector

    def cpu_time_moving_avg_vector(self, vmuuid):
        vector = []
        stats = self.record[vmuuid]
        for i in range(self.config.get_stats_history_length()+1):
            if i < len(stats):
                vector.append(stats[i]["cpuTimeMovingAvgPercent"])
            else:
                vector.append(0)
        return vector

    def current_memory_vector(self, vmuuid):
        vector = []
        stats = self.record[vmuuid]
        for i in range(self.config.get_stats_history_length()+1):
            if i < len(stats):
                vector.append(stats[i]["currMemPercent"])
            else:
                vector.append(0)
        return vector

    def network_traffic_vector(self, vmuuid):
        vector = []
        stats = self.record[vmuuid]
        for i in range(self.config.get_stats_history_length()+1):
            vector.append(1)
        return vector

    def disk_usage_vector(self, vmuuid):
        vector = []
        stats = self.record[vmuuid]
        for i in range(self.config.get_stats_history_length()+1):
            vector.append(1)
        return vector

    def host_memory_size(self):
        return self.hostinfo[1]*1024

    def host_active_processor_count(self):
        return self.hostinfo[2]

    def host_maximum_processor_count(self):
        return self.hostinfo[4] * self.hostinfo[5] * self.hostinfo[6] * self.hostinfo[7]

    def run_status(self, vmuuid):
        if len(self.record[vmuuid]) == 0:
            return "Shutoff"
        status = self.record[vmuuid][0]["status"]
        if status == libvirt.VIR_DOMAIN_NOSTATE:
            return "Idle"
        elif status == libvirt.VIR_DOMAIN_RUNNING:
            return "Running"
        elif status == libvirt.VIR_DOMAIN_BLOCKED:
            return "Blocked"
        elif status == libvirt.VIR_DOMAIN_PAUSED:
            return "Paused"
        elif status == libvirt.VIR_DOMAIN_SHUTDOWN:
            return "Shutdown"
        elif status == libvirt.VIR_DOMAIN_SHUTOFF:
            return "Shutoff"
        elif status == libvirt.VIR_DOMAIN_CRASHED:
            return "Crashed"
        else:
            raise "Unknown status code"

    def run_status_icon(self, name):
        status = self.run_status(name)
        return self.config.get_vm_status_icon(status.lower())