import re
import threading
import time

import libvirt

import cm.utils.monia
from cm.models.node import Node
from common.states import node_states
from cm.utils import log
from cm import settings
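# Only nodes in the 'ok' state are monitored; their address, state and libvirt
# connection string are collected into a plain list of dicts.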
nlist = [{'address': node.dict['address'],
          'state': node.dict['state'],
          'conn_string': node.conn_string}
         for node in Node.objects.filter(state__exact=node_states['ok'])]
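# Starting the monitoring: bail out when monitoring is disabled or there are no
# nodes to monitor, then check the already running threads for an initiator.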
if not settings.MONITOR_ENABLE:
    return 'Monitoring disabled'
# ...
return 'No nodes to monitor'
e = threading.enumerate()
# ...
if i.name == "initiator":
if not settings.MONITOR_ENABLE:
    # ...
    return 'Monitoring disabled'
# ...
return 'No nodes to monitor'

e = threading.enumerate()
# ...
if t.name == "initiator":
    # ...
    log.info(0, 'Monitoring nodes list updated')
    r.append('node list updated')

if not [t for t in e if t.name == 'initiator']:
    # ...
    r.append('initiator started')
    log.info(0, 'Monitoring thread MonitorInitiator started')

if not [t for t in e if t.name == 'cleaner']:
    # ...
    r.append('cleaner started')
    log.info(0, 'Monitoring thread CleanerThread started')
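# Stopping the monitoring: every running thread is listed by name and the
# initiator / cleaner threads are asked to terminate.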
t = threading.active_count()
e = threading.enumerate()
# ...
th.append(i.name)
if i.name == "initiator":
    # ...
if i.name == "cleaner":
    # ...
log.info(0, 'Monitoring threads stopped')
return [str(t), str(th)]
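# Thread initialisation: the RRD and backup directories are created when they
# do not exist yet.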
threading.Thread.__init__(self)
# ...
if not cm.utils.monia.os.path.exists(settings.PATH_TO_RRD):
    cm.utils.monia.os.makedirs(settings.PATH_TO_RRD)
if not cm.utils.monia.os.path.exists(settings.BACKUP_PATH):
    cm.utils.monia.os.makedirs(settings.BACKUP_PATH)
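# MonitorInitiator main loop: the node list is refreshed, a per-node monitoring
# thread is started for every node whose address is not already among the
# running thread names, and the duration of each pass is logged.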
log.info(0, 'updating nodes list')
# ...
threads = threading.enumerate()
if one['address'] not in [i.name for i in threads]:
    # ...
log.error(0, 'Monitoring error %s: %s' % (one['address'], e))
st2 = time.time() - st1
# ...
log.info(0, "mon_time %f %s threads: %d" % (st2, one['address'], len(threads)))
log.info(0, "MonitorInitiator stopped")

# ...
log.info(0, "stopping MonitorInitiator... ")
threading.Thread.__init__(self)

# ...
log.info(0, "stopping CleanerThread... ")

# ...
time.sleep(settings.CLEANING_PERIOD)
# ...
if time.time() - settings.TIME_TO_REMOVE > rrds[vm][1]:
    # ...
log.info(0, "CleanerThread stopped")
# ...
log.exception(0, 'CleanerThread: %s' % (e))
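# MonitorThread: one instance per monitored node; it connects to the node's
# libvirt URI (conn_string) and arms a watchdog timer that kills the thread
# after TIMEOUT seconds.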
threading.Thread.__init__(self)
self.addr = data['conn_string']
# ...
self.t = threading.Timer(settings.TIMEOUT, self.kill)
self.t.name = 'timer-%s' % (self.name)
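# Host-level data: a read-only libvirt connection is opened, the node's total
# CPU count and memory are read from getInfo(), and the IDs of the running
# domains are listed.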
self.c = libvirt.openReadOnly(self.addr)
total_cpu = self.c.getInfo()[2]
total_memory = self.c.getInfo()[1]
# ...
log.error(0, 'libvirt getting info: %s' % (e))

# ...
domains = self.c.listDomainsID()
# ...
log.exception(0, 'libvirt listDomainsID: %s' % (str(e)))
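# Per-domain loop: the memory used by every running domain is added up, and the
# first disk and network device found in the domain XML are queried for block /
# interface statistics; zeroed statistics are substituted when a device cannot
# be read.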
for domain_id in domains:
    # ...
    hostname = self.c.getHostname()
    dom = self.c.lookupByID(domain_id)
    # ...
    used_memory += info[1]
    # ...
    devs = re.search('<devices>(.*?)</devices>', self.xml_data, re.S)
    # ...
    hdd = re.findall('<disk.*?<target dev=\'(.*?)\'.*?</disk>', devs.group(), re.S)
    hdd_stat = dom.blockStats(hdd[0])
    # ...
    hdd_stat = [0] * 13
    # ...
    net = re.findall('<interface.*?<target dev=\'(.*?)\'.*?</interface>', devs.group(), re.S)
    net_stat = dom.interfaceStats(net[0])
    # ...
    net_stat = [0] * 13
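# Each domain contributes a dict with its name, CPU count and raw disk / network
# counters to the vms list.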
vms.append({'name': dom.name(),
            # ...
            'cpu_count': info[3],
            # ...
            'rd_req': hdd_stat[0],
            'rd_bytes': hdd_stat[1],
            'wr_req': hdd_stat[2],
            'wr_bytes': hdd_stat[3],
            'rx_bytes': net_stat[0],
            'rx_packets': net_stat[1],
            'tx_bytes': net_stat[4],
            'tx_packets': net_stat[5]
            # ...
            })
# ...
log.exception(0, 'libvirt lookup (%s id=%d): %s' % (hostname, domain_id, str(e)))
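# Wrap-up: the libvirt connection is closed (close errors are only logged), the
# collected numbers are stored on the thread as lv_data, and the watchdog's
# kill() path logs when the thread is forcibly terminated.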
log.error(0, 'libvirt close error %s' % (str(g)))
self.lv_data = [used_cpu, used_memory, total_cpu, total_memory, vms]

# ...
log.info(0, 'killing MonitorThread %s...' % (self.name))
# ...
log.info(0, 'monitorThread %s error...' % (self.name))
log.info(0, 'MonitorThread killed')