dave 2017-05-15 17:29:20 -07:00
parent ec819b2cad
commit aafe393b62
8 changed files with 143 additions and 119 deletions

View File

@@ -1,4 +1,5 @@
-from subprocess import Popen,PIPE
+from subprocess import Popen, PIPE
+
 
 def ipaddr():
     """
@@ -6,6 +7,7 @@ def ipaddr():
     """
     return Popen(["hostname", "--all-ip-addresses"], stdout=PIPE).communicate()[0].decode().split(" ")[0].strip()
+
 
 def hostname():
     """
     Return system hostname from hostname -f
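
Note on the helpers above: `hostname --all-ip-addresses` prints the host's addresses space-separated, so taking the first token yields the primary address. A minimal usage sketch (the import path is assumed from the daemon's `from pymonitor.builtins import sysinfo` below):

    from pymonitor.builtins import sysinfo  # path assumed from the daemon's import

    print(sysinfo.ipaddr())    # first address printed by `hostname --all-ip-addresses`
    print(sysinfo.hostname())  # FQDN reported by `hostname -f`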

View File

@@ -2,7 +2,7 @@
 from threading import Thread
 from elasticsearch import Elasticsearch
-from time import time,sleep
+from time import time, sleep
 from pymonitor.builtins import sysinfo
 import traceback
 import datetime
@@ -11,6 +11,7 @@ import json
 import sys
 import os
 
+
 class MonitorDaemon(Thread):
     def __init__(self, config):
         Thread.__init__(self)
@@ -24,7 +25,7 @@ class MonitorDaemon(Thread):
         """
         logger = logging.getLogger("monitordaemon")
-        checkerPath = os.path.dirname(os.path.realpath(__file__))+"/monitors/"
+        checkerPath = os.path.dirname(os.path.realpath(__file__)) + "/monitors/"
         sys.path.append(checkerPath)
         logger.debug("path %s" % checkerPath)
@@ -98,7 +99,7 @@ class Backend:
         if not self.es.indices.exists(index=indexName):
             mapping = {
                 "mappings": {
-                    "_default_":{
+                    "_default_": {
                         "properties": {
                             "ipaddr": {
                                 "type": "ip"
@@ -107,8 +108,8 @@ class Backend:
                                 "type": "string"
                             },
                             "hostname_raw": {
-                                "type" : "string",
-                                "index" : "not_analyzed"
+                                "type": "string",
+                                "index": "not_analyzed"
                             }
                         }
                     }
@@ -116,7 +117,7 @@ class Backend:
             }
             mapping["mappings"].update(self.mapping)
             self.logger.debug("creating index %s with mapping %s" % (indexName, json.dumps(mapping, indent=4)))
-            self.es.indices.create(index=indexName, ignore=400, body=mapping)# ignore already exists error
+            self.es.indices.create(index=indexName, ignore=400, body=mapping)  # ignore already exists error
         self.current_index = indexName
 
     def check_before_entry(self):
@@ -150,7 +151,7 @@ class MonitorThread(Thread):
         Thread.__init__(self)
         self.config = config
         self.backend = backend
-        self.logger = logging.getLogger("monitordaemon.monitorthread.%s"%self.config["type"])
+        self.logger = logging.getLogger("monitordaemon.monitorthread.%s" % self.config["type"])
         self.logger.debug("initing worker thread with config %s" % self.config)
 
         self.logger.debug("importing %s" % self.config["type"])
@@ -158,10 +159,10 @@ class MonitorThread(Thread):
         self.logger.debug("checker func %s" % self.checker_func)
 
         self.mapping = {}
-        #try:
+        # try:
         self.mapping.update(__import__(self.config["type"]).mapping)
-        #except:
-        #    pass
+        # except:
+        #     pass
         self.logger.debug("mapping %s" % self.mapping)
 
         self.alive = True
@@ -200,23 +201,26 @@ class MonitorThread(Thread):
         Tell thread to exit
         """
         self.logger.debug("cancelling scheduler")
-        self.alive=False
+        self.alive = False
 
 
 def run_cli():
     from optparse import OptionParser
     parser = OptionParser()
-    parser.add_option("-c", "--config", action="store", type="string", dest="config", help="Path to config file")
-    parser.add_option("-l", "--logging", action="store", dest="logging", help="Logging level", default="INFO", choices=['WARN', 'CRITICAL', 'WARNING', 'INFO', 'ERROR', 'DEBUG'])
+    parser.add_option("-c", "--config", action="store", type="string", dest="config", help="Path to config file")
+    parser.add_option("-l", "--logging", action="store", dest="logging", help="Logging level", default="INFO",
+                      choices=['WARN', 'CRITICAL', 'WARNING', 'INFO', 'ERROR', 'DEBUG'])
     (options, args) = parser.parse_args()
-    logging.basicConfig(level=getattr(logging, options.logging), format="%(asctime)-15s %(levelname)-8s %(name)s@%(filename)s:%(lineno)d %(message)s")
+    logging.basicConfig(level=getattr(logging, options.logging),
+                        format="%(asctime)-15s %(levelname)-8s %(name)s@%(filename)s:%(lineno)d %(message)s")
     logger = logging.getLogger("init")
     logger.debug("options: %s" % options)
 
-    if options.config == None:
+    if options.config is None:
         parser.print_help()
         sys.exit()
@@ -239,5 +243,6 @@ def run_cli():
         print("")
         daemon.shutdown()
 
+
 if __name__ == '__main__':
     run_cli()
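
Note on the index bootstrap above: it follows the stock elasticsearch-py pattern of `indices.exists` followed by `indices.create` with `ignore=400`, so an "index already exists" response from a racing writer is swallowed rather than raised. A condensed sketch of that flow (the index name and mapping here are illustrative, not the daemon's actual values):

    from elasticsearch import Elasticsearch

    es = Elasticsearch()
    index_name = "monitor-example"  # illustrative; the daemon derives its own name
    mapping = {"mappings": {"_default_": {"properties": {"ipaddr": {"type": "ip"}}}}}

    if not es.indices.exists(index=index_name):
        # ignore=400 suppresses the "already exists" error if another writer wins the race
        es.indices.create(index=index_name, ignore=400, body=mapping)

The `_default_` mapping acts as a template for every document type in the index, while `mapping["mappings"].update(self.mapping)` merges in each monitor's own type mapping alongside it.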

View File

@@ -1,10 +1,11 @@
 from psutil import disk_io_counters
 
+
 def diskio(disks=[]):
     with open("/proc/uptime", "r") as f:
         uptime = int(float(f.read().split(" ")[0]))
     diskinfo = disk_io_counters(perdisk=True)
-    for disk,stats in diskinfo.items():
+    for disk, stats in diskinfo.items():
         if disks and disk not in disks:
             continue
         if stats.read_count == 0 and disk not in disks:
@@ -12,19 +13,20 @@ def diskio(disks=[]):
         stats = {
             "disk": disk,
             "disk_raw": disk,
-            "reads_ps": round(stats.read_count/uptime, 2),
-            "writes_ps":round(stats.write_count/uptime, 2),
-            "read_ps": round(stats.read_bytes/uptime, 2),
-            "write_ps": round(stats.write_bytes/uptime, 2),
-            "reads": stats.read_count,
-            "writes": stats.write_count,
-            "read": stats.read_bytes,
-            "written": stats.write_bytes,
-            "read_size":round(stats.read_bytes/stats.read_count, 2) if stats.read_count > 0 else 0,
-            "write_size":round(stats.write_bytes/stats.write_count, 2) if stats.write_count > 0 else 0
+            "reads_ps": round(stats.read_count / uptime, 2),
+            "writes_ps": round(stats.write_count / uptime, 2),
+            "read_ps": round(stats.read_bytes / uptime, 2),
+            "write_ps": round(stats.write_bytes / uptime, 2),
+            "reads": stats.read_count,
+            "writes": stats.write_count,
+            "read": stats.read_bytes,
+            "written": stats.write_bytes,
+            "read_size": round(stats.read_bytes / stats.read_count, 2) if stats.read_count > 0 else 0,
+            "write_size": round(stats.write_bytes / stats.write_count, 2) if stats.write_count > 0 else 0
         }
         yield(stats)
 
+
 mapping = {
     "diskio": {
         "properties": {
@@ -33,7 +35,7 @@ mapping = {
             },
             "disk_raw": {
                 "type": "string",
-                "index" : "not_analyzed"
+                "index": "not_analyzed"
             },
             "reads_ps": {
                 "type": "double"
@@ -72,4 +74,3 @@ mapping = {
 if __name__ == '__main__':
     for item in diskio():
         print(item)
-
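
Note on the rate fields: psutil's `disk_io_counters` values are cumulative since boot, so dividing by uptime yields a lifetime average per second rather than an instantaneous rate. A stripped-down version of the same calculation:

    from psutil import disk_io_counters

    # uptime in whole seconds, read the same way the monitor does
    with open("/proc/uptime", "r") as f:
        uptime = int(float(f.read().split(" ")[0]))

    for disk, stats in disk_io_counters(perdisk=True).items():
        print(disk, round(stats.read_count / uptime, 2), "reads/s averaged since boot")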

View File

@@ -1,5 +1,6 @@
 from os import statvfs
 
+
 def diskspace(filesystems=[]):
     for fs in filesystems:
         stats = statvfs(fs)
@@ -8,21 +9,22 @@ def diskspace(filesystems=[]):
             "fs": fs,
             "fs_raw": fs,
             "diskfree": stats.f_bsize * stats.f_bavail,
-            "diskused": (stats.f_blocks-stats.f_bavail) * stats.f_bsize,
+            "diskused": (stats.f_blocks - stats.f_bavail) * stats.f_bsize,
             "disksize": stats.f_bsize * stats.f_blocks,
             "inodesmax": stats.f_files,
             "inodesfree": stats.f_favail,
             "inodesused": stats.f_files - stats.f_favail
         }
-        info["diskpctused"] = round(info["diskused"]/info["disksize"] if info["disksize"] > 0 else 0, 2)
-        info["diskpctfree"] = round(info["diskfree"]/info["disksize"] if info["disksize"] > 0 else 0, 2)
+        info["diskpctused"] = round(info["diskused"] / info["disksize"] if info["disksize"] > 0 else 0, 2)
+        info["diskpctfree"] = round(info["diskfree"] / info["disksize"] if info["disksize"] > 0 else 0, 2)
         info["inodesused_pct"] = round(info["inodesused"] / info["inodesmax"] if info["inodesmax"] > 0 else 0, 2)
         info["inodesfree_pct"] = round(info["inodesfree"] / info["inodesmax"] if info["inodesmax"] > 0 else 0, 2)
         yield info
 
+
 mapping = {
     "diskspace": {
         "properties": {
@@ -45,8 +47,8 @@ mapping = {
             "type": "string"
         },
         "fs_raw": {
-            "type" : "string",
-            "index" : "not_analyzed"
+            "type": "string",
+            "index": "not_analyzed"
         },
         "inodesmax": {
             "type": "long"
@@ -67,6 +69,7 @@ mapping = {
     }
 }
 
+
 if __name__ == '__main__':
     for item in diskspace(filesystems=["/", "/dev"]):
         print(item)
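
Note on the statvfs arithmetic: free bytes are block size times the blocks available to unprivileged processes (`f_bsize * f_bavail`), and every ratio is guarded against a zero-sized filesystem. A minimal check for a single mount point:

    from os import statvfs

    stats = statvfs("/")
    disksize = stats.f_bsize * stats.f_blocks
    diskused = (stats.f_blocks - stats.f_bavail) * stats.f_bsize
    print(round(diskused / disksize if disksize > 0 else 0, 2))  # fraction used, e.g. 0.42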

View File

@@ -4,9 +4,10 @@ def load():
     yield {
         "load_1m": m1,
         "load_5m": m5,
-        "load_15m":m15
+        "load_15m": m15
     }
 
+
 mapping = {
     "load": {
         "properties": {
@@ -23,6 +24,7 @@ mapping = {
     }
 }
 
+
 if __name__ == '__main__':
     for item in load():
         print(item)
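
Note: the hunk does not show where m1, m5 and m15 come from; presumably they are the 1/5/15-minute averages from `os.getloadavg()` or /proc/loadavg. Assuming `os.getloadavg()`, the generator's output can be reproduced with:

    from os import getloadavg

    m1, m5, m15 = getloadavg()  # 1-, 5- and 15-minute load averages
    print({"load_1m": m1, "load_5m": m5, "load_15m": m15})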

View File

@@ -3,14 +3,19 @@ import re
 memline_pattern = re.compile(r'^(?P<key>[^\\:]+)\:\s+(?P<value>[0-9]+)(\s(?P<unit>[a-zA-Z]+))?')
 
 computed_fields = {
-    "mempctused": lambda items: round((items["memtotal"]-items["memfree"])/items["memtotal"], 2),
-    "mempctfree": lambda items: 1-round((items["memtotal"]-items["memfree"])/items["memtotal"], 2),
-    "mempctused_nocache": lambda items: round((items["memtotal"]-items["memfree"]-items["cached"])/items["memtotal"], 2),
-    "mempctfree_nocache": lambda items: 1-round((items["memtotal"]-items["memfree"]-items["cached"])/items["memtotal"], 2),
-    "swappctused": lambda items: round((items["swaptotal"]-items["swapfree"])/items["swaptotal"] if items["swaptotal"] > 0 else 0, 2),
-    "swappctfree": lambda items: 1-round((items["swaptotal"]-items["swapfree"])/items["swaptotal"] if items["swaptotal"] > 0 else 0, 2)
+    "mempctused": lambda items: round((items["memtotal"] - items["memfree"]) / items["memtotal"], 2),
+    "mempctfree": lambda items: 1 - round((items["memtotal"] - items["memfree"]) / items["memtotal"], 2),
+    "mempctused_nocache": lambda items: round((items["memtotal"] - items["memfree"] - items["cached"]) /
+                                              items["memtotal"], 2),
+    "mempctfree_nocache": lambda items: 1 - round((items["memtotal"] - items["memfree"] - items["cached"]) /
+                                                  items["memtotal"], 2),
+    "swappctused": lambda items: round((items["swaptotal"] - items["swapfree"]) /
+                                       items["swaptotal"] if items["swaptotal"] > 0 else 0, 2),
+    "swappctfree": lambda items: 1 - round((items["swaptotal"] - items["swapfree"]) /
+                                           items["swaptotal"] if items["swaptotal"] > 0 else 0, 2)
 }
 
+
 def meminfo(whitelist=[]):
     if not whitelist:
         whitelist = ["swaptotal", "swapfree", "swapcached",
@@ -27,11 +32,11 @@ def meminfo(whitelist=[]):
             if unit:
                 if unit == "kB":
-                    value*=1024
+                    value *= 1024
                 else:
                     raise Exception("Unknown unit")
 
-            name = ''.join(c for c in matches.group("key").lower() if 96<ord(c)<123)
+            name = ''.join(c for c in matches.group("key").lower() if 96 < ord(c) < 123)
             if name in whitelist:
                 result[name] = value
@@ -40,28 +45,30 @@ def meminfo(whitelist=[]):
     yield result
 
+
 mapping = {
     "meminfo": {
         "properties": {
-            "swaptotal": { "type": "long" },
-            "swapfree": { "type": "long" },
-            "swapcached": { "type": "long" },
-            "memtotal": { "type": "long" },
-            "memfree": { "type": "long" },
-            "memavailable": { "type": "long" },
-            "cached": { "type": "long" },
-            "active": { "type": "long" },
-            "inactive": { "type": "long" },
-            "mempctused": { "type": "double" },
-            "mempctfree": { "type": "double" },
-            "mempctused_nocache": { "type": "double" },
-            "mempctfree_nocache": { "type": "double" },
-            "swappctused": { "type": "double" },
-            "swappctfree": { "type": "double" }
+            "swaptotal": {"type": "long"},
+            "swapfree": {"type": "long"},
+            "swapcached": {"type": "long"},
+            "memtotal": {"type": "long"},
+            "memfree": {"type": "long"},
+            "memavailable": {"type": "long"},
+            "cached": {"type": "long"},
+            "active": {"type": "long"},
+            "inactive": {"type": "long"},
+            "mempctused": {"type": "double"},
+            "mempctfree": {"type": "double"},
+            "mempctused_nocache": {"type": "double"},
+            "mempctfree_nocache": {"type": "double"},
+            "swappctused": {"type": "double"},
+            "swappctfree": {"type": "double"}
         }
     }
 }
 
+
 if __name__ == '__main__':
     for item in meminfo():
         print(item)
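
Note on computed_fields: every lambda has the same shape, used = total - free expressed as a fraction of total, with the swap variants guarded against a zero swaptotal. For example, with memtotal of 8 GiB and memfree of 2 GiB, mempctused comes out to 0.75:

    mempctused = lambda items: round((items["memtotal"] - items["memfree"]) / items["memtotal"], 2)
    print(mempctused({"memtotal": 8 * 1024**3, "memfree": 2 * 1024**3}))  # 0.75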

View File

@@ -1,11 +1,12 @@
 from glob import glob
 import re
 
 KTHREADD_PID = 2
 PAT_REMOVE_PROC_SPACES = re.compile(r'(\([^\)]+\))')
 
-def procs():
+
+def procs():
     # Get uid->name mapping
     users = {}
     with open('/etc/passwd', 'r') as passwd:
@@ -13,8 +14,8 @@ def procs():
             line = passwd.readline()
             if not line:
                 break
-            uname,x,uid,gid,opts,home,shell = line.split(":")
-            users[int(uid)]=uname
+            uname, _, uid, gid, opts, home, shell = line.split(":")
+            users[int(uid)] = uname
 
     # Get gid->groupname mapping
     groups = {}
@@ -23,8 +24,8 @@ def procs():
             line = group.readline()
             if not line:
                 break
-            gname,x,gid,y = line.split(":")
-            groups[int(gid)]=gname
+            gname, _, gid, y = line.split(":")
+            groups[int(gid)] = gname
 
     num_procs = 0
     num_threads = 0
@@ -39,20 +40,20 @@ def procs():
             stat = PAT_REMOVE_PROC_SPACES.sub("PROCNAME", stat)
             stat = stat.split(" ")
 
            proc_id = int(stat[0])
             proc_parent = int(stat[3])
 
             if proc_parent == KTHREADD_PID:
-                num_kthreads+=1
+                num_kthreads += 1
             else:
-                num_procs+=1
+                num_procs += 1
                 num_threads += int(stat[19])
         except Exception as e:
             print(e)
             print("Failed to open %s" % f)
 
-    yield {"procs": num_procs, "threads":num_threads, "kthreads": num_kthreads}
+    yield {"procs": num_procs, "threads": num_threads, "kthreads": num_kthreads}
 
 
 mapping = {
     "procs": {
@@ -70,6 +71,7 @@ mapping = {
     }
 }
 
+
 if __name__ == '__main__':
     for item in procs():
         print(item)
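
Note on PAT_REMOVE_PROC_SPACES: in /proc/<pid>/stat the second field is the process name in parentheses and may itself contain spaces, so it is substituted away before splitting; only then do the fixed indexes line up (stat[3] is the parent PID, stat[19] the thread count). A small illustration with a made-up stat line:

    import re

    PAT_REMOVE_PROC_SPACES = re.compile(r'(\([^\)]+\))')  # same pattern as above
    stat = "1234 (Web Content) S 2 1 0"                   # illustrative, not a full stat line
    fields = PAT_REMOVE_PROC_SPACES.sub("PROCNAME", stat).split(" ")
    print(fields[3])  # "2" -> parent is kthreadd, so this would count as a kernel thread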

View File

@@ -1,6 +1,7 @@
 def uptime():
     with open("/proc/uptime", "r") as f:
-        yield {"uptime":int(float(f.read().split(" ")[0]))}
+        yield {"uptime": int(float(f.read().split(" ")[0]))}
 
+
 mapping = {
     "uptime": {
@@ -12,6 +13,7 @@ mapping = {
     }
 }
 
+
 if __name__ == '__main__':
     for item in uptime():
         print(item)