dave 2017-05-15 17:29:20 -07:00
parent ec819b2cad
commit aafe393b62
8 changed files with 143 additions and 119 deletions

View File

@@ -1,4 +1,5 @@
-from subprocess import Popen,PIPE
+from subprocess import Popen, PIPE
 def ipaddr():
     """
@@ -6,6 +7,7 @@ def ipaddr():
     """
     return Popen(["hostname", "--all-ip-addresses"], stdout=PIPE).communicate()[0].decode().split(" ")[0].strip()
 def hostname():
     """
     Return system hostname from hostname -f
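
The two helpers above shell out to the hostname utility. A quick sanity check, as a sketch (output values are illustrative):

    from pymonitor.builtins import sysinfo

    print(sysinfo.ipaddr())    # first address from `hostname --all-ip-addresses`, e.g. "10.0.0.5"
    print(sysinfo.hostname())  # from `hostname -f`, e.g. "worker1.example.com"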

View File

@@ -2,7 +2,7 @@
 from threading import Thread
 from elasticsearch import Elasticsearch
-from time import time,sleep
+from time import time, sleep
 from pymonitor.builtins import sysinfo
 import traceback
 import datetime
@@ -11,43 +11,44 @@ import json
 import sys
 import os
 class MonitorDaemon(Thread):
     def __init__(self, config):
         Thread.__init__(self)
         self.config = config
         self.threads = []
         self.backend = Backend(self.config["backend"]["url"])
     def run(self):
         """
         Start all monitoring threads and block until they exit
         """
         logger = logging.getLogger("monitordaemon")
-        checkerPath = os.path.dirname(os.path.realpath(__file__))+"/monitors/"
+        checkerPath = os.path.dirname(os.path.realpath(__file__)) + "/monitors/"
         sys.path.append(checkerPath)
         logger.debug("path %s" % checkerPath)
         # Create/start all monitoring threads
         logger.debug("creating monitor threads")
         for instance in self.config["monitors"]:
             monitor_thread = MonitorThread(instance, self.backend)
             self.threads.append(monitor_thread)
             self.backend.mapping.update(monitor_thread.mapping)
         self.backend.connect()
         logger.debug("starting monitor threads")
         for monitor_thread in self.threads:
             monitor_thread.start()
         # Tear down all threads
         logger.debug("joining monitor threads")
         for monitor_thread in self.threads:
             monitor_thread.join()
         logger.debug("joined monitor threads")
     def shutdown(self):
         """
         Signal all monitoring threads to stop
@@ -64,11 +65,11 @@ class Backend:
         self.es_url = es_url
         self.mapping = {}
         self.logger = logging.getLogger("monitordaemon.backend")
         self.sysinfo = {}
         self.update_sys_info()
         self.logger.debug("running on %(hostname)s (%(ipaddr)s)" % self.sysinfo)
     def connect(self):
         self.logger.debug("final mapping %s" % self.mapping)
         self.logger.debug("connecting to backend at %s" % self.es_url)
@@ -76,7 +77,7 @@ class Backend:
         self.logger.debug("connected to backend")
         self.current_index = ""
         self.check_before_entry()
     def update_sys_info(self):
         """
         Fetch generic system info that is sent with every piece of monitoring data
@@ -84,13 +85,13 @@ class Backend:
         self.sysinfo["hostname"] = sysinfo.hostname()
         self.sysinfo["hostname_raw"] = self.sysinfo["hostname"]
         self.sysinfo["ipaddr"] = sysinfo.ipaddr()
     def get_index_name(self):
         """
         Return name of current index such as 'monitor-2015.12.05'
         """
         return "monitor-%s" % datetime.datetime.now().strftime("%Y.%m.%d")
     def create_index(self, indexName):
         """
         Check if current index exists, and if not, create it
@@ -98,7 +99,7 @@ class Backend:
         if not self.es.indices.exists(index=indexName):
             mapping = {
                 "mappings": {
-                    "_default_":{
+                    "_default_": {
                         "properties": {
                             "ipaddr": {
                                 "type": "ip"
@@ -107,8 +108,8 @@ class Backend:
                                 "type": "string"
                             },
                             "hostname_raw": {
-                                "type" : "string",
-                                "index" : "not_analyzed"
+                                "type": "string",
+                                "index": "not_analyzed"
                             }
                         }
                     }
@@ -116,9 +117,9 @@ class Backend:
             }
             mapping["mappings"].update(self.mapping)
             self.logger.debug("creating index %s with mapping %s" % (indexName, json.dumps(mapping, indent=4)))
-            self.es.indices.create(index=indexName, ignore=400, body=mapping)# ignore already exists error
+            self.es.indices.create(index=indexName, ignore=400, body=mapping)  # ignore already exists error
             self.current_index = indexName
     def check_before_entry(self):
         """
         Called before adding any data to ES. Checks if a new index should be created due to date change
@@ -126,21 +127,21 @@ class Backend:
         indexName = self.get_index_name()
         if indexName != self.current_index:
             self.create_index(indexName)
     def add_data(self, data_type, data):
         """
         Submit a piece of monitoring data
         """
         self.check_before_entry()
         doc = self.sysinfo.copy()
         doc.update(data)
         doc["@timestamp"] = datetime.datetime.utcnow().isoformat()
         self.logger.debug("logging type %s: %s" % (data_type, doc))
         res = self.es.index(index=self.current_index, doc_type=data_type, body=doc)
         self.logger.debug("%s created %s" % (data_type, res["_id"]))
 class MonitorThread(Thread):
     def __init__(self, config, backend):
@@ -150,24 +151,24 @@ class MonitorThread(Thread):
         Thread.__init__(self)
         self.config = config
         self.backend = backend
-        self.logger = logging.getLogger("monitordaemon.monitorthread.%s"%self.config["type"])
+        self.logger = logging.getLogger("monitordaemon.monitorthread.%s" % self.config["type"])
         self.logger.debug("initing worker thread with config %s" % self.config)
         self.logger.debug("importing %s" % self.config["type"])
         self.checker_func = getattr(__import__(self.config["type"]), self.config["type"])
         self.logger.debug("checker func %s" % self.checker_func)
         self.mapping = {}
-        #try:
+        # try:
         self.mapping.update(__import__(self.config["type"]).mapping)
-        #except:
+        # except:
         #     pass
         self.logger.debug("mapping %s" % self.mapping)
         self.alive = True
         self.delay = int(self.config["freq"])
         self.lastRun = 0
     def run(self):
         """
         Call execute method every x seconds forever
@@ -183,7 +184,7 @@ class MonitorThread(Thread):
                 self.logger.warning(tb)
             sleep(0.5)
         self.logger.debug("scheduler exited")
     def execute(self, args):
         """
         Run the loaded checker function
@@ -194,32 +195,35 @@ class MonitorThread(Thread):
         self.backend.add_data(self.config["type"], result)
         duration = time() - before
         self.logger.info("runtime: %.3f" % duration)
     def shutdown(self):
         """
         Tell thread to exit
         """
         self.logger.debug("cancelling scheduler")
-        self.alive=False
+        self.alive = False
 def run_cli():
     from optparse import OptionParser
     parser = OptionParser()
     parser.add_option("-c", "--config", action="store", type="string", dest="config", help="Path to config file")
-    parser.add_option("-l", "--logging", action="store", dest="logging", help="Logging level", default="INFO", choices=['WARN', 'CRITICAL', 'WARNING', 'INFO', 'ERROR', 'DEBUG'])
+    parser.add_option("-l", "--logging", action="store", dest="logging", help="Logging level", default="INFO",
+                      choices=['WARN', 'CRITICAL', 'WARNING', 'INFO', 'ERROR', 'DEBUG'])
     (options, args) = parser.parse_args()
-    logging.basicConfig(level=getattr(logging, options.logging), format="%(asctime)-15s %(levelname)-8s %(name)s@%(filename)s:%(lineno)d %(message)s")
+    logging.basicConfig(level=getattr(logging, options.logging),
+                        format="%(asctime)-15s %(levelname)-8s %(name)s@%(filename)s:%(lineno)d %(message)s")
     logger = logging.getLogger("init")
     logger.debug("options: %s" % options)
-    if options.config == None:
+    if options.config is None:
         parser.print_help()
         sys.exit()
     with open(options.config, "r") as c:
         if options.config[-5:] == '.json':
             conf = json.load(c)
@@ -228,9 +232,9 @@ def run_cli():
             conf = yaml_load(c)
         else:
             raise Exception("Invalid config format")
     logger.debug("starting daemon with conf: %s" % conf)
     daemon = MonitorDaemon(conf)
     try:
         daemon.start()
@@ -239,5 +243,6 @@ def run_cli():
         print("")
         daemon.shutdown()
 if __name__ == '__main__':
     run_cli()
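
For reference, run_cli above expects a JSON (or YAML) config whose shape matches what MonitorDaemon and MonitorThread read: a backend URL plus a list of monitors, each with a type and a freq. A minimal sketch with placeholder values; the same dict can also be handed to MonitorDaemon directly:

    # minimal config, equivalent to a config.json passed via -c:
    conf = {
        "backend": {"url": "http://localhost:9200"},  # placeholder Elasticsearch URL
        "monitors": [
            {"type": "load", "freq": "30"},      # run the load checker every 30s
            {"type": "meminfo", "freq": "60"},   # run the meminfo checker every 60s
        ],
    }
    daemon = MonitorDaemon(conf)
    daemon.start()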

View File

@@ -1,10 +1,11 @@
 from psutil import disk_io_counters
 def diskio(disks=[]):
     with open("/proc/uptime", "r") as f:
         uptime = int(float(f.read().split(" ")[0]))
     diskinfo = disk_io_counters(perdisk=True)
-    for disk,stats in diskinfo.items():
+    for disk, stats in diskinfo.items():
         if disks and disk not in disks:
             continue
         if stats.read_count == 0 and disk not in disks:
@@ -12,19 +13,20 @@ def diskio(disks=[]):
         stats = {
             "disk": disk,
             "disk_raw": disk,
-            "reads_ps": round(stats.read_count/uptime, 2),
-            "writes_ps":round(stats.write_count/uptime, 2),
-            "read_ps": round(stats.read_bytes/uptime, 2),
-            "write_ps": round(stats.write_bytes/uptime, 2),
+            "reads_ps": round(stats.read_count / uptime, 2),
+            "writes_ps": round(stats.write_count / uptime, 2),
+            "read_ps": round(stats.read_bytes / uptime, 2),
+            "write_ps": round(stats.write_bytes / uptime, 2),
             "reads": stats.read_count,
             "writes": stats.write_count,
             "read": stats.read_bytes,
             "written": stats.write_bytes,
-            "read_size":round(stats.read_bytes/stats.read_count, 2) if stats.read_count > 0 else 0,
-            "write_size":round(stats.write_bytes/stats.write_count, 2) if stats.write_count > 0 else 0
+            "read_size": round(stats.read_bytes / stats.read_count, 2) if stats.read_count > 0 else 0,
+            "write_size": round(stats.write_bytes / stats.write_count, 2) if stats.write_count > 0 else 0
         }
         yield(stats)
 mapping = {
     "diskio": {
         "properties": {
@@ -33,7 +35,7 @@ mapping = {
             },
             "disk_raw": {
                 "type": "string",
-                "index" : "not_analyzed"
+                "index": "not_analyzed"
             },
             "reads_ps": {
                 "type": "double"
@@ -72,4 +74,3 @@ mapping = {
 if __name__ == '__main__':
     for item in diskio():
         print(item)

View File

@@ -1,28 +1,30 @@
 from os import statvfs
 def diskspace(filesystems=[]):
     for fs in filesystems:
         stats = statvfs(fs)
         info = {
             "fs": fs,
             "fs_raw": fs,
             "diskfree": stats.f_bsize * stats.f_bavail,
-            "diskused": (stats.f_blocks-stats.f_bavail) * stats.f_bsize,
+            "diskused": (stats.f_blocks - stats.f_bavail) * stats.f_bsize,
             "disksize": stats.f_bsize * stats.f_blocks,
             "inodesmax": stats.f_files,
             "inodesfree": stats.f_favail,
             "inodesused": stats.f_files - stats.f_favail
         }
-        info["diskpctused"] = round(info["diskused"]/info["disksize"] if info["disksize"] > 0 else 0, 2)
-        info["diskpctfree"] = round(info["diskfree"]/info["disksize"] if info["disksize"] > 0 else 0, 2)
+        info["diskpctused"] = round(info["diskused"] / info["disksize"] if info["disksize"] > 0 else 0, 2)
+        info["diskpctfree"] = round(info["diskfree"] / info["disksize"] if info["disksize"] > 0 else 0, 2)
         info["inodesused_pct"] = round(info["inodesused"] / info["inodesmax"] if info["inodesmax"] > 0 else 0, 2)
         info["inodesfree_pct"] = round(info["inodesfree"] / info["inodesmax"] if info["inodesmax"] > 0 else 0, 2)
         yield info
 mapping = {
     "diskspace": {
         "properties": {
@@ -45,8 +47,8 @@ mapping = {
                 "type": "string"
             },
             "fs_raw": {
-                "type" : "string",
-                "index" : "not_analyzed"
+                "type": "string",
+                "index": "not_analyzed"
             },
             "inodesmax": {
                 "type": "long"
@@ -67,6 +69,7 @@ mapping = {
     }
 }
 if __name__ == '__main__':
     for item in diskspace(filesystems=["/", "/dev"]):
         print(item)

View File

@@ -4,9 +4,10 @@ def load():
     yield {
         "load_1m": m1,
         "load_5m": m5,
-        "load_15m":m15
+        "load_15m": m15
     }
 mapping = {
     "load": {
         "properties": {
@@ -23,6 +24,7 @@ mapping = {
     }
 }
 if __name__ == '__main__':
     for item in load():
         print(item)

View File

@@ -3,65 +3,72 @@ import re
 memline_pattern = re.compile(r'^(?P<key>[^\\:]+)\:\s+(?P<value>[0-9]+)(\s(?P<unit>[a-zA-Z]+))?')
 computed_fields = {
-    "mempctused": lambda items: round((items["memtotal"]-items["memfree"])/items["memtotal"], 2),
-    "mempctfree": lambda items: 1-round((items["memtotal"]-items["memfree"])/items["memtotal"], 2),
-    "mempctused_nocache": lambda items: round((items["memtotal"]-items["memfree"]-items["cached"])/items["memtotal"], 2),
-    "mempctfree_nocache": lambda items: 1-round((items["memtotal"]-items["memfree"]-items["cached"])/items["memtotal"], 2),
-    "swappctused": lambda items: round((items["swaptotal"]-items["swapfree"])/items["swaptotal"] if items["swaptotal"] > 0 else 0, 2),
-    "swappctfree": lambda items: 1-round((items["swaptotal"]-items["swapfree"])/items["swaptotal"] if items["swaptotal"] > 0 else 0, 2)
+    "mempctused": lambda items: round((items["memtotal"] - items["memfree"]) / items["memtotal"], 2),
+    "mempctfree": lambda items: 1 - round((items["memtotal"] - items["memfree"]) / items["memtotal"], 2),
+    "mempctused_nocache": lambda items: round((items["memtotal"] - items["memfree"] - items["cached"]) /
+                                              items["memtotal"], 2),
+    "mempctfree_nocache": lambda items: 1 - round((items["memtotal"] - items["memfree"] - items["cached"]) /
+                                                  items["memtotal"], 2),
+    "swappctused": lambda items: round((items["swaptotal"] - items["swapfree"]) /
+                                       items["swaptotal"] if items["swaptotal"] > 0 else 0, 2),
+    "swappctfree": lambda items: 1 - round((items["swaptotal"] - items["swapfree"]) /
+                                           items["swaptotal"] if items["swaptotal"] > 0 else 0, 2)
 }
 def meminfo(whitelist=[]):
     if not whitelist:
         whitelist = ["swaptotal", "swapfree", "swapcached",
                      "memtotal", "memfree", "cached",
                      "active", "inactive", ]
     result = {}
     with open("/proc/meminfo", "r") as f:
         for line in f.read().strip().split("\n"):
             matches = memline_pattern.match(line)
             value = int(matches.group("value"))
             unit = matches.group("unit")
             if unit:
                 if unit == "kB":
-                    value*=1024
+                    value *= 1024
                 else:
                     raise Exception("Unknown unit")
-            name = ''.join(c for c in matches.group("key").lower() if 96<ord(c)<123)
+            name = ''.join(c for c in matches.group("key").lower() if 96 < ord(c) < 123)
             if name in whitelist:
                 result[name] = value
     for key in computed_fields:
         result[key] = computed_fields[key](result)
     yield result
 mapping = {
     "meminfo": {
         "properties": {
-            "swaptotal": { "type": "long" },
-            "swapfree": { "type": "long" },
-            "swapcached": { "type": "long" },
-            "memtotal": { "type": "long" },
-            "memfree": { "type": "long" },
-            "memavailable": { "type": "long" },
-            "cached": { "type": "long" },
-            "active": { "type": "long" },
-            "inactive": { "type": "long" },
-            "mempctused": { "type": "double" },
-            "mempctfree": { "type": "double" },
-            "mempctused_nocache": { "type": "double" },
-            "mempctfree_nocache": { "type": "double" },
-            "swappctused": { "type": "double" },
-            "swappctfree": { "type": "double" }
+            "swaptotal": {"type": "long"},
+            "swapfree": {"type": "long"},
+            "swapcached": {"type": "long"},
+            "memtotal": {"type": "long"},
+            "memfree": {"type": "long"},
+            "memavailable": {"type": "long"},
+            "cached": {"type": "long"},
+            "active": {"type": "long"},
+            "inactive": {"type": "long"},
+            "mempctused": {"type": "double"},
+            "mempctfree": {"type": "double"},
+            "mempctused_nocache": {"type": "double"},
+            "mempctfree_nocache": {"type": "double"},
+            "swappctused": {"type": "double"},
+            "swappctfree": {"type": "double"}
         }
     }
 }
 if __name__ == '__main__':
     for item in meminfo():
         print(item)
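
A worked example of the computed fields above, with illustrative numbers (any consistent unit):

    # memtotal=8000, memfree=2000, cached=1000:
    #   mempctused         = round((8000 - 2000) / 8000, 2)        = 0.75
    #   mempctfree         = 1 - 0.75                              = 0.25
    #   mempctused_nocache = round((8000 - 2000 - 1000) / 8000, 2) = 0.62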

View File

@@ -1,11 +1,12 @@
 from glob import glob
 import re
 KTHREADD_PID = 2
 PAT_REMOVE_PROC_SPACES = re.compile(r'(\([^\)]+\))')
 def procs():
     # Get uid->name mapping
     users = {}
     with open('/etc/passwd', 'r') as passwd:
@@ -13,9 +14,9 @@ def procs():
             line = passwd.readline()
             if not line:
                 break
-            uname,x,uid,gid,opts,home,shell = line.split(":")
-            users[int(uid)]=uname
+            uname, _, uid, gid, opts, home, shell = line.split(":")
+            users[int(uid)] = uname
     # Get gid->groupname mapping
     groups = {}
     with open('/etc/group', 'r') as group:
@@ -23,13 +24,13 @@ def procs():
             line = group.readline()
             if not line:
                 break
-            gname,x,gid,y = line.split(":")
-            groups[int(gid)]=gname
+            gname, _, gid, y = line.split(":")
+            groups[int(gid)] = gname
     num_procs = 0
     num_threads = 0
     num_kthreads = 0
     for f in glob('/proc/[0-9]*/stat'):
         try:
             with open(f, "r") as statfile:
@@ -38,21 +39,21 @@ def procs():
                 # Fix spaces in process names
                 stat = PAT_REMOVE_PROC_SPACES.sub("PROCNAME", stat)
                 stat = stat.split(" ")
-                proc_id = int(stat[0])
                 proc_parent = int(stat[3])
                 if proc_parent == KTHREADD_PID:
-                    num_kthreads+=1
+                    num_kthreads += 1
                 else:
-                    num_procs+=1
+                    num_procs += 1
                     num_threads += int(stat[19])
         except Exception as e:
             print(e)
             print("Failed to open %s" % f)
-    yield {"procs": num_procs, "threads":num_threads, "kthreads": num_kthreads}
+    yield {"procs": num_procs, "threads": num_threads, "kthreads": num_kthreads}
 mapping = {
     "procs": {
@@ -70,6 +71,7 @@ mapping = {
     }
 }
 if __name__ == '__main__':
     for item in procs():
         print(item)

View File

@@ -1,6 +1,7 @@
 def uptime():
     with open("/proc/uptime", "r") as f:
-        yield {"uptime":int(float(f.read().split(" ")[0]))}
+        yield {"uptime": int(float(f.read().split(" ")[0]))}
 mapping = {
     "uptime": {
@@ -12,6 +13,7 @@ mapping = {
     }
 }
 if __name__ == '__main__':
     for item in uptime():
         print(item)
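
Every monitor module in this commit follows the same contract: a generator named after the module that yields flat dicts, plus a module-level mapping dict that Backend merges into the Elasticsearch index mapping. A hypothetical new monitor, as a sketch (the file name, sysfs path, and field name are assumptions, not part of this commit):

    # cputemp.py - would be dropped into the monitors/ directory and enabled
    # with {"type": "cputemp", "freq": "60"} in the config
    def cputemp():
        # thermal_zone0 is an assumption; not every host exposes it
        with open("/sys/class/thermal/thermal_zone0/temp", "r") as f:
            yield {"cputemp": int(f.read().strip()) / 1000.0}  # millidegrees C -> degrees C

    mapping = {
        "cputemp": {
            "properties": {
                "cputemp": {"type": "double"}
            }
        }
    }

    if __name__ == '__main__':
        for item in cputemp():
            print(item)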