diff --git a/README.md b/README.md index f1a3d59..2ab44cd 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ python-esmonitor ================ -**Modular monitoring tool for logging data to elasticsearch** +**Modular monitoring tool for logging data to various timeseries databases** Quick start ----------- @@ -8,21 +8,37 @@ Quick start * Install: `pip3 install -r requirements.txt ; python3 setup.py install` * Configure: `cd examples ; vim config.json` * Run: `pymonitor -c config.json` - -Requires the [python elasticsearch module](https://github.com/elastic/elasticsearch-py). Configuring ----------- -The config file should contain a json object with the keys `backend` and `monitors`. Backend contains only one key, `url`. This should be the full url to elasticsearch: +The config file should contain a json object with the keys `backend` and `monitors`. Backend contains a key, `type`, to +select what database backend to use. The remaining keys are specific to that database. + +For Elasticsearch 6.x, this should be the full url to elasticsearch: ``` { "backend": { + "type": "elasticsearch", + "url": "http://192.168.1.210:8297/" }, ``` +Or for InfluxDB, several fields describing the connection: + ``` +{ + "backend": { + "type": "influxdb", + "host": "10.0.0.10", + "port": "8086", + "user": "root", + "password": "root", + "database": "monitoring" + }, +``` + The `monitors` key contains a list of monitor modules to run: ``` @@ -42,10 +58,13 @@ The `monitors` key contains a list of monitor modules to run: } ``` -The name of the module to run for a monitor is `type`. The `freq` option is the frequency, in seconds, that this monitor will check and report data. If the monitor being used takes any options, they can be passed as a object with the `args` option, +The name of the module to run for a monitor is `type`. The `freq` option is the frequency, in seconds, that this monitor +will check and report data. 
If the monitor being used takes any options, they can be passed as an object with the +`args` option. A yaml config can also be used. The data structure must be identical and the filename MUST end in `.yml`. + Developing Modules ------------------ @@ -53,6 +72,9 @@ Developing Modules Add a new python file in *pymonitor/monitors/*, such as `uptime.py`. Add a function named the same as the file, accepting any needed params as keyword args: ``` +from pymonitor import Metric + + def uptime(): ``` Add your code to retrieve any metrics: @@ -60,29 +82,38 @@ Add your code to retrieve any metrics: with open("/proc/uptime", "r") as f: uptime_stats = {"uptime":int(float(f.read().split(" ")[0]))} ``` -This function must yield one or more dictionaries. This dictonary will be sent as a document to elasticsearch, with a `_type` matching the name if this module ("uptime"). System hostname, ip address, and timestamp will be added automatically. + +This function must yield one or more Metric objects. This object will be sent to the database backend, with a `type` +field matching the name of this module ("uptime"). System hostname, ip address, and timestamp will be +added automatically. + ``` - yield uptime_stats + yield Metric(uptime_stats) ``` -The module file must set a variable named `mapping`. This contains data mapping information sent to elasticsearch so our data is structured correctly. This value is used verbatim, so any other elasticsearch options for this type can be specified here. + +The module file must set a variable named `mapping`. For backends that need it, such as Elasticsearch, this contains +data mapping information so our data is structured correctly. This value is used verbatim, so any other Elasticsearch +options for this type can be specified here. 
+ ``` mapping = { "uptime": { - "properties": { - "uptime": { - "type": "integer" - } - } + "type": "integer" } } + ``` + Finally, it's often convenient to test your monitor by adding some code so the script can be ran individually: + ``` if __name__ == '__main__': for item in uptime(): print(item["uptime"]) ``` + Since this module is named 'uptime' and takes no args, the following added to the monitors array in `config.json` would activate it: + ``` { "type":"uptime", @@ -90,16 +121,10 @@ Since this module is named 'uptime' and takes no args, the following added to th "args":{} } ``` + Roadmap ------- * Complete API docs * More builtin monitors * Local logging in case ES can't be reached - -Changelog ---------- - -*0.1.0:* renamed fields with names containing dots for elasticsearch 2.0 compatibility -*0.0.1:* initial release! - diff --git a/examples/config.yml b/examples/config.elasticsearch.yml similarity index 59% rename from examples/config.yml rename to examples/config.elasticsearch.yml index 55788fa..3c086de 100644 --- a/examples/config.yml +++ b/examples/config.elasticsearch.yml @@ -1,25 +1,23 @@ backend: - url: 'http://10.0.3.15:9200/' + type: elasticsearch + url: 'http://10.0.0.10:9200/' monitors: - type: uptime - freq: '30' + freq: 30 args: {} - type: load - freq: '30' + freq: 30 args: {} - type: meminfo - freq: '30' - args: {} -- type: procs - freq: '30' + freq: 30 args: {} - type: diskspace - freq: '30' + freq: 30 args: filesystems: - '/' - '/var' - '/home' - type: diskio - freq: '30' + freq: 30 args: {} diff --git a/examples/config.influxdb.yml b/examples/config.influxdb.yml new file mode 100644 index 0000000..51fb005 --- /dev/null +++ b/examples/config.influxdb.yml @@ -0,0 +1,27 @@ +backend: + type: influxdb + host: 10.0.0.10 + port: 8086 + user: root + password: root + database: monitoring +monitors: +- type: uptime + freq: 30 + args: {} +- type: load + freq: 30 + args: {} +- type: meminfo + freq: 30 + args: {} +- type: diskspace + freq: 30 + args: + 
filesystems: + - '/' + - '/var' + - '/home' +- type: diskio + freq: 30 + args: {} diff --git a/examples/config.json b/examples/config.json deleted file mode 100644 index d77a44f..0000000 --- a/examples/config.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "backend": { - "url": "http://10.0.3.15:9200/" - }, - "monitors": [ - { - "type":"uptime", - "freq":"30", - "args":{} - }, - { - "type":"load", - "freq":"30", - "args":{} - }, - { - "type":"meminfo", - "freq":"30", - "args":{} - }, - { - "type":"procs", - "freq":"30", - "args":{} - }, - { - "type":"diskspace", - "freq":"30", - "args": { - "filesystems": [ - "/", - "/var", - "/home" - ] - } - }, - { - "type":"diskio", - "freq":"30", - "args":{} - } - ] -} diff --git a/pymonitor/__init__.py b/pymonitor/__init__.py index d3ec452..18e801d 100644 --- a/pymonitor/__init__.py +++ b/pymonitor/__init__.py @@ -1 +1,54 @@ -__version__ = "0.2.0" +__version__ = "0.4.0" +from itertools import chain +import logging +from pymonitor.builtins import sysinfo + + +class Backend(object): + """ + Base class for data storage backends + """ + def __init__(self, master, conf): + self.master = master + self.conf = conf + self.sysinfo = {} + self.logger = logging.getLogger("monitordaemon.backend") + self.update_sys_info() + + def update_sys_info(self): + """ + Fetch generic system info that is sent with every piece of monitoring data + """ + self.sysinfo["hostname"] = sysinfo.hostname() + self.sysinfo["ipaddr"] = sysinfo.ipaddr() + + def connect(self): + """ + Connect to the backend and do any prep work + """ + raise NotImplementedError() + + def add_data(self, metric): + """ + Accept a Metric() object and send it off to the backend + """ + raise NotImplementedError() + + +class Metric(object): + """ + Wrapper for holding metrics gathered from the system. All monitor modules yield multiple of these objects. 
+ """ + def __init__(self, values, tags=None): + """ + :param values: dict of name->value metric data + :param tags: dict of key->value tags associated with the metric data + """ + self.values = values + self.tags = tags or {} + + def __repr__(self): + fields = [] + for k, v in chain(self.values.items(), self.tags.items()): + fields.append("{}={}".format(k, v)) + return "<{}{{{}}}>".format(self.__class__.__name__, ','.join(fields)) diff --git a/pymonitor/daemon.py b/pymonitor/daemon.py index a1e53b0..518954f 100755 --- a/pymonitor/daemon.py +++ b/pymonitor/daemon.py @@ -1,15 +1,14 @@ #!/usr/bin/env python3 from threading import Thread -from elasticsearch import Elasticsearch from time import time, sleep -from pymonitor.builtins import sysinfo import traceback -import datetime import logging import json import sys import os +from pymonitor.elasticsearch import ESBackend +from pymonitor.influxdb import InfluxBackend class MonitorDaemon(Thread): @@ -17,7 +16,8 @@ class MonitorDaemon(Thread): Thread.__init__(self) self.config = config self.threads = [] - self.backend = Backend(self.config["backend"]["url"]) + self.backend = {"elasticsearch": ESBackend, + "influxdb": InfluxBackend}[self.config["backend"]["type"]](self, self.config["backend"]) def run(self): """ @@ -29,13 +29,13 @@ class MonitorDaemon(Thread): sys.path.append(checkerPath) logger.debug("path %s" % checkerPath) - # Create/start all monitoring threads + # Create all monitoring threads logger.debug("creating monitor threads") for instance in self.config["monitors"]: monitor_thread = MonitorThread(instance, self.backend) self.threads.append(monitor_thread) - self.backend.mapping.update(monitor_thread.mapping) + # Setup backend self.backend.connect() logger.debug("starting monitor threads") @@ -57,92 +57,6 @@ class MonitorDaemon(Thread): monitor_thread.shutdown() -class Backend: - def __init__(self, es_url): - """ - Init elasticsearch client - """ - self.es_url = es_url - self.mapping = {} - self.logger = 
logging.getLogger("monitordaemon.backend") - - self.sysinfo = {} - self.update_sys_info() - self.logger.debug("running on %(hostname)s (%(ipaddr)s)" % self.sysinfo) - - def connect(self): - self.logger.debug("final mapping %s" % self.mapping) - self.logger.debug("connecting to backend at %s" % self.es_url) - self.es = Elasticsearch([self.es_url]) - self.logger.debug("connected to backend") - self.current_index = "" - self.check_before_entry() - - def update_sys_info(self): - """ - Fetch generic system info that is sent with every piece of monitoring data - """ - self.sysinfo["hostname"] = sysinfo.hostname() - self.sysinfo["hostname_raw"] = self.sysinfo["hostname"] - self.sysinfo["ipaddr"] = sysinfo.ipaddr() - - def get_index_name(self): - """ - Return name of current index such as 'monitor-2015.12.05' - """ - return "monitor-%s" % datetime.datetime.now().strftime("%Y.%m.%d") - - def create_index(self, indexName): - """ - Check if current index exists, and if not, create it - """ - if not self.es.indices.exists(index=indexName): - mapping = { - "mappings": { - "_default_": { - "properties": { - "ipaddr": { - "type": "ip" - }, - "hostname": { - "type": "string" - }, - "hostname_raw": { - "type": "string", - "index": "not_analyzed" - } - } - } - } - } - mapping["mappings"].update(self.mapping) - self.logger.debug("creating index %s with mapping %s" % (indexName, json.dumps(mapping, indent=4))) - self.es.indices.create(index=indexName, ignore=400, body=mapping) # ignore already exists error - self.current_index = indexName - - def check_before_entry(self): - """ - Called before adding any data to ES. 
Checks if a new index should be created due to date change - """ - indexName = self.get_index_name() - if indexName != self.current_index: - self.create_index(indexName) - - def add_data(self, data_type, data): - """ - Submit a piece of monitoring data - """ - self.check_before_entry() - - doc = self.sysinfo.copy() - doc.update(data) - doc["@timestamp"] = datetime.datetime.utcnow().isoformat() - - self.logger.debug("logging type %s: %s" % (data_type, doc)) - res = self.es.index(index=self.current_index, doc_type=data_type, body=doc) - self.logger.debug("%s created %s" % (data_type, res["_id"])) - - class MonitorThread(Thread): def __init__(self, config, backend): """ @@ -155,16 +69,10 @@ class MonitorThread(Thread): self.logger.debug("initing worker thread with config %s" % self.config) self.logger.debug("importing %s" % self.config["type"]) - self.checker_func = getattr(__import__(self.config["type"]), self.config["type"]) + self.imported = __import__(self.config["type"]) + self.checker_func = getattr(self.imported, self.config["type"]) self.logger.debug("checker func %s" % self.checker_func) - self.mapping = {} - # try: - self.mapping.update(__import__(self.config["type"]).mapping) - # except: - # pass - self.logger.debug("mapping %s" % self.mapping) - self.alive = True self.delay = int(self.config["freq"]) self.lastRun = 0 @@ -187,12 +95,13 @@ class MonitorThread(Thread): def execute(self, args): """ - Run the loaded checker function + Run the loaded checker function. Pass each Metric object yielded to the backend. 
""" before = time() for result in self.checker_func(**args): + result.tags.update(type=self.config["type"]) self.logger.debug("result: %s" % (result,)) - self.backend.add_data(self.config["type"], result) + self.backend.add_data(result) duration = time() - before self.logger.info("runtime: %.3f" % duration) @@ -200,17 +109,17 @@ class MonitorThread(Thread): """ Tell thread to exit """ - self.logger.debug("cancelling scheduler") + self.logger.debug("canceling scheduler") self.alive = False -def run_cli(): +def main(): from optparse import OptionParser parser = OptionParser() parser.add_option("-c", "--config", action="store", type="string", dest="config", help="Path to config file") parser.add_option("-l", "--logging", action="store", dest="logging", help="Logging level", default="INFO", - choices=['WARN', 'CRITICAL', 'WARNING', 'INFO', 'ERROR', 'DEBUG']) + choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG']) (options, args) = parser.parse_args() @@ -225,9 +134,9 @@ def run_cli(): sys.exit() with open(options.config, "r") as c: - if options.config[-5:] == '.json': + if options.config.endswith('.json'): conf = json.load(c) - elif options.config[-4:] == '.yml': + elif options.config.endswith('.yml'): from yaml import load as yaml_load conf = yaml_load(c) else: @@ -242,7 +151,3 @@ def run_cli(): except KeyboardInterrupt: print("") daemon.shutdown() - - -if __name__ == '__main__': - run_cli() diff --git a/pymonitor/elasticsearch.py b/pymonitor/elasticsearch.py new file mode 100644 index 0000000..8ffd5d4 --- /dev/null +++ b/pymonitor/elasticsearch.py @@ -0,0 +1,84 @@ +from pymonitor import Backend +import datetime +import json + + +class ESBackend(Backend): + def __init__(self, master, conf): + """ + Init elasticsearch client + """ + super().__init__(master, conf) + self.mapping = {} + self.current_index = None + + def connect(self): + self.logger.debug("connecting to elasticsearch at %s" % self.conf["url"]) + from elasticsearch import Elasticsearch + self.es = 
Elasticsearch([self.conf["url"]]) + self.logger.debug("connected to backend") + + for monitor_thread in self.master.threads: + self.mapping.update(monitor_thread.imported.mapping) + self.logger.debug("final mapping: %s", self.mapping) + self.create_mapping_template() + + self.check_index() + + def get_index_name(self): + """ + Return name of current index such as 'monitor-2015.12.05' + """ + return "monitor-%s" % datetime.datetime.now().strftime("%Y.%m.%d") + + def check_index(self): + """ + Called before adding any data to ES. Checks if a new index should be created due to date change + """ + indexName = self.get_index_name() + if indexName != self.current_index: + self.create_index(indexName) + + def create_index(self, indexName): + """ + Check if current index exists, and if not, create it + """ + if not self.es.indices.exists(index=indexName): + self.es.indices.create(index=indexName, ignore=400) # ignore already exists error + self.current_index = indexName + + def create_mapping_template(self): + default_fields = {"ipaddr": {"type": "ip"}, # TODO i dont like how these default fields are handled in general + "hostname": {"type": "text"}, + "hostname_raw": {"type": "keyword"}, + "@timestamp": {"type": "date"}} #"field": "@timestamp" + + fields = dict(**self.mapping) + fields.update(**default_fields) + template = {"index_patterns": ["monitor-*"], + "settings": {"number_of_shards": 1}, # TODO shard info from config file + "mappings": {"_default_": {"properties": fields}}} + self.logger.debug("creating template with body %s", json.dumps(template, indent=4)) + self.es.indices.put_template(name="monitor", body=template) + + def add_data(self, metric): + """ + Submit a piece of monitoring data + """ + self.check_index() + + metric.tags.update(**self.sysinfo) + metric.values["@timestamp"] = datetime.datetime.utcnow().isoformat() + + metric_dict = {} + metric_dict.update(metric.values) + metric_dict.update(metric.tags) + + # We'll likely group by tags on the eventual 
frontend, and under elasticsearch this works best if the entire + # field is handled as a single keyword. Duplicate all tags into ${NAME}_raw fields, expected to be not analyzed + for k, v in metric.tags.items(): + metric_dict["{}_raw".format(k)] = v + + self.logger.debug("logging type %s: %s" % (metric.tags["type"], metric)) + res = self.es.index(index=self.current_index, doc_type="monitor_data", body=metric_dict) + self.logger.debug("%s created %s" % (metric.tags["type"], res["_id"])) diff --git a/pymonitor/influxdb.py b/pymonitor/influxdb.py new file mode 100644 index 0000000..69a0fcb --- /dev/null +++ b/pymonitor/influxdb.py @@ -0,0 +1,33 @@ +from pymonitor import Backend +from influxdb import InfluxDBClient +import datetime + + +class InfluxBackend(Backend): + def __init__(self, master, conf): + super().__init__(master, conf) + self.client = None + + def connect(self): + """ + Connect to the backend and do any prep work + """ + self.client = InfluxDBClient(self.conf["host"], self.conf["port"], self.conf["user"], self.conf["password"]) # DBNAME + dbname = self.conf.get("database", "monitoring") + self.client.create_database(dbname) + self.client.switch_database(dbname) + + + def add_data(self, metric): + """ + Accept a Metric() object and send it off to the backend + """ + metric.tags.update(**self.sysinfo) + body = [{ + "measurement": metric.tags["type"], + "tags": metric.tags, + "time": datetime.datetime.utcnow().isoformat(), + "fields": metric.values + } + ] + self.client.write_points(body) diff --git a/pymonitor/monitors/diskio.py b/pymonitor/monitors/diskio.py index 90c088e..1f805ba 100644 --- a/pymonitor/monitors/diskio.py +++ b/pymonitor/monitors/diskio.py @@ -1,3 +1,4 @@ +from pymonitor import Metric from psutil import disk_io_counters @@ -12,7 +13,6 @@ def diskio(disks=[]): continue stats = { "disk": disk, - "disk_raw": disk, "reads_ps": round(stats.read_count / uptime, 2), "writes_ps": round(stats.write_count / uptime, 2), "read_ps": 
round(stats.read_bytes / uptime, 2), @@ -21,53 +21,48 @@ def diskio(disks=[]): "writes": stats.write_count, "read": stats.read_bytes, "written": stats.write_bytes, - "read_size": round(stats.read_bytes / stats.read_count, 2) if stats.read_count > 0 else 0, - "write_size": round(stats.write_bytes / stats.write_count, 2) if stats.write_count > 0 else 0 + "read_size": round(stats.read_bytes / stats.read_count, 2) if stats.read_count > 0 else 0.0, + "write_size": round(stats.write_bytes / stats.write_count, 2) if stats.write_count > 0 else 0.0 } - yield(stats) + yield Metric(stats, {"disk": disk}) mapping = { - "diskio": { - "properties": { - "disk": { - "type": "string" - }, - "disk_raw": { - "type": "string", - "index": "not_analyzed" - }, - "reads_ps": { - "type": "double" - }, - "writes_ps": { - "type": "double" - }, - "read_ps": { - "type": "double" - }, - "write_ps": { - "type": "double" - }, - "reads": { - "type": "long" - }, - "writes": { - "type": "long" - }, - "read": { - "type": "long" - }, - "written": { - "type": "long" - }, - "read_size": { - "type": "double" - }, - "write_size": { - "type": "double" - } - } + "disk": { + "type": "text" + }, + "disk_raw": { + "type": "keyword" + }, + "reads_ps": { + "type": "double" + }, + "writes_ps": { + "type": "double" + }, + "read_ps": { + "type": "double" + }, + "write_ps": { + "type": "double" + }, + "reads": { + "type": "long" + }, + "writes": { + "type": "long" + }, + "read": { + "type": "long" + }, + "written": { + "type": "long" + }, + "read_size": { + "type": "double" + }, + "write_size": { + "type": "double" } } diff --git a/pymonitor/monitors/diskspace.py b/pymonitor/monitors/diskspace.py index 3b60cfa..44cfb41 100644 --- a/pymonitor/monitors/diskspace.py +++ b/pymonitor/monitors/diskspace.py @@ -1,3 +1,4 @@ +from pymonitor import Metric from os import statvfs import logging @@ -10,7 +11,7 @@ def diskspace(filesystems=[], discover=True, omit=[]): filesystems param will be ignored. 
:param omit: list of paths that, if prefix a discovered mountpoint, to not report on """ - filesystems = [f.rstrip("/") for f in filesystems] + filesystems = [f.rstrip("/") if f != "/" else f for f in filesystems] if discover: with open("/proc/mounts") as f: for line in f.readlines(): @@ -26,12 +27,10 @@ def diskspace(filesystems=[], discover=True, omit=[]): try: stats = statvfs(fs) except FileNotFoundError: - logging.warning("filesystem not found: %s", fs) + logging.warning("filesystem not found: %s", repr(fs)) continue info = { - "fs": fs, - "fs_raw": fs, "diskfree": stats.f_bsize * stats.f_bavail, "diskused": (stats.f_blocks - stats.f_bavail) * stats.f_bsize, "disksize": stats.f_bsize * stats.f_blocks, @@ -40,56 +39,51 @@ def diskspace(filesystems=[], discover=True, omit=[]): "inodesused": stats.f_files - stats.f_favail } - info["diskpctused"] = round(info["diskused"] / info["disksize"] if info["disksize"] > 0 else 0, 5) - info["diskpctfree"] = round(info["diskfree"] / info["disksize"] if info["disksize"] > 0 else 0, 5) + info["diskpctused"] = round(info["diskused"] / info["disksize"] if info["disksize"] > 0 else 0.0, 5) + info["diskpctfree"] = round(info["diskfree"] / info["disksize"] if info["disksize"] > 0 else 0.0, 5) - info["inodesused_pct"] = round(info["inodesused"] / info["inodesmax"] if info["inodesmax"] > 0 else 0, 5) - info["inodesfree_pct"] = round(info["inodesfree"] / info["inodesmax"] if info["inodesmax"] > 0 else 0, 5) + info["inodesused_pct"] = round(info["inodesused"] / info["inodesmax"] if info["inodesmax"] > 0 else 0.0, 5) + info["inodesfree_pct"] = round(info["inodesfree"] / info["inodesmax"] if info["inodesmax"] > 0 else 0.0, 5) - yield info + yield Metric(info, {"fs": fs}) mapping = { - "diskspace": { - "properties": { - "diskfree": { - "type": "long" - }, - "diskused": { - "type": "long" - }, - "disksize": { - "type": "long" - }, - "diskpctused": { - "type": "double" - }, - "diskpctfree": { - "type": "double" - }, - "fs": { - "type": 
"string" - }, - "fs_raw": { - "type": "string", - "index": "not_analyzed" - }, - "inodesmax": { - "type": "long" - }, - "inodesfree": { - "type": "long" - }, - "inodesused": { - "type": "long" - }, - "inodesused_pct": { - "type": "double" - }, - "inodesfree_pct": { - "type": "double" - }, - } + "diskfree": { + "type": "long" + }, + "diskused": { + "type": "long" + }, + "disksize": { + "type": "long" + }, + "diskpctused": { + "type": "double" + }, + "diskpctfree": { + "type": "double" + }, + "fs": { + "type": "text" + }, + "fs_raw": { + "type": "keyword" + }, + "inodesmax": { + "type": "long" + }, + "inodesfree": { + "type": "long" + }, + "inodesused": { + "type": "long" + }, + "inodesused_pct": { + "type": "double" + }, + "inodesfree_pct": { + "type": "double" } } diff --git a/pymonitor/monitors/ifstats.py b/pymonitor/monitors/ifstats.py index 14e22d3..521688d 100644 --- a/pymonitor/monitors/ifstats.py +++ b/pymonitor/monitors/ifstats.py @@ -1,3 +1,4 @@ +from pymonitor import Metric from collections import defaultdict from time import time @@ -22,8 +23,7 @@ def ifstats(omit=[]): for i in range(0, len(fields)): fields[i] = int(fields[i]) - record = {"iface": ifname, - "rx_bytes": fields[rx_bytes], + record = {"rx_bytes": fields[rx_bytes], "tx_bytes": fields[tx_bytes], "rx_packets": fields[rx_packets], "tx_packets": fields[tx_packets], @@ -41,18 +41,17 @@ def ifstats(omit=[]): if any([ifname.startswith(i) for i in omit or []]): continue - yield record + yield Metric(record, {"iface": ifname}) mapping = { "ifstats": { "properties": { "iface": { - "type": "string", + "type": "string" }, "iface_raw": { - "type": "string", - "index": "not_analyzed" + "type": "keyword" }, "rx_bytes": { "type": "long" diff --git a/pymonitor/monitors/load.py b/pymonitor/monitors/load.py index c2e4d19..6d1664e 100644 --- a/pymonitor/monitors/load.py +++ b/pymonitor/monitors/load.py @@ -1,26 +1,22 @@ +from pymonitor import Metric + def load(): with open("/proc/loadavg", "r") as f: m1, m5, 
m15, procs, pid = f.read().strip().split(" ") - yield { - "load_1m": m1, - "load_5m": m5, - "load_15m": m15 - } + yield Metric({"load_1m": float(m1), + "load_5m": float(m5), + "load_15m": float(m15)}) mapping = { - "load": { - "properties": { - "load_15m": { - "type": "double" - }, - "load_5m": { - "type": "double" - }, - "load_1m": { - "type": "double" - } - } + "load_15m": { + "type": "double" + }, + "load_5m": { + "type": "double" + }, + "load_1m": { + "type": "double" } } diff --git a/pymonitor/monitors/meminfo.py b/pymonitor/monitors/meminfo.py index 9ab25fd..69dd58f 100644 --- a/pymonitor/monitors/meminfo.py +++ b/pymonitor/monitors/meminfo.py @@ -1,3 +1,4 @@ +from pymonitor import Metric import re memline_pattern = re.compile(r'^(?P[^\\:]+)\:\s+(?P[0-9]+)(\s(?P[a-zA-Z]+))?') @@ -10,9 +11,9 @@ computed_fields = { "mempctfree_nocache": lambda items: 1 - round((items["memtotal"] - items["memfree"] - items["cached"]) / items["memtotal"], 5), "swappctused": lambda items: round((items["swaptotal"] - items["swapfree"]) / - items["swaptotal"] if items["swaptotal"] > 0 else 0, 5), + items["swaptotal"] if items["swaptotal"] > 0 else 0.0, 5), "swappctfree": lambda items: 1 - round((items["swaptotal"] - items["swapfree"]) / - items["swaptotal"] if items["swaptotal"] > 0 else 0, 5) + items["swaptotal"] if items["swaptotal"] > 0 else 0.0, 5) } @@ -43,29 +44,25 @@ def meminfo(whitelist=[]): for key in computed_fields: result[key] = computed_fields[key](result) - yield result + yield Metric(result) mapping = { - "meminfo": { - "properties": { - "swaptotal": {"type": "long"}, - "swapfree": {"type": "long"}, - "swapcached": {"type": "long"}, - "memtotal": {"type": "long"}, - "memfree": {"type": "long"}, - "memavailable": {"type": "long"}, - "cached": {"type": "long"}, - "active": {"type": "long"}, - "inactive": {"type": "long"}, - "mempctused": {"type": "double"}, - "mempctfree": {"type": "double"}, - "mempctused_nocache": {"type": "double"}, - "mempctfree_nocache": {"type": 
"double"}, - "swappctused": {"type": "double"}, - "swappctfree": {"type": "double"} - } - } + "swaptotal": {"type": "long"}, + "swapfree": {"type": "long"}, + "swapcached": {"type": "long"}, + "memtotal": {"type": "long"}, + "memfree": {"type": "long"}, + "memavailable": {"type": "long"}, + "cached": {"type": "long"}, + "active": {"type": "long"}, + "inactive": {"type": "long"}, + "mempctused": {"type": "double"}, + "mempctfree": {"type": "double"}, + "mempctused_nocache": {"type": "double"}, + "mempctfree_nocache": {"type": "double"}, + "swappctused": {"type": "double"}, + "swappctfree": {"type": "double"} } diff --git a/pymonitor/monitors/procs.py b/pymonitor/monitors/procs.py index 2f7ab84..bf089a6 100644 --- a/pymonitor/monitors/procs.py +++ b/pymonitor/monitors/procs.py @@ -1,3 +1,4 @@ +from pymonitor import Metric from glob import glob import re @@ -52,7 +53,7 @@ def procs(): print(e) print("Failed to open %s" % f) - yield {"procs": num_procs, "threads": num_threads, "kthreads": num_kthreads} + yield Metric({"procs": num_procs, "threads": num_threads, "kthreads": num_kthreads}) mapping = { diff --git a/pymonitor/monitors/uptime.py b/pymonitor/monitors/uptime.py index c4ff45a..dfb5268 100644 --- a/pymonitor/monitors/uptime.py +++ b/pymonitor/monitors/uptime.py @@ -1,17 +1,12 @@ +from pymonitor import Metric + + def uptime(): with open("/proc/uptime", "r") as f: - yield {"uptime": int(float(f.read().split(" ")[0]))} + yield Metric({"uptime": int(float(f.read().split(" ")[0]))}) -mapping = { - "uptime": { - "properties": { - "uptime": { - "type": "integer" - } - } - } -} +mapping = {"uptime": {"type": "integer"}} if __name__ == '__main__': diff --git a/requirements.txt b/requirements.txt index ad5b278..6d358e5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,12 @@ +certifi==2018.8.24 +chardet==3.0.4 elasticsearch==6.3.1 -psutil==3.3.0 -PyYAML==3.11 +idna==2.7 +influxdb==5.2.0 +psutil==5.4.7 +python-dateutil==2.7.3 +pytz==2018.5 +PyYAML==3.13 
+requests==2.19.1 +six==1.11.0 urllib3==1.23 diff --git a/setup.py b/setup.py index 49e90bb..d0df51b 100755 --- a/setup.py +++ b/setup.py @@ -9,5 +9,9 @@ setup(name='pymonitor', author='dpedu', author_email='dave@davepedu.com', packages=['pymonitor', 'pymonitor.builtins', 'pymonitor.monitors'], - scripts=['bin/pymonitor'], + entry_points={ + "console_scripts": [ + "pymonitor = pymonitor.daemon:main" + ] + }, zip_safe=False)