Skip to content

Instantly share code, notes, and snippets.

@aneeshkp
Created November 30, 2017 14:48
Show Gist options
  • Save aneeshkp/fa16bcf16286495f304e486c40ce73db to your computer and use it in GitHub Desktop.
Prometheus test
# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  - 'prometheus.rules.yml'

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ['localhost:9090']

  # aputtur's test node
  # NOTE(review): all three test_node jobs scrape the same endpoint
  # localhost:9300 — confirm whether distinct ports (9300/9301/9302)
  # were intended, otherwise the three jobs collect identical data.
  - job_name: 'test_node1'
    scrape_interval: 1s
    static_configs:
      - targets: ['localhost:9300']
  - job_name: 'test_node2'
    scrape_interval: 1s
    static_configs:
      - targets: ['localhost:9300']
  - job_name: 'test_node3'
    scrape_interval: 1s
    static_configs:
      - targets: ['localhost:9300']
from prometheus_client import start_http_server,CollectorRegistry,Gauge,Metric
from prometheus_client.core import GaugeMetricFamily, CounterMetricFamily, REGISTRY
import json
import requests
import sys
import time
import collections
import random
import threading
import socket
import time
import signal
import sys
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
import logging
import datetime
import daiquiri
# Logging configuration: everything at DEBUG and above goes to a
# time-rotated file (new file every 8 hours), while only ERROR and
# above is duplicated into prometheus.log.
_log_outputs = (
    daiquiri.output.File('prometheus.log', level=logging.ERROR),
    daiquiri.output.TimedRotatingFile(
        'everything.log',
        level=logging.DEBUG,
        interval=datetime.timedelta(hours=8),
    ),
)
daiquiri.setup(level=logging.DEBUG, outputs=_log_outputs)

# Module-level logger used by the collector below.
LOG = daiquiri.getLogger(__name__)
class MyCollector(object):
    """Custom Prometheus collector emitting synthetic per-service gauges.

    Each scrape (``collect()`` call) yields one gauge metric per service,
    named ``svc_<i>_documents_loaded`` for i in 0..NUM_SERVICES-1, each
    carrying two random-valued samples labelled ``repository="fast"`` and
    ``repository="slow"``.
    """

    # Number of synthetic svc_* metrics produced per scrape.  The original
    # hard-coded 10 by (confusingly) writing ``for index in range(index)``
    # with ``index`` pre-set to 10.
    NUM_SERVICES = 10

    def __init__(self, host):
        # Recorded for identification; not currently used when building
        # the metrics themselves.
        self.host = host

    def collect(self):
        """Yield the synthetic gauge metrics; log (and swallow) any error.

        Registered with prometheus_client's REGISTRY, which calls this on
        every scrape of the /metrics endpoint.
        """
        try:
            for index in range(self.NUM_SERVICES):
                data = {"fast": random.random(), "slow": random.random()}
                name = "svc_%d_documents_loaded" % index
                metric = Metric(name, 'Requests failed', 'gauge')
                for repository, value in data.items():
                    # BUG FIX: the sample name previously contained a typo
                    # ("documentes_loaded"), so the samples never matched
                    # the declared metric name or the recording rules in
                    # prometheus.rules.yml.
                    metric.add_sample(name, value=value,
                                      labels={'repository': repository})
                yield metric
        except Exception as e:
            LOG.error(e)
def init_worker():
    """Configure the calling process to ignore SIGINT (Ctrl-C).

    Presumably intended as a pool-worker initializer so that only the
    parent process reacts to keyboard interrupts.
    """
    ignore_interrupts = signal.SIG_IGN
    signal.signal(signal.SIGINT, ignore_interrupts)
def signal_term_handler(signum, frame):
    """SIGTERM handler: announce that the process is shutting down.

    Registered in the ``__main__`` block via
    ``signal.signal(signal.SIGTERM, signal_term_handler)``.

    :param signum: the signal number delivered (renamed from ``signal``,
        which shadowed the ``signal`` module).
    :param frame: the interrupted stack frame (unused).
    """
    # BUG FIX: the original used the Python-2-only ``print "closing"``
    # statement; the call form below works on Python 2 and 3 alike.
    print("closing")
if __name__ == "__main__":
    # Let a process manager stop us cleanly: SIGTERM prints "closing".
    signal.signal(signal.SIGTERM, signal_term_handler)
    try:
        # Expose the metrics endpoint on port 9300 (matches the
        # test_node* scrape jobs in prometheus.yml), then register the
        # custom collector so every scrape triggers MyCollector.collect().
        start_http_server(9300)
        REGISTRY.register(MyCollector("localhost_1"))
        # The HTTP server runs in a daemon thread; keep the main thread
        # alive until interrupted.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        # BUG FIX: ``print "Stop"`` was a Python-2-only statement.
        print("Stop")
    finally:
        # BUG FIX: the original called sys.exit(0) *before*
        # server.shutdown()/server.close(), so those lines were
        # unreachable dead code; start_http_server() also returns no
        # server object here, so there was nothing to shut down anyway.
        sys.exit(0)
# NOTE(review): these rules reference svc_1..svc_10 with a
# "_documents_loaded_count" suffix, but the exporter above emits
# svc_0..svc_9_documents_loaded (no "_count") — confirm the intended
# metric names, otherwise every rule records an empty series.
groups:
  - name: test-node
    interval: 30s
    rules:
      - record: job:svc_1_documents_loaded_count:avg_rate1s
        expr: avg(rate(svc_1_documents_loaded_count[1s])) by (job)
      - record: job:svc_2_documents_loaded_count:avg_rate1s
        expr: avg(rate(svc_2_documents_loaded_count[1s])) by (job)
      - record: job:svc_3_documents_loaded_count:avg_rate1s
        expr: avg(rate(svc_3_documents_loaded_count[1s])) by (job)
      - record: job:svc_4_documents_loaded_count:avg_rate1s
        expr: avg(rate(svc_4_documents_loaded_count[1s])) by (job)
      - record: job:svc_5_documents_loaded_count:avg_rate1s
        expr: avg(rate(svc_5_documents_loaded_count[1s])) by (job)
      - record: job:svc_6_documents_loaded_count:avg_rate1s
        expr: avg(rate(svc_6_documents_loaded_count[1s])) by (job)
      - record: job:svc_7_documents_loaded_count:avg_rate1s
        expr: avg(rate(svc_7_documents_loaded_count[1s])) by (job)
      - record: job:svc_8_documents_loaded_count:avg_rate1s
        expr: avg(rate(svc_8_documents_loaded_count[1s])) by (job)
      - record: job:svc_9_documents_loaded_count:avg_rate1s
        expr: avg(rate(svc_9_documents_loaded_count[1s])) by (job)
      - record: job:svc_10_documents_loaded_count:avg_rate1s
        expr: avg(rate(svc_10_documents_loaded_count[1s])) by (job)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment