Filippo Giunchedi has uploaded a new change for review.

  https://gerrit.wikimedia.org/r/257860

Change subject: prometheus: add graphite_exporter support
......................................................................

prometheus: add graphite_exporter support

graphite_exporter accepts as input graphite line protocol metrics and converts
those to prometheus format, according to a mapping file.

Two configs are provided to target labs and production. In particular
only essential machine metrics from diamond are considered. This should
provide for an initial but realistic evaluation.

The mapping file tries to mimic metrics exported by
https://github.com/prometheus/node_exporter as much as possible.

Bug: T92813
Change-Id: Ibe558acfc0594e394e26464c9b4562e434876a4d
---
A modules/prometheus/files/graphite_exporter.conf
A modules/prometheus/files/graphite_exporter_labs.conf
A modules/prometheus/manifests/graphite_exporter.pp
3 files changed, 334 insertions(+), 0 deletions(-)


  git pull ssh://gerrit.wikimedia.org:29418/operations/puppet 
refs/changes/60/257860/1

diff --git a/modules/prometheus/files/graphite_exporter.conf 
b/modules/prometheus/files/graphite_exporter.conf
new file mode 100644
index 0000000..cf11d9d
--- /dev/null
+++ b/modules/prometheus/files/graphite_exporter.conf
@@ -0,0 +1,136 @@
+servers.*.cpu.total.*
+name="graphite_servers_cpu_percent"
+mode="$2"
+instance="$1"
+
+servers.*.memory.*
+name="graphite_servers_memory_${2}"
+instance="$1"
+
+servers.*.diskspace.*.inodes_free
+name="graphite_servers_filesystem_files_free"
+instance="$1"
+mountpoint="$2"
+
+servers.*.diskspace.*.inodes_used
+name="graphite_servers_filesystem_files_used"
+instance="$1"
+mountpoint="$2"
+
+servers.*.diskspace.*.byte_free
+name="graphite_servers_filesystem_free"
+instance="$1"
+mountpoint="$2"
+
+servers.*.diskspace.*.byte_used
+name="graphite_servers_filesystem_used"
+instance="$1"
+mountpoint="$2"
+
+servers.*.diskspace.*.byte_avail
+name="graphite_servers_filesystem_avail"
+instance="$1"
+mountpoint="$2"
+
+servers.*.loadavg.01
+name="graphite_servers_load1"
+instance="$1"
+
+servers.*.loadavg.05
+name="graphite_servers_load5"
+instance="$1"
+
+servers.*.loadavg.15
+name="graphite_servers_load15"
+instance="$1"
+
+servers.*.loadavg.processes_running
+name="graphite_servers_procs_running"
+instance="$1"
+
+
+
+
+servers.*.iostat.*.reads_per_second
+name="graphite_servers_disk_reads_per_second"
+instance="$1"
+device="$2"
+
+servers.*.iostat.*.writes_per_second
+name="graphite_servers_disk_writes_per_second"
+instance="$1"
+device="$2"
+
+servers.*.iostat.*.read_byte_per_second
+name="graphite_servers_disk_read_byte_per_second"
+instance="$1"
+device="$2"
+
+servers.*.iostat.*.write_byte_per_second
+name="graphite_servers_disk_write_byte_per_second"
+instance="$1"
+device="$2"
+
+servers.*.iostat.*.iops
+name="graphite_servers_disk_io_per_second"
+instance="$1"
+device="$2"
+
+servers.*.iostat.*.used_percent
+name="graphite_servers_disk_io_used_percent"
+instance="$1"
+device="$2"
+
+servers.*.iostat.*.service_time
+name="graphite_servers_disk_service_time_milliseconds"
+instance="$1"
+device="$2"
+
+servers.*.iostat.*.average_request_size_bytes
+name="graphite_servers_disk_average_request_size_bytes"
+instance="$1"
+device="$2"
+
+servers.*.iostat.*.average_queue_length
+name="graphite_servers_disk_average_queue_length"
+instance="$1"
+device="$2"
+
+servers.*.network.*.rx_byte
+name="graphite_servers_network_receive_bytes_per_second"
+instance="$1"
+device="$2"
+
+servers.*.network.*.tx_byte
+name="graphite_servers_network_transmit_bytes_per_second"
+instance="$1"
+device="$2"
+
+servers.*.network.*.rx_drop
+name="graphite_servers_network_receive_drop_per_second"
+instance="$1"
+device="$2"
+
+servers.*.network.*.tx_drop
+name="graphite_servers_network_transmit_drop_per_second"
+instance="$1"
+device="$2"
+
+servers.*.network.*.rx_errors
+name="graphite_servers_network_receive_errs_per_second"
+instance="$1"
+device="$2"
+
+servers.*.network.*.tx_errors
+name="graphite_servers_network_transmit_errs_per_second"
+instance="$1"
+device="$2"
+
+servers.*.network.*.rx_packets
+name="graphite_servers_network_receive_packets_per_second"
+instance="$1"
+device="$2"
+
+servers.*.udp.*
+name="graphite_servers_netstat_Udp_${2}_per_minute"
+instance="$1"
diff --git a/modules/prometheus/files/graphite_exporter_labs.conf 
b/modules/prometheus/files/graphite_exporter_labs.conf
new file mode 100644
index 0000000..e4c283c
--- /dev/null
+++ b/modules/prometheus/files/graphite_exporter_labs.conf
@@ -0,0 +1,164 @@
+*.*.cpu.total.*
+name="graphite_servers_cpu_percent"
+mode="$3"
+instance="$2"
+job="$1"
+
+*.*.memory.*
+name="graphite_servers_memory_${3}"
+job="$1"
+instance="$2"
+
+*.*.diskspace.*.inodes_free
+name="graphite_servers_filesystem_files_free"
+job="$1"
+instance="$2"
+mountpoint="$3"
+
+*.*.diskspace.*.inodes_used
+name="graphite_servers_filesystem_files_used"
+job="$1"
+instance="$2"
+mountpoint="$3"
+
+*.*.diskspace.*.byte_free
+name="graphite_servers_filesystem_free"
+job="$1"
+instance="$2"
+mountpoint="$3"
+
+*.*.diskspace.*.byte_used
+name="graphite_servers_filesystem_used"
+job="$1"
+instance="$2"
+mountpoint="$3"
+
+*.*.diskspace.*.byte_avail
+name="graphite_servers_filesystem_avail"
+job="$1"
+instance="$2"
+mountpoint="$3"
+
+*.*.loadavg.01
+name="graphite_servers_load1"
+job="$1"
+instance="$2"
+
+*.*.loadavg.05
+name="graphite_servers_load5"
+job="$1"
+instance="$2"
+
+*.*.loadavg.15
+name="graphite_servers_load15"
+job="$1"
+instance="$2"
+
+*.*.loadavg.processes_running
+name="graphite_servers_procs_running"
+job="$1"
+instance="$2"
+
+
+
+
+*.*.iostat.*.reads_per_second
+name="graphite_servers_disk_reads_per_second"
+job="$1"
+instance="$2"
+device="$3"
+
+*.*.iostat.*.writes_per_second
+name="graphite_servers_disk_writes_per_second"
+job="$1"
+instance="$2"
+device="$3"
+
+*.*.iostat.*.read_byte_per_second
+name="graphite_servers_disk_read_byte_per_second"
+job="$1"
+instance="$2"
+device="$3"
+
+*.*.iostat.*.write_byte_per_second
+name="graphite_servers_disk_write_byte_per_second"
+job="$1"
+instance="$2"
+device="$3"
+
+*.*.iostat.*.iops
+name="graphite_servers_disk_io_per_second"
+job="$1"
+instance="$2"
+device="$3"
+
+*.*.iostat.*.used_percent
+name="graphite_servers_disk_io_used_percent"
+job="$1"
+instance="$2"
+device="$3"
+
+*.*.iostat.*.service_time
+name="graphite_servers_disk_service_time_milliseconds"
+job="$1"
+instance="$2"
+device="$3"
+
+*.*.iostat.*.average_request_size_bytes
+name="graphite_servers_disk_average_request_size_bytes"
+job="$1"
+instance="$2"
+device="$3"
+
+*.*.iostat.*.average_queue_length
+name="graphite_servers_disk_average_queue_length"
+job="$1"
+instance="$2"
+device="$3"
+
+*.*.network.*.rx_byte
+name="graphite_servers_network_receive_bytes_per_second"
+job="$1"
+instance="$2"
+device="$3"
+
+*.*.network.*.tx_byte
+name="graphite_servers_network_transmit_bytes_per_second"
+job="$1"
+instance="$2"
+device="$3"
+
+*.*.network.*.rx_drop
+name="graphite_servers_network_receive_drop_per_second"
+job="$1"
+instance="$2"
+device="$3"
+
+*.*.network.*.tx_drop
+name="graphite_servers_network_transmit_drop_per_second"
+job="$1"
+instance="$2"
+device="$3"
+
+*.*.network.*.rx_errors
+name="graphite_servers_network_receive_errs_per_second"
+job="$1"
+instance="$2"
+device="$3"
+
+*.*.network.*.tx_errors
+name="graphite_servers_network_transmit_errs_per_second"
+job="$1"
+instance="$2"
+device="$3"
+
+*.*.network.*.rx_packets
+name="graphite_servers_network_receive_packets_per_second"
+job="$1"
+instance="$2"
+device="$3"
+
+*.*.udp.*
+name="graphite_servers_netstat_Udp_${3}_per_minute"
+job="$1"
+instance="$2"
diff --git a/modules/prometheus/manifests/graphite_exporter.pp 
b/modules/prometheus/manifests/graphite_exporter.pp
new file mode 100644
index 0000000..4b6387e
--- /dev/null
+++ b/modules/prometheus/manifests/graphite_exporter.pp
@@ -0,0 +1,34 @@
+# == Class: prometheus::graphite_exporter
+#
+# Deploy prometheus-graphite-exporter: it accepts metrics in graphite line
+# protocol and re-exports them in prometheus format, translated according to
+# a mapping configuration file.
+#
+# === Parameters
+#
+# [*config_file*]
+#   Name of the mapping file (from this module's files/) to install as
+#   /etc/prometheus/graphite-exporter.conf. Defaults to the production
+#   mapping; pass 'graphite_exporter_labs.conf' for labs.
+
+class prometheus::graphite_exporter (
+    $config_file = 'graphite_exporter.conf',
+) {
+    require_package('prometheus-graphite-exporter')
+
+    file { '/etc/prometheus':
+        ensure => directory,
+        mode   => '0555',
+        owner  => 'root',
+        group  => 'root',
+    }
+
+    # Mapping file: restart the exporter on change so the new mapping
+    # takes effect.
+    file { '/etc/prometheus/graphite-exporter.conf':
+        ensure => present,
+        source => "puppet:///modules/${module_name}/${config_file}",
+        mode   => '0444',
+        owner  => 'root',
+        group  => 'root',
+        notify => Service['prometheus-graphite-exporter'],
+    }
+
+    # Daemon arguments; -graphite.mapping-strict-match drops metrics not
+    # matched by the mapping file instead of passing them through.
+    file { '/etc/default/prometheus-graphite-exporter':
+        ensure  => present,
+        content => "ARGS='-graphite.mapping-config=/etc/prometheus/graphite-exporter.conf -graphite.mapping-strict-match'\n",
+        mode    => '0444',
+        owner   => 'root',
+        group   => 'root',
+        notify  => Service['prometheus-graphite-exporter'],
+    }
+
+    base::service_unit { 'prometheus-graphite-exporter':
+        ensure => present,
+    }
+}

-- 
To view, visit https://gerrit.wikimedia.org/r/257860
To unsubscribe, visit https://gerrit.wikimedia.org/r/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: Ibe558acfc0594e394e26464c9b4562e434876a4d
Gerrit-PatchSet: 1
Gerrit-Project: operations/puppet
Gerrit-Branch: production
Gerrit-Owner: Filippo Giunchedi <[email protected]>

_______________________________________________
MediaWiki-commits mailing list
[email protected]
https://lists.wikimedia.org/mailman/listinfo/mediawiki-commits

Reply via email to