Mirror of https://github.com/Freika/dawarich.git (synced 2026-01-13 02:31:39 -05:00)
Merge pull request #422 from Freika/feature/prometheus-monitoring

Expose Prometheus exporter to the outside world

Commit 3994b3b2d4 · 6 changed files with 62 additions and 16 deletions
Version file:

```diff
@@ -1 +1 @@
-0.16.6
+0.16.7
```

CHANGELOG.md (27 changed lines):
@@ -5,6 +5,33 @@ All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).

# 0.16.7 - 2024-11-20

### Changed

- Prometheus exporter is now bound to 0.0.0.0 instead of localhost
- `PROMETHEUS_EXPORTER_HOST` and `PROMETHEUS_EXPORTER_PORT` env vars were added to the `docker-compose.yml` file to allow you to set the host and port for the Prometheus exporter. They should be added to both the `dawarich_app` and `dawarich_sidekiq` services. Example:

```diff
dawarich_app:
  image: freikin/dawarich:latest
  container_name: dawarich_app
  environment:
    ...
    PROMETHEUS_EXPORTER_ENABLED: "true"
+   PROMETHEUS_EXPORTER_HOST: 0.0.0.0
+   PROMETHEUS_EXPORTER_PORT: "9394"

dawarich_sidekiq:
  image: freikin/dawarich:latest
  container_name: dawarich_sidekiq
  environment:
    ...
    PROMETHEUS_EXPORTER_ENABLED: "true"
+   PROMETHEUS_EXPORTER_HOST: dawarich_app
+   PROMETHEUS_EXPORTER_PORT: "9394"
```

# 0.16.6 - 2024-11-20

### Added
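The `PROMETHEUS_EXPORTER_HOST`/`PROMETHEUS_EXPORTER_PORT` variables described above are consumed by the prometheus_exporter client, which ships metrics to the standalone exporter over HTTP. Below is a minimal sketch of that wiring as a Rails initializer; the file path, the `ENABLED` check, and the fallback values are illustrative assumptions, not necessarily Dawarich's actual code.

```ruby
# Hypothetical initializer (path and defaults assumed, not taken from this PR).
if ENV['PROMETHEUS_EXPORTER_ENABLED'] == 'true'
  require 'prometheus_exporter/client'

  # Point the default client at the exporter host/port configured in docker-compose.yml.
  PrometheusExporter::Client.default = PrometheusExporter::Client.new(
    host: ENV.fetch('PROMETHEUS_EXPORTER_HOST', 'localhost'),
    port: ENV.fetch('PROMETHEUS_EXPORTER_PORT', '9394').to_i
  )
end
```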
Process definitions file:

```diff
@@ -1,2 +1,2 @@
+prometheus_exporter: bundle exec prometheus_exporter -b 0.0.0.0
 web: bin/rails server -p 3000 -b 0.0.0.0
-prometheus_exporter: bundle exec prometheus_exporter
```
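The `-b 0.0.0.0` flag added above makes the standalone exporter listen on all interfaces rather than only localhost, which is what lets the `dawarich_sidekiq` container (and Prometheus) reach it. For illustration, roughly the same thing can be done in-process with the gem's web server; this is a sketch only, assuming the `bind:`/`port:` options that mirror the CLI flags, while Dawarich itself keeps using the standalone binary.

```ruby
# Sketch only: run the exporter's collector/web server from Ruby instead of the
# `prometheus_exporter` binary, bound to all interfaces like `-b 0.0.0.0`.
require 'prometheus_exporter/server'

server = PrometheusExporter::Server::WebServer.new(bind: '0.0.0.0', port: 9394)
server.start

sleep # keep the script alive; metrics are then served at /metrics on port 9394
```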
Sidekiq initializer:

```diff
@@ -3,10 +3,20 @@
 Sidekiq.configure_server do |config|
   config.redis = { url: ENV['REDIS_URL'] }
 
+  require 'prometheus_exporter/instrumentation'
+
   config.server_middleware do |chain|
-    require 'prometheus_exporter/instrumentation'
     chain.add PrometheusExporter::Instrumentation::Sidekiq
   end
+
+  config.death_handlers << PrometheusExporter::Instrumentation::Sidekiq.death_handler
+
+  config.on :startup do
+    PrometheusExporter::Instrumentation::Process.start type: 'sidekiq'
+    PrometheusExporter::Instrumentation::SidekiqProcess.start
+    PrometheusExporter::Instrumentation::SidekiqQueue.start
+    PrometheusExporter::Instrumentation::SidekiqStats.start
+  end
 end
 
 Sidekiq.configure_client do |config|
```
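The `config.on :startup` block added here starts the gem's collectors inside each Sidekiq process, and those collectors push their readings to the exporter named by `PROMETHEUS_EXPORTER_HOST`; the death handler reports jobs that exhaust their retries. As an aside, application code can publish its own metrics through the same default client; the example below is purely hypothetical, with a made-up metric name.

```ruby
# Hypothetical, not part of this PR: register and update a custom counter via the
# default prometheus_exporter client that the instrumentation above also uses.
require 'prometheus_exporter/client'

counter = PrometheusExporter::Client.default.register(
  :counter, 'dawarich_example_events', 'made-up example counter'
)
counter.observe(1) # increments the counter by 1 on the exporter side
```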
Puma configuration:

```diff
@@ -1,28 +1,32 @@
 # frozen_string_literal: true
 
+require 'prometheus_exporter/instrumentation'
+
 # Puma can serve each request in a thread from an internal thread pool.
 # The `threads` method setting takes two numbers: a minimum and maximum.
 # Any libraries that use thread pools should be configured to match
 # the maximum value specified for Puma. Default is set to 5 threads for minimum
 # and maximum; this matches the default thread size of Active Record.
 #
-max_threads_count = ENV.fetch("RAILS_MAX_THREADS") { 5 }
-min_threads_count = ENV.fetch("RAILS_MIN_THREADS") { max_threads_count }
+max_threads_count = ENV.fetch('RAILS_MAX_THREADS') { 5 }
+min_threads_count = ENV.fetch('RAILS_MIN_THREADS') { max_threads_count }
 threads min_threads_count, max_threads_count
 
 # Specifies the `worker_timeout` threshold that Puma will use to wait before
 # terminating a worker in development environments.
 #
-worker_timeout 3600 if ENV.fetch("RAILS_ENV", "development") == "development"
+worker_timeout 3600 if ENV.fetch('RAILS_ENV', 'development') == 'development'
 
 # Specifies the `port` that Puma will listen on to receive requests; default is 3000.
 #
-port ENV.fetch("PORT") { 3000 }
+port ENV.fetch('PORT') { 3000 }
 
 # Specifies the `environment` that Puma will run in.
 #
-environment ENV.fetch("RAILS_ENV") { "development" }
+environment ENV.fetch('RAILS_ENV') { 'development' }
 
 # Specifies the `pidfile` that Puma will use.
-pidfile ENV.fetch("PIDFILE") { "tmp/pids/server.pid" }
+pidfile ENV.fetch('PIDFILE') { 'tmp/pids/server.pid' }
 
 # Specifies the number of `workers` to boot in clustered mode.
 # Workers are forked web server processes. If using threads and workers together
@@ -42,8 +46,7 @@ pidfile ENV.fetch("PIDFILE") { "tmp/pids/server.pid" }
 # Allow puma to be restarted by `bin/rails restart` command.
 plugin :tmp_restart
 
-# Promethues exporter
-after_worker_boot do
-  require 'prometheus_exporter/instrumentation'
-  PrometheusExporter::Instrumentation::Process.start(type:"web")
-end
+# Prometheus exporter
+#
+# optional check, avoids spinning up and down threads per worker
+PrometheusExporter::Instrumentation::Puma.start unless PrometheusExporter::Instrumentation::Puma.started?
```
docker-compose.yml:

```diff
@@ -45,6 +45,7 @@ services:
       - dawarich
     ports:
       - 3000:3000
+      # - 9394:9394 # Prometheus exporter, uncomment if needed
     stdin_open: true
     tty: true
     entrypoint: dev-entrypoint.sh
@@ -65,7 +66,9 @@ services:
       DISTANCE_UNIT: km
       PHOTON_API_HOST: photon.komoot.io
       PHOTON_API_USE_HTTPS: true
-      PROMETHEUS_EXPORTER_ENABLED: false
+      PROMETHEUS_EXPORTER_ENABLED: true
+      PROMETHEUS_EXPORTER_HOST: 0.0.0.0
+      PROMETHEUS_EXPORTER_PORT: 9394
     logging:
       driver: "json-file"
       options:
@@ -117,6 +120,9 @@ services:
       DISTANCE_UNIT: km
       PHOTON_API_HOST: photon.komoot.io
       PHOTON_API_USE_HTTPS: true
+      PROMETHEUS_EXPORTER_ENABLED: false
+      PROMETHEUS_EXPORTER_HOST: dawarich_app
+      PROMETHEUS_EXPORTER_PORT: 9394
     logging:
       driver: "json-file"
       options:
```
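With `dawarich_sidekiq` pointed at `dawarich_app` and the exporter bound to 0.0.0.0, one quick way to confirm the wiring is to fetch the exporter's standard `/metrics` route from inside either container. A hypothetical smoke test using the same env vars the compose file sets (the fallback values are assumptions):

```ruby
# Hypothetical check: print whatever the exporter currently serves at /metrics.
require 'net/http'

host = ENV.fetch('PROMETHEUS_EXPORTER_HOST', 'localhost')
port = ENV.fetch('PROMETHEUS_EXPORTER_PORT', '9394')
puts Net::HTTP.get(URI("http://#{host}:#{port}/metrics"))
```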