Mirror of https://github.com/Freika/dawarich.git, synced 2026-01-11 09:41:40 -05:00

Merge pull request #1384 from Freika/revert/sidekiq-and-redis: Revert/sidekiq and redis

This commit is contained in commit efe846f2bb.
55 changed files with 705 additions and 566 deletions.
@@ -1 +1 @@
-0.27.5
+0.28.0
@@ -16,16 +16,12 @@ jobs:
       DATABASE_USERNAME: postgres
       DATABASE_PASSWORD: mysecretpassword
       DATABASE_PORT: 5432
-      QUEUE_DATABASE_HOST: localhost
-      QUEUE_DATABASE_NAME: dawarich_test_queue
-      QUEUE_DATABASE_USERNAME: postgres
-      QUEUE_DATABASE_PASSWORD: mysecretpassword
-      QUEUE_DATABASE_PORT: 5432
   - image: cimg/postgres:13.3-postgis
     environment:
       POSTGRES_USER: postgres
       POSTGRES_DB: dawarich_test
       POSTGRES_PASSWORD: mysecretpassword
+  - image: redis:7.0
   - image: selenium/standalone-chrome:latest
     name: chrome
     environment:
@@ -10,7 +10,6 @@ services:
       - dawarich_public:/var/app/public
       - dawarich_watched:/var/app/tmp/imports/watched
      - dawarich_storage:/var/app/storage
-      - dawarich_sqlite_data:/dawarich_sqlite_data
     networks:
       - dawarich
     ports:
@@ -20,19 +19,11 @@ services:
     tty: true
     environment:
       RAILS_ENV: development
+      REDIS_URL: redis://dawarich_redis:6379
       DATABASE_HOST: dawarich_db
       DATABASE_USERNAME: postgres
       DATABASE_PASSWORD: password
       DATABASE_NAME: dawarich_development
-      # PostgreSQL database name for solid_queue
-      QUEUE_DATABASE_HOST: dawarich_db
-      QUEUE_DATABASE_USERNAME: postgres
-      QUEUE_DATABASE_PASSWORD: password
-      QUEUE_DATABASE_PORT: 5432
-      QUEUE_DATABASE_NAME: dawarich_development_queue
-      # SQLite database paths for cache and cable databases
-      CACHE_DATABASE_PATH: /dawarich_sqlite_data/dawarich_development_cache.sqlite3
-      CABLE_DATABASE_PATH: /dawarich_sqlite_data/dawarich_development_cable.sqlite3
       MIN_MINUTES_SPENT_IN_CITY: 60
       APPLICATION_HOSTS: localhost
       TIME_ZONE: Europe/London
@@ -40,6 +31,21 @@ services:
       PROMETHEUS_EXPORTER_ENABLED: false
       PROMETHEUS_EXPORTER_HOST: 0.0.0.0
       PROMETHEUS_EXPORTER_PORT: 9394
+  dawarich_redis:
+    image: redis:7.4-alpine
+    container_name: dawarich_redis
+    command: redis-server
+    networks:
+      - dawarich
+    volumes:
+      - dawarich_shared:/data
+    restart: always
+    healthcheck:
+      test: [ "CMD", "redis-cli", "--raw", "incr", "ping" ]
+      interval: 10s
+      retries: 5
+      start_period: 30s
+      timeout: 10s
   dawarich_db:
     image: postgis/postgis:17-3.5-alpine
     container_name: dawarich_db
@@ -64,4 +70,3 @@ volumes:
   dawarich_public:
   dawarich_watched:
   dawarich_storage:
-  dawarich_sqlite_data:
@@ -3,3 +3,4 @@ DATABASE_USERNAME=postgres
 DATABASE_PASSWORD=password
 DATABASE_NAME=dawarich_development
 DATABASE_PORT=5432
+REDIS_URL=redis://localhost:6379
@@ -3,3 +3,4 @@ DATABASE_USERNAME=postgres
 DATABASE_PASSWORD=password
 DATABASE_NAME=dawarich_test
 DATABASE_PORT=5432
+REDIS_URL=redis://localhost:6379
.github/workflows/ci.yml (vendored): 7 changes
@@ -19,6 +19,10 @@ jobs:
         ports:
           - 5432:5432
         options: --health-cmd="pg_isready" --health-interval=10s --health-timeout=5s --health-retries=3
+      redis:
+        image: redis
+        ports:
+          - 6379:6379

     steps:
       - name: Install packages
@@ -53,12 +57,14 @@ jobs:
         env:
           RAILS_ENV: test
           DATABASE_URL: postgres://postgres:postgres@localhost:5432
+          REDIS_URL: redis://localhost:6379/1
         run: bin/rails db:setup

       - name: Run main tests (excluding system tests)
         env:
           RAILS_ENV: test
           DATABASE_URL: postgres://postgres:postgres@localhost:5432
+          REDIS_URL: redis://localhost:6379/1
         run: |
           bundle exec rspec --exclude-pattern "spec/system/**/*_spec.rb" || (cat log/test.log && exit 1)

@@ -66,6 +72,7 @@ jobs:
         env:
           RAILS_ENV: test
           DATABASE_URL: postgres://postgres:postgres@localhost:5432
+          REDIS_URL: redis://localhost:6379/1
         run: |
           bundle exec rspec spec/system/ || (cat log/test.log && exit 1)

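The same setup can be reproduced locally before pushing; a minimal sketch mirroring the workflow's env above (it assumes Postgres and Redis are already listening on their default local ports):

```bash
# Mirror the CI environment locally and run the non-system specs
export RAILS_ENV=test
export DATABASE_URL=postgres://postgres:postgres@localhost:5432
export REDIS_URL=redis://localhost:6379/1
bin/rails db:setup
bundle exec rspec --exclude-pattern "spec/system/**/*_spec.rb"
```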
CHANGELOG.md: 130 changes
@@ -4,7 +4,130 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](http://keepachangelog.com/)
 and this project adheres to [Semantic Versioning](http://semver.org/).

-# 0.27.5 - 2025-06-08
+# 0.28.0 - 2025-06-09

⚠️ This release includes a breaking change. ⚠️

_yet another, yay!_

Well, we're moving back to Sidekiq and Redis for background jobs and caching. Unfortunately, SolidQueue and SolidCache brought more problems than they solved. Please update your `docker-compose.yml` to use Redis and Sidekiq.

```diff
 networks:
   dawarich:
 services:
+  dawarich_redis:
+    image: redis:7.4-alpine
+    container_name: dawarich_redis
+    command: redis-server
+    networks:
+      - dawarich
+    volumes:
+      - dawarich_shared:/data
+    restart: always
+    healthcheck:
+      test: [ "CMD", "redis-cli", "--raw", "incr", "ping" ]
+      interval: 10s
+      retries: 5
+      start_period: 30s
+      timeout: 10s
 ...
   dawarich_app:
     image: freikin/dawarich:latest
     container_name: dawarich_app
     volumes:
       - dawarich_public:/var/app/public
       - dawarich_watched:/var/app/tmp/imports/watched
       - dawarich_storage:/var/app/storage
       - dawarich_db_data:/dawarich_db_data
-      - dawarich_sqlite_data:/dawarich_sqlite_data
 ...
     restart: on-failure
     environment:
       RAILS_ENV: development
+      REDIS_URL: redis://dawarich_redis:6379
       DATABASE_HOST: dawarich_db
       DATABASE_USERNAME: postgres
       DATABASE_PASSWORD: password
       DATABASE_NAME: dawarich_development
-      # PostgreSQL database name for solid_queue
-      QUEUE_DATABASE_NAME: dawarich_development_queue
-      QUEUE_DATABASE_PASSWORD: password
-      QUEUE_DATABASE_USERNAME: postgres
-      QUEUE_DATABASE_HOST: dawarich_db
-      QUEUE_DATABASE_PORT: 5432
-      # SQLite database paths for cache and cable databases
-      CACHE_DATABASE_PATH: /dawarich_sqlite_data/dawarich_development_cache.sqlite3
-      CABLE_DATABASE_PATH: /dawarich_sqlite_data/dawarich_development_cable.sqlite3
 ...
     depends_on:
       dawarich_db:
         condition: service_healthy
         restart: true
+      dawarich_redis:
+        condition: service_healthy
+        restart: true
 ...
+  dawarich_sidekiq:
+    image: freikin/dawarich:latest
+    container_name: dawarich_sidekiq
+    volumes:
+      - dawarich_public:/var/app/public
+      - dawarich_watched:/var/app/tmp/imports/watched
+      - dawarich_storage:/var/app/storage
+    networks:
+      - dawarich
+    stdin_open: true
+    tty: true
+    entrypoint: sidekiq-entrypoint.sh
+    command: ['sidekiq']
+    restart: on-failure
+    environment:
+      RAILS_ENV: development
+      REDIS_URL: redis://dawarich_redis:6379
+      DATABASE_HOST: dawarich_db
+      DATABASE_USERNAME: postgres
+      DATABASE_PASSWORD: password
+      DATABASE_NAME: dawarich_development
+      APPLICATION_HOSTS: localhost
+      BACKGROUND_PROCESSING_CONCURRENCY: 10
+      APPLICATION_PROTOCOL: http
+      PROMETHEUS_EXPORTER_ENABLED: false
+      PROMETHEUS_EXPORTER_HOST: dawarich_app
+      PROMETHEUS_EXPORTER_PORT: 9394
+      SELF_HOSTED: "true"
+      STORE_GEODATA: "true"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "100m"
+        max-file: "5"
+    healthcheck:
+      test: [ "CMD-SHELL", "pgrep -f sidekiq" ]
+      interval: 10s
+      retries: 30
+      start_period: 30s
+      timeout: 10s
+    depends_on:
+      dawarich_db:
+        condition: service_healthy
+        restart: true
+      dawarich_redis:
+        condition: service_healthy
+        restart: true
+      dawarich_app:
+        condition: service_healthy
+        restart: true
 ...
 volumes:
   dawarich_db_data:
-  dawarich_sqlite_data:
   dawarich_shared:
   dawarich_public:
   dawarich_watched:
   dawarich_storage:
```

## Fixed
@@ -15,6 +138,11 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
 ## Changed

 - Geocoder is now being installed from a private fork for debugging purposes.
+- Redis is now being used for caching.
+- Sidekiq is now being used for background jobs.
+
+## Removed
+- SolidQueue, SolidCache and SolidCable are now removed.

 # 0.27.4 - 2025-06-06
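After updating `docker-compose.yml` as described above, a quick way to confirm the migration took is to recreate the stack and check that the new containers come up healthy. A minimal sketch, assuming the service names from the compose diff above (`dawarich_app`, `dawarich_sidekiq`, `dawarich_redis`):

```bash
# Pull the 0.28.0 images and recreate the stack (assumes the compose file above)
docker compose pull
docker compose up -d

# Both dawarich_sidekiq and dawarich_redis should report a healthy status
docker compose ps

# Sidekiq's boot banner and queue list appear in the worker logs
docker compose logs -f dawarich_sidekiq
```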
@@ -7,6 +7,12 @@ Now you can create/prepare the database (this needs to be done once):
 bundle exec rails db:prepare
 ```

+Afterwards you can run Sidekiq:
+```bash
+bundle exec sidekiq
+```
+
 And in a second terminal the dawarich-app:
 ```bash
 bundle exec bin/dev
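To confirm the worker actually picks up jobs in development, you can enqueue one of the app's own jobs from the command line; a minimal sketch (it assumes the dev database and Redis are running, and uses `AppVersionCheckingJob` from this changeset):

```bash
# Enqueue a lightweight job; it should appear in the `bundle exec sidekiq` terminal
bundle exec rails runner "AppVersionCheckingJob.perform_later"
```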
Gemfile: 10 changes
@@ -20,7 +20,6 @@ gem 'httparty'
 gem 'importmap-rails'
 gem 'kaminari'
 gem 'lograge'
-gem 'mission_control-jobs'
 gem 'oj'
 gem 'pg'
 gem 'prometheus_exporter'
@@ -28,6 +27,7 @@ gem 'activerecord-postgis-adapter'
 gem 'puma'
 gem 'pundit'
 gem 'rails', '~> 8.0'
+gem 'redis'
 gem 'rexml'
 gem 'rgeo'
 gem 'rgeo-activerecord'
@@ -36,14 +36,13 @@ gem 'rswag-api'
 gem 'rswag-ui'
 gem 'sentry-ruby'
 gem 'sentry-rails'
-gem 'sqlite3', '~> 2.6'
 gem 'stackprof'
+gem 'sidekiq'
+gem 'sidekiq-cron'
+gem 'sidekiq-limit_fetch'
 gem 'sprockets-rails'
 gem 'stimulus-rails'
 gem 'strong_migrations'
-gem 'solid_cable', '~> 3.0'
-gem 'solid_cache', '1.0.7'
-gem 'solid_queue', '~> 1.1'
 gem 'tailwindcss-rails'
 gem 'turbo-rails'
 gem 'tzinfo-data', platforms: %i[mingw mswin x64_mingw jruby]
@@ -64,6 +63,7 @@ end

 group :test do
   gem 'capybara'
+  gem 'fakeredis'
   gem 'selenium-webdriver'
   gem 'shoulda-matchers'
   gem 'simplecov', require: false
Gemfile.lock: 64 changes
@@ -134,6 +134,9 @@ GEM
       bigdecimal
       rexml
     crass (1.0.6)
+    cronex (0.15.0)
+      tzinfo
+      unicode (>= 0.4.4.5)
     csv (3.3.4)
     data_migrate (11.3.0)
       activerecord (>= 6.1)
@@ -166,6 +169,7 @@ GEM
     factory_bot_rails (6.4.4)
       factory_bot (~> 6.5)
       railties (>= 5.0.0)
+    fakeredis (0.1.4)
     ffaker (2.24.0)
     foreman (0.88.1)
     fugit (1.11.1)
@@ -235,16 +239,6 @@ GEM
     mini_mime (1.1.5)
     mini_portile2 (2.8.9)
     minitest (5.25.5)
-    mission_control-jobs (1.0.2)
-      actioncable (>= 7.1)
-      actionpack (>= 7.1)
-      activejob (>= 7.1)
-      activerecord (>= 7.1)
-      importmap-rails (>= 1.2.1)
-      irb (~> 1.13)
-      railties (>= 7.1)
-      stimulus-rails
-      turbo-rails
     msgpack (1.7.3)
     multi_json (1.15.0)
     multi_xml (0.7.1)
@@ -351,6 +345,10 @@ GEM
     rdoc (6.14.0)
       erb
       psych (>= 4.0.0)
+    redis (5.4.0)
+      redis-client (>= 0.22.0)
+    redis-client (0.24.0)
+      connection_pool
     regexp_parser (2.10.0)
     reline (0.6.1)
       io-console (~> 0.5)
@@ -432,28 +430,25 @@ GEM
       concurrent-ruby (~> 1.0, >= 1.0.2)
     shoulda-matchers (6.5.0)
       activesupport (>= 5.2.0)
+    sidekiq (8.0.4)
+      connection_pool (>= 2.5.0)
+      json (>= 2.9.0)
+      logger (>= 1.6.2)
+      rack (>= 3.1.0)
+      redis-client (>= 0.23.2)
+    sidekiq-cron (2.3.0)
+      cronex (>= 0.13.0)
+      fugit (~> 1.8, >= 1.11.1)
+      globalid (>= 1.0.1)
+      sidekiq (>= 6.5.0)
+    sidekiq-limit_fetch (4.4.1)
+      sidekiq (>= 6)
     simplecov (0.22.0)
       docile (~> 1.1)
       simplecov-html (~> 0.11)
       simplecov_json_formatter (~> 0.1)
     simplecov-html (0.13.1)
     simplecov_json_formatter (0.1.4)
-    solid_cable (3.0.8)
-      actioncable (>= 7.2)
-      activejob (>= 7.2)
-      activerecord (>= 7.2)
-      railties (>= 7.2)
-    solid_cache (1.0.7)
-      activejob (>= 7.2)
-      activerecord (>= 7.2)
-      railties (>= 7.2)
-    solid_queue (1.1.5)
-      activejob (>= 7.1)
-      activerecord (>= 7.1)
-      concurrent-ruby (>= 1.3.1)
-      fugit (~> 1.11.0)
-      railties (>= 7.1)
-      thor (~> 1.3.1)
     sprockets (4.2.1)
       concurrent-ruby (~> 1.0)
       rack (>= 2.2.4, < 4)
@@ -461,12 +456,6 @@ GEM
       actionpack (>= 6.1)
       activesupport (>= 6.1)
       sprockets (>= 3.0.0)
-    sqlite3 (2.6.0-aarch64-linux-gnu)
-    sqlite3 (2.6.0-arm-linux-gnu)
-    sqlite3 (2.6.0-arm64-darwin)
-    sqlite3 (2.6.0-x86-linux-gnu)
-    sqlite3 (2.6.0-x86_64-darwin)
-    sqlite3 (2.6.0-x86_64-linux-gnu)
     stackprof (0.2.27)
     stimulus-rails (1.3.4)
       railties (>= 6.0.0)
@@ -493,6 +482,7 @@ GEM
       railties (>= 7.1.0)
     tzinfo (2.0.6)
       concurrent-ruby (~> 1.0)
+    unicode (0.4.4.5)
     unicode-display_width (3.1.4)
       unicode-emoji (~> 4.0, >= 4.0.4)
     unicode-emoji (4.0.4)
@@ -538,6 +528,7 @@ DEPENDENCIES
  devise
  dotenv-rails
  factory_bot_rails
+  fakeredis
  ffaker
  foreman
  geocoder!
@@ -548,7 +539,6 @@ DEPENDENCIES
  jwt
  kaminari
  lograge
-  mission_control-jobs
  oj
  pg
  prometheus_exporter
@@ -557,6 +547,7 @@ DEPENDENCIES
  puma
  pundit
  rails (~> 8.0)
+  redis
  rexml
  rgeo
  rgeo-activerecord
@@ -570,12 +561,11 @@ DEPENDENCIES
  sentry-rails
  sentry-ruby
  shoulda-matchers
+  sidekiq
+  sidekiq-cron
+  sidekiq-limit_fetch
  simplecov
-  solid_cable (~> 3.0)
-  solid_cache (= 1.0.7)
-  solid_queue (~> 1.1)
  sprockets-rails
-  sqlite3 (~> 2.6)
  stackprof
  stimulus-rails
  strong_migrations
Procfile: 2 changes
@@ -1,2 +1,2 @@
 web: bundle exec puma -C config/puma.rb
-worker: bundle exec bin/jobs
+worker: bundle exec sidekiq -C config/sidekiq.yml
@@ -2,6 +2,7 @@

 class AppVersionCheckingJob < ApplicationJob
   queue_as :default
+  sidekiq_options retry: false

   def perform
     Rails.cache.delete(CheckAppVersion::VERSION_CACHE_KEY)
@@ -2,6 +2,7 @@

 class AreaVisitsCalculatingJob < ApplicationJob
   queue_as :default
+  sidekiq_options retry: false

   def perform(user_id)
     user = User.find(user_id)
@@ -2,6 +2,7 @@

 class AreaVisitsCalculationSchedulingJob < ApplicationJob
   queue_as :default
+  sidekiq_options retry: false

   def perform
     User.find_each { AreaVisitsCalculatingJob.perform_later(_1.id) }
@@ -4,6 +4,7 @@
 # with the default timespan of 1 day.
 class BulkVisitsSuggestingJob < ApplicationJob
   queue_as :visit_suggesting
+  sidekiq_options retry: false

   # Passing timespan of more than 3 years somehow results in duplicated Places
   def perform(start_at: 1.day.ago.beginning_of_day, end_at: 1.day.ago.end_of_day, user_ids: [])
@@ -2,6 +2,7 @@

 class Import::GoogleTakeoutJob < ApplicationJob
   queue_as :imports
+  sidekiq_options retry: false

   def perform(import_id, locations, current_index)
     locations_batch = Oj.load(locations)
@@ -2,6 +2,7 @@

 class Import::PhotoprismGeodataJob < ApplicationJob
   queue_as :imports
+  sidekiq_options retry: false

   def perform(user_id)
     user = User.find(user_id)
@@ -2,6 +2,7 @@

 class Import::WatcherJob < ApplicationJob
   queue_as :imports
+  sidekiq_options retry: false

   def perform
     return unless DawarichSettings.self_hosted?
@@ -1,9 +0,0 @@
-# frozen_string_literal: true
-
-class Jobs::CleanFinishedJob < ApplicationJob
-  queue_as :default
-
-  def perform
-    SolidQueue::Job.clear_finished_in_batches
-  end
-end
@@ -2,6 +2,7 @@

 class VisitSuggestingJob < ApplicationJob
   queue_as :visit_suggesting
+  sidekiq_options retry: false

   # Passing timespan of more than 3 years somehow results in duplicated Places
   def perform(user_id:, start_at:, end_at:)
@@ -54,6 +54,6 @@ class Tasks::Imports::GoogleRecords
   end

   def log_success
-    Rails.logger.info("Imported #{@file_path} for #{@user.email} successfully! Wait for the processing to finish. You can check the status of the import in the Jobs UI (http://<your-dawarich-url>/jobs).")
+    Rails.logger.info("Imported #{@file_path} for #{@user.email} successfully! Wait for the processing to finish. You can check the status of the import in the Sidekiq UI (http://<your-dawarich-url>/sidekiq).")
   end
 end
@@ -11,7 +11,7 @@

   <% if notification.error? %>
     <div class="mt-2">
-      Please, when reporting a bug to <a href="https://github.com/Freika/dawarich/issues" class="link hover:no-underline text-blue-600">Github Issues</a>, don't forget to include logs from <code>dawarich_app</code> docker container. Thank you!
+      Please, when reporting a bug to <a href="https://github.com/Freika/dawarich/issues" class="link hover:no-underline text-blue-600">Github Issues</a>, don't forget to include logs from <code>dawarich_app</code> and <code>dawarich_sidekiq</code> docker containers. Thank you!
     </div>
   <% end %>
 </div>
@@ -45,7 +45,7 @@
     <h2 class="card-title">Background Jobs Dashboard</h2>
     <p>This will open the background jobs dashboard in a new tab.</p>
     <div class="card-actions justify-end">
-      <%= link_to 'Open Dashboard', mission_control_jobs_url, target: '_blank', class: 'btn btn-primary' %>
+      <%= link_to 'Open Dashboard', '/sidekiq', target: '_blank', class: 'btn btn-primary' %>
     </div>
   </div>
 </div>
bin/jobs: 6 changes (file deleted)
@@ -1,6 +0,0 @@
-#!/usr/bin/env ruby
-
-require_relative "../config/environment"
-require "solid_queue/cli"
-
-SolidQueue::Cli.start(ARGV)
@@ -34,5 +34,7 @@ module Dawarich
       g.routing_specs false
       g.helper_specs false
     end
+
+    config.active_job.queue_adapter = :sidekiq
   end
 end
@@ -1,21 +1,11 @@
-# Async adapter only works within the same process, so for manually triggering cable updates from a console,
-# and seeing results in the browser, you must do so from the web console (running inside the dev process),
-# not a terminal started via bin/rails console! Add "console" to any action or any ERB template view
-# to make the web console appear.
-
-default: &default
-  adapter: solid_cable
-  connects_to:
-    database:
-      writing: cable
-  polling_interval: 0.1.seconds
-  message_retention: 1.day
-
 development:
-  <<: *default
+  adapter: redis
+  url: <%= "#{ENV.fetch("REDIS_URL")}/2" %>

 test:
   adapter: test

 production:
-  <<: *default
+  adapter: redis
+  url: <%= "#{ENV.fetch("REDIS_URL")}/2" %>
   channel_prefix: dawarich_production
@@ -1,15 +0,0 @@
-default: &default
-  store_options:
-    # Cap age of oldest cache entry to fulfill retention policies
-    max_age: <%= 60.days.to_i %>
-    max_size: <%= 256.megabytes %>
-    namespace: <%= Rails.env %>
-
-development:
-  <<: *default
-
-test:
-  <<: *default
-
-production:
-  <<: *default
@@ -9,85 +9,18 @@ default: &default
   pool: <%= ENV.fetch("RAILS_MAX_THREADS") { 10 } %>
   timeout: 5000

-sqlite_default: &sqlite_default
-  adapter: sqlite3
-  pool: <%= ENV.fetch("RAILS_MAX_THREADS") { 10 } %>
-  timeout: 5000
-
 development:
-  primary:
-    <<: *default
-    database: <%= ENV['DATABASE_NAME'] || 'dawarich_development' %>
-  queue:
-    <<: *default
-    database: <%= ENV['QUEUE_DATABASE_NAME'] || 'dawarich_development_queue' %>
-    password: <%= ENV['QUEUE_DATABASE_PASSWORD'] %>
-    username: <%= ENV['QUEUE_DATABASE_USERNAME'] %>
-    port: <%= ENV['QUEUE_DATABASE_PORT'] || '5432' %>
-    host: <%= ENV['QUEUE_DATABASE_HOST'] %>
-    migrations_paths: db/queue_migrate
-  cache:
-    <<: *sqlite_default
-    database: <%= ENV['CACHE_DATABASE_PATH'] || 'db/cache.sqlite3' %>
-    migrations_paths: db/cache_migrate
-  cable:
-    <<: *sqlite_default
-    database: <%= ENV['CABLE_DATABASE_PATH'] || 'db/cable.sqlite3' %>
-    migrations_paths: db/cable_migrate
+  <<: *default
+  database: <%= ENV['DATABASE_NAME'] || 'dawarich_development' %>

 test:
-  primary:
-    <<: *default
-    database: <%= ENV['DATABASE_NAME'] || 'dawarich_test' %>
-    password: <%= ENV['DATABASE_PASSWORD'] %>
-  queue:
-    <<: *default
-    database: <%= ENV['QUEUE_DATABASE_NAME'] || 'dawarich_test_queue' %>
-    password: <%= ENV['QUEUE_DATABASE_PASSWORD'] %>
-    username: <%= ENV['QUEUE_DATABASE_USERNAME'] %>
-    port: <%= ENV['QUEUE_DATABASE_PORT'] || '5432' %>
-    host: <%= ENV['QUEUE_DATABASE_HOST'] %>
-    migrations_paths: db/queue_migrate
+  <<: *default
+  database: <%= ENV['DATABASE_NAME'] || 'dawarich_test' %>

 production:
-  primary:
-    <<: *default
-    database: <%= ENV['DATABASE_NAME'] || 'dawarich_production' %>
-  queue:
-    <<: *default
-    database: <%= ENV['QUEUE_DATABASE_NAME'] || 'dawarich_production_queue' %>
-    password: <%= ENV['QUEUE_DATABASE_PASSWORD'] %>
-    username: <%= ENV['QUEUE_DATABASE_USERNAME'] %>
-    port: <%= ENV['QUEUE_DATABASE_PORT'] || '5432' %>
-    host: <%= ENV['QUEUE_DATABASE_HOST'] %>
-    migrations_paths: db/queue_migrate
-  cable:
-    <<: *sqlite_default
-    database: <%= ENV['CABLE_DATABASE_PATH'] || 'db/cable.sqlite3' %>
-    migrations_paths: db/cable_migrate
-  cache:
-    <<: *sqlite_default
-    database: <%= ENV['CACHE_DATABASE_PATH'] %>
-    migrations_paths: db/cache_migrate
+  <<: *default
+  database: <%= ENV['DATABASE_NAME'] || 'dawarich_production' %>

 staging:
-  primary:
-    <<: *default
-    database: <%= ENV['DATABASE_NAME'] || 'dawarich_staging' %>
-    password: <%= ENV['DATABASE_PASSWORD'] %>
-  queue:
-    <<: *default
-    database: <%= ENV['QUEUE_DATABASE_NAME'] || 'dawarich_staging_queue' %>
-    password: <%= ENV['QUEUE_DATABASE_PASSWORD'] %>
-    username: <%= ENV['QUEUE_DATABASE_USERNAME'] %>
-    port: <%= ENV['QUEUE_DATABASE_PORT'] || '5432' %>
-    host: <%= ENV['QUEUE_DATABASE_HOST'] %>
-    migrations_paths: db/queue_migrate
-  cache:
-    <<: *sqlite_default
-    database: <%= ENV['CACHE_DATABASE_PATH'] || 'db/cache.sqlite3' %>
-    migrations_paths: db/cache_migrate
-  cable:
-    <<: *sqlite_default
-    database: <%= ENV['CABLE_DATABASE_PATH'] || 'db/cable.sqlite3' %>
-    migrations_paths: db/cable_migrate
+  <<: *default
+  database: <%= ENV['DATABASE_NAME'] || 'dawarich_staging' %>
@@ -26,8 +26,7 @@ Rails.application.configure do

   # Enable/disable caching. By default caching is disabled.
   # Run rails dev:cache to toggle caching.
-  config.cache_store = :solid_cache_store
-  config.solid_cache.connects_to = { database: { writing: :cache } }
+  config.cache_store = :redis_cache_store, { url: "#{ENV['REDIS_URL']}/0" }

   if Rails.root.join('tmp/caching-dev.txt').exist?
     config.action_controller.perform_caching = true
@@ -68,14 +67,6 @@ Rails.application.configure do
   # Highlight code that enqueued background job in logs.
   config.active_job.verbose_enqueue_logs = true

-  config.active_job.queue_adapter = :solid_queue
-  config.solid_queue.silence_polling = true
-  # :queue is the name of the database connection
-  config.solid_queue.connects_to = { database: { writing: :queue } }
-
-  config.mission_control.jobs.http_basic_auth_enabled = false
-  config.solid_queue.logger = ActiveSupport::Logger.new($stdout)
-
   # Suppress logger output for asset requests.
   config.assets.quiet = true

@@ -73,15 +73,10 @@ Rails.application.configure do
   config.log_level = ENV.fetch('RAILS_LOG_LEVEL', 'info')

   # Use a different cache store in production.
-  config.cache_store = :solid_cache_store
-  config.solid_cache.connects_to = { database: { writing: :cache } }
+  config.cache_store = :redis_cache_store, { url: "#{ENV['REDIS_URL']}/0" }

   # Use a real queuing backend for Active Job (and separate queues per environment).
-  config.active_job.queue_adapter = :solid_queue
-  config.solid_queue.connects_to = { database: { writing: :queue } }
-  config.solid_queue.silence_polling = true
-  config.solid_queue.logger = ActiveSupport::Logger.new($stdout)
   # config.active_job.queue_name_prefix = "dawarich_production"
+  config.active_job.queue_adapter = :sidekiq

   config.action_mailer.perform_caching = false
@@ -4,7 +4,7 @@ settings = {
   debug_mode: true,
   timeout: 5,
   units: :km,
-  cache: Geocoder::CacheStore::Generic.new(Rails.cache, {}),
+  cache: Redis.new(url: "#{ENV['REDIS_URL']}/0"),
   always_raise: :all,
   http_headers: {
     'User-Agent' => "Dawarich #{APP_VERSION} (https://dawarich.app)"
@@ -1,24 +1,30 @@
 # frozen_string_literal: true

-# Sidekiq.configure_server do |config|
-#   if ENV['PROMETHEUS_EXPORTER_ENABLED'].to_s == 'true'
-#     require 'prometheus_exporter/instrumentation'
+Sidekiq.configure_server do |config|
+  config.redis = { url: "#{ENV['REDIS_URL']}/1" }
+  config.logger = Sidekiq::Logger.new($stdout)

-#     # Add middleware for collecting job-level metrics
-#     config.server_middleware do |chain|
-#       chain.add PrometheusExporter::Instrumentation::Sidekiq
-#     end
+  if ENV['PROMETHEUS_EXPORTER_ENABLED'].to_s == 'true'
+    require 'prometheus_exporter/instrumentation'
+    # Add middleware for collecting job-level metrics
+    config.server_middleware do |chain|
+      chain.add PrometheusExporter::Instrumentation::Sidekiq
+    end

-#     # Capture metrics for failed jobs
-#     config.death_handlers << PrometheusExporter::Instrumentation::Sidekiq.death_handler
+    # Capture metrics for failed jobs
+    config.death_handlers << PrometheusExporter::Instrumentation::Sidekiq.death_handler

-#     # Start Prometheus instrumentation
-#     config.on :startup do
-#       PrometheusExporter::Instrumentation::SidekiqProcess.start
-#       PrometheusExporter::Instrumentation::SidekiqQueue.start
-#       PrometheusExporter::Instrumentation::SidekiqStats.start
-#     end
-#   end
-# end
+    # Start Prometheus instrumentation
+    config.on :startup do
+      PrometheusExporter::Instrumentation::SidekiqProcess.start
+      PrometheusExporter::Instrumentation::SidekiqQueue.start
+      PrometheusExporter::Instrumentation::SidekiqStats.start
+    end
+  end
+end

-# Sidekiq::Queue['reverse_geocoding'].limit = 1 if Sidekiq.server? && DawarichSettings.photon_uses_komoot_io?
+Sidekiq.configure_client do |config|
+  config.redis = { url: "#{ENV['REDIS_URL']}/1" }
+end
+
+Sidekiq::Queue['reverse_geocoding'].limit = 1 if Sidekiq.server? && DawarichSettings.photon_uses_komoot_io?
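The Redis database numbers used across these configs split concerns onto a single server: the Rails cache store and Geocoder use `/0`, Sidekiq uses `/1`, and Action Cable uses `/2`. A minimal sketch for inspecting that layout with `redis-cli` (it assumes Redis on localhost and relies on Sidekiq's conventional `queue:<name>` list keys):

```bash
# Database 1 holds Sidekiq data; each queue is a Redis list named queue:<name>
redis-cli -n 1 ping
redis-cli -n 1 llen queue:default

# Database 0 holds Rails.cache and Geocoder entries
redis-cli -n 0 dbsize
```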
@@ -43,15 +43,6 @@ preload_app!
 # Allow puma to be restarted by `bin/rails restart` command.
 plugin :tmp_restart

-# If env var is set or we're in development, solid_queue will run in puma
-if ENV['SOLID_QUEUE_IN_PUMA'] || ENV.fetch('RAILS_ENV', 'development') == 'development'
-  begin
-    plugin :solid_queue
-  rescue => e
-    puts "Failed to load solid_queue plugin: #{e.message}"
-  end
-end
-
 # Prometheus exporter
 if ENV['PROMETHEUS_EXPORTER_ENABLED'].to_s == 'true'
   require 'prometheus_exporter/instrumentation'
@@ -1,27 +0,0 @@
-
-default: &default
-  dispatchers:
-    - polling_interval: 1
-      batch_size: 500
-  workers:
-    - queues: "*"
-      threads: 3
-      processes: <%= ENV['BACKGROUND_PROCESSING_CONCURRENCY'] || ENV.fetch("JOB_CONCURRENCY", 10) %>
-      polling_interval: 2
-    - queues: imports
-      threads: 5
-      processes: 1
-      polling_interval: 1
-    - queues: exports
-      threads: 5
-      processes: 1
-      polling_interval: 2
-
-development:
-  <<: *default
-
-test:
-  <<: *default
-
-production:
-  <<: *default
@@ -1,34 +0,0 @@
-periodic_cleanup:
-  class: "Jobs::CleanFinishedJob"
-  queue: default
-  schedule: every month
-
-bulk_stats_calculating_job:
-  class: "BulkStatsCalculatingJob"
-  queue: stats
-  schedule: every hour
-
-area_visits_calculation_scheduling_job:
-  class: "AreaVisitsCalculationSchedulingJob"
-  queue: visit_suggesting
-  schedule: every day at 0:00
-
-visit_suggesting_job:
-  class: "BulkVisitsSuggestingJob"
-  queue: visit_suggesting
-  schedule: every day at 00:05
-
-watcher_job:
-  class: "Import::WatcherJob"
-  queue: imports
-  schedule: every hour
-
-app_version_checking_job:
-  class: "AppVersionCheckingJob"
-  queue: default
-  schedule: every 6 hours
-
-cache_preheating_job:
-  class: "Cache::PreheatingJob"
-  queue: default
-  schedule: every day at 0:00
@@ -1,19 +1,34 @@
 # frozen_string_literal: true

+require 'sidekiq/web'
+
 Rails.application.routes.draw do
   mount ActionCable.server => '/cable'
   mount Rswag::Api::Engine => '/api-docs'
   mount Rswag::Ui::Engine => '/api-docs'

+  unless DawarichSettings.self_hosted?
+    Sidekiq::Web.use(Rack::Auth::Basic) do |username, password|
+      ActiveSupport::SecurityUtils.secure_compare(
+        ::Digest::SHA256.hexdigest(username),
+        ::Digest::SHA256.hexdigest(ENV['SIDEKIQ_USERNAME'])
+      ) &
+        ActiveSupport::SecurityUtils.secure_compare(
+          ::Digest::SHA256.hexdigest(password),
+          ::Digest::SHA256.hexdigest(ENV['SIDEKIQ_PASSWORD'])
+        )
+    end
+  end
+
   authenticate :user, lambda { |u|
     (u.admin? && DawarichSettings.self_hosted?) ||
       (u.admin? && ENV['SIDEKIQ_USERNAME'].present? && ENV['SIDEKIQ_PASSWORD'].present?)
   } do
-    mount MissionControl::Jobs::Engine, at: '/jobs'
+    mount Sidekiq::Web => '/sidekiq'
   end

-  # We want to return a nice error message if the user is not authorized to access Jobs
-  match '/jobs' => redirect { |_, request|
+  # We want to return a nice error message if the user is not authorized to access Sidekiq
+  match '/sidekiq' => redirect { |_, request|
     request.flash[:error] = 'You are not authorized to perform this action.'
     '/'
   }, via: :get
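With this routing in place, a hosted (non-self-hosted) deployment fronts `/sidekiq` with HTTP Basic auth before the app's own admin check. A minimal probe, assuming `SIDEKIQ_USERNAME`/`SIDEKIQ_PASSWORD` are set on the server and `your-dawarich-host` stands in for the real hostname:

```bash
# Without Basic credentials the middleware rejects the request with 401;
# with them, the request falls through to the app's own Devise admin check
curl -I https://your-dawarich-host/sidekiq
curl -I -u "$SIDEKIQ_USERNAME:$SIDEKIQ_PASSWORD" https://your-dawarich-host/sidekiq
```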
config/sidekiq.yml (new file): 10 changes
@@ -0,0 +1,10 @@
+---
+:concurrency: <%= ENV.fetch("BACKGROUND_PROCESSING_CONCURRENCY", 10) %>
+:queues:
+  - points
+  - default
+  - imports
+  - exports
+  - stats
+  - reverse_geocoding
+  - visit_suggesting
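Sidekiq runs this file through ERB, so concurrency can be tuned per process without editing it, and a plain list of queues like this one is treated as strictly ordered (`points` is always drained first). A minimal sketch, assuming a local checkout and an arbitrary example value of 5:

```bash
# Run a worker with lower concurrency; the ERB in config/sidekiq.yml picks this up
BACKGROUND_PROCESSING_CONCURRENCY=5 bundle exec sidekiq -C config/sidekiq.yml
```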
@@ -1,24 +0,0 @@
-# This file is auto-generated from the current state of the database. Instead
-# of editing this file, please use the migrations feature of Active Record to
-# incrementally modify your database, and then regenerate this schema definition.
-#
-# This file is the source Rails uses to define your schema when running `bin/rails
-# db:schema:load`. When creating a new database, `bin/rails db:schema:load` tends to
-# be faster and is potentially less error prone than running all of your
-# migrations from scratch. Old migrations may fail to apply correctly if those
-# migrations use external dependencies or application code.
-#
-# It's strongly recommended that you check this file into your version control system.
-
-ActiveRecord::Schema[8.0].define(version: 1) do
-  create_table "solid_cable_messages", force: :cascade do |t|
-    t.binary "channel", null: false
-    t.binary "payload", null: false
-    t.datetime "created_at", null: false
-    t.bigint "channel_hash", null: false
-    t.index ["channel"], name: "index_solid_cable_messages_on_channel"
-    t.index ["channel_hash"], name: "index_solid_cable_messages_on_channel_hash"
-    t.index ["created_at"], name: "index_solid_cable_messages_on_created_at"
-    t.index ["id"], name: "index_solid_cable_messages_on_id", unique: true
-  end
-end
@@ -1,24 +0,0 @@
-# This file is auto-generated from the current state of the database. Instead
-# of editing this file, please use the migrations feature of Active Record to
-# incrementally modify your database, and then regenerate this schema definition.
-#
-# This file is the source Rails uses to define your schema when running `bin/rails
-# db:schema:load`. When creating a new database, `bin/rails db:schema:load` tends to
-# be faster and is potentially less error prone than running all of your
-# migrations from scratch. Old migrations may fail to apply correctly if those
-# migrations use external dependencies or application code.
-#
-# It's strongly recommended that you check this file into your version control system.
-
-ActiveRecord::Schema[8.0].define(version: 1) do
-  create_table "solid_cache_entries", force: :cascade do |t|
-    t.binary "key", null: false
-    t.binary "value", null: false
-    t.datetime "created_at", null: false
-    t.bigint "key_hash", null: false
-    t.integer "byte_size", null: false
-    t.index ["byte_size"], name: "index_solid_cache_entries_on_byte_size"
-    t.index ["key_hash", "byte_size"], name: "index_solid_cache_entries_on_key_hash_and_byte_size"
-    t.index ["key_hash"], name: "index_solid_cache_entries_on_key_hash", unique: true
-  end
-end
@@ -1,143 +0,0 @@
-# This file is auto-generated from the current state of the database. Instead
-# of editing this file, please use the migrations feature of Active Record to
-# incrementally modify your database, and then regenerate this schema definition.
-#
-# This file is the source Rails uses to define your schema when running `bin/rails
-# db:schema:load`. When creating a new database, `bin/rails db:schema:load` tends to
-# be faster and is potentially less error prone than running all of your
-# migrations from scratch. Old migrations may fail to apply correctly if those
-# migrations use external dependencies or application code.
-#
-# It's strongly recommended that you check this file into your version control system.
-
-ActiveRecord::Schema[8.0].define(version: 1) do
-  enable_extension "pg_catalog.plpgsql"
-
-  create_table "solid_queue_blocked_executions", force: :cascade do |t|
-    t.bigint "job_id", null: false
-    t.string "queue_name", null: false
-    t.integer "priority", default: 0, null: false
-    t.string "concurrency_key", null: false
-    t.datetime "expires_at", null: false
-    t.datetime "created_at", null: false
-    t.index ["concurrency_key", "priority", "job_id"], name: "index_solid_queue_blocked_executions_for_release"
-    t.index ["expires_at", "concurrency_key"], name: "index_solid_queue_blocked_executions_for_maintenance"
-    t.index ["job_id"], name: "index_solid_queue_blocked_executions_on_job_id", unique: true
-  end
-
-  create_table "solid_queue_claimed_executions", force: :cascade do |t|
-    t.bigint "job_id", null: false
-    t.bigint "process_id"
-    t.datetime "created_at", null: false
-    t.index ["job_id"], name: "index_solid_queue_claimed_executions_on_job_id", unique: true
-    t.index ["process_id", "job_id"], name: "index_solid_queue_claimed_executions_on_process_id_and_job_id"
-  end
-
-  create_table "solid_queue_failed_executions", force: :cascade do |t|
-    t.bigint "job_id", null: false
-    t.text "error"
-    t.datetime "created_at", null: false
-    t.index ["job_id"], name: "index_solid_queue_failed_executions_on_job_id", unique: true
-  end
-
-  create_table "solid_queue_jobs", force: :cascade do |t|
-    t.string "queue_name", null: false
-    t.string "class_name", null: false
-    t.text "arguments"
-    t.integer "priority", default: 0, null: false
-    t.string "active_job_id"
-    t.datetime "scheduled_at"
-    t.datetime "finished_at"
-    t.string "concurrency_key"
-    t.datetime "created_at", null: false
-    t.datetime "updated_at", null: false
-    t.index ["active_job_id"], name: "index_solid_queue_jobs_on_active_job_id"
-    t.index ["class_name"], name: "index_solid_queue_jobs_on_class_name"
-    t.index ["finished_at"], name: "index_solid_queue_jobs_on_finished_at"
-    t.index ["queue_name", "finished_at"], name: "index_solid_queue_jobs_for_filtering"
-    t.index ["scheduled_at", "finished_at"], name: "index_solid_queue_jobs_for_alerting"
-  end
-
-  create_table "solid_queue_pauses", force: :cascade do |t|
-    t.string "queue_name", null: false
-    t.datetime "created_at", null: false
-    t.index ["queue_name"], name: "index_solid_queue_pauses_on_queue_name", unique: true
-  end
-
-  create_table "solid_queue_processes", force: :cascade do |t|
-    t.string "kind", null: false
-    t.datetime "last_heartbeat_at", null: false
-    t.bigint "supervisor_id"
-    t.integer "pid", null: false
-    t.string "hostname"
-    t.text "metadata"
-    t.datetime "created_at", null: false
-    t.string "name", null: false
-    t.index ["last_heartbeat_at"], name: "index_solid_queue_processes_on_last_heartbeat_at"
-    t.index ["name", "supervisor_id"], name: "index_solid_queue_processes_on_name_and_supervisor_id", unique: true
-    t.index ["supervisor_id"], name: "index_solid_queue_processes_on_supervisor_id"
-  end
-
-  create_table "solid_queue_ready_executions", force: :cascade do |t|
-    t.bigint "job_id", null: false
-    t.string "queue_name", null: false
-    t.integer "priority", default: 0, null: false
-    t.datetime "created_at", null: false
-    t.index ["job_id"], name: "index_solid_queue_ready_executions_on_job_id", unique: true
-    t.index ["priority", "job_id"], name: "index_solid_queue_poll_all"
-    t.index ["queue_name", "priority", "job_id"], name: "index_solid_queue_poll_by_queue"
-  end
-
-  create_table "solid_queue_recurring_executions", force: :cascade do |t|
-    t.bigint "job_id", null: false
-    t.string "task_key", null: false
-    t.datetime "run_at", null: false
-    t.datetime "created_at", null: false
-    t.index ["job_id"], name: "index_solid_queue_recurring_executions_on_job_id", unique: true
-    t.index ["task_key", "run_at"], name: "index_solid_queue_recurring_executions_on_task_key_and_run_at", unique: true
-  end
-
-  create_table "solid_queue_recurring_tasks", force: :cascade do |t|
-    t.string "key", null: false
-    t.string "schedule", null: false
-    t.string "command", limit: 2048
-    t.string "class_name"
-    t.text "arguments"
-    t.string "queue_name"
-    t.integer "priority", default: 0
-    t.boolean "static", default: true, null: false
-    t.text "description"
-    t.datetime "created_at", null: false
-    t.datetime "updated_at", null: false
-    t.index ["key"], name: "index_solid_queue_recurring_tasks_on_key", unique: true
-    t.index ["static"], name: "index_solid_queue_recurring_tasks_on_static"
-  end
-
-  create_table "solid_queue_scheduled_executions", force: :cascade do |t|
-    t.bigint "job_id", null: false
-    t.string "queue_name", null: false
-    t.integer "priority", default: 0, null: false
-    t.datetime "scheduled_at", null: false
-    t.datetime "created_at", null: false
-    t.index ["job_id"], name: "index_solid_queue_scheduled_executions_on_job_id", unique: true
-    t.index ["scheduled_at", "priority", "job_id"], name: "index_solid_queue_dispatch_all"
-  end
-
-  create_table "solid_queue_semaphores", force: :cascade do |t|
-    t.string "key", null: false
-    t.integer "value", default: 1, null: false
-    t.datetime "expires_at", null: false
-    t.datetime "created_at", null: false
-    t.datetime "updated_at", null: false
-    t.index ["expires_at"], name: "index_solid_queue_semaphores_on_expires_at"
-    t.index ["key", "value"], name: "index_solid_queue_semaphores_on_key_and_value"
-    t.index ["key"], name: "index_solid_queue_semaphores_on_key", unique: true
-  end
-
-  add_foreign_key "solid_queue_blocked_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
-  add_foreign_key "solid_queue_claimed_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
-  add_foreign_key "solid_queue_failed_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
-  add_foreign_key "solid_queue_ready_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
-  add_foreign_key "solid_queue_recurring_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
-  add_foreign_key "solid_queue_scheduled_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
-end
@@ -62,6 +62,9 @@ RUN mkdir -p $APP_PATH/tmp && touch $APP_PATH/tmp/caching-dev.txt
 COPY ./docker/web-entrypoint.sh /usr/local/bin/web-entrypoint.sh
 RUN chmod +x /usr/local/bin/web-entrypoint.sh

+COPY ./docker/sidekiq-entrypoint.sh /usr/local/bin/sidekiq-entrypoint.sh
+RUN chmod +x /usr/local/bin/sidekiq-entrypoint.sh
+
 EXPOSE $RAILS_PORT

 ENTRYPOINT ["bundle", "exec"]
@@ -61,6 +61,9 @@ RUN SECRET_KEY_BASE_DUMMY=1 bundle exec rake assets:precompile \
 COPY ./docker/web-entrypoint.sh /usr/local/bin/web-entrypoint.sh
 RUN chmod +x /usr/local/bin/web-entrypoint.sh

+COPY ./docker/sidekiq-entrypoint.sh /usr/local/bin/sidekiq-entrypoint.sh
+RUN chmod +x /usr/local/bin/sidekiq-entrypoint.sh
+
 EXPOSE $RAILS_PORT

 ENTRYPOINT [ "bundle", "exec" ]
@@ -1,6 +1,21 @@
 networks:
   dawarich:
 services:
+  dawarich_redis:
+    image: redis:7.4-alpine
+    container_name: dawarich_redis
+    command: redis-server
+    networks:
+      - dawarich
+    volumes:
+      - dawarich_redis_data:/var/shared/redis
+    restart: always
+    healthcheck:
+      test: [ "CMD", "redis-cli", "--raw", "incr", "ping" ]
+      interval: 10s
+      retries: 5
+      start_period: 30s
+      timeout: 10s
   dawarich_db:
     image: postgis/postgis:17-3.5-alpine
     shm_size: 1G
@@ -28,7 +43,6 @@ services:
       - dawarich_watched:/var/app/tmp/imports/watched
       - dawarich_storage:/var/app/storage
       - dawarich_db_data:/dawarich_db_data
-      - dawarich_sqlite_data:/dawarich_sqlite_data
     networks:
       - dawarich
     ports:
@@ -41,20 +55,12 @@ services:
     restart: on-failure
     environment:
       RAILS_ENV: production
+      REDIS_URL: redis://dawarich_redis:6379
       DATABASE_HOST: dawarich_db
       DATABASE_PORT: 5432
       DATABASE_USERNAME: postgres
       DATABASE_PASSWORD: password
       DATABASE_NAME: dawarich_production
-      # PostgreSQL database name for solid_queue
-      QUEUE_DATABASE_NAME: dawarich_production_queue
-      QUEUE_DATABASE_PASSWORD: password
-      QUEUE_DATABASE_USERNAME: postgres
-      QUEUE_DATABASE_HOST: dawarich_db
-      QUEUE_DATABASE_PORT: 5432
-      # SQLite database paths for cache and cable databases
-      CACHE_DATABASE_PATH: /dawarich_sqlite_data/dawarich_production_cache.sqlite3
-      CABLE_DATABASE_PATH: /dawarich_sqlite_data/dawarich_production_cable.sqlite3
       MIN_MINUTES_SPENT_IN_CITY: 60
       APPLICATION_HOSTS: localhost,::1,127.0.0.1
       TIME_ZONE: Europe/London
@@ -80,15 +86,69 @@ services:
       dawarich_db:
         condition: service_healthy
         restart: true
+      dawarich_redis:
+        condition: service_healthy
+        restart: true
     deploy:
       resources:
         limits:
           cpus: '0.50'    # Limit CPU usage to 50% of one core
           memory: '4G'    # Limit memory usage to 4GB

+  dawarich_sidekiq:
+    image: dawarich:prod
+    container_name: dawarich_sidekiq
+    volumes:
+      - dawarich_public:/var/app/public
+      - dawarich_watched:/var/app/tmp/imports/watched
+      - dawarich_storage:/var/app/storage
+    networks:
+      - dawarich
+    stdin_open: true
+    tty: true
+    entrypoint: sidekiq-entrypoint.sh
+    command: ['bundle', 'exec', 'sidekiq']
+    restart: on-failure
+    environment:
+      RAILS_ENV: production
+      REDIS_URL: redis://dawarich_redis:6379
+      DATABASE_HOST: dawarich_db
+      DATABASE_PORT: 5432
+      DATABASE_USERNAME: postgres
+      DATABASE_PASSWORD: password
+      DATABASE_NAME: dawarich_production
+      APPLICATION_HOSTS: localhost,::1,127.0.0.1
+      BACKGROUND_PROCESSING_CONCURRENCY: 10
+      APPLICATION_PROTOCOL: http
+      PROMETHEUS_EXPORTER_ENABLED: false
+      PROMETHEUS_EXPORTER_HOST: dawarich_app
+      PROMETHEUS_EXPORTER_PORT: 9394
+      SECRET_KEY_BASE: 1234567890
+      RAILS_LOG_TO_STDOUT: "true"
+      STORE_GEODATA: "true"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "100m"
+        max-file: "5"
+    healthcheck:
+      test: [ "CMD-SHELL", "pgrep -f sidekiq" ]
+      interval: 10s
+      retries: 30
+      start_period: 30s
+      timeout: 10s
+    depends_on:
+      dawarich_db:
+        condition: service_healthy
+        restart: true
+      dawarich_redis:
+        condition: service_healthy
+        restart: true
+      dawarich_app:
+        condition: service_healthy
+        restart: true
 volumes:
   dawarich_db_data:
+  dawarich_redis_data:
   dawarich_public:
   dawarich_watched:
   dawarich_storage:
   dawarich_sqlite_data:
@@ -1,6 +1,21 @@
 networks:
   dawarich:
 services:
+  dawarich_redis:
+    image: redis:7.4-alpine
+    container_name: dawarich_redis
+    command: redis-server
+    networks:
+      - dawarich
+    volumes:
+      - dawarich_shared:/data
+    restart: always
+    healthcheck:
+      test: [ "CMD", "redis-cli", "--raw", "incr", "ping" ]
+      interval: 10s
+      retries: 5
+      start_period: 30s
+      timeout: 10s
   dawarich_db:
     image: postgis/postgis:17-3.5-alpine
     shm_size: 1G
@@ -31,7 +46,6 @@ services:
       - dawarich_watched:/var/app/tmp/imports/watched
       - dawarich_storage:/var/app/storage
       - dawarich_db_data:/dawarich_db_data
-      - dawarich_sqlite_data:/dawarich_sqlite_data
     networks:
       - dawarich
     ports:
@@ -44,19 +58,11 @@ services:
     restart: on-failure
     environment:
       RAILS_ENV: development
+      REDIS_URL: redis://dawarich_redis:6379
       DATABASE_HOST: dawarich_db
       DATABASE_USERNAME: postgres
       DATABASE_PASSWORD: password
       DATABASE_NAME: dawarich_development
-      # PostgreSQL database name for solid_queue
-      QUEUE_DATABASE_NAME: dawarich_development_queue
-      QUEUE_DATABASE_PASSWORD: password
-      QUEUE_DATABASE_USERNAME: postgres
-      QUEUE_DATABASE_HOST: dawarich_db
-      QUEUE_DATABASE_PORT: 5432
-      # SQLite database paths for cache and cable databases
-      CACHE_DATABASE_PATH: /dawarich_sqlite_data/dawarich_development_cache.sqlite3
-      CABLE_DATABASE_PATH: /dawarich_sqlite_data/dawarich_development_cable.sqlite3
       MIN_MINUTES_SPENT_IN_CITY: 60
       APPLICATION_HOSTS: localhost
       TIME_ZONE: Europe/London
@@ -81,15 +87,67 @@ services:
       dawarich_db:
         condition: service_healthy
         restart: true
+      dawarich_redis:
+        condition: service_healthy
+        restart: true
     deploy:
       resources:
         limits:
           cpus: '0.50'    # Limit CPU usage to 50% of one core
           memory: '4G'    # Limit memory usage to 4GB
+  dawarich_sidekiq:
+    image: freikin/dawarich:latest
+    container_name: dawarich_sidekiq
+    volumes:
+      - dawarich_public:/var/app/public
+      - dawarich_watched:/var/app/tmp/imports/watched
+      - dawarich_storage:/var/app/storage
+    networks:
+      - dawarich
+    stdin_open: true
+    tty: true
+    entrypoint: sidekiq-entrypoint.sh
+    command: ['sidekiq']
+    restart: on-failure
+    environment:
+      RAILS_ENV: development
+      REDIS_URL: redis://dawarich_redis:6379
+      DATABASE_HOST: dawarich_db
+      DATABASE_USERNAME: postgres
+      DATABASE_PASSWORD: password
+      DATABASE_NAME: dawarich_development
+      APPLICATION_HOSTS: localhost
+      BACKGROUND_PROCESSING_CONCURRENCY: 10
+      APPLICATION_PROTOCOL: http
+      PROMETHEUS_EXPORTER_ENABLED: false
+      PROMETHEUS_EXPORTER_HOST: dawarich_app
+      PROMETHEUS_EXPORTER_PORT: 9394
+      SELF_HOSTED: "true"
+      STORE_GEODATA: "true"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "100m"
+        max-file: "5"
+    healthcheck:
+      test: [ "CMD-SHELL", "pgrep -f sidekiq" ]
+      interval: 10s
+      retries: 30
+      start_period: 30s
+      timeout: 10s
+    depends_on:
+      dawarich_db:
+        condition: service_healthy
+        restart: true
+      dawarich_redis:
+        condition: service_healthy
+        restart: true
+      dawarich_app:
+        condition: service_healthy
+        restart: true

 volumes:
   dawarich_db_data:
   dawarich_sqlite_data:
   dawarich_shared:
   dawarich_public:
   dawarich_watched:
docker/sidekiq-entrypoint.sh (new file): 36 changes
@@ -0,0 +1,36 @@
+#!/bin/sh
+
+unset BUNDLE_PATH
+unset BUNDLE_BIN
+
+set -e
+
+echo "⚠️ Starting Sidekiq in $RAILS_ENV environment ⚠️"
+
+# Parse DATABASE_URL if present, otherwise use individual variables
+if [ -n "$DATABASE_URL" ]; then
+  # Extract components from DATABASE_URL
+  DATABASE_HOST=$(echo $DATABASE_URL | awk -F[@/] '{print $4}')
+  DATABASE_PORT=$(echo $DATABASE_URL | awk -F[@/:] '{print $5}')
+  DATABASE_USERNAME=$(echo $DATABASE_URL | awk -F[:/@] '{print $4}')
+  DATABASE_PASSWORD=$(echo $DATABASE_URL | awk -F[:/@] '{print $5}')
+  DATABASE_NAME=$(echo $DATABASE_URL | awk -F[@/] '{print $5}')
+else
+  # Use existing environment variables
+  DATABASE_HOST=${DATABASE_HOST}
+  DATABASE_PORT=${DATABASE_PORT}
+  DATABASE_USERNAME=${DATABASE_USERNAME}
+  DATABASE_PASSWORD=${DATABASE_PASSWORD}
+  DATABASE_NAME=${DATABASE_NAME}
+fi
+
+# Wait for the database to become available
+echo "⏳ Waiting for database to be ready..."
+until PGPASSWORD=$DATABASE_PASSWORD psql -h "$DATABASE_HOST" -p "$DATABASE_PORT" -U "$DATABASE_USERNAME" -d "$DATABASE_NAME" -c '\q'; do
+  >&2 echo "Postgres is unavailable - retrying..."
+  sleep 2
+done
+echo "✅ PostgreSQL is ready!"
+
+# run sidekiq
+bundle exec sidekiq
@@ -31,19 +31,6 @@ export DATABASE_USERNAME
 export DATABASE_PASSWORD
 export DATABASE_NAME

-# Set queue database name and connection parameters with defaults
-QUEUE_DATABASE_NAME=${QUEUE_DATABASE_NAME:-"${DATABASE_NAME}_queue"}
-QUEUE_DATABASE_PASSWORD=${QUEUE_DATABASE_PASSWORD:-"$DATABASE_PASSWORD"}
-QUEUE_DATABASE_USERNAME=${QUEUE_DATABASE_USERNAME:-"$DATABASE_USERNAME"}
-QUEUE_DATABASE_PORT=${QUEUE_DATABASE_PORT:-"$DATABASE_PORT"}
-QUEUE_DATABASE_HOST=${QUEUE_DATABASE_HOST:-"$DATABASE_HOST"}
-
-export QUEUE_DATABASE_NAME
-export QUEUE_DATABASE_PASSWORD
-export QUEUE_DATABASE_USERNAME
-export QUEUE_DATABASE_PORT
-export QUEUE_DATABASE_HOST
-
 # Remove pre-existing puma/passenger server.pid
 rm -f $APP_PATH/tmp/pids/server.pid
@@ -67,34 +54,12 @@ create_database() {
   echo "✅ PostgreSQL database $db_name is ready!"
 }

-# Set up SQLite database directory in the volume for cache and cable
-SQLITE_DB_DIR="/dawarich_sqlite_data"
-mkdir -p $SQLITE_DB_DIR
-echo "Created SQLite database directory at $SQLITE_DB_DIR"
-
 # Step 1: Database Setup
 echo "Setting up all required databases..."

 # Create primary PostgreSQL database
 create_database "$DATABASE_NAME" "$DATABASE_PASSWORD" "$DATABASE_HOST" "$DATABASE_PORT" "$DATABASE_USERNAME"

-# Create PostgreSQL queue database for solid_queue
-create_database "$QUEUE_DATABASE_NAME" "$QUEUE_DATABASE_PASSWORD" "$QUEUE_DATABASE_HOST" "$QUEUE_DATABASE_PORT" "$QUEUE_DATABASE_USERNAME"
-
-# Setup SQLite databases for cache and cable
-
-# Setup Cache database with SQLite
-CACHE_DATABASE_PATH=${CACHE_DATABASE_PATH:-"$SQLITE_DB_DIR/${DATABASE_NAME}_cache.sqlite3"}
-export CACHE_DATABASE_PATH
-echo "✅ SQLite cache database configured at $CACHE_DATABASE_PATH"
-
-# Setup Cable database with SQLite (only for production and staging)
-if [ "$RAILS_ENV" = "production" ] || [ "$RAILS_ENV" = "staging" ]; then
-  CABLE_DATABASE_PATH=${CABLE_DATABASE_PATH:-"$SQLITE_DB_DIR/${DATABASE_NAME}_cable.sqlite3"}
-  export CABLE_DATABASE_PATH
-  echo "✅ SQLite cable database configured at $CABLE_DATABASE_PATH"
-fi
-
 # Step 2: Run migrations for all databases
 echo "Running migrations for all databases..."

@@ -102,20 +67,6 @@ echo "Running migrations for all databases..."
 echo "Running primary database migrations..."
 bundle exec rails db:migrate

-# Run PostgreSQL queue database migrations
-echo "Running queue database migrations..."
-bundle exec rails db:migrate:queue
-
-# Run SQLite database migrations
-echo "Running cache database migrations..."
-bundle exec rails db:migrate:cache
-
-# Run cable migrations for production/staging
-if [ "$RAILS_ENV" = "production" ] || [ "$RAILS_ENV" = "staging" ]; then
-  echo "Running cable database migrations..."
-  bundle exec rails db:migrate:cable
-fi
-
 # Run data migrations
 echo "Running DATA migrations..."
 bundle exec rake data:migrate
@@ -6,7 +6,7 @@

 - Kubernetes cluster and basic kubectl knowledge.
 - Some persistent storage class prepared, in this example, Longhorn.
-- Working Postgres instance. In this example Postgres lives in 'db' namespace.
+- Working Postgres and Redis instances. In this example Postgres lives in 'db' namespace and Redis in 'redis' namespace.
 - Nginx ingress controller with Letsencrypt integration.
 - This example uses 'example.com' as a domain name, you want to change it to your own.
 - This will work on IPv4 and IPv6 Single Stack clusters, as well as Dual Stack deployments.
@@ -80,6 +80,8 @@ spec:
           value: "Europe/Prague"
         - name: RAILS_ENV
           value: development
+        - name: REDIS_URL
+          value: redis://redis-master.redis.svc.cluster.local:6379/10
         - name: DATABASE_HOST
          value: postgres-postgresql.db.svc.cluster.local
        - name: DATABASE_PORT
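The `/10` suffix on the added REDIS_URL selects Redis logical database 10. A quick connectivity check from a pod inside the cluster (a sketch; it assumes `redis-cli` is available in that pod):

```sh
# Should print PONG if the host, port, and DB index are all reachable.
redis-cli -u redis://redis-master.redis.svc.cluster.local:6379/10 ping
```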
@ -126,10 +128,73 @@ spec:
|
|||
cpu: "2000m"
|
||||
ports:
|
||||
- containerPort: 3000
|
||||
- name: dawarich-sidekiq
|
||||
env:
|
||||
- name: RAILS_ENV
|
||||
value: development
|
||||
- name: REDIS_URL
|
||||
value: redis://redis-master.redis.svc.cluster.local:6379/10
|
||||
- name: DATABASE_HOST
|
||||
value: postgres-postgresql.db.svc.cluster.local
|
||||
- name: DATABASE_PORT
|
||||
value: "5432"
|
||||
- name: DATABASE_USERNAME
|
||||
value: postgres
|
||||
- name: DATABASE_PASSWORD
|
||||
value: Password123!
|
||||
- name: DATABASE_NAME
|
||||
value: dawarich_development
|
||||
- name: RAILS_MIN_THREADS
|
||||
value: "5"
|
||||
- name: RAILS_MAX_THREADS
|
||||
value: "10"
|
||||
- name: BACKGROUND_PROCESSING_CONCURRENCY
|
||||
value: "20"
|
||||
- name: APPLICATION_HOST
|
||||
value: localhost
|
||||
- name: APPLICATION_HOSTS
|
||||
value: "dawarich.example.com, localhost"
|
||||
- name: APPLICATION_PROTOCOL
|
||||
value: http
|
||||
- name: PHOTON_API_HOST
|
||||
value: photon.komoot.io
|
||||
- name: PHOTON_API_USE_HTTPS
|
||||
value: "true"
|
||||
image: freikin/dawarich:latest
|
||||
imagePullPolicy: Always
|
||||
volumeMounts:
|
||||
- mountPath: /var/app/public
|
||||
name: public
|
||||
- mountPath: /var/app/tmp/imports/watched
|
||||
name: watched
|
||||
command:
|
||||
- "sidekiq-entrypoint.sh"
|
||||
args:
|
||||
- "bundle exec sidekiq"
|
||||
resources:
|
||||
requests:
|
||||
memory: "1Gi"
|
||||
cpu: "250m"
|
||||
limits:
|
||||
memory: "3Gi"
|
||||
cpu: "1500m"
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /api/v1/health
|
||||
port: 3000
|
||||
initialDelaySeconds: 60
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 3
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 3000
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 3
|
||||
failureThreshold: 3
|
||||
volumes:
|
||||
- name: gem-cache
|
||||
persistentVolumeClaim:
|
||||
claimName: gem-cache
|
||||
- name: public
|
||||
persistentVolumeClaim:
|
||||
claimName: public
|
||||
|
|
|
|||
|
|
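Once the manifest is applied, you can confirm the second container comes up. A sketch; the container name comes from the manifest above, but the Deployment name `dawarich` is an assumption since the metadata is outside this diff:

```sh
# Check pod status, then tail the worker container's logs.
# 'dawarich' as the Deployment name is illustrative only.
kubectl get pods
kubectl logs deploy/dawarich -c dawarich-sidekiq --tail=20
```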
@@ -29,7 +29,7 @@ If you don't want to use dedicated share for projects installed by docker skip it
 ### Dawarich root folder
 1. Open your [Docker root folder](#docker-root-share) in **File station**.
 2. Create new folder **dawarich** and open it.
-3. Create folders **db_data**, **db_shared** and **public** in **dawarich** folder.
+3. Create folders **redis**, **db_data**, **db_shared** and **public** in **dawarich** folder.
 4. Copy [docker compose](synology/docker-compose.yml) and [.env](synology/.env) files from the **synology** repo folder into the **dawarich** folder on your Synology.
 
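If you prefer a shell over File Station, the same folder layout can be created over SSH. A sketch only: the hostname, user, and `/volume1/docker` root share are assumptions; adjust them to your setup:

```sh
# Creates the dawarich root folder and its four subfolders in one go.
# Host, user, and share path are illustrative.
ssh admin@your-synology-host \
  'mkdir -p /volume1/docker/dawarich && cd /volume1/docker/dawarich && mkdir -p redis db_data db_shared public'
```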
 # Installation
@@ -17,6 +17,17 @@ dawarich_app:
     APPLICATION_HOSTS: "yourhost.com,www.yourhost.com,127.0.0.1" <-- Edit this
 ```
 
+```yaml
+dawarich_sidekiq:
+  image: freikin/dawarich:latest
+  container_name: dawarich_sidekiq
+  ...
+  environment:
+    ...
+    APPLICATION_HOSTS: "yourhost.com,www.yourhost.com,127.0.0.1" <-- Edit this
+    ...
+```
+
 For a Synology install, refer to **[Synology Install Tutorial](How_to_install_Dawarich_on_Synology.md)**. That page explains how to set the APPLICATION_HOSTS environment variable.
 
 ### Virtual Host
@@ -1,6 +1,13 @@
 version: '3'
 
 services:
+  dawarich_redis:
+    image: redis:7.4-alpine
+    container_name: dawarich_redis
+    command: redis-server
+    restart: unless-stopped
+    volumes:
+      - ./redis:/var/shared/redis
   dawarich_db:
     image: postgis/postgis:17-3.5-alpine
     container_name: dawarich_db
@@ -17,6 +24,7 @@ services:
     container_name: dawarich_app
     depends_on:
       - dawarich_db
+      - dawarich_redis
     stdin_open: true
     tty: true
     entrypoint: web-entrypoint.sh
@@ -29,3 +37,19 @@ services:
       - ./app_storage:/var/app/storage
     ports:
       - 32568:3000
+
+  dawarich_sidekiq:
+    image: freikin/dawarich:latest
+    container_name: dawarich_sidekiq
+    depends_on:
+      - dawarich_db
+      - dawarich_redis
+      - dawarich_app
+    entrypoint: sidekiq-entrypoint.sh
+    command: ['sidekiq']
+    restart: unless-stopped
+    env_file:
+      - .env
+    volumes:
+      - ./public:/var/app/public
+      - ./app_storage:/var/app/storage
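A quick way to verify the restored services after bringing the stack up. A sketch: the health endpoint and the 32568 host port are taken from elsewhere in this diff, while the `Host:` header value stands in for your own domain:

```sh
docker compose up -d
docker compose ps                               # dawarich_redis and dawarich_sidekiq should be running
docker compose logs --tail=20 dawarich_sidekiq  # tail the worker logs
# The app listens on host port 32568 (mapped to container port 3000 above):
curl -H "Host: yourhost.com" http://127.0.0.1:32568/api/v1/health
```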
@@ -8,9 +8,11 @@ RSpec.describe AreaVisitsCalculatingJob, type: :job do
     let(:area) { create(:area, user:) }
 
     it 'calls the AreaVisitsCalculationService' do
-      expect(Areas::Visits::Create).to receive(:new).with(user, [area]).and_call_original
+      Sidekiq::Testing.inline! do
+        expect(Areas::Visits::Create).to receive(:new).with(user, [area]).and_call_original
 
-      described_class.new.perform(user.id)
+        described_class.new.perform(user.id)
+      end
     end
   end
 end
@@ -7,6 +7,7 @@ require_relative '../config/environment'
 abort('The Rails environment is running in production mode!') if Rails.env.production?
 require 'rspec/rails'
 require 'rswag/specs'
+require 'sidekiq/testing'
 require 'super_diff/rspec-rails'
 
 require 'rake'
spec/requests/sidekiq_spec.rb (new file, 125 additions)
@@ -0,0 +1,125 @@
+# frozen_string_literal: true
+
+require 'rails_helper'
+require 'sidekiq/web'
+
+RSpec.describe '/sidekiq', type: :request do
+  before do
+    # Allow any ENV key to be accessed and return nil by default
+    allow(ENV).to receive(:[]).and_return(nil)
+
+    # Stub Sidekiq::Web with a simple Rack app for testing
+    allow(Sidekiq::Web).to receive(:call) do |_env|
+      [200, { 'Content-Type' => 'text/html' }, ['Sidekiq Web UI']]
+    end
+  end
+
+  context 'when Dawarich is in self-hosted mode' do
+    before do
+      allow(DawarichSettings).to receive(:self_hosted?).and_return(true)
+      allow(ENV).to receive(:[]).with('SIDEKIQ_USERNAME').and_return(nil)
+      allow(ENV).to receive(:[]).with('SIDEKIQ_PASSWORD').and_return(nil)
+    end
+
+    context 'when user is not authenticated' do
+      it 'redirects to sign in page' do
+        get sidekiq_url
+
+        expect(response).to redirect_to('/users/sign_in')
+      end
+    end
+
+    context 'when user is authenticated' do
+      context 'when user is not admin' do
+        before { sign_in create(:user) }
+
+        it 'redirects to root page' do
+          get sidekiq_url
+
+          expect(response).to redirect_to(root_url)
+        end
+
+        it 'shows flash message' do
+          get sidekiq_url
+
+          expect(flash[:error]).to eq('You are not authorized to perform this action.')
+        end
+      end
+
+      context 'when user is admin' do
+        before { sign_in create(:user, :admin) }
+
+        it 'renders a successful response' do
+          get sidekiq_url
+
+          expect(response).to be_successful
+        end
+      end
+    end
+  end
+
+  context 'when Dawarich is not in self-hosted mode' do
+    before do
+      allow(DawarichSettings).to receive(:self_hosted?).and_return(false)
+      allow(ENV).to receive(:[]).with('SIDEKIQ_USERNAME').and_return(nil)
+      allow(ENV).to receive(:[]).with('SIDEKIQ_PASSWORD').and_return(nil)
+      Rails.application.reload_routes!
+    end
+
+    context 'when user is not authenticated' do
+      it 'redirects to sign in page' do
+        get sidekiq_url
+
+        expect(response).to redirect_to('/users/sign_in')
+      end
+    end
+
+    context 'when user is authenticated' do
+      before { sign_in create(:user, :admin) }
+
+      it 'redirects to root page' do
+        get sidekiq_url
+
+        expect(response).to redirect_to(root_url)
+        expect(flash[:error]).to eq('You are not authorized to perform this action.')
+      end
+    end
+  end
+
+  context 'when SIDEKIQ_USERNAME and SIDEKIQ_PASSWORD are set' do
+    before do
+      allow(DawarichSettings).to receive(:self_hosted?).and_return(false)
+      allow(ENV).to receive(:[]).with('SIDEKIQ_USERNAME').and_return('admin')
+      allow(ENV).to receive(:[]).with('SIDEKIQ_PASSWORD').and_return('password')
+    end
+
+    context 'when user is not authenticated' do
+      it 'redirects to sign in page' do
+        get sidekiq_url
+
+        expect(response).to redirect_to('/users/sign_in')
+      end
+    end
+
+    context 'when user is not admin' do
+      before { sign_in create(:user) }
+
+      it 'redirects to root page' do
+        get sidekiq_url
+
+        expect(response).to redirect_to(root_url)
+        expect(flash[:error]).to eq('You are not authorized to perform this action.')
+      end
+    end
+
+    context 'when user is admin' do
+      before { sign_in create(:user, :admin) }
+
+      it 'renders a successful response' do
+        get sidekiq_url
+
+        expect(response).to be_successful
+      end
+    end
+  end
+end
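The new request spec can be run in isolation; nothing beyond the project's usual RSpec setup should be needed:

```sh
# Run just the Sidekiq dashboard request spec added above.
bundle exec rspec spec/requests/sidekiq_spec.rb
```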
@@ -55,12 +55,16 @@ RSpec.describe Imports::Create do
 
   context 'when import is successful' do
     it 'schedules stats creating' do
-      expect { service.call }.to \
-        have_enqueued_job(Stats::CalculatingJob).with(user.id, 2024, 3)
+      Sidekiq::Testing.inline! do
+        expect { service.call }.to \
+          have_enqueued_job(Stats::CalculatingJob).with(user.id, 2024, 3)
+      end
     end
 
     it 'schedules visit suggesting' do
-      expect { service.call }.to have_enqueued_job(VisitSuggestingJob)
+      Sidekiq::Testing.inline! do
+        expect { service.call }.to have_enqueued_job(VisitSuggestingJob)
+      end
     end
   end
 
@@ -9,9 +9,12 @@ RSpec.describe Imports::Watcher do
   let(:watched_dir_path) { Rails.root.join('spec/fixtures/files/watched') }
 
   before do
+    Sidekiq::Testing.inline!
     stub_const('Imports::Watcher::WATCHED_DIR_PATH', watched_dir_path)
   end
 
+  after { Sidekiq::Testing.fake! }
+
   context 'when user exists' do
     let!(:user) { create(:user, email: 'user@domain.com') }
 