mirror of https://github.com/Freika/dawarich.git (synced 2026-01-09 08:47:11 -05:00)

Remove solid trifecta

parent b76602d9c8
commit 767629b21e

19 changed files with 29 additions and 450 deletions

This commit drops the solid_queue / solid_cache / solid_cable stack and moves background jobs back to Sidekiq and caching/Action Cable back to Redis.
Gemfile (5 changes)

@@ -20,7 +20,6 @@ gem 'httparty'
 gem 'importmap-rails'
 gem 'kaminari'
 gem 'lograge'
-gem 'mission_control-jobs'
 gem 'oj'
 gem 'pg'
 gem 'prometheus_exporter'
@@ -37,7 +36,6 @@ gem 'rswag-api'
 gem 'rswag-ui'
 gem 'sentry-ruby'
 gem 'sentry-rails'
-gem 'sqlite3', '~> 2.6'
 gem 'stackprof'
 gem 'sidekiq'
 gem 'sidekiq-cron'
@@ -45,9 +43,6 @@ gem 'sidekiq-limit_fetch'
 gem 'sprockets-rails'
 gem 'stimulus-rails'
 gem 'strong_migrations'
-gem 'solid_cable', '~> 3.0'
-gem 'solid_cache', '1.0.7'
-gem 'solid_queue', '~> 1.1'
 gem 'tailwindcss-rails'
 gem 'turbo-rails'
 gem 'tzinfo-data', platforms: %i[mingw mswin x64_mingw jruby]
Procfile (2 changes)

@@ -1,2 +1,2 @@
 web: bundle exec puma -C config/puma.rb
-worker: bundle exec bin/jobs
+worker: bundle exec sidekiq -C config/sidekiq.yml
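The new worker command points at config/sidekiq.yml, which is not part of this diff (sidekiq, sidekiq-cron and sidekiq-limit_fetch were already in the Gemfile, so a Sidekiq config presumably already exists in the repo). As a rough sketch only, assuming the queue names from the deleted config/queue.yml and config/recurring.yml carry over, such a file could look like:

    # config/sidekiq.yml (hypothetical sketch, not taken from this commit)
    :concurrency: 10
    :queues:
      - default
      - imports
      - exports
      - stats
      - visit_suggesting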
Jobs::CleanFinishedJob (file deleted)

@@ -1,9 +0,0 @@
-# frozen_string_literal: true
-
-class Jobs::CleanFinishedJob < ApplicationJob
-  queue_as :default
-
-  def perform
-    SolidQueue::Job.clear_finished_in_batches
-  end
-end
bin/jobs (deleted)

@@ -1,6 +0,0 @@
-#!/usr/bin/env ruby
-
-require_relative "../config/environment"
-require "solid_queue/cli"
-
-SolidQueue::Cli.start(ARGV)
config/cable.yml

@@ -1,21 +1,11 @@
-# Async adapter only works within the same process, so for manually triggering cable updates from a console,
-# and seeing results in the browser, you must do so from the web console (running inside the dev process),
-# not a terminal started via bin/rails console! Add "console" to any action or any ERB template view
-# to make the web console appear.
-
-default: &default
-  adapter: solid_cable
-  connects_to:
-    database:
-      writing: cable
-  polling_interval: 0.1.seconds
-  message_retention: 1.day
-
 development:
-  <<: *default
+  adapter: redis
+  url: <%= ENV['REDIS_URL'] %>
 
 test:
   adapter: test
 
 production:
-  <<: *default
+  adapter: redis
+  url: <%= ENV.fetch("REDIS_URL") { "redis://localhost:6379/1" } %>
+  channel_prefix: dawarich_production
config/cache.yml (deleted)

@@ -1,15 +0,0 @@
-default: &default
-  store_options:
-    # Cap age of oldest cache entry to fulfill retention policies
-    max_age: <%= 60.days.to_i %>
-    max_size: <%= 256.megabytes %>
-    namespace: <%= Rails.env %>
-
-development:
-  <<: *default
-
-test:
-  <<: *default
-
-production:
-  <<: *default
config/database.yml

@@ -9,85 +9,18 @@ default: &default
   pool: <%= ENV.fetch("RAILS_MAX_THREADS") { 10 } %>
   timeout: 5000
 
-sqlite_default: &sqlite_default
-  adapter: sqlite3
-  pool: <%= ENV.fetch("RAILS_MAX_THREADS") { 10 } %>
-  timeout: 5000
-
 development:
-  primary:
-    <<: *default
-    database: <%= ENV['DATABASE_NAME'] || 'dawarich_development' %>
-  queue:
-    <<: *default
-    database: <%= ENV['QUEUE_DATABASE_NAME'] || 'dawarich_development_queue' %>
-    password: <%= ENV['QUEUE_DATABASE_PASSWORD'] %>
-    username: <%= ENV['QUEUE_DATABASE_USERNAME'] %>
-    port: <%= ENV['QUEUE_DATABASE_PORT'] || '5432' %>
-    host: <%= ENV['QUEUE_DATABASE_HOST'] %>
-    migrations_paths: db/queue_migrate
-  cache:
-    <<: *sqlite_default
-    database: <%= ENV['CACHE_DATABASE_PATH'] || 'db/cache.sqlite3' %>
-    migrations_paths: db/cache_migrate
-  cable:
-    <<: *sqlite_default
-    database: <%= ENV['CABLE_DATABASE_PATH'] || 'db/cable.sqlite3' %>
-    migrations_paths: db/cable_migrate
+  <<: *default
+  database: <%= ENV['DATABASE_NAME'] || 'dawarich_development' %>
 
 test:
-  primary:
-    <<: *default
-    database: <%= ENV['DATABASE_NAME'] || 'dawarich_test' %>
-    password: <%= ENV['DATABASE_PASSWORD'] %>
-  queue:
-    <<: *default
-    database: <%= ENV['QUEUE_DATABASE_NAME'] || 'dawarich_test_queue' %>
-    password: <%= ENV['QUEUE_DATABASE_PASSWORD'] %>
-    username: <%= ENV['QUEUE_DATABASE_USERNAME'] %>
-    port: <%= ENV['QUEUE_DATABASE_PORT'] || '5432' %>
-    host: <%= ENV['QUEUE_DATABASE_HOST'] %>
-    migrations_paths: db/queue_migrate
+  <<: *default
+  database: <%= ENV['DATABASE_NAME'] || 'dawarich_test' %>
 
 production:
-  primary:
-    <<: *default
-    database: <%= ENV['DATABASE_NAME'] || 'dawarich_production' %>
-  queue:
-    <<: *default
-    database: <%= ENV['QUEUE_DATABASE_NAME'] || 'dawarich_production_queue' %>
-    password: <%= ENV['QUEUE_DATABASE_PASSWORD'] %>
-    username: <%= ENV['QUEUE_DATABASE_USERNAME'] %>
-    port: <%= ENV['QUEUE_DATABASE_PORT'] || '5432' %>
-    host: <%= ENV['QUEUE_DATABASE_HOST'] %>
-    migrations_paths: db/queue_migrate
-  cable:
-    <<: *sqlite_default
-    database: <%= ENV['CABLE_DATABASE_PATH'] || 'db/cable.sqlite3' %>
-    migrations_paths: db/cable_migrate
-  cache:
-    <<: *sqlite_default
-    database: <%= ENV['CACHE_DATABASE_PATH'] %>
-    migrations_paths: db/cache_migrate
+  <<: *default
+  database: <%= ENV['DATABASE_NAME'] || 'dawarich_production' %>
 
 staging:
-  primary:
-    <<: *default
-    database: <%= ENV['DATABASE_NAME'] || 'dawarich_staging' %>
-    password: <%= ENV['DATABASE_PASSWORD'] %>
-  queue:
-    <<: *default
-    database: <%= ENV['QUEUE_DATABASE_NAME'] || 'dawarich_staging_queue' %>
-    password: <%= ENV['QUEUE_DATABASE_PASSWORD'] %>
-    username: <%= ENV['QUEUE_DATABASE_USERNAME'] %>
-    port: <%= ENV['QUEUE_DATABASE_PORT'] || '5432' %>
-    host: <%= ENV['QUEUE_DATABASE_HOST'] %>
-    migrations_paths: db/queue_migrate
-  cache:
-    <<: *sqlite_default
-    database: <%= ENV['CACHE_DATABASE_PATH'] || 'db/cache.sqlite3' %>
-    migrations_paths: db/cache_migrate
-  cable:
-    <<: *sqlite_default
-    database: <%= ENV['CABLE_DATABASE_PATH'] || 'db/cable.sqlite3' %>
-    migrations_paths: db/cable_migrate
+  <<: *default
+  database: <%= ENV['DATABASE_NAME'] || 'dawarich_staging' %>
config/environments/development.rb

@@ -26,8 +26,7 @@ Rails.application.configure do
 
   # Enable/disable caching. By default caching is disabled.
   # Run rails dev:cache to toggle caching.
-  config.cache_store = :solid_cache_store
-  config.solid_cache.connects_to = { database: { writing: :cache } }
+  config.cache_store = :redis_cache_store, { url: ENV['REDIS_URL'] }
 
   if Rails.root.join('tmp/caching-dev.txt').exist?
     config.action_controller.perform_caching = true
config/environments/production.rb

@@ -73,15 +73,10 @@ Rails.application.configure do
   config.log_level = ENV.fetch('RAILS_LOG_LEVEL', 'info')
 
   # Use a different cache store in production.
-  config.cache_store = :solid_cache_store
-  config.solid_cache.connects_to = { database: { writing: :cache } }
+  config.cache_store = :redis_cache_store, { url: ENV['REDIS_URL'] }
 
   # Use a real queuing backend for Active Job (and separate queues per environment).
-  config.active_job.queue_adapter = :solid_queue
-  config.solid_queue.connects_to = { database: { writing: :queue } }
-  config.solid_queue.silence_polling = true
-  config.solid_queue.logger = ActiveSupport::Logger.new($stdout)
+  config.active_job.queue_adapter = :sidekiq
   # config.active_job.queue_name_prefix = "dawarich_production"
 
   config.action_mailer.perform_caching = false
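With the Active Job adapter switched to :sidekiq, the Redis connection is normally configured in an initializer rather than in the environment files. No such file appears in this diff, so the following config/initializers/sidekiq.rb is only an assumed sketch of the conventional setup, reusing the same REDIS_URL fallback as config/cable.yml above:

    # config/initializers/sidekiq.rb (hypothetical; not part of this commit)
    require 'sidekiq'

    redis_config = { url: ENV.fetch('REDIS_URL', 'redis://localhost:6379/1') }

    # Server processes run the jobs; clients (web dynos, consoles) enqueue them.
    Sidekiq.configure_server { |config| config.redis = redis_config }
    Sidekiq.configure_client { |config| config.redis = redis_config }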
config/puma.rb

@@ -43,15 +43,6 @@ preload_app!
 # Allow puma to be restarted by `bin/rails restart` command.
 plugin :tmp_restart
 
-# If env var is set or we're in development, solid_queue will run in puma
-if ENV['SOLID_QUEUE_IN_PUMA'] || ENV.fetch('RAILS_ENV', 'development') == 'development'
-  begin
-    plugin :solid_queue
-  rescue => e
-    puts "Failed to load solid_queue plugin: #{e.message}"
-  end
-end
-
 # Prometheus exporter
 if ENV['PROMETHEUS_EXPORTER_ENABLED'].to_s == 'true'
   require 'prometheus_exporter/instrumentation'
config/queue.yml (deleted)

@@ -1,27 +0,0 @@
-
-default: &default
-  dispatchers:
-    - polling_interval: 1
-      batch_size: 500
-  workers:
-    - queues: "*"
-      threads: 3
-      processes: <%= ENV['BACKGROUND_PROCESSING_CONCURRENCY'] || ENV.fetch("JOB_CONCURRENCY", 10) %>
-      polling_interval: 2
-    - queues: imports
-      threads: 5
-      processes: 1
-      polling_interval: 1
-    - queues: exports
-      threads: 5
-      processes: 1
-      polling_interval: 2
-
-development:
-  <<: *default
-
-test:
-  <<: *default
-
-production:
-  <<: *default
config/recurring.yml (deleted)

@@ -1,34 +0,0 @@
-periodic_cleanup:
-  class: "Jobs::CleanFinishedJob"
-  queue: default
-  schedule: every month
-
-bulk_stats_calculating_job:
-  class: "BulkStatsCalculatingJob"
-  queue: stats
-  schedule: every hour
-
-area_visits_calculation_scheduling_job:
-  class: "AreaVisitsCalculationSchedulingJob"
-  queue: visit_suggesting
-  schedule: every day at 0:00
-
-visit_suggesting_job:
-  class: "BulkVisitsSuggestingJob"
-  queue: visit_suggesting
-  schedule: every day at 00:05
-
-watcher_job:
-  class: "Import::WatcherJob"
-  queue: imports
-  schedule: every hour
-
-app_version_checking_job:
-  class: "AppVersionCheckingJob"
-  queue: default
-  schedule: every 6 hours
-
-cache_preheating_job:
-  class: "Cache::PreheatingJob"
-  queue: default
-  schedule: every day at 0:00
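The deleted recurring.yml used solid_queue's natural-language schedule syntax. Since sidekiq-cron remains in the Gemfile, an equivalent schedule would conventionally be expressed in cron syntax, e.g. in a YAML file loaded with Sidekiq::Cron::Job.load_from_hash from an initializer. No replacement schedule appears in this diff, so the following is only a sketch of a one-to-one translation; periodic_cleanup is omitted because Jobs::CleanFinishedJob is deleted by this same commit:

    # Hypothetical sidekiq-cron schedule (not part of this commit)
    bulk_stats_calculating_job:
      cron: "0 * * * *"     # every hour
      class: "BulkStatsCalculatingJob"
      queue: stats
    area_visits_calculation_scheduling_job:
      cron: "0 0 * * *"     # every day at 00:00
      class: "AreaVisitsCalculationSchedulingJob"
      queue: visit_suggesting
    visit_suggesting_job:
      cron: "5 0 * * *"     # every day at 00:05
      class: "BulkVisitsSuggestingJob"
      queue: visit_suggesting
    watcher_job:
      cron: "0 * * * *"     # every hour
      class: "Import::WatcherJob"
      queue: imports
    app_version_checking_job:
      cron: "0 */6 * * *"   # every 6 hours
      class: "AppVersionCheckingJob"
      queue: default
    cache_preheating_job:
      cron: "0 0 * * *"     # every day at 00:00
      class: "Cache::PreheatingJob"
      queue: default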
db/cable_schema.rb (deleted)

@@ -1,24 +0,0 @@
-# This file is auto-generated from the current state of the database. Instead
-# of editing this file, please use the migrations feature of Active Record to
-# incrementally modify your database, and then regenerate this schema definition.
-#
-# This file is the source Rails uses to define your schema when running `bin/rails
-# db:schema:load`. When creating a new database, `bin/rails db:schema:load` tends to
-# be faster and is potentially less error prone than running all of your
-# migrations from scratch. Old migrations may fail to apply correctly if those
-# migrations use external dependencies or application code.
-#
-# It's strongly recommended that you check this file into your version control system.
-
-ActiveRecord::Schema[8.0].define(version: 1) do
-  create_table "solid_cable_messages", force: :cascade do |t|
-    t.binary "channel", null: false
-    t.binary "payload", null: false
-    t.datetime "created_at", null: false
-    t.bigint "channel_hash", null: false
-    t.index ["channel"], name: "index_solid_cable_messages_on_channel"
-    t.index ["channel_hash"], name: "index_solid_cable_messages_on_channel_hash"
-    t.index ["created_at"], name: "index_solid_cable_messages_on_created_at"
-    t.index ["id"], name: "index_solid_cable_messages_on_id", unique: true
-  end
-end
db/cache_schema.rb (deleted)

@@ -1,24 +0,0 @@
-# This file is auto-generated from the current state of the database. Instead
-# of editing this file, please use the migrations feature of Active Record to
-# incrementally modify your database, and then regenerate this schema definition.
-#
-# This file is the source Rails uses to define your schema when running `bin/rails
-# db:schema:load`. When creating a new database, `bin/rails db:schema:load` tends to
-# be faster and is potentially less error prone than running all of your
-# migrations from scratch. Old migrations may fail to apply correctly if those
-# migrations use external dependencies or application code.
-#
-# It's strongly recommended that you check this file into your version control system.
-
-ActiveRecord::Schema[8.0].define(version: 1) do
-  create_table "solid_cache_entries", force: :cascade do |t|
-    t.binary "key", null: false
-    t.binary "value", null: false
-    t.datetime "created_at", null: false
-    t.bigint "key_hash", null: false
-    t.integer "byte_size", null: false
-    t.index ["byte_size"], name: "index_solid_cache_entries_on_byte_size"
-    t.index ["key_hash", "byte_size"], name: "index_solid_cache_entries_on_key_hash_and_byte_size"
-    t.index ["key_hash"], name: "index_solid_cache_entries_on_key_hash", unique: true
-  end
-end
db/queue_schema.rb (deleted)

@@ -1,143 +0,0 @@
-# This file is auto-generated from the current state of the database. Instead
-# of editing this file, please use the migrations feature of Active Record to
-# incrementally modify your database, and then regenerate this schema definition.
-#
-# This file is the source Rails uses to define your schema when running `bin/rails
-# db:schema:load`. When creating a new database, `bin/rails db:schema:load` tends to
-# be faster and is potentially less error prone than running all of your
-# migrations from scratch. Old migrations may fail to apply correctly if those
-# migrations use external dependencies or application code.
-#
-# It's strongly recommended that you check this file into your version control system.
-
-ActiveRecord::Schema[8.0].define(version: 1) do
-  enable_extension "pg_catalog.plpgsql"
-
-  create_table "solid_queue_blocked_executions", force: :cascade do |t|
-    t.bigint "job_id", null: false
-    t.string "queue_name", null: false
-    t.integer "priority", default: 0, null: false
-    t.string "concurrency_key", null: false
-    t.datetime "expires_at", null: false
-    t.datetime "created_at", null: false
-    t.index ["concurrency_key", "priority", "job_id"], name: "index_solid_queue_blocked_executions_for_release"
-    t.index ["expires_at", "concurrency_key"], name: "index_solid_queue_blocked_executions_for_maintenance"
-    t.index ["job_id"], name: "index_solid_queue_blocked_executions_on_job_id", unique: true
-  end
-
-  create_table "solid_queue_claimed_executions", force: :cascade do |t|
-    t.bigint "job_id", null: false
-    t.bigint "process_id"
-    t.datetime "created_at", null: false
-    t.index ["job_id"], name: "index_solid_queue_claimed_executions_on_job_id", unique: true
-    t.index ["process_id", "job_id"], name: "index_solid_queue_claimed_executions_on_process_id_and_job_id"
-  end
-
-  create_table "solid_queue_failed_executions", force: :cascade do |t|
-    t.bigint "job_id", null: false
-    t.text "error"
-    t.datetime "created_at", null: false
-    t.index ["job_id"], name: "index_solid_queue_failed_executions_on_job_id", unique: true
-  end
-
-  create_table "solid_queue_jobs", force: :cascade do |t|
-    t.string "queue_name", null: false
-    t.string "class_name", null: false
-    t.text "arguments"
-    t.integer "priority", default: 0, null: false
-    t.string "active_job_id"
-    t.datetime "scheduled_at"
-    t.datetime "finished_at"
-    t.string "concurrency_key"
-    t.datetime "created_at", null: false
-    t.datetime "updated_at", null: false
-    t.index ["active_job_id"], name: "index_solid_queue_jobs_on_active_job_id"
-    t.index ["class_name"], name: "index_solid_queue_jobs_on_class_name"
-    t.index ["finished_at"], name: "index_solid_queue_jobs_on_finished_at"
-    t.index ["queue_name", "finished_at"], name: "index_solid_queue_jobs_for_filtering"
-    t.index ["scheduled_at", "finished_at"], name: "index_solid_queue_jobs_for_alerting"
-  end
-
-  create_table "solid_queue_pauses", force: :cascade do |t|
-    t.string "queue_name", null: false
-    t.datetime "created_at", null: false
-    t.index ["queue_name"], name: "index_solid_queue_pauses_on_queue_name", unique: true
-  end
-
-  create_table "solid_queue_processes", force: :cascade do |t|
-    t.string "kind", null: false
-    t.datetime "last_heartbeat_at", null: false
-    t.bigint "supervisor_id"
-    t.integer "pid", null: false
-    t.string "hostname"
-    t.text "metadata"
-    t.datetime "created_at", null: false
-    t.string "name", null: false
-    t.index ["last_heartbeat_at"], name: "index_solid_queue_processes_on_last_heartbeat_at"
-    t.index ["name", "supervisor_id"], name: "index_solid_queue_processes_on_name_and_supervisor_id", unique: true
-    t.index ["supervisor_id"], name: "index_solid_queue_processes_on_supervisor_id"
-  end
-
-  create_table "solid_queue_ready_executions", force: :cascade do |t|
-    t.bigint "job_id", null: false
-    t.string "queue_name", null: false
-    t.integer "priority", default: 0, null: false
-    t.datetime "created_at", null: false
-    t.index ["job_id"], name: "index_solid_queue_ready_executions_on_job_id", unique: true
-    t.index ["priority", "job_id"], name: "index_solid_queue_poll_all"
-    t.index ["queue_name", "priority", "job_id"], name: "index_solid_queue_poll_by_queue"
-  end
-
-  create_table "solid_queue_recurring_executions", force: :cascade do |t|
-    t.bigint "job_id", null: false
-    t.string "task_key", null: false
-    t.datetime "run_at", null: false
-    t.datetime "created_at", null: false
-    t.index ["job_id"], name: "index_solid_queue_recurring_executions_on_job_id", unique: true
-    t.index ["task_key", "run_at"], name: "index_solid_queue_recurring_executions_on_task_key_and_run_at", unique: true
-  end
-
-  create_table "solid_queue_recurring_tasks", force: :cascade do |t|
-    t.string "key", null: false
-    t.string "schedule", null: false
-    t.string "command", limit: 2048
-    t.string "class_name"
-    t.text "arguments"
-    t.string "queue_name"
-    t.integer "priority", default: 0
-    t.boolean "static", default: true, null: false
-    t.text "description"
-    t.datetime "created_at", null: false
-    t.datetime "updated_at", null: false
-    t.index ["key"], name: "index_solid_queue_recurring_tasks_on_key", unique: true
-    t.index ["static"], name: "index_solid_queue_recurring_tasks_on_static"
-  end
-
-  create_table "solid_queue_scheduled_executions", force: :cascade do |t|
-    t.bigint "job_id", null: false
-    t.string "queue_name", null: false
-    t.integer "priority", default: 0, null: false
-    t.datetime "scheduled_at", null: false
-    t.datetime "created_at", null: false
-    t.index ["job_id"], name: "index_solid_queue_scheduled_executions_on_job_id", unique: true
-    t.index ["scheduled_at", "priority", "job_id"], name: "index_solid_queue_dispatch_all"
-  end
-
-  create_table "solid_queue_semaphores", force: :cascade do |t|
-    t.string "key", null: false
-    t.integer "value", default: 1, null: false
-    t.datetime "expires_at", null: false
-    t.datetime "created_at", null: false
-    t.datetime "updated_at", null: false
-    t.index ["expires_at"], name: "index_solid_queue_semaphores_on_expires_at"
-    t.index ["key", "value"], name: "index_solid_queue_semaphores_on_key_and_value"
-    t.index ["key"], name: "index_solid_queue_semaphores_on_key", unique: true
-  end
-
-  add_foreign_key "solid_queue_blocked_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
-  add_foreign_key "solid_queue_claimed_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
-  add_foreign_key "solid_queue_failed_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
-  add_foreign_key "solid_queue_ready_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
-  add_foreign_key "solid_queue_recurring_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
-  add_foreign_key "solid_queue_scheduled_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
-end
Docker entrypoint script (filename not shown on the mirror page)

@@ -31,19 +31,6 @@ export DATABASE_USERNAME
 export DATABASE_PASSWORD
 export DATABASE_NAME
 
-# Set queue database name and connection parameters with defaults
-QUEUE_DATABASE_NAME=${QUEUE_DATABASE_NAME:-"${DATABASE_NAME}_queue"}
-QUEUE_DATABASE_PASSWORD=${QUEUE_DATABASE_PASSWORD:-"$DATABASE_PASSWORD"}
-QUEUE_DATABASE_USERNAME=${QUEUE_DATABASE_USERNAME:-"$DATABASE_USERNAME"}
-QUEUE_DATABASE_PORT=${QUEUE_DATABASE_PORT:-"$DATABASE_PORT"}
-QUEUE_DATABASE_HOST=${QUEUE_DATABASE_HOST:-"$DATABASE_HOST"}
-
-export QUEUE_DATABASE_NAME
-export QUEUE_DATABASE_PASSWORD
-export QUEUE_DATABASE_USERNAME
-export QUEUE_DATABASE_PORT
-export QUEUE_DATABASE_HOST
-
 # Remove pre-existing puma/passenger server.pid
 rm -f $APP_PATH/tmp/pids/server.pid
@@ -67,34 +54,12 @@ create_database() {
   echo "✅ PostgreSQL database $db_name is ready!"
 }
 
-# Set up SQLite database directory in the volume for cache and cable
-SQLITE_DB_DIR="/dawarich_sqlite_data"
-mkdir -p $SQLITE_DB_DIR
-echo "Created SQLite database directory at $SQLITE_DB_DIR"
-
 # Step 1: Database Setup
 echo "Setting up all required databases..."
 
 # Create primary PostgreSQL database
 create_database "$DATABASE_NAME" "$DATABASE_PASSWORD" "$DATABASE_HOST" "$DATABASE_PORT" "$DATABASE_USERNAME"
 
-# Create PostgreSQL queue database for solid_queue
-create_database "$QUEUE_DATABASE_NAME" "$QUEUE_DATABASE_PASSWORD" "$QUEUE_DATABASE_HOST" "$QUEUE_DATABASE_PORT" "$QUEUE_DATABASE_USERNAME"
-
-# Setup SQLite databases for cache and cable
-
-# Setup Cache database with SQLite
-CACHE_DATABASE_PATH=${CACHE_DATABASE_PATH:-"$SQLITE_DB_DIR/${DATABASE_NAME}_cache.sqlite3"}
-export CACHE_DATABASE_PATH
-echo "✅ SQLite cache database configured at $CACHE_DATABASE_PATH"
-
-# Setup Cable database with SQLite (only for production and staging)
-if [ "$RAILS_ENV" = "production" ] || [ "$RAILS_ENV" = "staging" ]; then
-  CABLE_DATABASE_PATH=${CABLE_DATABASE_PATH:-"$SQLITE_DB_DIR/${DATABASE_NAME}_cable.sqlite3"}
-  export CABLE_DATABASE_PATH
-  echo "✅ SQLite cable database configured at $CABLE_DATABASE_PATH"
-fi
-
 # Step 2: Run migrations for all databases
 echo "Running migrations for all databases..."
 
@@ -102,20 +67,6 @@ echo "Running migrations for all databases..."
 echo "Running primary database migrations..."
 bundle exec rails db:migrate
 
-# Run PostgreSQL queue database migrations
-echo "Running queue database migrations..."
-bundle exec rails db:migrate:queue
-
-# Run SQLite database migrations
-echo "Running cache database migrations..."
-bundle exec rails db:migrate:cache
-
-# Run cable migrations for production/staging
-if [ "$RAILS_ENV" = "production" ] || [ "$RAILS_ENV" = "staging" ]; then
-  echo "Running cable database migrations..."
-  bundle exec rails db:migrate:cable
-fi
-
 # Run data migrations
 echo "Running DATA migrations..."
 bundle exec rake data:migrate
AreaVisitsCalculatingJob spec

@@ -8,9 +8,11 @@ RSpec.describe AreaVisitsCalculatingJob, type: :job do
   let(:area) { create(:area, user:) }
 
   it 'calls the AreaVisitsCalculationService' do
-    expect(Areas::Visits::Create).to receive(:new).with(user, [area]).and_call_original
+    Sidekiq::Testing.inline! do
+      expect(Areas::Visits::Create).to receive(:new).with(user, [area]).and_call_original
 
-    described_class.new.perform(user.id)
+      described_class.new.perform(user.id)
+    end
   end
 end
Imports::Create spec

@@ -55,12 +55,16 @@ RSpec.describe Imports::Create do
 
   context 'when import is successful' do
     it 'schedules stats creating' do
-      expect { service.call }.to \
-        have_enqueued_job(Stats::CalculatingJob).with(user.id, 2024, 3)
+      Sidekiq::Testing.inline! do
+        expect { service.call }.to \
+          have_enqueued_job(Stats::CalculatingJob).with(user.id, 2024, 3)
+      end
     end
 
     it 'schedules visit suggesting' do
-      expect { service.call }.to have_enqueued_job(VisitSuggestingJob)
+      Sidekiq::Testing.inline! do
+        expect { service.call }.to have_enqueued_job(VisitSuggestingJob)
+      end
     end
   end
Imports::Watcher spec

@@ -9,6 +9,7 @@ RSpec.describe Imports::Watcher do
   let(:watched_dir_path) { Rails.root.join('spec/fixtures/files/watched') }
 
   before do
+    Sidekiq::Testing.inline!
     stub_const('Imports::Watcher::WATCHED_DIR_PATH', watched_dir_path)
   end
 
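The specs above call Sidekiq::Testing, which Sidekiq does not load by default. Assuming a standard RSpec setup, spec/rails_helper.rb (or a support file) would need something along these lines; this is an inferred sketch, not part of the diff:

    # Hypothetical test-setup excerpt (not shown in this commit)
    require 'sidekiq/testing'

    # fake! queues jobs in an in-memory array by default;
    # Sidekiq::Testing.inline!, as used in the specs above, runs jobs synchronously.
    Sidekiq::Testing.fake!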