diff --git a/Gemfile b/Gemfile
index 688eb883..832d25cd 100644
--- a/Gemfile
+++ b/Gemfile
@@ -20,7 +20,6 @@ gem 'httparty'
 gem 'importmap-rails'
 gem 'kaminari'
 gem 'lograge'
-gem 'mission_control-jobs'
 gem 'oj'
 gem 'pg'
 gem 'prometheus_exporter'
@@ -37,7 +36,6 @@ gem 'rswag-api'
 gem 'rswag-ui'
 gem 'sentry-ruby'
 gem 'sentry-rails'
-gem 'sqlite3', '~> 2.6'
 gem 'stackprof'
 gem 'sidekiq'
 gem 'sidekiq-cron'
@@ -45,9 +43,6 @@ gem 'sidekiq-limit_fetch'
 gem 'sprockets-rails'
 gem 'stimulus-rails'
 gem 'strong_migrations'
-gem 'solid_cable', '~> 3.0'
-gem 'solid_cache', '1.0.7'
-gem 'solid_queue', '~> 1.1'
 gem 'tailwindcss-rails'
 gem 'turbo-rails'
 gem 'tzinfo-data', platforms: %i[mingw mswin x64_mingw jruby]
diff --git a/Procfile b/Procfile
index d6f4d818..fd4fe014 100644
--- a/Procfile
+++ b/Procfile
@@ -1,2 +1,2 @@
 web: bundle exec puma -C config/puma.rb
-worker: bundle exec bin/jobs
+worker: bundle exec sidekiq -C config/sidekiq.yml
diff --git a/app/jobs/jobs/clean_finished_job.rb b/app/jobs/jobs/clean_finished_job.rb
deleted file mode 100644
index c5fc2037..00000000
--- a/app/jobs/jobs/clean_finished_job.rb
+++ /dev/null
@@ -1,9 +0,0 @@
-# frozen_string_literal: true
-
-class Jobs::CleanFinishedJob < ApplicationJob
-  queue_as :default
-
-  def perform
-    SolidQueue::Job.clear_finished_in_batches
-  end
-end
diff --git a/bin/jobs b/bin/jobs
deleted file mode 100755
index dcf59f30..00000000
--- a/bin/jobs
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/usr/bin/env ruby
-
-require_relative "../config/environment"
-require "solid_queue/cli"
-
-SolidQueue::Cli.start(ARGV)
diff --git a/config/cable.yml b/config/cable.yml
index 7ca155ef..c3738c80 100644
--- a/config/cable.yml
+++ b/config/cable.yml
@@ -1,21 +1,11 @@
-# Async adapter only works within the same process, so for manually triggering cable updates from a console,
-# and seeing results in the browser, you must do so from the web console (running inside the dev process),
-# not a terminal started via bin/rails console! Add "console" to any action or any ERB template view
-# to make the web console appear.
-
-default: &default
-  adapter: solid_cable
-  connects_to:
-    database:
-      writing: cable
-  polling_interval: 0.1.seconds
-  message_retention: 1.day
-
 development:
-  <<: *default
+  adapter: redis
+  url: <%= ENV['REDIS_URL'] %>
 
 test:
   adapter: test
 
 production:
-  <<: *default
+  adapter: redis
+  url: <%= ENV.fetch("REDIS_URL") { "redis://localhost:6379/1" } %>
+  channel_prefix: dawarich_production
diff --git a/config/cache.yml b/config/cache.yml
deleted file mode 100644
index 040a2f5e..00000000
--- a/config/cache.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-default: &default
-  store_options:
-    # Cap age of oldest cache entry to fulfill retention policies
-    max_age: <%= 60.days.to_i %>
-    max_size: <%= 256.megabytes %>
-  namespace: <%= Rails.env %>
-
-development:
-  <<: *default
-
-test:
-  <<: *default
-
-production:
-  <<: *default
diff --git a/config/database.yml b/config/database.yml
index f82b2d8a..374dfa53 100644
--- a/config/database.yml
+++ b/config/database.yml
@@ -9,85 +9,18 @@
   pool: <%= ENV.fetch("RAILS_MAX_THREADS") { 10 } %>
   timeout: 5000
 
-sqlite_default: &sqlite_default
-  adapter: sqlite3
-  pool: <%= ENV.fetch("RAILS_MAX_THREADS") { 10 } %>
-  timeout: 5000
-
 development:
-  primary:
-    <<: *default
-    database: <%= ENV['DATABASE_NAME'] || 'dawarich_development' %>
-  queue:
-    <<: *default
-    database: <%= ENV['QUEUE_DATABASE_NAME'] || 'dawarich_development_queue' %>
-    password: <%= ENV['QUEUE_DATABASE_PASSWORD'] %>
-    username: <%= ENV['QUEUE_DATABASE_USERNAME'] %>
-    port: <%= ENV['QUEUE_DATABASE_PORT'] || '5432' %>
-    host: <%= ENV['QUEUE_DATABASE_HOST'] %>
-    migrations_paths: db/queue_migrate
-  cache:
-    <<: *sqlite_default
-    database: <%= ENV['CACHE_DATABASE_PATH'] || 'db/cache.sqlite3' %>
-    migrations_paths: db/cache_migrate
-  cable:
-    <<: *sqlite_default
-    database: <%= ENV['CABLE_DATABASE_PATH'] || 'db/cable.sqlite3' %>
-    migrations_paths: db/cable_migrate
+  <<: *default
+  database: <%= ENV['DATABASE_NAME'] || 'dawarich_development' %>
 
 test:
-  primary:
-    <<: *default
-    database: <%= ENV['DATABASE_NAME'] || 'dawarich_test' %>
-    password: <%= ENV['DATABASE_PASSWORD'] %>
-  queue:
-    <<: *default
-    database: <%= ENV['QUEUE_DATABASE_NAME'] || 'dawarich_test_queue' %>
-    password: <%= ENV['QUEUE_DATABASE_PASSWORD'] %>
-    username: <%= ENV['QUEUE_DATABASE_USERNAME'] %>
-    port: <%= ENV['QUEUE_DATABASE_PORT'] || '5432' %>
-    host: <%= ENV['QUEUE_DATABASE_HOST'] %>
-    migrations_paths: db/queue_migrate
+  <<: *default
+  database: <%= ENV['DATABASE_NAME'] || 'dawarich_test' %>
 
 production:
-  primary:
-    <<: *default
-    database: <%= ENV['DATABASE_NAME'] || 'dawarich_production' %>
-  queue:
-    <<: *default
-    database: <%= ENV['QUEUE_DATABASE_NAME'] || 'dawarich_production_queue' %>
-    password: <%= ENV['QUEUE_DATABASE_PASSWORD'] %>
-    username: <%= ENV['QUEUE_DATABASE_USERNAME'] %>
-    port: <%= ENV['QUEUE_DATABASE_PORT'] || '5432' %>
-    host: <%= ENV['QUEUE_DATABASE_HOST'] %>
-    migrations_paths: db/queue_migrate
-  cable:
-    <<: *sqlite_default
-    database: <%= ENV['CABLE_DATABASE_PATH'] || 'db/cable.sqlite3' %>
-    migrations_paths: db/cable_migrate
-  cache:
-    <<: *sqlite_default
-    database: <%= ENV['CACHE_DATABASE_PATH'] %>
-    migrations_paths: db/cache_migrate
+  <<: *default
+  database: <%= ENV['DATABASE_NAME'] || 'dawarich_production' %>
 
 staging:
-  primary:
-    <<: *default
-    database: <%= ENV['DATABASE_NAME'] || 'dawarich_staging' %>
-    password: <%= ENV['DATABASE_PASSWORD'] %>
-  queue:
-    <<: *default
-    database: <%= ENV['QUEUE_DATABASE_NAME'] || 'dawarich_staging_queue' %>
-    password: <%= ENV['QUEUE_DATABASE_PASSWORD'] %>
-    username: <%= ENV['QUEUE_DATABASE_USERNAME'] %>
-    port: <%= ENV['QUEUE_DATABASE_PORT'] || '5432' %>
-    host: <%= ENV['QUEUE_DATABASE_HOST'] %>
-    migrations_paths: db/queue_migrate
-  cache:
-    <<: *sqlite_default
-    database: <%= ENV['CACHE_DATABASE_PATH'] || 'db/cache.sqlite3' %>
-    migrations_paths: db/cache_migrate
-  cable:
-    <<: *sqlite_default
-    database: <%= ENV['CABLE_DATABASE_PATH'] || 'db/cable.sqlite3' %>
-    migrations_paths: db/cable_migrate
+  <<: *default
+  database: <%= ENV['DATABASE_NAME'] || 'dawarich_staging' %>
diff --git a/config/environments/development.rb b/config/environments/development.rb
index 1ee6dff5..dfad5b2f 100644
--- a/config/environments/development.rb
+++ b/config/environments/development.rb
@@ -26,8 +26,7 @@ Rails.application.configure do
 
   # Enable/disable caching. By default caching is disabled.
   # Run rails dev:cache to toggle caching.
-  config.cache_store = :solid_cache_store
-  config.solid_cache.connects_to = { database: { writing: :cache } }
+  config.cache_store = :redis_cache_store, { url: ENV['REDIS_URL'] }
 
   if Rails.root.join('tmp/caching-dev.txt').exist?
     config.action_controller.perform_caching = true
diff --git a/config/environments/production.rb b/config/environments/production.rb
index aac8634c..26913253 100644
--- a/config/environments/production.rb
+++ b/config/environments/production.rb
@@ -73,15 +73,10 @@ Rails.application.configure do
   config.log_level = ENV.fetch('RAILS_LOG_LEVEL', 'info')
 
   # Use a different cache store in production.
-  config.cache_store = :solid_cache_store
-  config.solid_cache.connects_to = { database: { writing: :cache } }
+  config.cache_store = :redis_cache_store, { url: ENV['REDIS_URL'] }
 
   # Use a real queuing backend for Active Job (and separate queues per environment).
-  config.active_job.queue_adapter = :solid_queue
-  config.solid_queue.connects_to = { database: { writing: :queue } }
-  config.solid_queue.silence_polling = true
-  config.solid_queue.logger = ActiveSupport::Logger.new($stdout)
-  # config.active_job.queue_name_prefix = "dawarich_production"
+  config.active_job.queue_adapter = :sidekiq
 
   config.action_mailer.perform_caching = false
 
diff --git a/config/puma.rb b/config/puma.rb
index d0e2b212..e0eb3db7 100644
--- a/config/puma.rb
+++ b/config/puma.rb
@@ -43,15 +43,6 @@ preload_app!
 # Allow puma to be restarted by `bin/rails restart` command.
 plugin :tmp_restart
 
-# If env var is set or we're in development, solid_queue will run in puma
-if ENV['SOLID_QUEUE_IN_PUMA'] || ENV.fetch('RAILS_ENV', 'development') == 'development'
-  begin
-    plugin :solid_queue
-  rescue => e
-    puts "Failed to load solid_queue plugin: #{e.message}"
-  end
-end
-
 # Prometheus exporter
 if ENV['PROMETHEUS_EXPORTER_ENABLED'].to_s == 'true'
   require 'prometheus_exporter/instrumentation'
diff --git a/config/queue.yml b/config/queue.yml
deleted file mode 100644
index 50e1f5b4..00000000
--- a/config/queue.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-
-default: &default
-  dispatchers:
-    - polling_interval: 1
-      batch_size: 500
-  workers:
-    - queues: "*"
-      threads: 3
-      processes: <%= ENV['BACKGROUND_PROCESSING_CONCURRENCY'] || ENV.fetch("JOB_CONCURRENCY", 10) %>
-      polling_interval: 2
-    - queues: imports
-      threads: 5
-      processes: 1
-      polling_interval: 1
-    - queues: exports
-      threads: 5
-      processes: 1
-      polling_interval: 2
-
-development:
-  <<: *default
-
-test:
-  <<: *default
-
-production:
-  <<: *default
diff --git a/config/recurring.yml b/config/recurring.yml
deleted file mode 100644
index 22f57d3f..00000000
--- a/config/recurring.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-periodic_cleanup:
-  class: "Jobs::CleanFinishedJob"
-  queue: default
-  schedule: every month
-
-bulk_stats_calculating_job:
-  class: "BulkStatsCalculatingJob"
-  queue: stats
-  schedule: every hour
-
-area_visits_calculation_scheduling_job:
-  class: "AreaVisitsCalculationSchedulingJob"
-  queue: visit_suggesting
-  schedule: every day at 0:00
-
-visit_suggesting_job:
-  class: "BulkVisitsSuggestingJob"
-  queue: visit_suggesting
-  schedule: every day at 00:05
-
-watcher_job:
-  class: "Import::WatcherJob"
-  queue: imports
-  schedule: every hour
-
-app_version_checking_job:
-  class: "AppVersionCheckingJob"
-  queue: default
-  schedule: every 6 hours
-
-cache_preheating_job:
-  class: "Cache::PreheatingJob"
-  queue: default
-  schedule: every day at 0:00
diff --git a/db/cable_schema.rb b/db/cable_schema.rb
deleted file mode 100644
index 55cdb550..00000000
--- a/db/cable_schema.rb
+++ /dev/null
@@ -1,24 +0,0 @@
-# This file is auto-generated from the current state of the database. Instead
-# of editing this file, please use the migrations feature of Active Record to
-# incrementally modify your database, and then regenerate this schema definition.
-#
-# This file is the source Rails uses to define your schema when running `bin/rails
-# db:schema:load`. When creating a new database, `bin/rails db:schema:load` tends to
-# be faster and is potentially less error prone than running all of your
-# migrations from scratch. Old migrations may fail to apply correctly if those
-# migrations use external dependencies or application code.
-#
-# It's strongly recommended that you check this file into your version control system.
-
-ActiveRecord::Schema[8.0].define(version: 1) do
-  create_table "solid_cable_messages", force: :cascade do |t|
-    t.binary "channel", null: false
-    t.binary "payload", null: false
-    t.datetime "created_at", null: false
-    t.bigint "channel_hash", null: false
-    t.index ["channel"], name: "index_solid_cable_messages_on_channel"
-    t.index ["channel_hash"], name: "index_solid_cable_messages_on_channel_hash"
-    t.index ["created_at"], name: "index_solid_cable_messages_on_created_at"
-    t.index ["id"], name: "index_solid_cable_messages_on_id", unique: true
-  end
-end
diff --git a/db/cache_schema.rb b/db/cache_schema.rb
deleted file mode 100644
index fe62ea4a..00000000
--- a/db/cache_schema.rb
+++ /dev/null
@@ -1,24 +0,0 @@
-# This file is auto-generated from the current state of the database. Instead
-# of editing this file, please use the migrations feature of Active Record to
-# incrementally modify your database, and then regenerate this schema definition.
-#
-# This file is the source Rails uses to define your schema when running `bin/rails
-# db:schema:load`. When creating a new database, `bin/rails db:schema:load` tends to
-# be faster and is potentially less error prone than running all of your
-# migrations from scratch. Old migrations may fail to apply correctly if those
-# migrations use external dependencies or application code.
-#
-# It's strongly recommended that you check this file into your version control system.
-
-ActiveRecord::Schema[8.0].define(version: 1) do
-  create_table "solid_cache_entries", force: :cascade do |t|
-    t.binary "key", null: false
-    t.binary "value", null: false
-    t.datetime "created_at", null: false
-    t.bigint "key_hash", null: false
-    t.integer "byte_size", null: false
-    t.index ["byte_size"], name: "index_solid_cache_entries_on_byte_size"
-    t.index ["key_hash", "byte_size"], name: "index_solid_cache_entries_on_key_hash_and_byte_size"
-    t.index ["key_hash"], name: "index_solid_cache_entries_on_key_hash", unique: true
-  end
-end
diff --git a/db/queue_schema.rb b/db/queue_schema.rb
deleted file mode 100644
index 30f375a5..00000000
--- a/db/queue_schema.rb
+++ /dev/null
@@ -1,143 +0,0 @@
-# This file is auto-generated from the current state of the database. Instead
-# of editing this file, please use the migrations feature of Active Record to
-# incrementally modify your database, and then regenerate this schema definition.
-#
-# This file is the source Rails uses to define your schema when running `bin/rails
-# db:schema:load`. When creating a new database, `bin/rails db:schema:load` tends to
-# be faster and is potentially less error prone than running all of your
-# migrations from scratch. Old migrations may fail to apply correctly if those
-# migrations use external dependencies or application code.
-#
-# It's strongly recommended that you check this file into your version control system.
-
-ActiveRecord::Schema[8.0].define(version: 1) do
-  enable_extension "pg_catalog.plpgsql"
-
-  create_table "solid_queue_blocked_executions", force: :cascade do |t|
-    t.bigint "job_id", null: false
-    t.string "queue_name", null: false
-    t.integer "priority", default: 0, null: false
-    t.string "concurrency_key", null: false
-    t.datetime "expires_at", null: false
-    t.datetime "created_at", null: false
-    t.index ["concurrency_key", "priority", "job_id"], name: "index_solid_queue_blocked_executions_for_release"
-    t.index ["expires_at", "concurrency_key"], name: "index_solid_queue_blocked_executions_for_maintenance"
-    t.index ["job_id"], name: "index_solid_queue_blocked_executions_on_job_id", unique: true
-  end
-
-  create_table "solid_queue_claimed_executions", force: :cascade do |t|
-    t.bigint "job_id", null: false
-    t.bigint "process_id"
-    t.datetime "created_at", null: false
-    t.index ["job_id"], name: "index_solid_queue_claimed_executions_on_job_id", unique: true
-    t.index ["process_id", "job_id"], name: "index_solid_queue_claimed_executions_on_process_id_and_job_id"
-  end
-
-  create_table "solid_queue_failed_executions", force: :cascade do |t|
-    t.bigint "job_id", null: false
-    t.text "error"
-    t.datetime "created_at", null: false
-    t.index ["job_id"], name: "index_solid_queue_failed_executions_on_job_id", unique: true
-  end
-
-  create_table "solid_queue_jobs", force: :cascade do |t|
-    t.string "queue_name", null: false
-    t.string "class_name", null: false
-    t.text "arguments"
-    t.integer "priority", default: 0, null: false
-    t.string "active_job_id"
-    t.datetime "scheduled_at"
-    t.datetime "finished_at"
-    t.string "concurrency_key"
-    t.datetime "created_at", null: false
-    t.datetime "updated_at", null: false
-    t.index ["active_job_id"], name: "index_solid_queue_jobs_on_active_job_id"
-    t.index ["class_name"], name: "index_solid_queue_jobs_on_class_name"
-    t.index ["finished_at"], name: "index_solid_queue_jobs_on_finished_at"
-    t.index ["queue_name", "finished_at"], name: "index_solid_queue_jobs_for_filtering"
-    t.index ["scheduled_at", "finished_at"], name: "index_solid_queue_jobs_for_alerting"
-  end
-
-  create_table "solid_queue_pauses", force: :cascade do |t|
-    t.string "queue_name", null: false
-    t.datetime "created_at", null: false
-    t.index ["queue_name"], name: "index_solid_queue_pauses_on_queue_name", unique: true
-  end
-
-  create_table "solid_queue_processes", force: :cascade do |t|
-    t.string "kind", null: false
-    t.datetime "last_heartbeat_at", null: false
-    t.bigint "supervisor_id"
-    t.integer "pid", null: false
-    t.string "hostname"
-    t.text "metadata"
-    t.datetime "created_at", null: false
-    t.string "name", null: false
-    t.index ["last_heartbeat_at"], name: "index_solid_queue_processes_on_last_heartbeat_at"
-    t.index ["name", "supervisor_id"], name: "index_solid_queue_processes_on_name_and_supervisor_id", unique: true
-    t.index ["supervisor_id"], name: "index_solid_queue_processes_on_supervisor_id"
-  end
-
-  create_table "solid_queue_ready_executions", force: :cascade do |t|
-    t.bigint "job_id", null: false
-    t.string "queue_name", null: false
-    t.integer "priority", default: 0, null: false
-    t.datetime "created_at", null: false
-    t.index ["job_id"], name: "index_solid_queue_ready_executions_on_job_id", unique: true
-    t.index ["priority", "job_id"], name: "index_solid_queue_poll_all"
-    t.index ["queue_name", "priority", "job_id"], name: "index_solid_queue_poll_by_queue"
-  end
-
-  create_table "solid_queue_recurring_executions", force: :cascade do |t|
-    t.bigint "job_id", null: false
-    t.string "task_key", null: false
-    t.datetime "run_at", null: false
-    t.datetime "created_at", null: false
-    t.index ["job_id"], name: "index_solid_queue_recurring_executions_on_job_id", unique: true
-    t.index ["task_key", "run_at"], name: "index_solid_queue_recurring_executions_on_task_key_and_run_at", unique: true
-  end
-
-  create_table "solid_queue_recurring_tasks", force: :cascade do |t|
-    t.string "key", null: false
-    t.string "schedule", null: false
-    t.string "command", limit: 2048
-    t.string "class_name"
-    t.text "arguments"
-    t.string "queue_name"
-    t.integer "priority", default: 0
-    t.boolean "static", default: true, null: false
-    t.text "description"
-    t.datetime "created_at", null: false
-    t.datetime "updated_at", null: false
-    t.index ["key"], name: "index_solid_queue_recurring_tasks_on_key", unique: true
-    t.index ["static"], name: "index_solid_queue_recurring_tasks_on_static"
-  end
-
-  create_table "solid_queue_scheduled_executions", force: :cascade do |t|
-    t.bigint "job_id", null: false
-    t.string "queue_name", null: false
-    t.integer "priority", default: 0, null: false
-    t.datetime "scheduled_at", null: false
-    t.datetime "created_at", null: false
-    t.index ["job_id"], name: "index_solid_queue_scheduled_executions_on_job_id", unique: true
-    t.index ["scheduled_at", "priority", "job_id"], name: "index_solid_queue_dispatch_all"
-  end
-
-  create_table "solid_queue_semaphores", force: :cascade do |t|
-    t.string "key", null: false
-    t.integer "value", default: 1, null: false
-    t.datetime "expires_at", null: false
-    t.datetime "created_at", null: false
-    t.datetime "updated_at", null: false
-    t.index ["expires_at"], name: "index_solid_queue_semaphores_on_expires_at"
-    t.index ["key", "value"], name: "index_solid_queue_semaphores_on_key_and_value"
-    t.index ["key"], name: "index_solid_queue_semaphores_on_key", unique: true
-  end
-
-  add_foreign_key "solid_queue_blocked_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
-  add_foreign_key "solid_queue_claimed_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
-  add_foreign_key "solid_queue_failed_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
-  add_foreign_key "solid_queue_ready_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
-  add_foreign_key "solid_queue_recurring_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
-  add_foreign_key "solid_queue_scheduled_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
-end
diff --git a/docker/web-entrypoint.sh b/docker/web-entrypoint.sh
index 228d89b7..9642055f 100644
--- a/docker/web-entrypoint.sh
+++ b/docker/web-entrypoint.sh
@@ -31,19 +31,6 @@
 export DATABASE_USERNAME
 export DATABASE_PASSWORD
 export DATABASE_NAME
 
-# Set queue database name and connection parameters with defaults
-QUEUE_DATABASE_NAME=${QUEUE_DATABASE_NAME:-"${DATABASE_NAME}_queue"}
-QUEUE_DATABASE_PASSWORD=${QUEUE_DATABASE_PASSWORD:-"$DATABASE_PASSWORD"}
-QUEUE_DATABASE_USERNAME=${QUEUE_DATABASE_USERNAME:-"$DATABASE_USERNAME"}
-QUEUE_DATABASE_PORT=${QUEUE_DATABASE_PORT:-"$DATABASE_PORT"}
-QUEUE_DATABASE_HOST=${QUEUE_DATABASE_HOST:-"$DATABASE_HOST"}
-
-export QUEUE_DATABASE_NAME
-export QUEUE_DATABASE_PASSWORD
-export QUEUE_DATABASE_USERNAME
-export QUEUE_DATABASE_PORT
-export QUEUE_DATABASE_HOST
-
 # Remove pre-existing puma/passenger server.pid
 rm -f $APP_PATH/tmp/pids/server.pid
@@ -67,34 +54,12 @@ create_database() {
   echo "✅ PostgreSQL database $db_name is ready!"
 }
 
-# Set up SQLite database directory in the volume for cache and cable
-SQLITE_DB_DIR="/dawarich_sqlite_data"
-mkdir -p $SQLITE_DB_DIR
-echo "Created SQLite database directory at $SQLITE_DB_DIR"
-
 # Step 1: Database Setup
 echo "Setting up all required databases..."
 
 # Create primary PostgreSQL database
 create_database "$DATABASE_NAME" "$DATABASE_PASSWORD" "$DATABASE_HOST" "$DATABASE_PORT" "$DATABASE_USERNAME"
 
-# Create PostgreSQL queue database for solid_queue
-create_database "$QUEUE_DATABASE_NAME" "$QUEUE_DATABASE_PASSWORD" "$QUEUE_DATABASE_HOST" "$QUEUE_DATABASE_PORT" "$QUEUE_DATABASE_USERNAME"
-
-# Setup SQLite databases for cache and cable
-
-# Setup Cache database with SQLite
-CACHE_DATABASE_PATH=${CACHE_DATABASE_PATH:-"$SQLITE_DB_DIR/${DATABASE_NAME}_cache.sqlite3"}
-export CACHE_DATABASE_PATH
-echo "✅ SQLite cache database configured at $CACHE_DATABASE_PATH"
-
-# Setup Cable database with SQLite (only for production and staging)
-if [ "$RAILS_ENV" = "production" ] || [ "$RAILS_ENV" = "staging" ]; then
-  CABLE_DATABASE_PATH=${CABLE_DATABASE_PATH:-"$SQLITE_DB_DIR/${DATABASE_NAME}_cable.sqlite3"}
-  export CABLE_DATABASE_PATH
-  echo "✅ SQLite cable database configured at $CABLE_DATABASE_PATH"
-fi
-
 # Step 2: Run migrations for all databases
 echo "Running migrations for all databases..."
 
@@ -102,20 +67,6 @@
 # Run primary database migrations
 echo "Running primary database migrations..."
 bundle exec rails db:migrate
-# Run PostgreSQL queue database migrations
-echo "Running queue database migrations..."
-bundle exec rails db:migrate:queue
-
-# Run SQLite database migrations
-echo "Running cache database migrations..."
-bundle exec rails db:migrate:cache
-
-# Run cable migrations for production/staging
-if [ "$RAILS_ENV" = "production" ] || [ "$RAILS_ENV" = "staging" ]; then
-  echo "Running cable database migrations..."
-  bundle exec rails db:migrate:cable
-fi
-
 # Run data migrations
 echo "Running DATA migrations..."
 bundle exec rake data:migrate
diff --git a/spec/jobs/area_visits_calculating_job_spec.rb b/spec/jobs/area_visits_calculating_job_spec.rb
index 46185a76..629c145c 100644
--- a/spec/jobs/area_visits_calculating_job_spec.rb
+++ b/spec/jobs/area_visits_calculating_job_spec.rb
@@ -8,9 +8,11 @@ RSpec.describe AreaVisitsCalculatingJob, type: :job do
   let(:area) { create(:area, user:) }
 
   it 'calls the AreaVisitsCalculationService' do
-    expect(Areas::Visits::Create).to receive(:new).with(user, [area]).and_call_original
+    Sidekiq::Testing.inline! do
+      expect(Areas::Visits::Create).to receive(:new).with(user, [area]).and_call_original
 
-    described_class.new.perform(user.id)
+      described_class.new.perform(user.id)
+    end
   end
 end
diff --git a/spec/services/imports/create_spec.rb b/spec/services/imports/create_spec.rb
index 176043b6..69634149 100644
--- a/spec/services/imports/create_spec.rb
+++ b/spec/services/imports/create_spec.rb
@@ -55,12 +55,16 @@
 
     context 'when import is successful' do
       it 'schedules stats creating' do
-        expect { service.call }.to \
-          have_enqueued_job(Stats::CalculatingJob).with(user.id, 2024, 3)
+        Sidekiq::Testing.inline! do
+          expect { service.call }.to \
+            have_enqueued_job(Stats::CalculatingJob).with(user.id, 2024, 3)
+        end
       end
 
       it 'schedules visit suggesting' do
-        expect { service.call }.to have_enqueued_job(VisitSuggestingJob)
+        Sidekiq::Testing.inline! do
+          expect { service.call }.to have_enqueued_job(VisitSuggestingJob)
+        end
       end
     end
diff --git a/spec/services/imports/watcher_spec.rb b/spec/services/imports/watcher_spec.rb
index 94c04053..fadbea84 100644
--- a/spec/services/imports/watcher_spec.rb
+++ b/spec/services/imports/watcher_spec.rb
@@ -9,6 +9,7 @@ RSpec.describe Imports::Watcher do
   let(:watched_dir_path) { Rails.root.join('spec/fixtures/files/watched') }
 
   before do
+    Sidekiq::Testing.inline!
     stub_const('Imports::Watcher::WATCHED_DIR_PATH', watched_dir_path)
   end
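
---
Note: the new Procfile worker command points at config/sidekiq.yml, which is not included in this diff. A minimal sketch of what that file could contain, assuming the queue names carried over from the deleted config/queue.yml and config/recurring.yml; the actual file in the repository may differ:

# config/sidekiq.yml (hypothetical -- not part of this diff)
# Queue names follow the deleted config/queue.yml and config/recurring.yml.
:concurrency: 10
:queues:
  - default
  - imports
  - exports
  - stats
  - visit_suggesting

Listing every queue matters here: unlike solid_queue's `queues: "*"` wildcard, a Sidekiq process only works the queues it is explicitly given, so any queue a job is enqueued to must appear in this list (or in another process's config).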
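Deleting config/recurring.yml also drops the recurring-job schedule, and the Gemfile keeps sidekiq-cron, which suggests the schedule moves there. A sketch of an equivalent initializer, assuming the file name, the cron translations of the old natural-language schedules, and that the SolidQueue-specific Jobs::CleanFinishedJob (deleted above) has no Sidekiq counterpart:

# config/initializers/sidekiq_cron.rb (hypothetical -- not part of this diff)
# Re-registers the jobs from the deleted config/recurring.yml via sidekiq-cron.
if Sidekiq.server?
  schedule = {
    'bulk_stats_calculating_job' => {
      'class' => 'BulkStatsCalculatingJob',
      'queue' => 'stats',
      'cron'  => '0 * * * *'    # was: every hour
    },
    'area_visits_calculation_scheduling_job' => {
      'class' => 'AreaVisitsCalculationSchedulingJob',
      'queue' => 'visit_suggesting',
      'cron'  => '0 0 * * *'    # was: every day at 0:00
    },
    'visit_suggesting_job' => {
      'class' => 'BulkVisitsSuggestingJob',
      'queue' => 'visit_suggesting',
      'cron'  => '5 0 * * *'    # was: every day at 00:05
    },
    'watcher_job' => {
      'class' => 'Import::WatcherJob',
      'queue' => 'imports',
      'cron'  => '0 * * * *'    # was: every hour
    },
    'app_version_checking_job' => {
      'class' => 'AppVersionCheckingJob',
      'queue' => 'default',
      'cron'  => '0 */6 * * *'  # was: every 6 hours
    },
    'cache_preheating_job' => {
      'class' => 'Cache::PreheatingJob',
      'queue' => 'default',
      'cron'  => '0 0 * * *'    # was: every day at 0:00
    }
  }

  Sidekiq::Cron::Job.load_from_hash(schedule)
end

Sidekiq::Cron::Job.load_from_hash is sidekiq-cron's documented bulk-registration API, and sidekiq-cron enqueues ActiveJob classes like these through Active Job, so the jobs themselves need no changes.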