Move jobs to solid queue

parent 54fc5a371f
commit 927bf62447

19 changed files with 576 additions and 250 deletions
CHANGELOG.md

@@ -4,6 +4,14 @@ All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).

# 0.25.7 - UNRELEASED

## TODO:

## Changed

- Background jobs are now using SolidQueue instead of Sidekiq.

# 0.25.6 - 2025-04-23
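Since all of the application's jobs inherit from ApplicationJob (an Active Job subclass, changed below), the adapter switch does not change how jobs are enqueued; only the backend that executes them does. A minimal sketch with a hypothetical job class, not part of the diff:

# Hypothetical job, for illustration only.
class ExampleJob < ApplicationJob
  queue_as :default

  def perform(user_id)
    Rails.logger.info("processed user #{user_id}")
  end
end

# Enqueueing stays plain Active Job; with the queue adapter set to
# :solid_queue (see the environment configs below), the job is written to the
# queue database and run by a SolidQueue worker instead of a Sidekiq process.
ExampleJob.perform_later(42)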
Gemfile  (3)

@@ -37,9 +37,12 @@ gem 'sentry-rails'
gem 'sidekiq'
gem 'sidekiq-cron'
gem 'sidekiq-limit_fetch'
gem 'solid_cable', '~> 3.0'
gem 'solid_queue', '~> 1.1'
gem 'sprockets-rails'
gem 'stimulus-rails'
gem 'strong_migrations'
gem 'mission_control-jobs'
gem 'tailwindcss-rails'
gem 'turbo-rails'
gem 'tzinfo-data', platforms: %i[mingw mswin x64_mingw jruby]
Gemfile.lock  (25)

@@ -218,6 +218,16 @@ GEM
    mini_mime (1.1.5)
    mini_portile2 (2.8.8)
    minitest (5.25.5)
    mission_control-jobs (1.0.2)
      actioncable (>= 7.1)
      actionpack (>= 7.1)
      activejob (>= 7.1)
      activerecord (>= 7.1)
      importmap-rails (>= 1.2.1)
      irb (~> 1.13)
      railties (>= 7.1)
      stimulus-rails
      turbo-rails
    msgpack (1.7.3)
    multi_xml (0.7.1)
      bigdecimal (~> 3.1)

@@ -416,6 +426,18 @@ GEM
      simplecov_json_formatter (~> 0.1)
    simplecov-html (0.13.1)
    simplecov_json_formatter (0.1.4)
    solid_cable (3.0.7)
      actioncable (>= 7.2)
      activejob (>= 7.2)
      activerecord (>= 7.2)
      railties (>= 7.2)
    solid_queue (1.1.5)
      activejob (>= 7.1)
      activerecord (>= 7.1)
      concurrent-ruby (>= 1.3.1)
      fugit (~> 1.11.0)
      railties (>= 7.1)
      thor (~> 1.3.1)
    sprockets (4.2.1)
      concurrent-ruby (~> 1.0)
      rack (>= 2.2.4, < 4)

@@ -499,6 +521,7 @@ DEPENDENCIES
  jwt
  kaminari
  lograge
  mission_control-jobs
  oj
  pg
  prometheus_exporter

@@ -523,6 +546,8 @@ DEPENDENCIES
  sidekiq-cron
  sidekiq-limit_fetch
  simplecov
  solid_cable (~> 3.0)
  solid_queue (~> 1.1)
  sprockets-rails
  stimulus-rails
  strong_migrations
app/jobs/application_job.rb

@@ -1,6 +1,9 @@
# frozen_string_literal: true

class ApplicationJob < ActiveJob::Base
  # Automatically retry jobs that encountered a deadlock
  # retry_on ActiveRecord::Deadlocked
  retry_on Exception, wait: :polynomially_longer, attempts: 25

  # Most jobs are safe to ignore if the underlying records are no longer available
  # discard_on ActiveJob::DeserializationError
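For context (an aside, not from the diff): :polynomially_longer is Active Job's built-in backoff, and the delay before retry N grows roughly on the order of N**4 seconds plus jitter, so 25 attempts stretch over days before the job is finally marked failed. A rough illustration:

# Approximate growth of the polynomial backoff (the exact Rails formula adds
# jitter and a small constant).
(1..6).each do |attempt|
  puts "retry ##{attempt}: waits on the order of #{attempt**4} seconds"
end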
app/jobs/jobs/clean_finished_job.rb  (9, new file)

@@ -0,0 +1,9 @@
# frozen_string_literal: true

class Jobs::CleanFinishedJob < ApplicationJob
  queue_as :default

  def perform
    SolidQueue::Job.clear_finished_in_batches
  end
end
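This cleanup job is scheduled monthly in config/recurring.yml below; SolidQueue::Job.clear_finished_in_batches deletes finished job rows from the queue database. A sketch of running the same cleanup by hand from a console, should the table need trimming outside the schedule:

# Either run the cleanup directly...
SolidQueue::Job.clear_finished_in_batches
# ...or enqueue the wrapper job introduced in this commit.
Jobs::CleanFinishedJob.perform_later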
bin/jobs  (6, new executable file)

@@ -0,0 +1,6 @@
#!/usr/bin/env ruby

require_relative "../config/environment"
require "solid_queue/cli"

SolidQueue::Cli.start(ARGV)
config/cable.yml

@@ -1,11 +1,23 @@
# Async adapter only works within the same process, so for manually triggering cable updates from a console,
# and seeing results in the browser, you must do so from the web console (running inside the dev process),
# not a terminal started via bin/rails console! Add "console" to any action or any ERB template view
# to make the web console appear.
development:
  adapter: redis
  url: <%= ENV['REDIS_URL'] %>
  adapter: solid_cable
  connects_to:
    database:
      writing: cable
  polling_interval: 0.1.seconds
  message_retention: 1.day

test:
  adapter: test

production:
  adapter: redis
  url: <%= ENV.fetch("REDIS_URL") { "redis://localhost:6379/1" } %>
  channel_prefix: dawarich_production
  adapter: solid_cable
  connects_to:
    database:
      writing: cable
  polling_interval: 0.1.seconds
  message_retention: 1.day
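With solid_cable, Action Cable messages are written to the cable database defined in config/database.yml below and delivered by polling (every 0.1 s here), with payloads kept for one day, so Redis is no longer needed for this. Broadcasting code itself is unchanged; a sketch with a hypothetical stream name:

# Hypothetical stream name, for illustration; the broadcast goes through the
# solid_cable adapter and ends up as a row in solid_cable_messages.
ActionCable.server.broadcast("notifications_1", { title: "Import finished" })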
config/database.yml

@@ -10,13 +10,40 @@ default: &default
  timeout: 5000

development:
  <<: *default
  database: <%= ENV['DATABASE_NAME'] || 'dawarich_development' %>
  primary:
    <<: *default
    database: <%= ENV['DATABASE_NAME'] || 'dawarich_development' %>
  queue:
    <<: *default
    database: <%= ENV['DATABASE_QUEUE_NAME'] || 'dawarich_development_queue' %>
    migrations_paths: db/queue_migrate
  cable:
    <<: *default
    database: <%= ENV['DATABASE_CABLE_NAME'] || 'dawarich_development_cable' %>
    migrations_paths: db/cable_migrate

test:
  <<: *default
  database: <%= ENV['DATABASE_NAME'] || 'dawarich_test' %>
  primary:
    <<: *default
    database: <%= ENV['DATABASE_NAME'] || 'dawarich_test' %>
  queue:
    <<: *default
    database: <%= ENV['DATABASE_QUEUE_NAME'] || 'dawarich_test_queue' %>
    migrations_paths: db/queue_migrate
  cable:
    <<: *default
    database: <%= ENV['DATABASE_CABLE_NAME'] || 'dawarich_test_cable' %>
    migrations_paths: db/cable_migrate

production:
  <<: *default
  database: <%= ENV['DATABASE_NAME'] || 'dawarich_production' %>
  primary:
    <<: *default
    database: <%= ENV['DATABASE_NAME'] || 'dawarich_production' %>
  queue:
    <<: *default
    database: <%= ENV['DATABASE_QUEUE_NAME'] || 'dawarich_production_queue' %>
    migrations_paths: db/queue_migrate
  cable:
    <<: *default
    database: <%= ENV['DATABASE_CABLE_NAME'] || 'dawarich_production_cable' %>
    migrations_paths: db/cable_migrate
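The queue and cable entries point at separate PostgreSQL databases sharing the default connection settings; their schemas come from db/queue_schema.rb and db/cable_schema.rb (added below) via the dedicated migrations paths, and `bin/rails db:prepare` creates and loads all of them. A quick console check of the wiring, as a sketch assuming the connects_to mapping added in this commit:

# SolidQueue models should report the queue database configured above.
SolidQueue::Job.connection_db_config.database
# => "dawarich_development_queue" (illustrative; depends on DATABASE_QUEUE_NAME)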
config/environments/development.rb

@@ -68,6 +68,13 @@ Rails.application.configure do
  # Highlight code that enqueued background job in logs.
  config.active_job.verbose_enqueue_logs = true

  config.active_job.queue_adapter = :solid_queue
  config.solid_queue.silence_polling = true
  config.solid_queue.connects_to = { database: { writing: :queue } }

  config.mission_control.jobs.http_basic_auth_enabled = false
  config.solid_queue.logger = ActiveSupport::Logger.new($stdout)

  # Suppress logger output for asset requests.
  config.assets.quiet = true

@@ -95,7 +102,7 @@ Rails.application.configure do
  config.force_ssl = ENV.fetch('APPLICATION_PROTOCOL', 'http').downcase == 'https'

  # Direct logs to STDOUT
  config.logger = Logger.new($stdout)
  config.logger = ActiveSupport::Logger.new($stdout)
  config.lograge.enabled = true
  config.lograge.formatter = Lograge::Formatters::Json.new
config/environments/production.rb

@@ -60,7 +60,7 @@ Rails.application.configure do
  config.force_ssl = ENV.fetch('APPLICATION_PROTOCOL', 'http').downcase == 'https'

  # Direct logs to STDOUT
  config.logger = Logger.new($stdout)
  config.logger = ActiveSupport::Logger.new($stdout)
  config.lograge.enabled = true
  config.lograge.formatter = Lograge::Formatters::Json.new

@@ -77,7 +77,9 @@ Rails.application.configure do
  config.cache_store = :redis_cache_store, { url: ENV['REDIS_URL'] }

  # Use a real queuing backend for Active Job (and separate queues per environment).
  # config.active_job.queue_adapter = :resque
  config.solid_queue.silence_polling = true
  config.solid_queue.connects_to = { database: { writing: :queue } }
  config.solid_queue.logger = ActiveSupport::Logger.new($stdout)
  # config.active_job.queue_name_prefix = "dawarich_production"

  config.action_mailer.perform_caching = false
config/puma.rb

@@ -43,6 +43,8 @@ preload_app!
# Allow puma to be restarted by `bin/rails restart` command.
plugin :tmp_restart

plugin :solid_queue if ENV['SOLID_QUEUE_IN_PUMA'] || Rails.env.development?

# Prometheus exporter
if ENV['PROMETHEUS_EXPORTER_ENABLED'].to_s == 'true'
  require 'prometheus_exporter/instrumentation'
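plugin :solid_queue is the Puma plugin shipped with the solid_queue gem; it starts the SolidQueue supervisor inside the Puma process, which suits single-container deployments. One observation: ENV['SOLID_QUEUE_IN_PUMA'] is truthy for any non-empty value, including "false", whereas the Prometheus block below parses its flag strictly. The same pattern could be applied here (a sketch, not part of the diff):

# Stricter boolean parsing, mirroring the PROMETHEUS_EXPORTER_ENABLED check below.
plugin :solid_queue if ENV['SOLID_QUEUE_IN_PUMA'].to_s == 'true' || Rails.env.development?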
config/queue.yml  (26, new file)

@@ -0,0 +1,26 @@
default: &default
  dispatchers:
    - polling_interval: 1
      batch_size: 500
  workers:
    - queues: "*"
      threads: 3
      processes: <%= ENV.fetch("JOB_CONCURRENCY", 1) %>
      polling_interval: 2
    - queues: imports
      threads: 5
      processes: 1
      polling_interval: 1
    - queues: exports
      threads: 5
      processes: 1
      polling_interval: 2

development:
  <<: *default

test:
  <<: *default

production:
  <<: *default
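In this layout the dispatcher moves scheduled and recurring jobs into the ready queue, while each worker entry defines which queues a worker process polls and with how many threads; the supervisor booted by the bin/jobs executable above (e.g. `bin/jobs start`) spawns them, and JOB_CONCURRENCY scales the catch-all worker. Queue names line up with queue_as in the job classes, for example (hypothetical job, for illustration):

# A job declared on the "imports" queue is picked up by the dedicated imports
# worker (5 threads, polling every second) rather than the "*" worker.
class HypotheticalImportParsingJob < ApplicationJob
  queue_as :imports

  def perform(import_id)
    # parse the import...
  end
end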
config/recurring.yml  (34, new file)

@@ -0,0 +1,34 @@
periodic_cleanup:
  class: "Jobs::CleanFinishedJob"
  queue: default
  schedule: every month

bulk_stats_calculating_job:
  class: "BulkStatsCalculatingJob"
  queue: stats
  schedule: every hour

area_visits_calculation_scheduling_job:
  class: "AreaVisitsCalculationSchedulingJob"
  queue: visit_suggesting
  schedule: every day at 0:00

visit_suggesting_job:
  class: "BulkVisitsSuggestingJob"
  queue: visit_suggesting
  schedule: every day at 00:05

watcher_job:
  class: "Import::WatcherJob"
  queue: imports
  schedule: every hour

app_version_checking_job:
  class: "AppVersionCheckingJob"
  queue: default
  schedule: every 6 hours

cache_preheating_job:
  class: "Cache::PreheatingJob"
  queue: default
  schedule: every day at 0:00
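Each entry becomes a SolidQueue recurring task; schedule strings such as "every hour" and "every day at 00:05" are parsed by fugit, which the Gemfile.lock hunk above pulls in as a solid_queue dependency. A console sketch for verifying what got loaded (illustrative output):

# Recurring entries are materialized as rows in solid_queue_recurring_tasks
# (see db/queue_schema.rb below).
SolidQueue::RecurringTask.pluck(:key, :schedule)
# => [["periodic_cleanup", "every month"], ["watcher_job", "every hour"], ...]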
config/routes.rb

@@ -6,6 +6,7 @@ Rails.application.routes.draw do
  mount ActionCable.server => '/cable'
  mount Rswag::Api::Engine => '/api-docs'
  mount Rswag::Ui::Engine => '/api-docs'
  mount MissionControl::Jobs::Engine, at: '/jobs'

  Sidekiq::Web.use(Rack::Auth::Basic) do |username, password|
    ActiveSupport::SecurityUtils.secure_compare(
db/cable_schema.rb  (26, new file)

@@ -0,0 +1,26 @@
# This file is auto-generated from the current state of the database. Instead
# of editing this file, please use the migrations feature of Active Record to
# incrementally modify your database, and then regenerate this schema definition.
#
# This file is the source Rails uses to define your schema when running `bin/rails
# db:schema:load`. When creating a new database, `bin/rails db:schema:load` tends to
# be faster and is potentially less error prone than running all of your
# migrations from scratch. Old migrations may fail to apply correctly if those
# migrations use external dependencies or application code.
#
# It's strongly recommended that you check this file into your version control system.

ActiveRecord::Schema[8.0].define(version: 1) do
  # These are extensions that must be enabled in order to support this database
  enable_extension "pg_catalog.plpgsql"

  create_table "solid_cable_messages", force: :cascade do |t|
    t.binary "channel", null: false
    t.binary "payload", null: false
    t.datetime "created_at", null: false
    t.bigint "channel_hash", null: false
    t.index ["channel"], name: "index_solid_cable_messages_on_channel"
    t.index ["channel_hash"], name: "index_solid_cable_messages_on_channel_hash"
    t.index ["created_at"], name: "index_solid_cable_messages_on_created_at"
  end
end
db/queue_schema.rb  (144, new file)

@@ -0,0 +1,144 @@
# This file is auto-generated from the current state of the database. Instead
|
||||
# of editing this file, please use the migrations feature of Active Record to
|
||||
# incrementally modify your database, and then regenerate this schema definition.
|
||||
#
|
||||
# This file is the source Rails uses to define your schema when running `bin/rails
|
||||
# db:schema:load`. When creating a new database, `bin/rails db:schema:load` tends to
|
||||
# be faster and is potentially less error prone than running all of your
|
||||
# migrations from scratch. Old migrations may fail to apply correctly if those
|
||||
# migrations use external dependencies or application code.
|
||||
#
|
||||
# It's strongly recommended that you check this file into your version control system.
|
||||
|
||||
ActiveRecord::Schema[8.0].define(version: 1) do
|
||||
# These are extensions that must be enabled in order to support this database
|
||||
enable_extension "pg_catalog.plpgsql"
|
||||
|
||||
create_table "solid_queue_blocked_executions", force: :cascade do |t|
|
||||
t.bigint "job_id", null: false
|
||||
t.string "queue_name", null: false
|
||||
t.integer "priority", default: 0, null: false
|
||||
t.string "concurrency_key", null: false
|
||||
t.datetime "expires_at", null: false
|
||||
t.datetime "created_at", null: false
|
||||
t.index ["concurrency_key", "priority", "job_id"], name: "index_solid_queue_blocked_executions_for_release"
|
||||
t.index ["expires_at", "concurrency_key"], name: "index_solid_queue_blocked_executions_for_maintenance"
|
||||
t.index ["job_id"], name: "index_solid_queue_blocked_executions_on_job_id", unique: true
|
||||
end
|
||||
|
||||
create_table "solid_queue_claimed_executions", force: :cascade do |t|
|
||||
t.bigint "job_id", null: false
|
||||
t.bigint "process_id"
|
||||
t.datetime "created_at", null: false
|
||||
t.index ["job_id"], name: "index_solid_queue_claimed_executions_on_job_id", unique: true
|
||||
t.index ["process_id", "job_id"], name: "index_solid_queue_claimed_executions_on_process_id_and_job_id"
|
||||
end
|
||||
|
||||
create_table "solid_queue_failed_executions", force: :cascade do |t|
|
||||
t.bigint "job_id", null: false
|
||||
t.text "error"
|
||||
t.datetime "created_at", null: false
|
||||
t.index ["job_id"], name: "index_solid_queue_failed_executions_on_job_id", unique: true
|
||||
end
|
||||
|
||||
create_table "solid_queue_jobs", force: :cascade do |t|
|
||||
t.string "queue_name", null: false
|
||||
t.string "class_name", null: false
|
||||
t.text "arguments"
|
||||
t.integer "priority", default: 0, null: false
|
||||
t.string "active_job_id"
|
||||
t.datetime "scheduled_at"
|
||||
t.datetime "finished_at"
|
||||
t.string "concurrency_key"
|
||||
t.datetime "created_at", null: false
|
||||
t.datetime "updated_at", null: false
|
||||
t.index ["active_job_id"], name: "index_solid_queue_jobs_on_active_job_id"
|
||||
t.index ["class_name"], name: "index_solid_queue_jobs_on_class_name"
|
||||
t.index ["finished_at"], name: "index_solid_queue_jobs_on_finished_at"
|
||||
t.index ["queue_name", "finished_at"], name: "index_solid_queue_jobs_for_filtering"
|
||||
t.index ["scheduled_at", "finished_at"], name: "index_solid_queue_jobs_for_alerting"
|
||||
end
|
||||
|
||||
create_table "solid_queue_pauses", force: :cascade do |t|
|
||||
t.string "queue_name", null: false
|
||||
t.datetime "created_at", null: false
|
||||
t.index ["queue_name"], name: "index_solid_queue_pauses_on_queue_name", unique: true
|
||||
end
|
||||
|
||||
create_table "solid_queue_processes", force: :cascade do |t|
|
||||
t.string "kind", null: false
|
||||
t.datetime "last_heartbeat_at", null: false
|
||||
t.bigint "supervisor_id"
|
||||
t.integer "pid", null: false
|
||||
t.string "hostname"
|
||||
t.text "metadata"
|
||||
t.datetime "created_at", null: false
|
||||
t.string "name", null: false
|
||||
t.index ["last_heartbeat_at"], name: "index_solid_queue_processes_on_last_heartbeat_at"
|
||||
t.index ["name", "supervisor_id"], name: "index_solid_queue_processes_on_name_and_supervisor_id", unique: true
|
||||
t.index ["supervisor_id"], name: "index_solid_queue_processes_on_supervisor_id"
|
||||
end
|
||||
|
||||
create_table "solid_queue_ready_executions", force: :cascade do |t|
|
||||
t.bigint "job_id", null: false
|
||||
t.string "queue_name", null: false
|
||||
t.integer "priority", default: 0, null: false
|
||||
t.datetime "created_at", null: false
|
||||
t.index ["job_id"], name: "index_solid_queue_ready_executions_on_job_id", unique: true
|
||||
t.index ["priority", "job_id"], name: "index_solid_queue_poll_all"
|
||||
t.index ["queue_name", "priority", "job_id"], name: "index_solid_queue_poll_by_queue"
|
||||
end
|
||||
|
||||
create_table "solid_queue_recurring_executions", force: :cascade do |t|
|
||||
t.bigint "job_id", null: false
|
||||
t.string "task_key", null: false
|
||||
t.datetime "run_at", null: false
|
||||
t.datetime "created_at", null: false
|
||||
t.index ["job_id"], name: "index_solid_queue_recurring_executions_on_job_id", unique: true
|
||||
t.index ["task_key", "run_at"], name: "index_solid_queue_recurring_executions_on_task_key_and_run_at", unique: true
|
||||
end
|
||||
|
||||
create_table "solid_queue_recurring_tasks", force: :cascade do |t|
|
||||
t.string "key", null: false
|
||||
t.string "schedule", null: false
|
||||
t.string "command", limit: 2048
|
||||
t.string "class_name"
|
||||
t.text "arguments"
|
||||
t.string "queue_name"
|
||||
t.integer "priority", default: 0
|
||||
t.boolean "static", default: true, null: false
|
||||
t.text "description"
|
||||
t.datetime "created_at", null: false
|
||||
t.datetime "updated_at", null: false
|
||||
t.index ["key"], name: "index_solid_queue_recurring_tasks_on_key", unique: true
|
||||
t.index ["static"], name: "index_solid_queue_recurring_tasks_on_static"
|
||||
end
|
||||
|
||||
create_table "solid_queue_scheduled_executions", force: :cascade do |t|
|
||||
t.bigint "job_id", null: false
|
||||
t.string "queue_name", null: false
|
||||
t.integer "priority", default: 0, null: false
|
||||
t.datetime "scheduled_at", null: false
|
||||
t.datetime "created_at", null: false
|
||||
t.index ["job_id"], name: "index_solid_queue_scheduled_executions_on_job_id", unique: true
|
||||
t.index ["scheduled_at", "priority", "job_id"], name: "index_solid_queue_dispatch_all"
|
||||
end
|
||||
|
||||
create_table "solid_queue_semaphores", force: :cascade do |t|
|
||||
t.string "key", null: false
|
||||
t.integer "value", default: 1, null: false
|
||||
t.datetime "expires_at", null: false
|
||||
t.datetime "created_at", null: false
|
||||
t.datetime "updated_at", null: false
|
||||
t.index ["expires_at"], name: "index_solid_queue_semaphores_on_expires_at"
|
||||
t.index ["key", "value"], name: "index_solid_queue_semaphores_on_key_and_value"
|
||||
t.index ["key"], name: "index_solid_queue_semaphores_on_key", unique: true
|
||||
end
|
||||
|
||||
add_foreign_key "solid_queue_blocked_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
|
||||
add_foreign_key "solid_queue_claimed_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
|
||||
add_foreign_key "solid_queue_failed_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
|
||||
add_foreign_key "solid_queue_ready_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
|
||||
add_foreign_key "solid_queue_recurring_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
|
||||
add_foreign_key "solid_queue_scheduled_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
|
||||
end
|
||||
db/schema.rb  (447, generated)

@@ -10,264 +10,261 @@
#
|
||||
# It's strongly recommended that you check this file into your version control system.
|
||||
|
||||
ActiveRecord::Schema[8.0].define(version: 20_250_404_182_437) do
|
||||
ActiveRecord::Schema[8.0].define(version: 2025_04_04_182437) do
|
||||
# These are extensions that must be enabled in order to support this database
|
||||
enable_extension 'pg_catalog.plpgsql'
|
||||
enable_extension 'postgis'
|
||||
enable_extension "pg_catalog.plpgsql"
|
||||
enable_extension "postgis"
|
||||
|
||||
create_table 'action_text_rich_texts', force: :cascade do |t|
|
||||
t.string 'name', null: false
|
||||
t.text 'body'
|
||||
t.string 'record_type', null: false
|
||||
t.bigint 'record_id', null: false
|
||||
t.datetime 'created_at', null: false
|
||||
t.datetime 'updated_at', null: false
|
||||
t.index %w[record_type record_id name], name: 'index_action_text_rich_texts_uniqueness', unique: true
|
||||
create_table "action_text_rich_texts", force: :cascade do |t|
|
||||
t.string "name", null: false
|
||||
t.text "body"
|
||||
t.string "record_type", null: false
|
||||
t.bigint "record_id", null: false
|
||||
t.datetime "created_at", null: false
|
||||
t.datetime "updated_at", null: false
|
||||
t.index ["record_type", "record_id", "name"], name: "index_action_text_rich_texts_uniqueness", unique: true
|
||||
end
|
||||
|
||||
create_table 'active_storage_attachments', force: :cascade do |t|
|
||||
t.string 'name', null: false
|
||||
t.string 'record_type', null: false
|
||||
t.bigint 'record_id', null: false
|
||||
t.bigint 'blob_id', null: false
|
||||
t.datetime 'created_at', null: false
|
||||
t.index ['blob_id'], name: 'index_active_storage_attachments_on_blob_id'
|
||||
t.index %w[record_type record_id name blob_id], name: 'index_active_storage_attachments_uniqueness',
|
||||
unique: true
|
||||
create_table "active_storage_attachments", force: :cascade do |t|
|
||||
t.string "name", null: false
|
||||
t.string "record_type", null: false
|
||||
t.bigint "record_id", null: false
|
||||
t.bigint "blob_id", null: false
|
||||
t.datetime "created_at", null: false
|
||||
t.index ["blob_id"], name: "index_active_storage_attachments_on_blob_id"
|
||||
t.index ["record_type", "record_id", "name", "blob_id"], name: "index_active_storage_attachments_uniqueness", unique: true
|
||||
end
|
||||
|
||||
create_table 'active_storage_blobs', force: :cascade do |t|
|
||||
t.string 'key', null: false
|
||||
t.string 'filename', null: false
|
||||
t.string 'content_type'
|
||||
t.text 'metadata'
|
||||
t.string 'service_name', null: false
|
||||
t.bigint 'byte_size', null: false
|
||||
t.string 'checksum'
|
||||
t.datetime 'created_at', null: false
|
||||
t.index ['key'], name: 'index_active_storage_blobs_on_key', unique: true
|
||||
create_table "active_storage_blobs", force: :cascade do |t|
|
||||
t.string "key", null: false
|
||||
t.string "filename", null: false
|
||||
t.string "content_type"
|
||||
t.text "metadata"
|
||||
t.string "service_name", null: false
|
||||
t.bigint "byte_size", null: false
|
||||
t.string "checksum"
|
||||
t.datetime "created_at", null: false
|
||||
t.index ["key"], name: "index_active_storage_blobs_on_key", unique: true
|
||||
end
|
||||
|
||||
create_table 'active_storage_variant_records', force: :cascade do |t|
|
||||
t.bigint 'blob_id', null: false
|
||||
t.string 'variation_digest', null: false
|
||||
t.index %w[blob_id variation_digest], name: 'index_active_storage_variant_records_uniqueness', unique: true
|
||||
create_table "active_storage_variant_records", force: :cascade do |t|
|
||||
t.bigint "blob_id", null: false
|
||||
t.string "variation_digest", null: false
|
||||
t.index ["blob_id", "variation_digest"], name: "index_active_storage_variant_records_uniqueness", unique: true
|
||||
end
|
||||
|
||||
create_table 'areas', force: :cascade do |t|
|
||||
t.string 'name', null: false
|
||||
t.bigint 'user_id', null: false
|
||||
t.decimal 'longitude', precision: 10, scale: 6, null: false
|
||||
t.decimal 'latitude', precision: 10, scale: 6, null: false
|
||||
t.integer 'radius', null: false
|
||||
t.datetime 'created_at', null: false
|
||||
t.datetime 'updated_at', null: false
|
||||
t.index ['user_id'], name: 'index_areas_on_user_id'
|
||||
create_table "areas", force: :cascade do |t|
|
||||
t.string "name", null: false
|
||||
t.bigint "user_id", null: false
|
||||
t.decimal "longitude", precision: 10, scale: 6, null: false
|
||||
t.decimal "latitude", precision: 10, scale: 6, null: false
|
||||
t.integer "radius", null: false
|
||||
t.datetime "created_at", null: false
|
||||
t.datetime "updated_at", null: false
|
||||
t.index ["user_id"], name: "index_areas_on_user_id"
|
||||
end
|
||||
|
||||
create_table 'data_migrations', primary_key: 'version', id: :string, force: :cascade do |t|
|
||||
create_table "data_migrations", primary_key: "version", id: :string, force: :cascade do |t|
|
||||
end
|
||||
|
||||
create_table 'exports', force: :cascade do |t|
|
||||
t.string 'name', null: false
|
||||
t.string 'url'
|
||||
t.integer 'status', default: 0, null: false
|
||||
t.bigint 'user_id', null: false
|
||||
t.datetime 'created_at', null: false
|
||||
t.datetime 'updated_at', null: false
|
||||
t.integer 'file_format', default: 0
|
||||
t.datetime 'start_at'
|
||||
t.datetime 'end_at'
|
||||
t.index ['status'], name: 'index_exports_on_status'
|
||||
t.index ['user_id'], name: 'index_exports_on_user_id'
|
||||
create_table "exports", force: :cascade do |t|
|
||||
t.string "name", null: false
|
||||
t.string "url"
|
||||
t.integer "status", default: 0, null: false
|
||||
t.bigint "user_id", null: false
|
||||
t.datetime "created_at", null: false
|
||||
t.datetime "updated_at", null: false
|
||||
t.integer "file_format", default: 0
|
||||
t.datetime "start_at"
|
||||
t.datetime "end_at"
|
||||
t.index ["status"], name: "index_exports_on_status"
|
||||
t.index ["user_id"], name: "index_exports_on_user_id"
|
||||
end
|
||||
|
||||
create_table 'imports', force: :cascade do |t|
|
||||
t.string 'name', null: false
|
||||
t.bigint 'user_id', null: false
|
||||
t.integer 'source', default: 0
|
||||
t.datetime 'created_at', null: false
|
||||
t.datetime 'updated_at', null: false
|
||||
t.integer 'raw_points', default: 0
|
||||
t.integer 'doubles', default: 0
|
||||
t.integer 'processed', default: 0
|
||||
t.jsonb 'raw_data'
|
||||
t.integer 'points_count', default: 0
|
||||
t.index ['source'], name: 'index_imports_on_source'
|
||||
t.index ['user_id'], name: 'index_imports_on_user_id'
|
||||
create_table "imports", force: :cascade do |t|
|
||||
t.string "name", null: false
|
||||
t.bigint "user_id", null: false
|
||||
t.integer "source", default: 0
|
||||
t.datetime "created_at", null: false
|
||||
t.datetime "updated_at", null: false
|
||||
t.integer "raw_points", default: 0
|
||||
t.integer "doubles", default: 0
|
||||
t.integer "processed", default: 0
|
||||
t.jsonb "raw_data"
|
||||
t.integer "points_count", default: 0
|
||||
t.index ["source"], name: "index_imports_on_source"
|
||||
t.index ["user_id"], name: "index_imports_on_user_id"
|
||||
end
|
||||
|
||||
create_table 'notifications', force: :cascade do |t|
|
||||
t.string 'title', null: false
|
||||
t.text 'content', null: false
|
||||
t.bigint 'user_id', null: false
|
||||
t.integer 'kind', default: 0, null: false
|
||||
t.datetime 'read_at'
|
||||
t.datetime 'created_at', null: false
|
||||
t.datetime 'updated_at', null: false
|
||||
t.index ['kind'], name: 'index_notifications_on_kind'
|
||||
t.index ['user_id'], name: 'index_notifications_on_user_id'
|
||||
create_table "notifications", force: :cascade do |t|
|
||||
t.string "title", null: false
|
||||
t.text "content", null: false
|
||||
t.bigint "user_id", null: false
|
||||
t.integer "kind", default: 0, null: false
|
||||
t.datetime "read_at"
|
||||
t.datetime "created_at", null: false
|
||||
t.datetime "updated_at", null: false
|
||||
t.index ["kind"], name: "index_notifications_on_kind"
|
||||
t.index ["user_id"], name: "index_notifications_on_user_id"
|
||||
end
|
||||
|
||||
create_table 'place_visits', force: :cascade do |t|
|
||||
t.bigint 'place_id', null: false
|
||||
t.bigint 'visit_id', null: false
|
||||
t.datetime 'created_at', null: false
|
||||
t.datetime 'updated_at', null: false
|
||||
t.index ['place_id'], name: 'index_place_visits_on_place_id'
|
||||
t.index ['visit_id'], name: 'index_place_visits_on_visit_id'
|
||||
create_table "place_visits", force: :cascade do |t|
|
||||
t.bigint "place_id", null: false
|
||||
t.bigint "visit_id", null: false
|
||||
t.datetime "created_at", null: false
|
||||
t.datetime "updated_at", null: false
|
||||
t.index ["place_id"], name: "index_place_visits_on_place_id"
|
||||
t.index ["visit_id"], name: "index_place_visits_on_visit_id"
|
||||
end
|
||||
|
||||
create_table 'places', force: :cascade do |t|
|
||||
t.string 'name', null: false
|
||||
t.decimal 'longitude', precision: 10, scale: 6, null: false
|
||||
t.decimal 'latitude', precision: 10, scale: 6, null: false
|
||||
t.string 'city'
|
||||
t.string 'country'
|
||||
t.integer 'source', default: 0
|
||||
t.jsonb 'geodata', default: {}, null: false
|
||||
t.datetime 'reverse_geocoded_at'
|
||||
t.datetime 'created_at', null: false
|
||||
t.datetime 'updated_at', null: false
|
||||
t.geography 'lonlat', limit: { srid: 4326, type: 'st_point', geographic: true }
|
||||
t.index ['lonlat'], name: 'index_places_on_lonlat', using: :gist
|
||||
create_table "places", force: :cascade do |t|
|
||||
t.string "name", null: false
|
||||
t.decimal "longitude", precision: 10, scale: 6, null: false
|
||||
t.decimal "latitude", precision: 10, scale: 6, null: false
|
||||
t.string "city"
|
||||
t.string "country"
|
||||
t.integer "source", default: 0
|
||||
t.jsonb "geodata", default: {}, null: false
|
||||
t.datetime "reverse_geocoded_at"
|
||||
t.datetime "created_at", null: false
|
||||
t.datetime "updated_at", null: false
|
||||
t.geography "lonlat", limit: {srid: 4326, type: "st_point", geographic: true}
|
||||
t.index ["lonlat"], name: "index_places_on_lonlat", using: :gist
|
||||
end
|
||||
|
||||
create_table 'points', force: :cascade do |t|
|
||||
t.integer 'battery_status'
|
||||
t.string 'ping'
|
||||
t.integer 'battery'
|
||||
t.string 'tracker_id'
|
||||
t.string 'topic'
|
||||
t.integer 'altitude'
|
||||
t.decimal 'longitude', precision: 10, scale: 6
|
||||
t.string 'velocity'
|
||||
t.integer 'trigger'
|
||||
t.string 'bssid'
|
||||
t.string 'ssid'
|
||||
t.integer 'connection'
|
||||
t.integer 'vertical_accuracy'
|
||||
t.integer 'accuracy'
|
||||
t.integer 'timestamp'
|
||||
t.decimal 'latitude', precision: 10, scale: 6
|
||||
t.integer 'mode'
|
||||
t.text 'inrids', default: [], array: true
|
||||
t.text 'in_regions', default: [], array: true
|
||||
t.jsonb 'raw_data', default: {}
|
||||
t.bigint 'import_id'
|
||||
t.string 'city'
|
||||
t.string 'country'
|
||||
t.datetime 'created_at', null: false
|
||||
t.datetime 'updated_at', null: false
|
||||
t.bigint 'user_id'
|
||||
t.jsonb 'geodata', default: {}, null: false
|
||||
t.bigint 'visit_id'
|
||||
t.datetime 'reverse_geocoded_at'
|
||||
t.decimal 'course', precision: 8, scale: 5
|
||||
t.decimal 'course_accuracy', precision: 8, scale: 5
|
||||
t.string 'external_track_id'
|
||||
t.geography 'lonlat', limit: { srid: 4326, type: 'st_point', geographic: true }
|
||||
t.index ['altitude'], name: 'index_points_on_altitude'
|
||||
t.index ['battery'], name: 'index_points_on_battery'
|
||||
t.index ['battery_status'], name: 'index_points_on_battery_status'
|
||||
t.index ['city'], name: 'index_points_on_city'
|
||||
t.index ['connection'], name: 'index_points_on_connection'
|
||||
t.index ['country'], name: 'index_points_on_country'
|
||||
t.index ['external_track_id'], name: 'index_points_on_external_track_id'
|
||||
t.index ['geodata'], name: 'index_points_on_geodata', using: :gin
|
||||
t.index ['import_id'], name: 'index_points_on_import_id'
|
||||
t.index %w[latitude longitude], name: 'index_points_on_latitude_and_longitude'
|
||||
t.index %w[lonlat timestamp user_id], name: 'index_points_on_lonlat_timestamp_user_id', unique: true
|
||||
t.index ['lonlat'], name: 'index_points_on_lonlat', using: :gist
|
||||
t.index ['reverse_geocoded_at'], name: 'index_points_on_reverse_geocoded_at'
|
||||
t.index ['timestamp'], name: 'index_points_on_timestamp'
|
||||
t.index ['trigger'], name: 'index_points_on_trigger'
|
||||
t.index ['user_id'], name: 'index_points_on_user_id'
|
||||
t.index ['visit_id'], name: 'index_points_on_visit_id'
|
||||
create_table "points", force: :cascade do |t|
|
||||
t.integer "battery_status"
|
||||
t.string "ping"
|
||||
t.integer "battery"
|
||||
t.string "tracker_id"
|
||||
t.string "topic"
|
||||
t.integer "altitude"
|
||||
t.decimal "longitude", precision: 10, scale: 6
|
||||
t.string "velocity"
|
||||
t.integer "trigger"
|
||||
t.string "bssid"
|
||||
t.string "ssid"
|
||||
t.integer "connection"
|
||||
t.integer "vertical_accuracy"
|
||||
t.integer "accuracy"
|
||||
t.integer "timestamp"
|
||||
t.decimal "latitude", precision: 10, scale: 6
|
||||
t.integer "mode"
|
||||
t.text "inrids", default: [], array: true
|
||||
t.text "in_regions", default: [], array: true
|
||||
t.jsonb "raw_data", default: {}
|
||||
t.bigint "import_id"
|
||||
t.string "city"
|
||||
t.string "country"
|
||||
t.datetime "created_at", null: false
|
||||
t.datetime "updated_at", null: false
|
||||
t.bigint "user_id"
|
||||
t.jsonb "geodata", default: {}, null: false
|
||||
t.bigint "visit_id"
|
||||
t.datetime "reverse_geocoded_at"
|
||||
t.decimal "course", precision: 8, scale: 5
|
||||
t.decimal "course_accuracy", precision: 8, scale: 5
|
||||
t.string "external_track_id"
|
||||
t.geography "lonlat", limit: {srid: 4326, type: "st_point", geographic: true}
|
||||
t.index ["altitude"], name: "index_points_on_altitude"
|
||||
t.index ["battery"], name: "index_points_on_battery"
|
||||
t.index ["battery_status"], name: "index_points_on_battery_status"
|
||||
t.index ["city"], name: "index_points_on_city"
|
||||
t.index ["connection"], name: "index_points_on_connection"
|
||||
t.index ["country"], name: "index_points_on_country"
|
||||
t.index ["external_track_id"], name: "index_points_on_external_track_id"
|
||||
t.index ["geodata"], name: "index_points_on_geodata", using: :gin
|
||||
t.index ["import_id"], name: "index_points_on_import_id"
|
||||
t.index ["latitude", "longitude"], name: "index_points_on_latitude_and_longitude"
|
||||
t.index ["lonlat", "timestamp", "user_id"], name: "index_points_on_lonlat_timestamp_user_id", unique: true
|
||||
t.index ["lonlat"], name: "index_points_on_lonlat", using: :gist
|
||||
t.index ["reverse_geocoded_at"], name: "index_points_on_reverse_geocoded_at"
|
||||
t.index ["timestamp"], name: "index_points_on_timestamp"
|
||||
t.index ["trigger"], name: "index_points_on_trigger"
|
||||
t.index ["user_id"], name: "index_points_on_user_id"
|
||||
t.index ["visit_id"], name: "index_points_on_visit_id"
|
||||
end
|
||||
|
||||
create_table 'stats', force: :cascade do |t|
|
||||
t.integer 'year', null: false
|
||||
t.integer 'month', null: false
|
||||
t.integer 'distance', null: false
|
||||
t.jsonb 'toponyms'
|
||||
t.datetime 'created_at', null: false
|
||||
t.datetime 'updated_at', null: false
|
||||
t.bigint 'user_id', null: false
|
||||
t.jsonb 'daily_distance', default: {}
|
||||
t.index ['distance'], name: 'index_stats_on_distance'
|
||||
t.index ['month'], name: 'index_stats_on_month'
|
||||
t.index ['user_id'], name: 'index_stats_on_user_id'
|
||||
t.index ['year'], name: 'index_stats_on_year'
|
||||
create_table "stats", force: :cascade do |t|
|
||||
t.integer "year", null: false
|
||||
t.integer "month", null: false
|
||||
t.integer "distance", null: false
|
||||
t.jsonb "toponyms"
|
||||
t.datetime "created_at", null: false
|
||||
t.datetime "updated_at", null: false
|
||||
t.bigint "user_id", null: false
|
||||
t.jsonb "daily_distance", default: {}
|
||||
t.index ["distance"], name: "index_stats_on_distance"
|
||||
t.index ["month"], name: "index_stats_on_month"
|
||||
t.index ["user_id"], name: "index_stats_on_user_id"
|
||||
t.index ["year"], name: "index_stats_on_year"
|
||||
end
|
||||
|
||||
create_table 'trips', force: :cascade do |t|
|
||||
t.string 'name', null: false
|
||||
t.datetime 'started_at', null: false
|
||||
t.datetime 'ended_at', null: false
|
||||
t.integer 'distance'
|
||||
t.bigint 'user_id', null: false
|
||||
t.datetime 'created_at', null: false
|
||||
t.datetime 'updated_at', null: false
|
||||
t.geometry 'path', limit: { srid: 3857, type: 'line_string' }
|
||||
t.index ['user_id'], name: 'index_trips_on_user_id'
|
||||
create_table "trips", force: :cascade do |t|
|
||||
t.string "name", null: false
|
||||
t.datetime "started_at", null: false
|
||||
t.datetime "ended_at", null: false
|
||||
t.integer "distance"
|
||||
t.bigint "user_id", null: false
|
||||
t.datetime "created_at", null: false
|
||||
t.datetime "updated_at", null: false
|
||||
t.geometry "path", limit: {srid: 3857, type: "line_string"}
|
||||
t.index ["user_id"], name: "index_trips_on_user_id"
|
||||
end
|
||||
|
||||
create_table 'users', force: :cascade do |t|
|
||||
t.string 'email', default: '', null: false
|
||||
t.string 'encrypted_password', default: '', null: false
|
||||
t.string 'reset_password_token'
|
||||
t.datetime 'reset_password_sent_at'
|
||||
t.datetime 'remember_created_at'
|
||||
t.datetime 'created_at', null: false
|
||||
t.datetime 'updated_at', null: false
|
||||
t.string 'api_key', default: '', null: false
|
||||
t.string 'theme', default: 'dark', null: false
|
||||
t.jsonb 'settings',
|
||||
default: { 'fog_of_war_meters' => '100', 'meters_between_routes' => '1000',
|
||||
'minutes_between_routes' => '60' }
|
||||
t.boolean 'admin', default: false
|
||||
t.integer 'sign_in_count', default: 0, null: false
|
||||
t.datetime 'current_sign_in_at'
|
||||
t.datetime 'last_sign_in_at'
|
||||
t.string 'current_sign_in_ip'
|
||||
t.string 'last_sign_in_ip'
|
||||
t.integer 'status', default: 0
|
||||
t.datetime 'active_until'
|
||||
t.index ['email'], name: 'index_users_on_email', unique: true
|
||||
t.index ['reset_password_token'], name: 'index_users_on_reset_password_token', unique: true
|
||||
create_table "users", force: :cascade do |t|
|
||||
t.string "email", default: "", null: false
|
||||
t.string "encrypted_password", default: "", null: false
|
||||
t.string "reset_password_token"
|
||||
t.datetime "reset_password_sent_at"
|
||||
t.datetime "remember_created_at"
|
||||
t.datetime "created_at", null: false
|
||||
t.datetime "updated_at", null: false
|
||||
t.string "api_key", default: "", null: false
|
||||
t.string "theme", default: "dark", null: false
|
||||
t.jsonb "settings", default: {"fog_of_war_meters" => "100", "meters_between_routes" => "1000", "minutes_between_routes" => "60"}
|
||||
t.boolean "admin", default: false
|
||||
t.integer "sign_in_count", default: 0, null: false
|
||||
t.datetime "current_sign_in_at"
|
||||
t.datetime "last_sign_in_at"
|
||||
t.string "current_sign_in_ip"
|
||||
t.string "last_sign_in_ip"
|
||||
t.integer "status", default: 0
|
||||
t.datetime "active_until"
|
||||
t.index ["email"], name: "index_users_on_email", unique: true
|
||||
t.index ["reset_password_token"], name: "index_users_on_reset_password_token", unique: true
|
||||
end
|
||||
|
||||
add_check_constraint 'users', 'admin IS NOT NULL', name: 'users_admin_null', validate: false
|
||||
add_check_constraint "users", "admin IS NOT NULL", name: "users_admin_null", validate: false
|
||||
|
||||
create_table 'visits', force: :cascade do |t|
|
||||
t.bigint 'area_id'
|
||||
t.bigint 'user_id', null: false
|
||||
t.datetime 'started_at', null: false
|
||||
t.datetime 'ended_at', null: false
|
||||
t.integer 'duration', null: false
|
||||
t.string 'name', null: false
|
||||
t.integer 'status', default: 0, null: false
|
||||
t.datetime 'created_at', null: false
|
||||
t.datetime 'updated_at', null: false
|
||||
t.bigint 'place_id'
|
||||
t.index ['area_id'], name: 'index_visits_on_area_id'
|
||||
t.index ['place_id'], name: 'index_visits_on_place_id'
|
||||
t.index ['started_at'], name: 'index_visits_on_started_at'
|
||||
t.index ['user_id'], name: 'index_visits_on_user_id'
|
||||
create_table "visits", force: :cascade do |t|
|
||||
t.bigint "area_id"
|
||||
t.bigint "user_id", null: false
|
||||
t.datetime "started_at", null: false
|
||||
t.datetime "ended_at", null: false
|
||||
t.integer "duration", null: false
|
||||
t.string "name", null: false
|
||||
t.integer "status", default: 0, null: false
|
||||
t.datetime "created_at", null: false
|
||||
t.datetime "updated_at", null: false
|
||||
t.bigint "place_id"
|
||||
t.index ["area_id"], name: "index_visits_on_area_id"
|
||||
t.index ["place_id"], name: "index_visits_on_place_id"
|
||||
t.index ["started_at"], name: "index_visits_on_started_at"
|
||||
t.index ["user_id"], name: "index_visits_on_user_id"
|
||||
end
|
||||
|
||||
add_foreign_key 'active_storage_attachments', 'active_storage_blobs', column: 'blob_id'
|
||||
add_foreign_key 'active_storage_variant_records', 'active_storage_blobs', column: 'blob_id'
|
||||
add_foreign_key 'areas', 'users'
|
||||
add_foreign_key 'notifications', 'users'
|
||||
add_foreign_key 'place_visits', 'places'
|
||||
add_foreign_key 'place_visits', 'visits'
|
||||
add_foreign_key 'points', 'users'
|
||||
add_foreign_key 'points', 'visits'
|
||||
add_foreign_key 'stats', 'users'
|
||||
add_foreign_key 'trips', 'users'
|
||||
add_foreign_key 'visits', 'areas'
|
||||
add_foreign_key 'visits', 'places'
|
||||
add_foreign_key 'visits', 'users'
|
||||
add_foreign_key "active_storage_attachments", "active_storage_blobs", column: "blob_id"
|
||||
add_foreign_key "active_storage_variant_records", "active_storage_blobs", column: "blob_id"
|
||||
add_foreign_key "areas", "users"
|
||||
add_foreign_key "notifications", "users"
|
||||
add_foreign_key "place_visits", "places"
|
||||
add_foreign_key "place_visits", "visits"
|
||||
add_foreign_key "points", "users"
|
||||
add_foreign_key "points", "visits"
|
||||
add_foreign_key "stats", "users"
|
||||
add_foreign_key "trips", "users"
|
||||
add_foreign_key "visits", "areas"
|
||||
add_foreign_key "visits", "places"
|
||||
add_foreign_key "visits", "users"
|
||||
end
|
||||
|
|
|
|||
|
|
@@ -8,11 +8,9 @@ RSpec.describe AreaVisitsCalculationSchedulingJob, type: :job do
  let(:user) { create(:user) }

  it 'calls the AreaVisitsCalculationService' do
    Sidekiq::Testing.inline! do
      expect(AreaVisitsCalculatingJob).to receive(:perform_later).with(user.id).and_call_original
    expect(AreaVisitsCalculatingJob).to receive(:perform_later).with(user.id).and_call_original

      described_class.new.perform
    end
    described_class.new.perform
  end
end
@@ -55,16 +55,12 @@ RSpec.describe Imports::Create do

  context 'when import is successful' do
    it 'schedules stats creating' do
      Sidekiq::Testing.inline! do
        expect { service.call }.to \
          have_enqueued_job(Stats::CalculatingJob).with(user.id, 2024, 3)
      end
      expect { service.call }.to \
        have_enqueued_job(Stats::CalculatingJob).with(user.id, 2024, 3)
    end

    it 'schedules visit suggesting' do
      Sidekiq::Testing.inline! do
        expect { service.call }.to have_enqueued_job(VisitSuggestingJob)
      end
      expect { service.call }.to have_enqueued_job(VisitSuggestingJob)
    end
  end
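With the jobs on Active Job, these specs no longer need Sidekiq::Testing.inline!: have_enqueued_job asserts against the Active Job test adapter directly, and when a spec really needs the job to execute, ActiveJob::TestHelper can run it inline. A sketch with a hypothetical job name:

RSpec.describe 'inline job execution (illustrative)', type: :job do
  include ActiveJob::TestHelper

  it 'performs enqueued jobs inside the block' do
    perform_enqueued_jobs do
      ExampleJob.perform_later(42) # hypothetical job
    end
  end
end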