Mirror of https://github.com/Freika/dawarich.git, synced 2026-01-10 17:21:38 -05:00

Commit c6ba487617: 37 changed files with 652 additions and 85 deletions

@@ -1 +1 @@
-0.26.7
+0.27.0

.github/workflows/build_and_push.yml (vendored, 30 changed lines)

@@ -51,12 +51,34 @@ jobs:
       - name: Set Docker tags
         id: docker_meta
         run: |
-          VERSION=${GITHUB_REF#refs/tags/}
+          # Debug output
+          echo "GITHUB_REF: $GITHUB_REF"
+          echo "GITHUB_REF_NAME: $GITHUB_REF_NAME"
+
+          # Extract version from GITHUB_REF or use GITHUB_REF_NAME
+          if [[ $GITHUB_REF == refs/tags/* ]]; then
+            VERSION=${GITHUB_REF#refs/tags/}
+          else
+            VERSION=$GITHUB_REF_NAME
+          fi
+
+          # Additional safety check - if VERSION is empty, use a default
+          if [ -z "$VERSION" ]; then
+            VERSION="rc"
+          fi
+
+          echo "Using VERSION: $VERSION"
+
+          TAGS="freikin/dawarich:${VERSION}"
+
+          # Set platforms based on release type
+          PLATFORMS="linux/amd64,linux/arm64,linux/arm/v8,linux/arm/v7,linux/arm/v6"
+
+          # Add :rc tag for pre-releases
+          if [ "${{ github.event.release.prerelease }}" = "true" ]; then
+            TAGS="${TAGS},freikin/dawarich:rc"
+            # For RC builds, only use amd64
+            PLATFORMS="linux/amd64"
+          fi
+
+          # Add :latest tag only if release is not a pre-release

@@ -64,7 +86,11 @@ jobs:
           TAGS="${TAGS},freikin/dawarich:latest"
           fi

+          echo "Final TAGS: $TAGS"
+          echo "PLATFORMS: $PLATFORMS"
+
           echo "tags=${TAGS}" >> $GITHUB_OUTPUT
+          echo "platforms=${PLATFORMS}" >> $GITHUB_OUTPUT

       - name: Build and push
         uses: docker/build-push-action@v5

@@ -74,6 +100,6 @@ jobs:
           push: true
           tags: ${{ steps.docker_meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
-          platforms: linux/amd64,linux/arm64,linux/arm/v8,linux/arm/v7,linux/arm/v6
+          platforms: ${{ steps.docker_meta.outputs.platforms }}
           cache-from: type=local,src=/tmp/.buildx-cache
           cache-to: type=local,dest=/tmp/.buildx-cache

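Taken together, the workflow now derives both the tag list and the target platforms in a single `docker_meta` step, whose outputs feed the build step. A minimal Ruby sketch of the same decision logic (illustrative only; the authoritative version is the shell snippet above):

```ruby
# Sketch of the tag/platform selection performed by the workflow step above.
def docker_meta(github_ref:, github_ref_name:, prerelease:)
  version = if github_ref.start_with?("refs/tags/")
              github_ref.delete_prefix("refs/tags/")
            else
              github_ref_name
            end
  version = "rc" if version.to_s.empty? # safety fallback, as in the script

  tags = ["freikin/dawarich:#{version}"]
  platforms = %w[linux/amd64 linux/arm64 linux/arm/v8 linux/arm/v7 linux/arm/v6]

  if prerelease
    tags << "freikin/dawarich:rc"
    platforms = %w[linux/amd64] # RC builds target amd64 only, to speed up builds
  else
    tags << "freikin/dawarich:latest"
  end

  { tags: tags.join(","), platforms: platforms.join(",") }
end
```
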
.gitignore (vendored, 4 changed lines)

@@ -72,3 +72,7 @@
 /config/credentials/staging.yml.enc

 Makefile
+
+/db/*.sqlite3
+/db/*.sqlite3-shm
+/db/*.sqlite3-wal

CHANGELOG.md (48 changed lines)

@@ -4,8 +4,54 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](http://keepachangelog.com/)
 and this project adheres to [Semantic Versioning](http://semver.org/).

-# 0.26.7 - 2025-05-26
+# 0.27.0 - 2025-05-31
+
+⚠️ This release includes a breaking change. ⚠️
+
+Starting with 0.27.0, Dawarich uses SolidQueue and SolidCache to run background jobs and cache data. Before updating, make sure your Sidekiq queues (https://your_dawarich_app/sidekiq) are empty.
+
+Moving to SolidQueue and SolidCache requires new SQLite databases, which will be created automatically when you start the app. They will be stored in the `dawarich_db_data` volume.
+
+The background jobs interface is now available at the `/jobs` page.
+
+Please update your `docker-compose.yml` and add the following:
+
+```diff
+  dawarich_app:
+    image: freikin/dawarich:latest
+    container_name: dawarich_app
+    volumes:
+      - dawarich_public:/var/app/public
+      - dawarich_watched:/var/app/tmp/imports/watched
+      - dawarich_storage:/var/app/storage
++     - dawarich_db_data:/dawarich_db_data
+    ...
+    environment:
+      ...
+      DATABASE_NAME: dawarich_development
+      # SQLite database paths for secondary databases
++     QUEUE_DATABASE_PATH: /dawarich_db_data/dawarich_development_queue.sqlite3
++     CACHE_DATABASE_PATH: /dawarich_db_data/dawarich_development_cache.sqlite3
++     CABLE_DATABASE_PATH: /dawarich_db_data/dawarich_development_cable.sqlite3
+```
+
+## Fixed
+
+- Enable caching in development for the docker image to improve performance.
+
+## Changed
+
+- SolidCache is now used for caching instead of Redis.
+- SolidQueue is now used for background jobs instead of Sidekiq.
+- SolidCable is now used as the ActionCable adapter.
+- Background jobs now run as a Puma plugin instead of in a separate Docker container.
+- The `rc` Docker image is now built for the amd64 architecture only, to speed up the build process.
+- Deleting an import with many points now works significantly faster.
+
+# 0.26.7 - 2025-05-29

 ## Fixed

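The upgrade note above asks you to drain Sidekiq before switching. One way to verify that from a Rails console in the pre-0.27.0 container, sketched with Sidekiq's public API:

```ruby
require "sidekiq/api"

# Queues, retries, and scheduled jobs should all be empty before the
# SolidQueue migration, or the pending work will be left behind in Redis.
queues_empty    = Sidekiq::Queue.all.all? { |q| q.size.zero? }
retries_empty   = Sidekiq::RetrySet.new.size.zero?
scheduled_empty = Sidekiq::ScheduledSet.new.size.zero?

puts(queues_empty && retries_empty && scheduled_empty ? "safe to upgrade" : "jobs still pending")
```
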
Gemfile (5 changed lines)

@@ -20,6 +20,7 @@ gem 'httparty'
 gem 'importmap-rails'
 gem 'kaminari'
 gem 'lograge'
+gem 'mission_control-jobs'
 gem 'oj'
 gem 'pg'
 gem 'prometheus_exporter'

@@ -35,6 +36,7 @@ gem 'rswag-api'
 gem 'rswag-ui'
 gem 'sentry-ruby'
 gem 'sentry-rails'
+gem 'sqlite3', '~> 2.6'
 gem 'stackprof'
 gem 'sidekiq'
 gem 'sidekiq-cron'

@@ -42,6 +44,9 @@ gem 'sidekiq-limit_fetch'
 gem 'sprockets-rails'
 gem 'stimulus-rails'
 gem 'strong_migrations'
+gem 'solid_cable', '~> 3.0'
+gem 'solid_cache', '1.0.7'
+gem 'solid_queue', '~> 1.1'
 gem 'tailwindcss-rails'
 gem 'turbo-rails'
 gem 'tzinfo-data', platforms: %i[mingw mswin x64_mingw jruby]

Gemfile.lock (39 changed lines)

@@ -232,7 +232,17 @@ GEM
     mini_mime (1.1.5)
     mini_portile2 (2.8.9)
     minitest (5.25.5)
-    msgpack (1.8.0)
+    mission_control-jobs (1.0.2)
+      actioncable (>= 7.1)
+      actionpack (>= 7.1)
+      activejob (>= 7.1)
+      activerecord (>= 7.1)
+      importmap-rails (>= 1.2.1)
+      irb (~> 1.13)
+      railties (>= 7.1)
+      stimulus-rails
+      turbo-rails
+    msgpack (1.7.3)
     multi_json (1.15.0)
     multi_xml (0.7.1)
       bigdecimal (~> 3.1)

@@ -442,6 +452,22 @@ GEM
       simplecov_json_formatter (~> 0.1)
     simplecov-html (0.13.1)
     simplecov_json_formatter (0.1.4)
+    solid_cable (3.0.8)
+      actioncable (>= 7.2)
+      activejob (>= 7.2)
+      activerecord (>= 7.2)
+      railties (>= 7.2)
+    solid_cache (1.0.7)
+      activejob (>= 7.2)
+      activerecord (>= 7.2)
+      railties (>= 7.2)
+    solid_queue (1.1.5)
+      activejob (>= 7.1)
+      activerecord (>= 7.1)
+      concurrent-ruby (>= 1.3.1)
+      fugit (~> 1.11.0)
+      railties (>= 7.1)
+      thor (~> 1.3.1)
     sprockets (4.2.1)
       concurrent-ruby (~> 1.0)
       rack (>= 2.2.4, < 4)

@@ -449,6 +475,12 @@ GEM
       actionpack (>= 6.1)
       activesupport (>= 6.1)
       sprockets (>= 3.0.0)
+    sqlite3 (2.6.0-aarch64-linux-gnu)
+    sqlite3 (2.6.0-arm-linux-gnu)
+    sqlite3 (2.6.0-arm64-darwin)
+    sqlite3 (2.6.0-x86-linux-gnu)
+    sqlite3 (2.6.0-x86_64-darwin)
+    sqlite3 (2.6.0-x86_64-linux-gnu)
     stackprof (0.2.27)
     stimulus-rails (1.3.4)
       railties (>= 6.0.0)

@@ -532,6 +564,7 @@ DEPENDENCIES
   jwt
   kaminari
   lograge
+  mission_control-jobs
   oj
   pg
   prometheus_exporter

@@ -558,7 +591,11 @@ DEPENDENCIES
   sidekiq-cron
   sidekiq-limit_fetch
   simplecov
+  solid_cable (~> 3.0)
+  solid_cache (= 1.0.7)
+  solid_queue (~> 1.1)
   sprockets-rails
+  sqlite3 (~> 2.6)
   stackprof
   stimulus-rails
   strong_migrations

Procfile (2 changed lines)

@@ -1,2 +1,2 @@
 web: bundle exec puma -C config/puma.rb
-worker: bundle exec sidekiq -C config/sidekiq.yml
+worker: bundle exec bin/jobs

@@ -1,7 +1,11 @@
 # frozen_string_literal: true

 class ApplicationJob < ActiveJob::Base
   # Automatically retry jobs that encountered a deadlock
   # retry_on ActiveRecord::Deadlocked

+  retry_on Exception, wait: :polynomially_longer, attempts: 25
+
   # Most jobs are safe to ignore if the underlying records are no longer available
   # discard_on ActiveJob::DeserializationError
 end

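Note that `retry_on Exception` is deliberately broad: every failing job is retried up to 25 times with polynomial backoff, including failures that will never succeed. Individual jobs can still opt out per error class, since handlers declared in a subclass are matched before inherited ones. A sketch, where `Imports::BrokenFileError` and `process_file` are hypothetical names:

```ruby
class ImportFileJob < ApplicationJob
  # Hypothetical error class: a file that cannot be parsed will never parse,
  # so discard instead of burning 25 retries on it. This declaration wins
  # over the inherited retry_on because subclass handlers are checked first.
  discard_on Imports::BrokenFileError

  def perform(import_id)
    Import.find(import_id).process_file # hypothetical method
  end
end
```
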
app/jobs/jobs/clean_finished_job.rb (new file, 9 lines)

@@ -0,0 +1,9 @@
+# frozen_string_literal: true
+
+class Jobs::CleanFinishedJob < ApplicationJob
+  queue_as :default
+
+  def perform
+    SolidQueue::Job.clear_finished_in_batches
+  end
+end

@@ -9,7 +9,10 @@ class Imports::Destroy
   end

   def call
-    @import.destroy!
+    ActiveRecord::Base.transaction do
+      @import.points.delete_all
+      @import.destroy!
+    end

     Stats::BulkCalculator.new(@user.id).call
   end

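This is the change behind the changelog entry about faster import deletion: `points.delete_all` removes the import's points in a single SQL DELETE, without instantiating each record or running per-point callbacks, and only the import row itself goes through `destroy!`. Because callbacks are skipped, stats recalculation is triggered explicitly afterwards via `Stats::BulkCalculator`. Roughly what now runs, assuming points were previously removed one by one through the association:

```ruby
ActiveRecord::Base.transaction do
  import.points.delete_all # one statement: DELETE FROM "points" WHERE "import_id" = $1
  import.destroy!          # callbacks still run for the import record itself
end
```
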
@@ -21,6 +21,7 @@ class Jobs::Create
     raise InvalidJobName, 'Invalid job name'
   end

+  # TODO: bulk enqueue reverse geocoding with ActiveJob
   points.find_each(&:async_reverse_geocode)
 end

bin/jobs (new executable file, 6 lines)

@@ -0,0 +1,6 @@
+#!/usr/bin/env ruby
+
+require_relative "../config/environment"
+require "solid_queue/cli"
+
+SolidQueue::Cli.start(ARGV)

@@ -1,11 +1,21 @@
 # Async adapter only works within the same process, so for manually triggering cable updates from a console,
 # and seeing results in the browser, you must do so from the web console (running inside the dev process),
 # not a terminal started via bin/rails console! Add "console" to any action or any ERB template view
 # to make the web console appear.

+default: &default
+  adapter: solid_cable
+  connects_to:
+    database:
+      writing: cable
+  polling_interval: 0.1.seconds
+  message_retention: 1.day
+
 development:
-  adapter: redis
-  url: <%= ENV['REDIS_URL'] %>
+  <<: *default

 test:
   adapter: test

 production:
-  adapter: redis
-  url: <%= ENV.fetch("REDIS_URL") { "redis://localhost:6379/1" } %>
-  channel_prefix: dawarich_production
+  <<: *default

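With solid_cable, broadcasts are written to the `solid_cable_messages` table (see db/cable_schema.rb below) and delivered to subscribers by polling at the configured `polling_interval`; application code is unchanged. A sketch, with an illustrative stream name:

```ruby
# Same ActionCable API as with the Redis adapter; only the transport changed.
ActionCable.server.broadcast("imports_42", { status: "completed" })
```
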
config/cache.yml (new file, 15 lines)

@@ -0,0 +1,15 @@
+default: &default
+  store_options:
+    # Cap age of oldest cache entry to fulfill retention policies
+    max_age: <%= 60.days.to_i %>
+    max_size: <%= 256.megabytes %>
+    namespace: <%= Rails.env %>
+
+development:
+  <<: *default
+
+test:
+  <<: *default
+
+production:
+  <<: *default

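SolidCache enforces `max_age` and `max_size` by pruning rows from its `solid_cache_entries` table in the background, so callers keep using the plain `Rails.cache` API. A sketch with an illustrative key and block:

```ruby
# Stored as a row in the SQLite cache database; pruned once it is older
# than max_age or the table grows past max_size.
years = Rails.cache.fetch("dawarich/years-tracked/#{user.id}", expires_in: 1.hour) do
  user.points.distinct.count # illustrative query
end
```
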
@@ -9,18 +9,64 @@ default: &default
   pool: <%= ENV.fetch("RAILS_MAX_THREADS") { 10 } %>
   timeout: 5000

+sqlite_default: &sqlite_default
+  adapter: sqlite3
+  pool: <%= ENV.fetch("RAILS_MAX_THREADS") { 10 } %>
+  timeout: 5000
+
 development:
-  <<: *default
-  database: <%= ENV['DATABASE_NAME'] || 'dawarich_development' %>
+  primary:
+    <<: *default
+    database: <%= ENV['DATABASE_NAME'] || 'dawarich_development' %>
+  queue:
+    <<: *sqlite_default
+    database: <%= ENV['QUEUE_DATABASE_PATH'] || 'db/queue.sqlite3' %>
+    migrations_paths: db/queue_migrate
+  cache:
+    <<: *sqlite_default
+    database: <%= ENV['CACHE_DATABASE_PATH'] || 'db/cache.sqlite3' %>
+    migrations_paths: db/cache_migrate
+  cable:
+    <<: *sqlite_default
+    database: <%= ENV['CABLE_DATABASE_PATH'] || 'db/cable.sqlite3' %>
+    migrations_paths: db/cable_migrate

 test:
-  <<: *default
-  database: <%= ENV['DATABASE_NAME'] || 'dawarich_test' %>
+  primary:
+    <<: *default
+    database: <%= ENV['DATABASE_NAME'] || 'dawarich_test' %>

 production:
-  <<: *default
-  database: <%= ENV['DATABASE_NAME'] || 'dawarich_production' %>
+  primary:
+    <<: *default
+    database: <%= ENV['DATABASE_NAME'] || 'dawarich_production' %>
+  queue:
+    <<: *sqlite_default
+    database: <%= ENV['QUEUE_DATABASE_PATH'] || 'db/queue.sqlite3' %>
+    migrations_paths: db/queue_migrate
+  cable:
+    <<: *sqlite_default
+    database: <%= ENV['CABLE_DATABASE_PATH'] || 'db/cable.sqlite3' %>
+    migrations_paths: db/cable_migrate
+  cache:
+    <<: *sqlite_default
+    database: <%= ENV['CACHE_DATABASE_PATH'] %>
+    migrations_paths: db/cache_migrate

 staging:
-  <<: *default
-  database: <%= ENV['DATABASE_NAME'] || 'dawarich_staging' %>
+  primary:
+    <<: *default
+    database: <%= ENV['DATABASE_NAME'] || 'dawarich_staging' %>
+    password: <%= ENV['DATABASE_PASSWORD'] %>
+  cache:
+    <<: *sqlite_default
+    database: <%= ENV['CACHE_DATABASE_PATH'] || 'db/cache.sqlite3' %>
+    migrations_paths: db/cache_migrate
+  queue:
+    <<: *sqlite_default
+    database: <%= ENV['QUEUE_DATABASE_PATH'] || 'db/queue.sqlite3' %>
+    migrations_paths: db/queue_migrate
+  cable:
+    <<: *sqlite_default
+    database: <%= ENV['CABLE_DATABASE_PATH'] || 'db/cable.sqlite3' %>
+    migrations_paths: db/cable_migrate

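Every named entry here becomes its own Active Record connection: the PostgreSQL `primary` plus the SQLite `queue`, `cache`, and `cable` databases, which SolidQueue, SolidCache, and SolidCable attach to via the `connects_to` settings in the environment files below. A console sketch for checking what Rails resolved:

```ruby
# Lists each configured database for the current environment, e.g.
# primary (postgresql) plus queue/cache/cable (sqlite3).
ActiveRecord::Base.configurations.configs_for(env_name: Rails.env).each do |db|
  puts "#{db.name}: #{db.adapter} -> #{db.database}"
end
```
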
@@ -26,18 +26,18 @@ Rails.application.configure do

   # Enable/disable caching. By default caching is disabled.
   # Run rails dev:cache to toggle caching.
+  config.cache_store = :solid_cache_store
+  config.solid_cache.connects_to = { database: { writing: :cache } }
+
   if Rails.root.join('tmp/caching-dev.txt').exist?
     config.action_controller.perform_caching = true
     config.action_controller.enable_fragment_cache_logging = true

-    config.cache_store = :redis_cache_store, { url: ENV['REDIS_URL'] }
     config.public_file_server.headers = {
       'Cache-Control' => "public, max-age=#{2.days.to_i}"
     }
   else
     config.action_controller.perform_caching = false
-
-    config.cache_store = :redis_cache_store, { url: ENV['REDIS_URL'] }
   end

   config.public_file_server.enabled = true

@@ -68,6 +68,14 @@ Rails.application.configure do
   # Highlight code that enqueued background job in logs.
   config.active_job.verbose_enqueue_logs = true

+  config.active_job.queue_adapter = :solid_queue
+  config.solid_queue.silence_polling = true
+  # :queue is the name of the database connection
+  config.solid_queue.connects_to = { database: { writing: :queue } }
+
+  config.mission_control.jobs.http_basic_auth_enabled = false
+  config.solid_queue.logger = ActiveSupport::Logger.new($stdout)
+
   # Suppress logger output for asset requests.
   config.assets.quiet = true

@@ -95,7 +103,7 @@ Rails.application.configure do
   config.force_ssl = ENV.fetch('APPLICATION_PROTOCOL', 'http').downcase == 'https'

   # Direct logs to STDOUT
-  config.logger = Logger.new($stdout)
+  config.logger = ActiveSupport::Logger.new($stdout)
   config.lograge.enabled = true
   config.lograge.formatter = Lograge::Formatters::Json.new

@@ -60,7 +60,7 @@ Rails.application.configure do
   config.force_ssl = ENV.fetch('APPLICATION_PROTOCOL', 'http').downcase == 'https'

   # Direct logs to STDOUT
-  config.logger = Logger.new($stdout)
+  config.logger = ActiveSupport::Logger.new($stdout)
   config.lograge.enabled = true
   config.lograge.formatter = Lograge::Formatters::Json.new

@@ -73,11 +73,14 @@ Rails.application.configure do
   config.log_level = ENV.fetch('RAILS_LOG_LEVEL', 'info')

   # Use a different cache store in production.
   # config.cache_store = :mem_cache_store
-  config.cache_store = :redis_cache_store, { url: ENV['REDIS_URL'] }
+  config.cache_store = :solid_cache_store
+  config.solid_cache.connects_to = { database: { writing: :cache } }

   # Use a real queuing backend for Active Job (and separate queues per environment).
   # config.active_job.queue_adapter = :resque
   config.active_job.queue_adapter = :solid_queue
+  config.solid_queue.connects_to = { database: { writing: :queue } }
+  config.solid_queue.silence_polling = true
+  config.solid_queue.logger = ActiveSupport::Logger.new($stdout)
   # config.active_job.queue_name_prefix = "dawarich_production"

   config.action_mailer.perform_caching = false

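With the queue adapter switched, `perform_later` becomes an INSERT into the `solid_queue_jobs` table (schema below) in the SQLite queue database, and workers claim rows through the execution tables instead of popping from Redis. A console sketch:

```ruby
# Normal ActiveJob enqueue; with :solid_queue this writes a database row.
job = Jobs::CleanFinishedJob.perform_later

# The backing row can be inspected directly (illustrative):
SolidQueue::Job.find_by(active_job_id: job.job_id)
```
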
(deleted file)

@@ -1,3 +0,0 @@
-# frozen_string_literal: true
-
-Rails.cache.delete('dawarich/app-version-check')

@@ -43,6 +43,9 @@ preload_app!
 # Allow puma to be restarted by `bin/rails restart` command.
 plugin :tmp_restart

+# If env var is set or we're in development, solid_queue will run in puma
+plugin :solid_queue if ENV['SOLID_QUEUE_IN_PUMA'] || Rails.env.development?
+
 # Prometheus exporter
 if ENV['PROMETHEUS_EXPORTER_ENABLED'].to_s == 'true'
   require 'prometheus_exporter/instrumentation'

config/queue.yml (new file, 27 lines)

@@ -0,0 +1,27 @@
+default: &default
+  dispatchers:
+    - polling_interval: 1
+      batch_size: 500
+  workers:
+    - queues: "*"
+      threads: 3
+      processes: <%= ENV['BACKGROUND_PROCESSING_CONCURRENCY'] || ENV.fetch("JOB_CONCURRENCY", 10) %>
+      polling_interval: 2
+    - queues: imports
+      threads: 5
+      processes: 1
+      polling_interval: 1
+    - queues: exports
+      threads: 5
+      processes: 1
+      polling_interval: 2
+
+development:
+  <<: *default
+
+test:
+  <<: *default
+
+production:
+  <<: *default

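Dispatchers move due jobs from the scheduled set into the ready set in batches; each `workers:` entry then polls only the queues it names. The catch-all `"*"` pool handles everything else, while `imports` and `exports` each get a dedicated single process with five threads. A job opts into a dedicated pool simply by declaring its queue; a sketch where `process_file` is a hypothetical method:

```ruby
class ImportFileJob < ApplicationJob
  # Routed to the dedicated imports pool from config/queue.yml
  # (1 process x 5 threads) rather than the catch-all "*" workers.
  queue_as :imports

  def perform(import_id)
    Import.find(import_id).process_file # hypothetical method
  end
end
```
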
config/recurring.yml (new file, 34 lines)

@@ -0,0 +1,34 @@
+periodic_cleanup:
+  class: "Jobs::CleanFinishedJob"
+  queue: default
+  schedule: every month
+
+bulk_stats_calculating_job:
+  class: "BulkStatsCalculatingJob"
+  queue: stats
+  schedule: every hour
+
+area_visits_calculation_scheduling_job:
+  class: "AreaVisitsCalculationSchedulingJob"
+  queue: visit_suggesting
+  schedule: every day at 0:00
+
+visit_suggesting_job:
+  class: "BulkVisitsSuggestingJob"
+  queue: visit_suggesting
+  schedule: every day at 00:05
+
+watcher_job:
+  class: "Import::WatcherJob"
+  queue: imports
+  schedule: every hour
+
+app_version_checking_job:
+  class: "AppVersionCheckingJob"
+  queue: default
+  schedule: every 6 hours
+
+cache_preheating_job:
+  class: "Cache::PreheatingJob"
+  queue: default
+  schedule: every day at 0:00

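SolidQueue's scheduler parses each `schedule:` expression with Fugit (hence the `fugit` dependency in Gemfile.lock above) and records every firing in `solid_queue_recurring_executions`, whose unique `[task_key, run_at]` index ensures a tick is enqueued at most once even with multiple schedulers running. A console sketch for inspecting the loaded tasks:

```ruby
# Entries from config/recurring.yml are mirrored into the
# solid_queue_recurring_tasks table with static: true.
SolidQueue::RecurringTask.where(static: true).pluck(:key, :class_name, :schedule)
```
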
@@ -25,14 +25,20 @@ Rails.application.routes.draw do
     (u.admin? && ENV['SIDEKIQ_USERNAME'].present? && ENV['SIDEKIQ_PASSWORD'].present?)
   } do
     mount Sidekiq::Web => '/sidekiq'
+    mount MissionControl::Jobs::Engine, at: '/jobs'
   end

-  # We want to return a nice error message if the user is not authorized to access Sidekiq
+  # We want to return a nice error message if the user is not authorized to access Sidekiq or Jobs
   match '/sidekiq' => redirect { |_, request|
     request.flash[:error] = 'You are not authorized to perform this action.'
     '/'
   }, via: :get

+  match '/jobs' => redirect { |_, request|
+    request.flash[:error] = 'You are not authorized to perform this action.'
+    '/'
+  }, via: :get
+
   resources :settings, only: :index
   namespace :settings do
     resources :background_jobs, only: %i[index create destroy]

db/cable_schema.rb (new file, 24 lines)

@@ -0,0 +1,24 @@
+# This file is auto-generated from the current state of the database. Instead
+# of editing this file, please use the migrations feature of Active Record to
+# incrementally modify your database, and then regenerate this schema definition.
+#
+# This file is the source Rails uses to define your schema when running `bin/rails
+# db:schema:load`. When creating a new database, `bin/rails db:schema:load` tends to
+# be faster and is potentially less error prone than running all of your
+# migrations from scratch. Old migrations may fail to apply correctly if those
+# migrations use external dependencies or application code.
+#
+# It's strongly recommended that you check this file into your version control system.
+
+ActiveRecord::Schema[8.0].define(version: 1) do
+  create_table "solid_cable_messages", force: :cascade do |t|
+    t.binary "channel", null: false
+    t.binary "payload", null: false
+    t.datetime "created_at", null: false
+    t.bigint "channel_hash", null: false
+    t.index ["channel"], name: "index_solid_cable_messages_on_channel"
+    t.index ["channel_hash"], name: "index_solid_cable_messages_on_channel_hash"
+    t.index ["created_at"], name: "index_solid_cable_messages_on_created_at"
+    t.index ["id"], name: "index_solid_cable_messages_on_id", unique: true
+  end
+end

db/cache_schema.rb (new file, 24 lines)

@@ -0,0 +1,24 @@
+# This file is auto-generated from the current state of the database. Instead
+# of editing this file, please use the migrations feature of Active Record to
+# incrementally modify your database, and then regenerate this schema definition.
+#
+# This file is the source Rails uses to define your schema when running `bin/rails
+# db:schema:load`. When creating a new database, `bin/rails db:schema:load` tends to
+# be faster and is potentially less error prone than running all of your
+# migrations from scratch. Old migrations may fail to apply correctly if those
+# migrations use external dependencies or application code.
+#
+# It's strongly recommended that you check this file into your version control system.
+
+ActiveRecord::Schema[8.0].define(version: 1) do
+  create_table "solid_cache_entries", force: :cascade do |t|
+    t.binary "key", null: false
+    t.binary "value", null: false
+    t.datetime "created_at", null: false
+    t.bigint "key_hash", null: false
+    t.integer "byte_size", null: false
+    t.index ["byte_size"], name: "index_solid_cache_entries_on_byte_size"
+    t.index ["key_hash", "byte_size"], name: "index_solid_cache_entries_on_key_hash_and_byte_size"
+    t.index ["key_hash"], name: "index_solid_cache_entries_on_key_hash", unique: true
+  end
+end

db/queue_schema.rb (new file, 141 lines)

@@ -0,0 +1,141 @@
+# This file is auto-generated from the current state of the database. Instead
+# of editing this file, please use the migrations feature of Active Record to
+# incrementally modify your database, and then regenerate this schema definition.
+#
+# This file is the source Rails uses to define your schema when running `bin/rails
+# db:schema:load`. When creating a new database, `bin/rails db:schema:load` tends to
+# be faster and is potentially less error prone than running all of your
+# migrations from scratch. Old migrations may fail to apply correctly if those
+# migrations use external dependencies or application code.
+#
+# It's strongly recommended that you check this file into your version control system.
+
+ActiveRecord::Schema[8.0].define(version: 1) do
+  create_table "solid_queue_blocked_executions", force: :cascade do |t|
+    t.bigint "job_id", null: false
+    t.string "queue_name", null: false
+    t.integer "priority", default: 0, null: false
+    t.string "concurrency_key", null: false
+    t.datetime "expires_at", null: false
+    t.datetime "created_at", null: false
+    t.index ["concurrency_key", "priority", "job_id"], name: "index_solid_queue_blocked_executions_for_release"
+    t.index ["expires_at", "concurrency_key"], name: "index_solid_queue_blocked_executions_for_maintenance"
+    t.index ["job_id"], name: "index_solid_queue_blocked_executions_on_job_id", unique: true
+  end
+
+  create_table "solid_queue_claimed_executions", force: :cascade do |t|
+    t.bigint "job_id", null: false
+    t.bigint "process_id"
+    t.datetime "created_at", null: false
+    t.index ["job_id"], name: "index_solid_queue_claimed_executions_on_job_id", unique: true
+    t.index ["process_id", "job_id"], name: "index_solid_queue_claimed_executions_on_process_id_and_job_id"
+  end
+
+  create_table "solid_queue_failed_executions", force: :cascade do |t|
+    t.bigint "job_id", null: false
+    t.text "error"
+    t.datetime "created_at", null: false
+    t.index ["job_id"], name: "index_solid_queue_failed_executions_on_job_id", unique: true
+  end
+
+  create_table "solid_queue_jobs", force: :cascade do |t|
+    t.string "queue_name", null: false
+    t.string "class_name", null: false
+    t.text "arguments"
+    t.integer "priority", default: 0, null: false
+    t.string "active_job_id"
+    t.datetime "scheduled_at"
+    t.datetime "finished_at"
+    t.string "concurrency_key"
+    t.datetime "created_at", null: false
+    t.datetime "updated_at", null: false
+    t.index ["active_job_id"], name: "index_solid_queue_jobs_on_active_job_id"
+    t.index ["class_name"], name: "index_solid_queue_jobs_on_class_name"
+    t.index ["finished_at"], name: "index_solid_queue_jobs_on_finished_at"
+    t.index ["queue_name", "finished_at"], name: "index_solid_queue_jobs_for_filtering"
+    t.index ["scheduled_at", "finished_at"], name: "index_solid_queue_jobs_for_alerting"
+  end
+
+  create_table "solid_queue_pauses", force: :cascade do |t|
+    t.string "queue_name", null: false
+    t.datetime "created_at", null: false
+    t.index ["queue_name"], name: "index_solid_queue_pauses_on_queue_name", unique: true
+  end
+
+  create_table "solid_queue_processes", force: :cascade do |t|
+    t.string "kind", null: false
+    t.datetime "last_heartbeat_at", null: false
+    t.bigint "supervisor_id"
+    t.integer "pid", null: false
+    t.string "hostname"
+    t.text "metadata"
+    t.datetime "created_at", null: false
+    t.string "name", null: false
+    t.index ["last_heartbeat_at"], name: "index_solid_queue_processes_on_last_heartbeat_at"
+    t.index ["name", "supervisor_id"], name: "index_solid_queue_processes_on_name_and_supervisor_id", unique: true
+    t.index ["supervisor_id"], name: "index_solid_queue_processes_on_supervisor_id"
+  end
+
+  create_table "solid_queue_ready_executions", force: :cascade do |t|
+    t.bigint "job_id", null: false
+    t.string "queue_name", null: false
+    t.integer "priority", default: 0, null: false
+    t.datetime "created_at", null: false
+    t.index ["job_id"], name: "index_solid_queue_ready_executions_on_job_id", unique: true
+    t.index ["priority", "job_id"], name: "index_solid_queue_poll_all"
+    t.index ["queue_name", "priority", "job_id"], name: "index_solid_queue_poll_by_queue"
+  end
+
+  create_table "solid_queue_recurring_executions", force: :cascade do |t|
+    t.bigint "job_id", null: false
+    t.string "task_key", null: false
+    t.datetime "run_at", null: false
+    t.datetime "created_at", null: false
+    t.index ["job_id"], name: "index_solid_queue_recurring_executions_on_job_id", unique: true
+    t.index ["task_key", "run_at"], name: "index_solid_queue_recurring_executions_on_task_key_and_run_at", unique: true
+  end
+
+  create_table "solid_queue_recurring_tasks", force: :cascade do |t|
+    t.string "key", null: false
+    t.string "schedule", null: false
+    t.string "command", limit: 2048
+    t.string "class_name"
+    t.text "arguments"
+    t.string "queue_name"
+    t.integer "priority", default: 0
+    t.boolean "static", default: true, null: false
+    t.text "description"
+    t.datetime "created_at", null: false
+    t.datetime "updated_at", null: false
+    t.index ["key"], name: "index_solid_queue_recurring_tasks_on_key", unique: true
+    t.index ["static"], name: "index_solid_queue_recurring_tasks_on_static"
+  end
+
+  create_table "solid_queue_scheduled_executions", force: :cascade do |t|
+    t.bigint "job_id", null: false
+    t.string "queue_name", null: false
+    t.integer "priority", default: 0, null: false
+    t.datetime "scheduled_at", null: false
+    t.datetime "created_at", null: false
+    t.index ["job_id"], name: "index_solid_queue_scheduled_executions_on_job_id", unique: true
+    t.index ["scheduled_at", "priority", "job_id"], name: "index_solid_queue_dispatch_all"
+  end
+
+  create_table "solid_queue_semaphores", force: :cascade do |t|
+    t.string "key", null: false
+    t.integer "value", default: 1, null: false
+    t.datetime "expires_at", null: false
+    t.datetime "created_at", null: false
+    t.datetime "updated_at", null: false
+    t.index ["expires_at"], name: "index_solid_queue_semaphores_on_expires_at"
+    t.index ["key", "value"], name: "index_solid_queue_semaphores_on_key_and_value"
+    t.index ["key"], name: "index_solid_queue_semaphores_on_key", unique: true
+  end
+
+  add_foreign_key "solid_queue_blocked_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
+  add_foreign_key "solid_queue_claimed_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
+  add_foreign_key "solid_queue_failed_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
+  add_foreign_key "solid_queue_ready_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
+  add_foreign_key "solid_queue_recurring_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
+  add_foreign_key "solid_queue_scheduled_executions", "solid_queue_jobs", column: "job_id", on_delete: :cascade
+end

@@ -54,6 +54,9 @@ RUN bundle config set --local path 'vendor/bundle' \

 COPY ../. ./

+# Create caching-dev.txt file to enable Rails caching in development
+RUN mkdir -p $APP_PATH/tmp && touch $APP_PATH/tmp/caching-dev.txt
+
 COPY ./docker/web-entrypoint.sh /usr/local/bin/web-entrypoint.sh
 RUN chmod +x /usr/local/bin/web-entrypoint.sh

@@ -45,6 +45,7 @@ services:
       - dawarich_public:/var/app/public
       - dawarich_watched:/var/app/tmp/imports/watched
       - dawarich_storage:/var/app/storage
+      - dawarich_db_data:/dawarich_db_data
     networks:
       - dawarich
     ports:

@@ -62,6 +63,10 @@ services:
       DATABASE_USERNAME: postgres
       DATABASE_PASSWORD: password
       DATABASE_NAME: dawarich_development
+      # SQLite database paths for secondary databases
+      QUEUE_DATABASE_PATH: /dawarich_db_data/dawarich_development_queue.sqlite3
+      CACHE_DATABASE_PATH: /dawarich_db_data/dawarich_development_cache.sqlite3
+      CABLE_DATABASE_PATH: /dawarich_db_data/dawarich_development_cable.sqlite3
       MIN_MINUTES_SPENT_IN_CITY: 60
       APPLICATION_HOSTS: localhost
       TIME_ZONE: Europe/London

@@ -24,32 +24,89 @@ else
   DATABASE_NAME=${DATABASE_NAME}
 fi

+# Export main database variables to ensure they're available
+export DATABASE_HOST
+export DATABASE_PORT
+export DATABASE_USERNAME
+export DATABASE_PASSWORD
+export DATABASE_NAME
+
 # Remove pre-existing puma/passenger server.pid
 rm -f $APP_PATH/tmp/pids/server.pid

-echo "Attempting to create database $DATABASE_NAME if it doesn't exist..."
-PGPASSWORD=$DATABASE_PASSWORD createdb -h "$DATABASE_HOST" -p "$DATABASE_PORT" -U "$DATABASE_USERNAME" "$DATABASE_NAME" 2>/dev/null || echo "Note: Database may already exist or couldn't be created now"
-
-# Wait for the database to become available
-echo "⏳ Waiting for database to be ready..."
-until PGPASSWORD=$DATABASE_PASSWORD psql -h "$DATABASE_HOST" -p "$DATABASE_PORT" -U "$DATABASE_USERNAME" -d "$DATABASE_NAME" -c '\q' 2>/dev/null; do
-  >&2 echo "Postgres is unavailable - retrying..."
-  sleep 2
-done
-echo "✅ PostgreSQL is ready!"
-
-# Run database migrations
-echo "PostgreSQL is ready. Running database migrations..."
+# Function to check and create a PostgreSQL database
+create_database() {
+  local db_name=$1
+  local db_password=$2
+
+  echo "Attempting to create database $db_name if it doesn't exist..."
+  PGPASSWORD=$db_password createdb -h "$DATABASE_HOST" -p "$DATABASE_PORT" -U "$DATABASE_USERNAME" "$db_name" 2>/dev/null || echo "Note: Database $db_name may already exist or couldn't be created now"
+
+  # Wait for the database to become available
+  echo "⏳ Waiting for database $db_name to be ready..."
+  until PGPASSWORD=$db_password psql -h "$DATABASE_HOST" -p "$DATABASE_PORT" -U "$DATABASE_USERNAME" -d "$db_name" -c '\q' 2>/dev/null; do
+    >&2 echo "Postgres database $db_name is unavailable - retrying..."
+    sleep 2
+  done
+  echo "✅ PostgreSQL database $db_name is ready!"
+}
+
+# Set up SQLite database directory in the volume
+SQLITE_DB_DIR="/dawarich_db_data"
+mkdir -p $SQLITE_DB_DIR
+echo "Created SQLite database directory at $SQLITE_DB_DIR"
+
+# Step 1: Database Setup
+echo "Setting up all required databases..."
+
+# Create primary PostgreSQL database
+create_database "$DATABASE_NAME" "$DATABASE_PASSWORD"
+
+# Setup SQLite databases based on environment
+
+# Setup Queue database with SQLite
+QUEUE_DATABASE_PATH=${QUEUE_DATABASE_PATH:-"$SQLITE_DB_DIR/${DATABASE_NAME}_queue.sqlite3"}
+export QUEUE_DATABASE_PATH
+echo "✅ SQLite queue database configured at $QUEUE_DATABASE_PATH"
+
+# Setup Cache database with SQLite
+CACHE_DATABASE_PATH=${CACHE_DATABASE_PATH:-"$SQLITE_DB_DIR/${DATABASE_NAME}_cache.sqlite3"}
+export CACHE_DATABASE_PATH
+echo "✅ SQLite cache database configured at $CACHE_DATABASE_PATH"
+
+# Setup Cable database with SQLite (only for production and staging)
+if [ "$RAILS_ENV" = "production" ] || [ "$RAILS_ENV" = "staging" ]; then
+  CABLE_DATABASE_PATH=${CABLE_DATABASE_PATH:-"$SQLITE_DB_DIR/${DATABASE_NAME}_cable.sqlite3"}
+  export CABLE_DATABASE_PATH
+  echo "✅ SQLite cable database configured at $CABLE_DATABASE_PATH"
+fi
+
+# Step 2: Run migrations for all databases
+echo "Running migrations for all databases..."
+
+# Run primary database migrations first (needed before SQLite migrations)
+echo "Running primary database migrations..."
+bundle exec rails db:migrate
+
+# Run SQLite database migrations
+echo "Running cache database migrations..."
+bundle exec rails db:migrate:cache
+
+echo "Running queue database migrations..."
+bundle exec rails db:migrate:queue
+
+# Run cable migrations for production/staging
+if [ "$RAILS_ENV" = "production" ] || [ "$RAILS_ENV" = "staging" ]; then
+  echo "Running cable database migrations..."
+  bundle exec rails db:migrate:cable
+fi

 # Run data migrations
 echo "Running DATA migrations..."
 bundle exec rake data:migrate

-# if [ "$RAILS_ENV" != "production" ]; then
-  echo "Running seeds..."
-  bundle exec rails db:seed
-# fi
+echo "Running seeds..."
+bundle exec rails db:seed

 # run passed commands
 bundle exec ${@}

package-lock.json (generated, 51 changed lines)

@@ -10,7 +10,7 @@
         "daisyui": "^4.7.3",
         "leaflet": "^1.9.4",
         "postcss": "^8.4.49",
-        "trix": "^2.1.8"
+        "trix": "^2.1.15"
       },
       "engines": {
         "node": "18.17.1",

@@ -58,6 +58,13 @@
         "spark-md5": "^3.0.1"
       }
     },
+    "node_modules/@types/trusted-types": {
+      "version": "2.0.7",
+      "resolved": "https://registry.npmjs.org/@types/trusted-types/-/trusted-types-2.0.7.tgz",
+      "integrity": "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==",
+      "license": "MIT",
+      "optional": true
+    },
     "node_modules/camelcase-css": {
       "version": "2.0.1",
       "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz",

@@ -112,6 +119,15 @@
         "url": "https://opencollective.com/daisyui"
       }
     },
+    "node_modules/dompurify": {
+      "version": "3.2.6",
+      "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.2.6.tgz",
+      "integrity": "sha512-/2GogDQlohXPZe6D6NOgQvXLPSYBqIWMnZ8zzOhn09REE4eyAzb+Hed3jhoM9OkuaJ8P6ZGTTVWQKAi8ieIzfQ==",
+      "license": "(MPL-2.0 OR Apache-2.0)",
+      "optionalDependencies": {
+        "@types/trusted-types": "^2.0.7"
+      }
+    },
     "node_modules/fastparse": {
       "version": "1.1.2",
       "resolved": "https://registry.npmjs.org/fastparse/-/fastparse-1.1.2.tgz",

@@ -203,9 +219,13 @@
       "integrity": "sha512-wcFzz9cDfbuqe0FZzfi2or1sgyIrsDwmPwfZC4hiNidPdPINjeUwNfv5kldczoEAcjl9Y1L3SM7Uz2PUEQzxQw=="
     },
     "node_modules/trix": {
-      "version": "2.1.8",
-      "resolved": "https://registry.npmjs.org/trix/-/trix-2.1.8.tgz",
-      "integrity": "sha512-y1h5mKQcjMsZDsUOqOgyIUfw+Z31u4Fe9JqXtKGUzIC7FM9cTpxZFFWxQggwXBo18ccIKYx1Fn9toVO5mCpn9g=="
+      "version": "2.1.15",
+      "resolved": "https://registry.npmjs.org/trix/-/trix-2.1.15.tgz",
+      "integrity": "sha512-LoaXWczdTUV8+3Box92B9b1iaDVbxD14dYemZRxi3PwY+AuDm97BUJV2aHLBUFPuDABhxp0wzcbf0CxHCVmXiw==",
+      "license": "MIT",
+      "dependencies": {
+        "dompurify": "^3.2.5"
+      }
     }
   },
   "dependencies": {

@@ -244,6 +264,12 @@
         "spark-md5": "^3.0.1"
       }
     },
+    "@types/trusted-types": {
+      "version": "2.0.7",
+      "resolved": "https://registry.npmjs.org/@types/trusted-types/-/trusted-types-2.0.7.tgz",
+      "integrity": "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==",
+      "optional": true
+    },
     "camelcase-css": {
       "version": "2.0.1",
       "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz",

@@ -279,6 +305,14 @@
         "postcss-js": "^4"
       }
     },
+    "dompurify": {
+      "version": "3.2.6",
+      "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.2.6.tgz",
+      "integrity": "sha512-/2GogDQlohXPZe6D6NOgQvXLPSYBqIWMnZ8zzOhn09REE4eyAzb+Hed3jhoM9OkuaJ8P6ZGTTVWQKAi8ieIzfQ==",
+      "requires": {
+        "@types/trusted-types": "^2.0.7"
+      }
+    },
     "fastparse": {
       "version": "1.1.2",
       "resolved": "https://registry.npmjs.org/fastparse/-/fastparse-1.1.2.tgz",

@@ -328,9 +362,12 @@
       "integrity": "sha512-wcFzz9cDfbuqe0FZzfi2or1sgyIrsDwmPwfZC4hiNidPdPINjeUwNfv5kldczoEAcjl9Y1L3SM7Uz2PUEQzxQw=="
     },
     "trix": {
-      "version": "2.1.8",
-      "resolved": "https://registry.npmjs.org/trix/-/trix-2.1.8.tgz",
-      "integrity": "sha512-y1h5mKQcjMsZDsUOqOgyIUfw+Z31u4Fe9JqXtKGUzIC7FM9cTpxZFFWxQggwXBo18ccIKYx1Fn9toVO5mCpn9g=="
+      "version": "2.1.15",
+      "resolved": "https://registry.npmjs.org/trix/-/trix-2.1.15.tgz",
+      "integrity": "sha512-LoaXWczdTUV8+3Box92B9b1iaDVbxD14dYemZRxi3PwY+AuDm97BUJV2aHLBUFPuDABhxp0wzcbf0CxHCVmXiw==",
+      "requires": {
+        "dompurify": "^3.2.5"
+      }
     }
   }
 }

@@ -5,7 +5,7 @@
     "daisyui": "^4.7.3",
     "leaflet": "^1.9.4",
     "postcss": "^8.4.49",
-    "trix": "^2.1.8"
+    "trix": "^2.1.15"
   },
   "engines": {
     "node": "18.17.1",

@@ -5,5 +5,11 @@ FactoryBot.define do
     user
     name { 'owntracks_export.json' }
     source { Import.sources[:owntracks] }
+
+    trait :with_points do
+      after(:create) do |import|
+        create_list(:point, 10, import:)
+      end
+    end
   end
 end

@@ -8,11 +8,9 @@ RSpec.describe AreaVisitsCalculationSchedulingJob, type: :job do
   let(:user) { create(:user) }

   it 'calls the AreaVisitsCalculationService' do
-    Sidekiq::Testing.inline! do
-      expect(AreaVisitsCalculatingJob).to receive(:perform_later).with(user.id).and_call_original
+    expect(AreaVisitsCalculatingJob).to receive(:perform_later).with(user.id).and_call_original

-      described_class.new.perform
-    end
+    described_class.new.perform
   end
 end

@@ -52,7 +52,6 @@ RSpec.describe DataMigrations::MigratePlacesLonlatJob, type: :job do
       described_class.perform_now(user.id)
       place1.reload
-
       # SRID should be 4326 (WGS84)
       expect(place1.lonlat.srid).to eq(4326)
     end
   end

@@ -64,14 +63,6 @@ RSpec.describe DataMigrations::MigratePlacesLonlatJob, type: :job do
       end.not_to raise_error
     end
   end

-  context 'when user does not exist' do
-    it 'raises ActiveRecord::RecordNotFound' do
-      expect do
-        described_class.perform_now(-1)
-      end.to raise_error(ActiveRecord::RecordNotFound)
-    end
-  end
-
   describe 'queue' do

@@ -63,14 +63,6 @@ RSpec.describe VisitSuggestingJob, type: :job do
     end
   end

-  context 'when user not found' do
-    it 'raises an error' do
-      expect do
-        described_class.perform_now(user_id: -1, start_at: start_at, end_at: end_at)
-      end.to raise_error(ActiveRecord::RecordNotFound)
-    end
-  end
-
   context 'with string dates' do
     let(:string_start) { start_at.to_s }
     let(:string_end) { end_at.to_s }

@@ -55,16 +55,12 @@ RSpec.describe Imports::Create do

   context 'when import is successful' do
     it 'schedules stats creating' do
-      Sidekiq::Testing.inline! do
-        expect { service.call }.to \
-          have_enqueued_job(Stats::CalculatingJob).with(user.id, 2024, 3)
-      end
+      expect { service.call }.to \
+        have_enqueued_job(Stats::CalculatingJob).with(user.id, 2024, 3)
     end

     it 'schedules visit suggesting' do
-      Sidekiq::Testing.inline! do
-        expect { service.call }.to have_enqueued_job(VisitSuggestingJob)
-      end
+      expect { service.call }.to have_enqueued_job(VisitSuggestingJob)
     end
   end

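A pattern across these spec changes: the `Sidekiq::Testing` wrappers are gone because jobs now go through ActiveJob, whose test adapter records enqueues on its own, so matchers like `have_enqueued_job` work directly. A sketch of the equivalent plumbing, assuming the suite uses the standard `:test` adapter:

```ruby
# spec/rails_helper.rb (sketch)
RSpec.configure do |config|
  # Provides perform_enqueued_jobs and friends for executing jobs inline.
  config.include ActiveJob::TestHelper
end

# In an example, enqueued jobs can also be drained explicitly:
# perform_enqueued_jobs { service.call }
```
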
@@ -5,13 +5,17 @@ require 'rails_helper'
 RSpec.describe Imports::Destroy do
   describe '#call' do
     let!(:user) { create(:user) }
-    let!(:import) { create(:import, user: user) }
+    let!(:import) { create(:import, :with_points, user: user) }
     let(:service) { described_class.new(user, import) }

     it 'destroys the import' do
       expect { service.call }.to change { Import.count }.by(-1)
     end

+    it 'destroys the points' do
+      expect { service.call }.to change { Point.count }.by(-import.points.count)
+    end
+
     it 'enqueues a BulkStatsCalculatingJob' do
       expect(Stats::BulkCalculator).to receive(:new).with(user.id).and_return(double(call: nil))

@@ -10,7 +10,6 @@ RSpec.describe Imports::Watcher do

   before do
     stub_const('Imports::Watcher::WATCHED_DIR_PATH', watched_dir_path)
-    Sidekiq::Testing.inline!
   end

   after { Sidekiq::Testing.fake! }