Merge pull request #1746 from Freika/fix/daily-tracks-gen

Implement a new daily track generation job to replace the old cleanup and incremental track generation jobs.
This commit is contained in:
Evgenii Burmakin 2025-09-09 00:08:58 +02:00 committed by GitHub
commit 4c5e2b787a
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
26 changed files with 255 additions and 1267 deletions

View file

@ -71,9 +71,21 @@ jobs:
TAGS="freikin/dawarich:${VERSION}"
# Set platforms based on release type
# Set platforms based on version type and release type
PLATFORMS="linux/amd64,linux/arm64,linux/arm/v8,linux/arm/v7,linux/arm/v6"
# Check if this is a patch version (x.y.z where z > 0)
if [[ $VERSION =~ ^[0-9]+\.[0-9]+\.[1-9][0-9]*$ ]]; then
echo "Detected patch version ($VERSION) - building for AMD64 only"
PLATFORMS="linux/amd64"
elif [[ $VERSION =~ ^[0-9]+\.[0-9]+\.0$ ]]; then
echo "Detected minor version ($VERSION) - building for all platforms"
PLATFORMS="linux/amd64,linux/arm64,linux/arm/v8,linux/arm/v7,linux/arm/v6"
else
echo "Version format not recognized or non-semver - using AMD64 only for safety"
PLATFORMS="linux/amd64"
fi
# Add :rc tag for pre-releases
if [ "${{ github.event.release.prerelease }}" = "true" ]; then
TAGS="${TAGS},freikin/dawarich:rc"

View file

@ -4,6 +4,16 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
# [UNRELEASED]
## Added
- A cron job to generate daily tracks for users with new points since their last track generation. It runs every 4 hours.
## Changed
- Patch versions (x.y.z with z > 0) are now built only for the amd64 architecture to speed up the build process; minor releases still build for all platforms.
# [0.31.0] - 2025-09-04
The Search release

View file

@ -1,29 +0,0 @@
# frozen_string_literal: true

# Lightweight cleanup job that runs weekly to catch any missed track generation.
#
# This provides a safety net while avoiding the overhead of daily bulk processing.
class Tracks::CleanupJob < ApplicationJob
  queue_as :tracks
  # No retries: the next scheduled run re-covers the same untracked points,
  # so a failed run does not need its own retry.
  sidekiq_options retry: false

  # Incrementally generates tracks for every user that still has untracked
  # points at or before +older_than+ (default: one day ago).
  def perform(older_than: 1.day.ago)
    users_with_old_untracked_points(older_than).find_each do |user|
      # Process only the old untracked points
      Tracks::Generator.new(
        user,
        end_at: older_than,
        mode: :incremental
      ).call
    end
  end

  private

  # Active users with at least two points that have no track assigned and a
  # timestamp at or before +older_than+. `points.timestamp` is compared as an
  # epoch integer, hence the `..older_than.to_i` beginless-range condition.
  def users_with_old_untracked_points(older_than)
    User.active.joins(:points)
        .where(points: { track_id: nil, timestamp: ..older_than.to_i })
        .having('COUNT(points.id) >= 2') # Only users with enough points for tracks
        .group(:id)
  end
end

View file

@ -1,13 +0,0 @@
# frozen_string_literal: true

# Background job that generates tracks for a single user via Tracks::Generator.
#
# Any StandardError raised during generation is reported through
# ExceptionReporter rather than re-raised, so the job does not retry.
class Tracks::CreateJob < ApplicationJob
  queue_as :tracks

  # user_id  - id of the User to generate tracks for
  # start_at - optional lower time bound, forwarded to the generator
  # end_at   - optional upper time bound, forwarded to the generator
  # mode     - generation mode, defaults to :daily
  def perform(user_id, start_at: nil, end_at: nil, mode: :daily)
    generator = Tracks::Generator.new(User.find(user_id), start_at:, end_at:, mode:)
    generator.call
  rescue StandardError => e
    ExceptionReporter.call(e, 'Failed to create tracks for user')
  end
end

View file

@ -0,0 +1,58 @@
# frozen_string_literal: true

# Daily Track Generation Job
#
# Automatically processes new location points for all active/trial users on a
# regular schedule (recommended: every 2-4 hours).
#
# Process:
#   1. Iterates through all active or trial users
#   2. For each user, finds the end_at timestamp of their last track
#   3. Checks whether any new points exist since that timestamp
#   4. If so, enqueues the existing parallel generator in 'daily' mode
#
# The job leverages the existing parallel track generation infrastructure,
# ensuring consistency with bulk operations while providing automatic daily
# processing.
class Tracks::DailyGenerationJob < ApplicationJob
  queue_as :tracks

  def perform
    User.active_or_trial.find_each do |user|
      # Counter-cache check avoids a query for users with no points at all.
      next if user.points_count.zero?

      process_user_daily_tracks(user)
    rescue StandardError => e
      # One failing user must not abort processing of the remaining users.
      ExceptionReporter.call(e, "Failed to process daily tracks for user #{user.id}")
    end
  end

  private

  # Enqueues parallel track generation for +user+ when at least one point has
  # arrived since the last generated track.
  def process_user_daily_tracks(user)
    last_processed_timestamp = find_last_processed_timestamp(user)

    # `exists?` issues a cheap LIMIT-1 query instead of COUNT(*) over every
    # new point — we only need to know whether any new point arrived.
    return unless user.points.where('timestamp > ?', last_processed_timestamp).exists?

    Tracks::ParallelGeneratorJob.perform_later(
      user.id,
      start_at: last_processed_timestamp,
      end_at: Time.current.to_i,
      mode: 'daily'
    )
  end

  # Epoch seconds after which points count as "new": the end of the user's
  # latest track, else the user's earliest point, else one week ago
  # (covers a fresh account with no points yet).
  def find_last_processed_timestamp(user)
    last_track_end = user.tracks.maximum(:end_at)&.to_i
    return last_track_end if last_track_end

    user.points.minimum(:timestamp) || 1.week.ago.to_i
  end
end

View file

@ -1,12 +0,0 @@
# frozen_string_literal: true

# Runs the incremental-track check for a freshly created point: loads the
# user and point, then delegates the decision to Tracks::IncrementalProcessor.
class Tracks::IncrementalCheckJob < ApplicationJob
  queue_as :tracks

  def perform(user_id, point_id)
    processor = Tracks::IncrementalProcessor.new(User.find(user_id), Point.find(point_id))
    processor.call
  end
end

View file

@ -8,7 +8,7 @@ class Tracks::ParallelGeneratorJob < ApplicationJob
def perform(user_id, start_at: nil, end_at: nil, mode: :bulk, chunk_size: 1.day)
user = User.find(user_id)
session = Tracks::ParallelGenerator.new(
Tracks::ParallelGenerator.new(
user,
start_at: start_at,
end_at: end_at,

View file

@ -17,7 +17,6 @@ class Tracks::TimeChunkProcessorJob < ApplicationJob
tracks_created = process_chunk
update_session_progress(tracks_created)
rescue StandardError => e
ExceptionReporter.call(e, "Failed to process time chunk for user #{user_id}")
@ -48,9 +47,7 @@ class Tracks::TimeChunkProcessorJob < ApplicationJob
# Create tracks from segments
tracks_created = 0
segments.each do |segment_points|
if create_track_from_points_array(segment_points)
tracks_created += 1
end
tracks_created += 1 if create_track_from_points_array(segment_points)
end
tracks_created

View file

@ -17,7 +17,8 @@ class Point < ApplicationRecord
index: true
}
enum :battery_status, { unknown: 0, unplugged: 1, charging: 2, full: 3, connected_not_charging: 4, discharging: 5 }, suffix: true
enum :battery_status, { unknown: 0, unplugged: 1, charging: 2, full: 3, connected_not_charging: 4, discharging: 5 },
suffix: true
enum :trigger, {
unknown: 0, background_event: 1, circular_region_event: 2, beacon_event: 3,
report_location_message_event: 4, manual_event: 5, timer_based_event: 6,
@ -33,7 +34,6 @@ class Point < ApplicationRecord
after_create :async_reverse_geocode, if: -> { DawarichSettings.store_geodata? && !reverse_geocoded? }
after_create :set_country
after_create_commit :broadcast_coordinates
# after_create_commit :trigger_incremental_track_generation, if: -> { import_id.nil? }
# after_commit :recalculate_track, on: :update, if: -> { track.present? }
def self.without_raw_data
@ -68,7 +68,7 @@ class Point < ApplicationRecord
def country_name
# TODO: Remove the country column in the future.
read_attribute(:country_name) || self.country&.name || read_attribute(:country) || ''
read_attribute(:country_name) || country&.name || self[:country] || ''
end
private
@ -101,8 +101,4 @@ class Point < ApplicationRecord
def recalculate_track
track.recalculate_path_and_distance!
end
def trigger_incremental_track_generation
Tracks::IncrementalCheckJob.perform_later(user.id, id)
end
end

View file

@ -22,12 +22,13 @@ class User < ApplicationRecord # rubocop:disable Metrics/ClassLength
before_save :sanitize_input
validates :email, presence: true
validates :reset_password_token, uniqueness: true, allow_nil: true
attribute :admin, :boolean, default: false
attribute :points_count, :integer, default: 0
scope :active_or_trial, -> { where(status: %i[active trial]) }
enum :status, { inactive: 0, active: 1, trial: 2 }
def safe_settings

View file

@ -1,215 +0,0 @@
# frozen_string_literal: true

# This service handles both bulk and incremental track generation using a unified
# approach with different modes:
#
# - :bulk - Regenerates all tracks from scratch (replaces existing)
# - :incremental - Processes untracked points up to a specified end time
# - :daily - Processes tracks on a daily basis
#
# Key features:
# - Deterministic results (same algorithm for all modes)
# - Simple incremental processing without buffering complexity
# - Configurable time and distance thresholds from user settings
# - Automatic track statistics calculation
# - Proper handling of edge cases (empty points, incomplete segments)
#
# Usage:
#   # Bulk regeneration
#   Tracks::Generator.new(user, mode: :bulk).call
#
#   # Incremental processing
#   Tracks::Generator.new(user, mode: :incremental).call
#
#   # Daily processing
#   Tracks::Generator.new(user, start_at: Date.current, mode: :daily).call
#
class Tracks::Generator
  include Tracks::Segmentation
  include Tracks::TrackBuilder

  attr_reader :user, :start_at, :end_at, :mode

  # user     - owner of the points/tracks being processed
  # start_at - optional lower time bound (anything responding to #to_i / #to_date)
  # end_at   - optional upper time bound
  # mode     - :bulk, :incremental or :daily (coerced to a Symbol)
  def initialize(user, start_at: nil, end_at: nil, mode: :bulk)
    @user = user
    @start_at = start_at
    @end_at = end_at
    @mode = mode.to_sym
  end

  # Generates tracks for the configured range/mode and returns the number of
  # tracks created. :bulk and :daily first destroy the tracks they are about
  # to replace; :incremental only looks at points with no track yet.
  def call
    clean_existing_tracks if should_clean_tracks?

    start_timestamp, end_timestamp = get_timestamp_range

    # Segmentation (splitting by time/distance gaps) is delegated to the
    # database via this Track class method.
    segments = Track.get_segments_with_points(
      user.id,
      start_timestamp,
      end_timestamp,
      time_threshold_minutes,
      distance_threshold_meters,
      untracked_only: mode == :incremental
    )

    tracks_created = 0
    segments.each do |segment|
      track = create_track_from_segment(segment)
      tracks_created += 1 if track
    end

    tracks_created
  end

  private

  # Only :bulk and :daily replace existing tracks; :incremental appends.
  def should_clean_tracks?
    case mode
    when :bulk, :daily then true
    else false
    end
  end

  # NOTE(review): #load_points and the three load_* helpers below are not
  # referenced from #call — they may be dead code left over from an earlier
  # implementation, or hooks used by the included Segmentation/TrackBuilder
  # modules. Confirm before removing.
  def load_points
    case mode
    when :bulk then load_bulk_points
    when :incremental then load_incremental_points
    when :daily then load_daily_points
    else
      raise ArgumentError, "Tracks::Generator: Unknown mode: #{mode}"
    end
  end

  def load_bulk_points
    scope = user.points.order(:timestamp)
    scope = scope.where(timestamp: timestamp_range) if time_range_defined?
    scope
  end

  def load_incremental_points
    # For incremental mode, we process untracked points
    # If end_at is specified, only process points up to that time
    scope = user.points.where(track_id: nil).order(:timestamp)
    scope = scope.where(timestamp: ..end_at.to_i) if end_at.present?
    scope
  end

  def load_daily_points
    day_range = daily_time_range
    user.points.where(timestamp: day_range).order(:timestamp)
  end

  # Builds a Track from one segment hash; returns nil for segments with fewer
  # than two points (a track needs at least a start and an end point).
  def create_track_from_segment(segment_data)
    points = segment_data[:points]
    pre_calculated_distance = segment_data[:pre_calculated_distance]

    return unless points.size >= 2

    # create_track_from_points comes from the Tracks::TrackBuilder mixin.
    create_track_from_points(points, pre_calculated_distance)
  end

  def time_range_defined?
    start_at.present? || end_at.present?
  end

  # Range of Time objects covering start_at..end_at; open-ended on whichever
  # side is nil. Used for filtering tracks by their start_at column.
  def time_range
    return nil unless time_range_defined?

    start_time = start_at&.to_i
    end_time = end_at&.to_i

    if start_time && end_time
      Time.zone.at(start_time)..Time.zone.at(end_time)
    elsif start_time
      Time.zone.at(start_time)..
    elsif end_time
      ..Time.zone.at(end_time)
    end
  end

  # Same as #time_range but in raw epoch integers, matching points.timestamp.
  def timestamp_range
    return nil unless time_range_defined?

    start_time = start_at&.to_i
    end_time = end_at&.to_i

    if start_time && end_time
      start_time..end_time
    elsif start_time
      start_time..
    elsif end_time
      ..end_time
    end
  end

  # Epoch range covering the whole calendar day of start_at (today if unset).
  def daily_time_range
    day = start_at&.to_date || Date.current
    day.beginning_of_day.to_i..day.end_of_day.to_i
  end

  def clean_existing_tracks
    case mode
    when :bulk then clean_bulk_tracks
    when :daily then clean_daily_tracks
    else
      raise ArgumentError, "Tracks::Generator: Unknown mode: #{mode}"
    end
  end

  # destroy_all (not delete_all) so point associations are nullified via
  # callbacks/foreign keys.
  def clean_bulk_tracks
    scope = user.tracks
    scope = scope.where(start_at: time_range) if time_range_defined?
    scope.destroy_all
  end

  def clean_daily_tracks
    day_range = daily_time_range
    range = Time.zone.at(day_range.begin)..Time.zone.at(day_range.end)
    scope = user.tracks.where(start_at: range)
    scope.destroy_all
  end

  # Returns [start_epoch, end_epoch] for the SQL segmentation query.
  def get_timestamp_range
    case mode
    when :bulk then bulk_timestamp_range
    when :daily then daily_timestamp_range
    when :incremental then incremental_timestamp_range
    else
      raise ArgumentError, "Tracks::Generator: Unknown mode: #{mode}"
    end
  end

  # Explicit bounds win; otherwise span the user's first to last point.
  def bulk_timestamp_range
    return [start_at.to_i, end_at.to_i] if start_at && end_at

    first_point = user.points.order(:timestamp).first
    last_point = user.points.order(:timestamp).last

    [first_point&.timestamp || 0, last_point&.timestamp || Time.current.to_i]
  end

  def daily_timestamp_range
    day = start_at&.to_date || Date.current
    [day.beginning_of_day.to_i, day.end_of_day.to_i]
  end

  # From the earliest untracked point up to end_at (or now).
  def incremental_timestamp_range
    first_point = user.points.where(track_id: nil).order(:timestamp).first
    end_timestamp = end_at ? end_at.to_i : Time.current.to_i

    [first_point&.timestamp || 0, end_timestamp]
  end

  # Gap thresholds come from per-user settings; memoized per instance.
  def distance_threshold_meters
    @distance_threshold_meters ||= user.safe_settings.meters_between_routes.to_i
  end

  def time_threshold_minutes
    @time_threshold_minutes ||= user.safe_settings.minutes_between_routes.to_i
  end
end

View file

@ -1,92 +0,0 @@
# frozen_string_literal: true

# Decides whether a newly created point should trigger incremental track
# generation.
#
# A significant time or distance gap (per the user's settings) between the new
# point and the point before it marks the end of a journey; at that moment the
# finished journey can be turned into a track. The track is finalized at the
# previous point's timestamp — journeys close when they end, not when the next
# one starts. Imported points never trigger processing.
#
# Usage (from Point's after_create_commit callback):
#   Tracks::IncrementalProcessor.new(user, new_point).call
#
class Tracks::IncrementalProcessor
  attr_reader :user, :new_point, :previous_point

  def initialize(user, new_point)
    @user = user
    @new_point = new_point
    @previous_point = find_previous_point
  end

  # Enqueues an incremental Tracks::CreateJob when the gap thresholds are hit.
  def call
    return unless should_process?

    Tracks::CreateJob.perform_later(
      user.id,
      start_at: find_start_time,
      end_at: find_end_time,
      mode: :incremental
    )
  end

  private

  # Imported points are skipped; otherwise process when there is no prior
  # point at all, or when the gap to it exceeds a threshold.
  def should_process?
    return false if new_point.import_id.present?

    previous_point.nil? || exceeds_thresholds?(previous_point, new_point)
  end

  # Latest point strictly before the new point's timestamp (memoized).
  def find_previous_point
    @previous_point ||= user.points
                            .where('timestamp < ?', new_point.timestamp)
                            .order(:timestamp)
                            .last
  end

  # Generation resumes where the most recent track ended (nil if none).
  def find_start_time
    user.tracks.order(:end_at).last&.end_at
  end

  # The journey being finalized ends at the previous point's timestamp.
  def find_end_time
    previous_point && Time.zone.at(previous_point.timestamp)
  end

  # Either gap exceeding its threshold is enough to trigger processing.
  def exceeds_thresholds?(earlier, later)
    time_difference_minutes(earlier, later) >= time_threshold_minutes ||
      distance_difference_meters(earlier, later) >= distance_threshold_meters
  end

  def time_difference_minutes(earlier, later)
    (later.timestamp - earlier.timestamp) / 60.0
  end

  # distance_to returns kilometers, hence the conversion to meters.
  def distance_difference_meters(earlier, later)
    earlier.distance_to(later) * 1000
  end

  def time_threshold_minutes
    @time_threshold_minutes ||= user.safe_settings.minutes_between_routes.to_i
  end

  def distance_threshold_meters
    @distance_threshold_meters ||= user.safe_settings.meters_between_routes.to_i
  end
end

View file

@ -17,8 +17,7 @@ class Tracks::ParallelGenerator
end
def call
# Clean existing tracks if needed
clean_existing_tracks if should_clean_tracks?
clean_bulk_tracks if mode == :bulk
# Generate time chunks
time_chunks = generate_time_chunks
@ -40,13 +39,6 @@ class Tracks::ParallelGenerator
private
def should_clean_tracks?
case mode
when :bulk, :daily then true
else false
end
end
def generate_time_chunks
chunker = Tracks::TimeChunker.new(
user,
@ -95,15 +87,6 @@ class Tracks::ParallelGenerator
)
end
def clean_existing_tracks
case mode
when :bulk then clean_bulk_tracks
when :daily then clean_daily_tracks
else
raise ArgumentError, "Unknown mode: #{mode}"
end
end
def clean_bulk_tracks
if time_range_defined?
user.tracks.where(start_at: time_range).destroy_all
@ -112,13 +95,6 @@ class Tracks::ParallelGenerator
end
end
def clean_daily_tracks
day_range = daily_time_range
range = Time.zone.at(day_range.begin)..Time.zone.at(day_range.end)
user.tracks.where(start_at: range).destroy_all
end
def time_range_defined?
start_at.present? || end_at.present?
end
@ -162,8 +138,8 @@ class Tracks::ParallelGenerator
else
# Convert seconds to readable format
seconds = duration.to_i
if seconds >= 86400 # days
days = seconds / 86400
if seconds >= 86_400 # days
days = seconds / 86_400
"#{days} day#{'s' if days != 1}"
elsif seconds >= 3600 # hours
hours = seconds / 3600

View file

@ -21,8 +21,8 @@
# time_threshold_minutes methods.
#
# Used by:
# - Tracks::Generator for splitting points during track generation
# - Tracks::CreateFromPoints for legacy compatibility
# - Tracks::ParallelGenerator and related jobs for splitting points during parallel track generation
# - Tracks::BoundaryDetector for cross-chunk track merging
#
# Example usage:
# class MyTrackProcessor

View file

@ -44,7 +44,7 @@ class Tracks::SessionManager
def get_session_data
data = Rails.cache.read(cache_key)
return nil unless data
# Rails.cache already deserializes the data, no need for JSON parsing
data
end
@ -149,4 +149,4 @@ class Tracks::SessionManager
def cache_key
"#{CACHE_KEY_PREFIX}:user:#{user_id}:session:#{session_id}"
end
end
end

View file

@ -25,7 +25,7 @@
# This ensures consistency when users change their distance unit preferences.
#
# Used by:
# - Tracks::Generator for creating tracks during generation
# - Tracks::ParallelGenerator and related jobs for creating tracks during parallel generation
# - Any class that needs to convert point arrays to Track records
#
# Example usage:
@ -60,7 +60,7 @@ module Tracks::TrackBuilder
)
# TODO: Move trips attrs to columns with more precision and range
track.distance = [[pre_calculated_distance.round, 999999.99].min, 0].max
track.distance = [[pre_calculated_distance.round, 999_999.99].min, 0].max
track.duration = calculate_duration(points)
track.avg_speed = calculate_average_speed(track.distance, track.duration)
@ -103,7 +103,7 @@ module Tracks::TrackBuilder
speed_kmh = (speed_mps * 3.6).round(2) # m/s to km/h
# Cap the speed to prevent database precision overflow (max 999999.99)
[speed_kmh, 999999.99].min
[speed_kmh, 999_999.99].min
end
def calculate_elevation_stats(points)
@ -145,6 +145,6 @@ module Tracks::TrackBuilder
private
def user
raise NotImplementedError, "Including class must implement user method"
raise NotImplementedError, 'Including class must implement user method'
end
end

View file

@ -30,12 +30,12 @@ cache_preheating_job:
class: "Cache::PreheatingJob"
queue: default
# tracks_cleanup_job:
# cron: "0 2 * * 0" # every Sunday at 02:00
# class: "Tracks::CleanupJob"
# queue: tracks
place_name_fetching_job:
cron: "30 0 * * *" # every day at 00:30
class: "Places::BulkNameFetchingJob"
queue: places
daily_track_generation_job:
cron: "0 */4 * * *" # every 4 hours
class: "Tracks::DailyGenerationJob"
queue: tracks

View file

@ -2,33 +2,9 @@
class CreateTracksFromPoints < ActiveRecord::Migration[8.0]
def up
puts "Starting bulk track creation for all users..."
# this data migration used to create tracks from existing points. It was deprecated
total_users = User.count
processed_users = 0
User.find_each do |user|
points_count = user.points.count
if points_count > 0
puts "Enqueuing track creation for user #{user.id} (#{points_count} points)"
# Use explicit parameters for bulk historical processing:
# - No time limits (start_at: nil, end_at: nil) = process ALL historical data
Tracks::CreateJob.perform_later(
user.id,
start_at: nil,
end_at: nil,
mode: :bulk
)
processed_users += 1
else
puts "Skipping user #{user.id} (no tracked points)"
end
end
puts "Enqueued track creation jobs for #{processed_users}/#{total_users} users"
nil
end
def down

View file

@ -1,80 +0,0 @@
# frozen_string_literal: true

require 'rails_helper'

# Specs for the weekly safety-net job that generates tracks from old
# untracked points.
RSpec.describe Tracks::CleanupJob, type: :job do
  let(:user) { create(:user) }

  describe '#perform' do
    context 'with old untracked points' do
      # Two old points (2 days and 1 day ago) — enough to form a track.
      let!(:old_points) do
        create_points_around(user: user, count: 1, base_lat: 20.0, timestamp: 2.days.ago.to_i)
        create_points_around(user: user, count: 1, base_lat: 20.0, timestamp: 1.day.ago.to_i)
      end
      # Recent points must be ignored by the cleanup run.
      let!(:recent_points) do
        create_points_around(user: user, count: 2, base_lat: 20.0, timestamp: 1.hour.ago.to_i)
      end
      let(:generator) { instance_double(Tracks::Generator) }

      it 'processes only old untracked points' do
        expect(Tracks::Generator).to receive(:new)
          .and_return(generator)
        expect(generator).to receive(:call)

        described_class.new.perform(older_than: 1.day.ago)
      end
    end

    context 'with users having insufficient points' do
      # A single point cannot form a track (minimum is two).
      let!(:single_point) do
        create_points_around(user: user, count: 1, base_lat: 20.0, timestamp: 2.days.ago.to_i)
      end

      it 'skips users with less than 2 points' do
        expect(Tracks::Generator).not_to receive(:new)

        described_class.new.perform(older_than: 1.day.ago)
      end
    end

    context 'with no old untracked points' do
      # All old points already belong to a track, so nothing is untracked.
      let(:track) { create(:track, user: user) }
      let!(:tracked_points) do
        create_points_around(user: user, count: 3, base_lat: 20.0, timestamp: 2.days.ago.to_i, track: track)
      end

      it 'does not process any users' do
        expect(Tracks::Generator).not_to receive(:new)

        described_class.new.perform(older_than: 1.day.ago)
      end
    end

    context 'with custom older_than parameter' do
      let!(:points) do
        create_points_around(user: user, count: 3, base_lat: 20.0, timestamp: 3.days.ago.to_i)
      end
      let(:generator) { instance_double(Tracks::Generator) }

      it 'uses custom threshold' do
        expect(Tracks::Generator).to receive(:new)
          .and_return(generator)
        expect(generator).to receive(:call)

        described_class.new.perform(older_than: 2.days.ago)
      end
    end
  end

  describe 'job configuration' do
    it 'uses tracks queue' do
      expect(described_class.queue_name).to eq('tracks')
    end

    it 'does not retry on failure' do
      expect(described_class.sidekiq_options_hash['retry']).to be false
    end
  end
end

View file

@ -1,134 +0,0 @@
# frozen_string_literal: true

require 'rails_helper'

# Specs for the job wrapping Tracks::Generator; the generator itself is always
# stubbed here — only parameter forwarding and error handling are asserted.
RSpec.describe Tracks::CreateJob, type: :job do
  let(:user) { create(:user) }

  describe '#perform' do
    let(:generator_instance) { instance_double(Tracks::Generator) }

    before do
      allow(Tracks::Generator).to receive(:new).and_return(generator_instance)
      # NOTE(review): the next stub is immediately overridden by the
      # `.and_return(2)` stub below — the bare one is redundant.
      allow(generator_instance).to receive(:call)
      allow(generator_instance).to receive(:call).and_return(2)
    end

    # NOTE(review): despite the description, no notification is asserted here.
    it 'calls the generator and creates a notification' do
      described_class.new.perform(user.id)

      expect(Tracks::Generator).to have_received(:new).with(
        user,
        start_at: nil,
        end_at: nil,
        mode: :daily
      )
      expect(generator_instance).to have_received(:call)
    end

    context 'with custom parameters' do
      let(:start_at) { 1.day.ago.beginning_of_day.to_i }
      let(:end_at) { 1.day.ago.end_of_day.to_i }
      let(:mode) { :daily }

      # NOTE(review): this before block duplicates the outer one.
      before do
        allow(Tracks::Generator).to receive(:new).and_return(generator_instance)
        allow(generator_instance).to receive(:call)
        allow(generator_instance).to receive(:call).and_return(1)
      end

      it 'passes custom parameters to the generator' do
        described_class.new.perform(user.id, start_at: start_at, end_at: end_at, mode: mode)

        expect(Tracks::Generator).to have_received(:new).with(
          user,
          start_at: start_at,
          end_at: end_at,
          mode: :daily
        )
        expect(generator_instance).to have_received(:call)
      end
    end

    context 'when generator raises an error' do
      let(:error_message) { 'Something went wrong' }

      before do
        allow(Tracks::Generator).to receive(:new).and_return(generator_instance)
        allow(generator_instance).to receive(:call).and_raise(StandardError, error_message)
        allow(ExceptionReporter).to receive(:call)
      end

      it 'reports the error using ExceptionReporter' do
        allow(ExceptionReporter).to receive(:call)

        described_class.new.perform(user.id)

        expect(ExceptionReporter).to have_received(:call).with(
          kind_of(StandardError),
          'Failed to create tracks for user'
        )
      end
    end

    context 'when user does not exist' do
      before do
        allow(User).to receive(:find).with(999).and_raise(ActiveRecord::RecordNotFound)
        allow(ExceptionReporter).to receive(:call)
        allow(Notifications::Create).to receive(:new).and_return(instance_double(Notifications::Create, call: nil))
      end

      # RecordNotFound is a StandardError, so the job's rescue swallows it.
      it 'handles the error gracefully and creates error notification' do
        expect { described_class.new.perform(999) }.not_to raise_error
        expect(ExceptionReporter).to have_received(:call)
      end
    end

    context 'when tracks are deleted and recreated' do
      let(:existing_tracks) { create_list(:track, 3, user: user) }

      before do
        allow(generator_instance).to receive(:call).and_return(2)
      end

      it 'returns the correct count of newly created tracks' do
        described_class.new.perform(user.id, mode: :incremental)

        expect(Tracks::Generator).to have_received(:new).with(
          user,
          start_at: nil,
          end_at: nil,
          mode: :incremental
        )
        expect(generator_instance).to have_received(:call)
      end
    end
  end

  describe 'queue' do
    it 'is queued on tracks queue' do
      expect(described_class.new.queue_name).to eq('tracks')
    end
  end

  context 'when not self-hosted' do
    let(:generator_instance) { instance_double(Tracks::Generator) }
    let(:notification_service) { instance_double(Notifications::Create) }
    let(:error_message) { 'Something went wrong' }

    before do
      allow(DawarichSettings).to receive(:self_hosted?).and_return(false)
      allow(Tracks::Generator).to receive(:new).and_return(generator_instance)
      allow(generator_instance).to receive(:call).and_raise(StandardError, error_message)
      allow(Notifications::Create).to receive(:new).and_return(notification_service)
      allow(notification_service).to receive(:call)
    end

    it 'does not create a failure notification' do
      described_class.new.perform(user.id)

      expect(notification_service).not_to have_received(:call)
    end
  end
end

View file

@ -0,0 +1,120 @@
# frozen_string_literal: true

require 'rails_helper'

# Specs for the periodic job that fans out per-user parallel track generation.
RSpec.describe Tracks::DailyGenerationJob, type: :job do
  describe '#perform' do
    let!(:active_user) { create(:user, settings: { 'minutes_between_routes' => 60, 'meters_between_routes' => 500 }) }
    let!(:trial_user) { create(:user, :trial) }
    let!(:inactive_user) { create(:user, :inactive) }

    # Old tracks establish a "last processed" timestamp for each user …
    let!(:active_user_old_track) do
      create(:track, user: active_user, start_at: 2.days.ago, end_at: 2.days.ago + 1.hour)
    end
    # … and these points arrive after it, so the user counts as having new data.
    let!(:active_user_new_points) do
      create_list(:point, 3, user: active_user, timestamp: 1.hour.ago.to_i)
    end

    let!(:trial_user_old_track) do
      create(:track, user: trial_user, start_at: 3.days.ago, end_at: 3.days.ago + 1.hour)
    end
    let!(:trial_user_new_points) do
      create_list(:point, 2, user: trial_user, timestamp: 30.minutes.ago.to_i)
    end

    before do
      # The job short-circuits on points_count, so keep the counter in sync.
      active_user.update!(points_count: active_user.points.count)
      trial_user.update!(points_count: trial_user.points.count)
      ActiveJob::Base.queue_adapter.enqueued_jobs.clear
    end

    it 'processes all active and trial users' do
      expect { described_class.perform_now }.to \
        have_enqueued_job(Tracks::ParallelGeneratorJob).twice
    end

    it 'does not process inactive users' do
      # Clear points and tracks to make destruction possible
      Point.destroy_all
      Track.destroy_all

      # Remove active and trial users to isolate test
      active_user.destroy
      trial_user.destroy

      expect do
        described_class.perform_now
      end.not_to have_enqueued_job(Tracks::ParallelGeneratorJob)
    end

    it 'enqueues correct number of parallel generation jobs for users with new points' do
      expect { described_class.perform_now }.to \
        have_enqueued_job(Tracks::ParallelGeneratorJob).exactly(2).times
    end

    it 'enqueues parallel generation job for active user with correct parameters' do
      expect { described_class.perform_now }.to \
        have_enqueued_job(Tracks::ParallelGeneratorJob).with(
          active_user.id,
          hash_including(mode: 'daily')
        )
    end

    it 'enqueues parallel generation job for trial user' do
      expect { described_class.perform_now }.to \
        have_enqueued_job(Tracks::ParallelGeneratorJob).with(
          trial_user.id,
          hash_including(mode: 'daily')
        )
    end

    it 'does not enqueue jobs for users without new points' do
      Point.destroy_all

      expect { described_class.perform_now }.not_to \
        have_enqueued_job(Tracks::ParallelGeneratorJob)
    end

    context 'when processing fails' do
      before do
        # NOTE(review): allow_any_instance_of is discouraged by rspec-mocks,
        # but is the simplest way to fail every user here.
        allow_any_instance_of(User).to receive(:tracks).and_raise(StandardError, 'Database error')
        allow(ExceptionReporter).to receive(:call)
        active_user.update!(points_count: 5)
        trial_user.update!(points_count: 3)
      end

      it 'does not raise errors when processing fails' do
        expect { described_class.perform_now }.not_to raise_error
      end

      it 'reports exceptions when processing fails' do
        described_class.perform_now

        expect(ExceptionReporter).to have_received(:call).at_least(:once)
      end
    end

    context 'when user has no points' do
      let!(:empty_user) { create(:user) }

      it 'skips users with no points' do
        expect { described_class.perform_now }.not_to \
          have_enqueued_job(Tracks::ParallelGeneratorJob).with(empty_user.id, any_args)
      end
    end

    context 'when user has tracks but no new points' do
      let!(:user_with_current_tracks) { create(:user) }
      let!(:recent_points) { create_list(:point, 2, user: user_with_current_tracks, timestamp: 1.hour.ago.to_i) }
      # Track ends after the points above, so nothing counts as "new".
      let!(:recent_track) do
        create(:track, user: user_with_current_tracks, start_at: 1.hour.ago, end_at: 30.minutes.ago)
      end

      it 'skips users without new points since last track' do
        expect { described_class.perform_now }.not_to \
          have_enqueued_job(Tracks::ParallelGeneratorJob).with(user_with_current_tracks.id, any_args)
      end
    end
  end
end

View file

@ -1,39 +0,0 @@
# frozen_string_literal: true

require 'rails_helper'

# Specs for the job that delegates a new point to Tracks::IncrementalProcessor.
RSpec.describe Tracks::IncrementalCheckJob, type: :job do
  let(:user) { create(:user) }
  let(:point) { create(:point, user: user) }

  describe '#perform' do
    context 'with valid parameters' do
      let(:processor) { instance_double(Tracks::IncrementalProcessor) }

      it 'calls the incremental processor' do
        expect(Tracks::IncrementalProcessor).to receive(:new)
          .with(user, point)
          .and_return(processor)
        expect(processor).to receive(:call)

        described_class.new.perform(user.id, point.id)
      end
    end
  end

  describe 'job configuration' do
    it 'uses tracks queue' do
      expect(described_class.queue_name).to eq('tracks')
    end
  end

  describe 'integration with ActiveJob' do
    it 'enqueues the job' do
      expect do
        described_class.perform_later(user.id, point.id)
      end.to have_enqueued_job(described_class)
        .with(user.id, point.id)
    end
  end
end

View file

@ -125,31 +125,10 @@ RSpec.describe Tracks::ParallelGeneratorJob do
describe 'integration with existing track job patterns' do
let!(:point) { create(:point, user: user, timestamp: 1.day.ago.to_i) }
it 'follows the same notification pattern as Tracks::CreateJob' do
# Compare with existing Tracks::CreateJob behavior
# Should create similar notifications and handle errors similarly
expect {
job.perform(user.id)
}.not_to raise_error
end
it 'can be queued and executed' do
expect {
expect do
described_class.perform_later(user.id)
}.to have_enqueued_job(described_class).with(user.id)
end
it 'supports the same parameter structure as Tracks::CreateJob' do
# Should accept the same parameters that would be passed to Tracks::CreateJob
expect {
described_class.perform_later(
user.id,
start_at: 1.week.ago,
end_at: Time.current,
mode: :daily
)
}.to have_enqueued_job(described_class)
end.to have_enqueued_job(described_class).with(user.id)
end
end
end

View file

@ -1,260 +0,0 @@
# frozen_string_literal: true

require 'rails_helper'

# Specs for Tracks::Generator, the synchronous track-building service.
# Covers the three generation modes (:bulk, :incremental, :daily),
# threshold-based segmentation, and cross-mode determinism.
RSpec.describe Tracks::Generator do
  let(:user) { create(:user) }
  let(:safe_settings) { user.safe_settings }

  before do
    # Pin safe_settings to a known object so individual examples can stub
    # threshold readers (minutes_between_routes / meters_between_routes) on it.
    allow(user).to receive(:safe_settings).and_return(safe_settings)
  end

  describe '#call' do
    context 'with bulk mode' do
      let(:generator) { described_class.new(user, mode: :bulk) }

      context 'with sufficient points' do
        # NOTE(review): create_points_around is a project spec helper — presumably
        # it creates `count` points clustered near base_lat; confirm in spec/support.
        let!(:points) { create_points_around(user: user, count: 5, base_lat: 20.0) }

        it 'generates tracks from all points' do
          expect { generator.call }.to change(Track, :count).by(1)
        end

        it 'cleans existing tracks' do
          # Bulk mode is a full rebuild: pre-existing tracks must be removed.
          existing_track = create(:track, user: user)
          generator.call
          expect(Track.exists?(existing_track.id)).to be false
        end

        it 'associates points with created tracks' do
          generator.call
          expect(points.map(&:reload).map(&:track)).to all(be_present)
        end

        it 'properly handles point associations when cleaning existing tracks' do
          # Create existing tracks with associated points
          existing_track = create(:track, user: user)
          existing_points = create_list(:point, 3, user: user, track: existing_track)
          # Verify points are associated
          expect(existing_points.map(&:reload).map(&:track_id)).to all(eq(existing_track.id))
          # Run generator which should clean existing tracks and create new ones
          generator.call
          # Verify the old track is deleted
          expect(Track.exists?(existing_track.id)).to be false
          # Verify the points are no longer associated with the deleted track
          expect(existing_points.map(&:reload).map(&:track_id)).to all(be_nil)
        end
      end

      context 'with insufficient points' do
        # A single point cannot form a track.
        let!(:points) { create_points_around(user: user, count: 1, base_lat: 20.0) }

        it 'does not create tracks' do
          expect { generator.call }.not_to change(Track, :count)
        end
      end

      context 'with time range' do
        let!(:old_points) { create_points_around(user: user, count: 3, base_lat: 20.0, timestamp: 2.days.ago.to_i) }
        let!(:new_points) { create_points_around(user: user, count: 3, base_lat: 21.0, timestamp: 1.day.ago.to_i) }

        it 'only processes points within range' do
          generator = described_class.new(
            user,
            start_at: 1.day.ago.beginning_of_day,
            end_at: 1.day.ago.end_of_day,
            mode: :bulk
          )
          generator.call
          track = Track.last
          # Only the 3 points from yesterday fall inside [start_at, end_at].
          expect(track.points.count).to eq(3)
        end
      end
    end

    context 'with incremental mode' do
      let(:generator) { described_class.new(user, mode: :incremental) }

      context 'with untracked points' do
        let!(:points) { create_points_around(user: user, count: 3, base_lat: 22.0, track_id: nil) }

        it 'processes untracked points' do
          expect { generator.call }.to change(Track, :count).by(1)
        end

        it 'associates points with created tracks' do
          generator.call
          expect(points.map(&:reload).map(&:track)).to all(be_present)
        end
      end

      context 'with end_at specified' do
        let!(:early_points) { create_points_around(user: user, count: 2, base_lat: 23.0, timestamp: 2.hours.ago.to_i) }
        let!(:late_points) { create_points_around(user: user, count: 2, base_lat: 24.0, timestamp: 1.hour.ago.to_i) }

        it 'only processes points up to end_at' do
          # Cutoff at 1.5h ago keeps only the two "early" points.
          generator = described_class.new(user, end_at: 1.5.hours.ago, mode: :incremental)
          generator.call
          expect(Track.count).to eq(1)
          expect(Track.first.points.count).to eq(2)
        end
      end

      context 'without existing tracks' do
        let!(:points) { create_points_around(user: user, count: 3, base_lat: 25.0) }

        it 'does not clean existing tracks' do
          # Incremental mode must be additive: pre-existing tracks survive.
          existing_track = create(:track, user: user)
          generator.call
          expect(Track.exists?(existing_track.id)).to be true
        end
      end
    end

    context 'with daily mode' do
      let(:today) { Date.current }
      let(:generator) { described_class.new(user, start_at: today, mode: :daily) }
      let!(:today_points) { create_points_around(user: user, count: 3, base_lat: 26.0, timestamp: today.beginning_of_day.to_i) }
      let!(:yesterday_points) { create_points_around(user: user, count: 3, base_lat: 27.0, timestamp: 1.day.ago.to_i) }

      it 'only processes points from specified day' do
        generator.call
        track = Track.last
        expect(track.points.count).to eq(3)
      end

      it 'cleans existing tracks for the day' do
        # Daily mode rebuilds only the tracks overlapping the target day.
        existing_track = create(:track, user: user, start_at: today.beginning_of_day)
        generator.call
        expect(Track.exists?(existing_track.id)).to be false
      end

      it 'properly handles point associations when cleaning daily tracks' do
        # Create existing tracks with associated points for today
        existing_track = create(:track, user: user, start_at: today.beginning_of_day)
        existing_points = create_list(:point, 3, user: user, track: existing_track)
        # Verify points are associated
        expect(existing_points.map(&:reload).map(&:track_id)).to all(eq(existing_track.id))
        # Run generator which should clean existing tracks for the day and create new ones
        generator.call
        # Verify the old track is deleted
        expect(Track.exists?(existing_track.id)).to be false
        # Verify the points are no longer associated with the deleted track
        expect(existing_points.map(&:reload).map(&:track_id)).to all(be_nil)
      end
    end

    context 'with empty points' do
      let(:generator) { described_class.new(user, mode: :bulk) }

      it 'does not create tracks' do
        expect { generator.call }.not_to change(Track, :count)
      end
    end

    context 'with threshold configuration' do
      let(:generator) { described_class.new(user, mode: :bulk) }

      before do
        allow(safe_settings).to receive(:meters_between_routes).and_return(1000)
        allow(safe_settings).to receive(:minutes_between_routes).and_return(90)
      end

      it 'uses configured thresholds' do
        # Private readers are exercised via #send on purpose: the thresholds
        # have no public accessor on the generator.
        expect(generator.send(:distance_threshold_meters)).to eq(1000)
        expect(generator.send(:time_threshold_minutes)).to eq(90)
      end
    end

    context 'with invalid mode' do
      it 'raises argument error' do
        expect do
          described_class.new(user, mode: :invalid).call
        end.to raise_error(ArgumentError, /Unknown mode/)
      end
    end
  end

  describe 'segmentation behavior' do
    let(:generator) { described_class.new(user, mode: :bulk) }

    context 'with points exceeding time threshold' do
      let!(:points) do
        [
          create_points_around(user: user, count: 1, base_lat: 29.0, timestamp: 90.minutes.ago.to_i),
          create_points_around(user: user, count: 1, base_lat: 29.0, timestamp: 60.minutes.ago.to_i),
          # Gap exceeds threshold 👇👇👇
          create_points_around(user: user, count: 1, base_lat: 29.0, timestamp: 10.minutes.ago.to_i),
          create_points_around(user: user, count: 1, base_lat: 29.0, timestamp: Time.current.to_i)
        ]
      end

      before do
        # 45-minute limit splits the 50-minute gap above into two segments.
        allow(safe_settings).to receive(:minutes_between_routes).and_return(45)
      end

      it 'creates separate tracks for segments' do
        expect { generator.call }.to change(Track, :count).by(2)
      end
    end

    context 'with points exceeding distance threshold' do
      let!(:points) do
        [
          create_points_around(user: user, count: 2, base_lat: 29.0, timestamp: 20.minutes.ago.to_i),
          create_points_around(user: user, count: 2, base_lat: 29.0, timestamp: 15.minutes.ago.to_i),
          # Large distance jump 👇👇👇
          create_points_around(user: user, count: 2, base_lat: 28.0, timestamp: 10.minutes.ago.to_i),
          create_points_around(user: user, count: 1, base_lat: 28.0, timestamp: Time.current.to_i)
        ]
      end

      before do
        # 200 m limit splits the ~1-degree latitude jump into two segments.
        allow(safe_settings).to receive(:meters_between_routes).and_return(200)
      end

      it 'creates separate tracks for segments' do
        expect { generator.call }.to change(Track, :count).by(2)
      end
    end
  end

  describe 'deterministic behavior' do
    let!(:points) { create_points_around(user: user, count: 10, base_lat: 28.0) }

    it 'produces same results for bulk and incremental modes' do
      # Generate tracks in bulk mode
      bulk_generator = described_class.new(user, mode: :bulk)
      bulk_generator.call
      bulk_tracks = user.tracks.order(:start_at).to_a
      # Clear tracks and generate incrementally
      user.tracks.destroy_all
      incremental_generator = described_class.new(user, mode: :incremental)
      incremental_generator.call
      incremental_tracks = user.tracks.order(:start_at).to_a
      # Should have same number of tracks
      expect(incremental_tracks.size).to eq(bulk_tracks.size)
      # Should have same track boundaries (allowing for small timing differences)
      bulk_tracks.zip(incremental_tracks).each do |bulk_track, incremental_track|
        expect(incremental_track.start_at).to be_within(1.second).of(bulk_track.start_at)
        expect(incremental_track.end_at).to be_within(1.second).of(bulk_track.end_at)
        expect(incremental_track.distance).to be_within(10).of(bulk_track.distance)
      end
    end
  end
end

View file

@ -1,249 +0,0 @@
# frozen_string_literal: true

require 'rails_helper'

# Specs for Tracks::IncrementalProcessor, which decides — for each newly
# created point — whether a Tracks::CreateJob should be enqueued, based on
# time/distance thresholds between the new point and the previous one.
RSpec.describe Tracks::IncrementalProcessor do
  let(:user) { create(:user) }
  let(:safe_settings) { user.safe_settings }

  before do
    # Pin safe_settings and give every example known default thresholds:
    # 30 minutes between routes, 500 meters between routes.
    allow(user).to receive(:safe_settings).and_return(safe_settings)
    allow(safe_settings).to receive(:minutes_between_routes).and_return(30)
    allow(safe_settings).to receive(:meters_between_routes).and_return(500)
  end

  describe '#call' do
    context 'with imported points' do
      # Points attached to an Import are handled by the import pipeline,
      # not by incremental processing.
      let(:imported_point) { create(:point, user: user, import: create(:import)) }
      let(:processor) { described_class.new(user, imported_point) }

      it 'does not process imported points' do
        expect(Tracks::CreateJob).not_to receive(:perform_later)
        processor.call
      end
    end

    context 'with first point for user' do
      let(:new_point) { create(:point, user: user) }
      let(:processor) { described_class.new(user, new_point) }

      it 'processes first point' do
        # With no previous point, the whole (unbounded) range is processed.
        expect(Tracks::CreateJob).to receive(:perform_later)
          .with(user.id, start_at: nil, end_at: nil, mode: :incremental)
        processor.call
      end
    end

    context 'with thresholds exceeded' do
      let(:previous_point) { create(:point, user: user, timestamp: 1.hour.ago.to_i) }
      let(:new_point) { create(:point, user: user, timestamp: Time.current.to_i) }
      let(:processor) { described_class.new(user, new_point) }

      before do
        # Create previous point first
        previous_point
      end

      it 'processes when time threshold exceeded' do
        # end_at is the previous point's timestamp: the gap means the old
        # segment ends there and a new one begins with new_point.
        expect(Tracks::CreateJob).to receive(:perform_later)
          .with(user.id, start_at: nil, end_at: Time.zone.at(previous_point.timestamp), mode: :incremental)
        processor.call
      end
    end

    context 'with existing tracks' do
      let(:existing_track) { create(:track, user: user, end_at: 2.hours.ago) }
      let(:previous_point) { create(:point, user: user, timestamp: 1.hour.ago.to_i) }
      let(:new_point) { create(:point, user: user, timestamp: Time.current.to_i) }
      let(:processor) { described_class.new(user, new_point) }

      before do
        existing_track
        previous_point
      end

      it 'uses existing track end time as start_at' do
        # Processing resumes where the last generated track left off.
        expect(Tracks::CreateJob).to receive(:perform_later)
          .with(user.id, start_at: existing_track.end_at, end_at: Time.zone.at(previous_point.timestamp), mode: :incremental)
        processor.call
      end
    end

    context 'with distance threshold exceeded' do
      let(:previous_point) do
        create(:point, user: user, timestamp: 10.minutes.ago.to_i, lonlat: 'POINT(0 0)')
      end
      let(:new_point) do
        create(:point, user: user, timestamp: Time.current.to_i, lonlat: 'POINT(1 1)')
      end
      let(:processor) { described_class.new(user, new_point) }

      before do
        # Create previous point first
        previous_point
        # Mock distance calculation to exceed threshold
        allow_any_instance_of(Point).to receive(:distance_to).and_return(1.0) # 1 km = 1000m
      end

      it 'processes when distance threshold exceeded' do
        expect(Tracks::CreateJob).to receive(:perform_later)
          .with(user.id, start_at: nil, end_at: Time.zone.at(previous_point.timestamp), mode: :incremental)
        processor.call
      end
    end

    context 'with thresholds not exceeded' do
      let(:previous_point) { create(:point, user: user, timestamp: 10.minutes.ago.to_i) }
      let(:new_point) { create(:point, user: user, timestamp: Time.current.to_i) }
      let(:processor) { described_class.new(user, new_point) }

      before do
        # Create previous point first
        previous_point
        # Mock distance to be within threshold
        allow_any_instance_of(Point).to receive(:distance_to).and_return(0.1) # 100m
      end

      it 'does not process when thresholds not exceeded' do
        expect(Tracks::CreateJob).not_to receive(:perform_later)
        processor.call
      end
    end
  end

  # The remaining groups exercise private predicates/helpers via #send,
  # documenting the decision logic in isolation.
  describe '#should_process?' do
    let(:processor) { described_class.new(user, new_point) }

    context 'with imported point' do
      let(:new_point) { create(:point, user: user, import: create(:import)) }

      it 'returns false' do
        expect(processor.send(:should_process?)).to be false
      end
    end

    context 'with first point for user' do
      let(:new_point) { create(:point, user: user) }

      it 'returns true' do
        expect(processor.send(:should_process?)).to be true
      end
    end

    context 'with thresholds exceeded' do
      let(:previous_point) { create(:point, user: user, timestamp: 1.hour.ago.to_i) }
      let(:new_point) { create(:point, user: user, timestamp: Time.current.to_i) }

      before do
        previous_point # Create previous point
      end

      it 'returns true when time threshold exceeded' do
        expect(processor.send(:should_process?)).to be true
      end
    end

    context 'with thresholds not exceeded' do
      let(:previous_point) { create(:point, user: user, timestamp: 10.minutes.ago.to_i) }
      let(:new_point) { create(:point, user: user, timestamp: Time.current.to_i) }

      before do
        previous_point # Create previous point
        allow_any_instance_of(Point).to receive(:distance_to).and_return(0.1) # 100m
      end

      it 'returns false when thresholds not exceeded' do
        expect(processor.send(:should_process?)).to be false
      end
    end
  end

  describe '#exceeds_thresholds?' do
    let(:processor) { described_class.new(user, new_point) }
    let(:previous_point) { create(:point, user: user, timestamp: 1.hour.ago.to_i) }
    let(:new_point) { create(:point, user: user, timestamp: Time.current.to_i) }

    context 'with time threshold exceeded' do
      before do
        allow(safe_settings).to receive(:minutes_between_routes).and_return(30)
      end

      it 'returns true' do
        result = processor.send(:exceeds_thresholds?, previous_point, new_point)
        expect(result).to be true
      end
    end

    context 'with distance threshold exceeded' do
      before do
        # Time threshold set high so only the distance condition can trigger.
        allow(safe_settings).to receive(:minutes_between_routes).and_return(120) # 2 hours
        allow(safe_settings).to receive(:meters_between_routes).and_return(400)
        allow_any_instance_of(Point).to receive(:distance_to).and_return(0.5) # 500m
      end

      it 'returns true' do
        result = processor.send(:exceeds_thresholds?, previous_point, new_point)
        expect(result).to be true
      end
    end

    context 'with neither threshold exceeded' do
      before do
        allow(safe_settings).to receive(:minutes_between_routes).and_return(120) # 2 hours
        allow(safe_settings).to receive(:meters_between_routes).and_return(600)
        allow_any_instance_of(Point).to receive(:distance_to).and_return(0.1) # 100m
      end

      it 'returns false' do
        result = processor.send(:exceeds_thresholds?, previous_point, new_point)
        expect(result).to be false
      end
    end
  end

  describe '#time_difference_minutes' do
    let(:processor) { described_class.new(user, new_point) }
    let(:point1) { create(:point, user: user, timestamp: 1.hour.ago.to_i) }
    let(:point2) { create(:point, user: user, timestamp: Time.current.to_i) }
    let(:new_point) { point2 }

    it 'calculates time difference in minutes' do
      result = processor.send(:time_difference_minutes, point1, point2)
      expect(result).to be_within(1).of(60) # Approximately 60 minutes
    end
  end

  describe '#distance_difference_meters' do
    let(:processor) { described_class.new(user, new_point) }
    let(:point1) { create(:point, user: user) }
    let(:point2) { create(:point, user: user) }
    let(:new_point) { point2 }

    before do
      # distance_to is stubbed in kilometers; the helper converts to meters.
      allow(point1).to receive(:distance_to).with(point2).and_return(1.5) # 1.5 km
    end

    it 'calculates distance difference in meters' do
      result = processor.send(:distance_difference_meters, point1, point2)
      expect(result).to eq(1500) # 1.5 km = 1500 m
    end
  end

  describe 'threshold configuration' do
    let(:processor) { described_class.new(user, create(:point, user: user)) }

    before do
      allow(safe_settings).to receive(:minutes_between_routes).and_return(45)
      allow(safe_settings).to receive(:meters_between_routes).and_return(750)
    end

    it 'uses configured time threshold' do
      expect(processor.send(:time_threshold_minutes)).to eq(45)
    end

    it 'uses configured distance threshold' do
      expect(processor.send(:distance_threshold_meters)).to eq(750)
    end
  end
end

View file

@ -80,15 +80,15 @@ RSpec.describe Tracks::ParallelGenerator do
end
it 'enqueues time chunk processor jobs' do
expect {
expect do
generator.call
}.to have_enqueued_job(Tracks::TimeChunkProcessorJob).at_least(:once)
end.to have_enqueued_job(Tracks::TimeChunkProcessorJob).at_least(:once)
end
it 'enqueues boundary resolver job with delay' do
expect {
expect do
generator.call
}.to have_enqueued_job(Tracks::BoundaryResolverJob).at(be >= 5.minutes.from_now)
end.to have_enqueued_job(Tracks::BoundaryResolverJob).at(be >= 5.minutes.from_now)
end
it 'logs the operation' do
@ -108,9 +108,9 @@ RSpec.describe Tracks::ParallelGenerator do
end
it 'does not enqueue any jobs' do
expect {
expect do
generator.call
}.not_to have_enqueued_job
end.not_to have_enqueued_job
end
end
@ -133,14 +133,14 @@ RSpec.describe Tracks::ParallelGenerator do
context 'daily mode' do
let(:options) { { mode: :daily, start_at: 1.day.ago.beginning_of_day } }
it 'cleans tracks for the specific day' do
it 'preserves existing tracks' do
expect(user.tracks.count).to eq(2)
generator.call
# Should only clean tracks from the specified day
# Daily mode should preserve all existing tracks
remaining_tracks = user.tracks.count
expect(remaining_tracks).to be < 2
expect(remaining_tracks).to eq(2)
end
end
@ -191,37 +191,21 @@ RSpec.describe Tracks::ParallelGenerator do
create(:point, user: user, timestamp: (10 - i).days.ago.to_i)
end
expect {
expect do
generator.call
}.to have_enqueued_job(Tracks::BoundaryResolverJob)
end.to have_enqueued_job(Tracks::BoundaryResolverJob)
.with(user.id, kind_of(String))
end
it 'ensures minimum delay for boundary resolver' do
# Even with few chunks, should have minimum delay
expect {
expect do
generator.call
}.to have_enqueued_job(Tracks::BoundaryResolverJob)
end.to have_enqueued_job(Tracks::BoundaryResolverJob)
.at(be >= 5.minutes.from_now)
end
end
context 'error handling in private methods' do
it 'handles unknown mode in should_clean_tracks?' do
generator.instance_variable_set(:@mode, :unknown)
expect(generator.send(:should_clean_tracks?)).to be false
end
it 'raises error for unknown mode in clean_existing_tracks' do
generator.instance_variable_set(:@mode, :unknown)
expect {
generator.send(:clean_existing_tracks)
}.to raise_error(ArgumentError, 'Unknown mode: unknown')
end
end
context 'user settings integration' do
let(:mock_settings) { double('SafeSettings') }
@ -277,15 +261,17 @@ RSpec.describe Tracks::ParallelGenerator do
describe '#enqueue_chunk_jobs' do
let(:session_id) { 'test-session' }
let(:chunks) { [
{ chunk_id: 'chunk1', start_timestamp: 1.day.ago.to_i },
{ chunk_id: 'chunk2', start_timestamp: 2.days.ago.to_i }
] }
let(:chunks) do
[
{ chunk_id: 'chunk1', start_timestamp: 1.day.ago.to_i },
{ chunk_id: 'chunk2', start_timestamp: 2.days.ago.to_i }
]
end
it 'enqueues job for each chunk' do
expect {
expect do
generator.send(:enqueue_chunk_jobs, session_id, chunks)
}.to have_enqueued_job(Tracks::TimeChunkProcessorJob)
end.to have_enqueued_job(Tracks::TimeChunkProcessorJob)
.exactly(2).times
end
@ -303,24 +289,24 @@ RSpec.describe Tracks::ParallelGenerator do
let(:session_id) { 'test-session' }
it 'enqueues boundary resolver with estimated delay' do
expect {
expect do
generator.send(:enqueue_boundary_resolver, session_id, 5)
}.to have_enqueued_job(Tracks::BoundaryResolverJob)
end.to have_enqueued_job(Tracks::BoundaryResolverJob)
.with(user.id, session_id)
.at(be >= 2.minutes.from_now)
end
it 'uses minimum delay for small chunk counts' do
expect {
expect do
generator.send(:enqueue_boundary_resolver, session_id, 1)
}.to have_enqueued_job(Tracks::BoundaryResolverJob)
end.to have_enqueued_job(Tracks::BoundaryResolverJob)
.at(be >= 5.minutes.from_now)
end
it 'scales delay with chunk count' do
expect {
expect do
generator.send(:enqueue_boundary_resolver, session_id, 20)
}.to have_enqueued_job(Tracks::BoundaryResolverJob)
end.to have_enqueued_job(Tracks::BoundaryResolverJob)
.at(be >= 10.minutes.from_now)
end
end