Refactor tracks jobs and services

This commit is contained in:
Eugene Burmakin 2025-07-16 22:22:33 +02:00
parent 54aaf03453
commit c31d09e5c3
40 changed files with 1524 additions and 1896 deletions

View file

@ -1,22 +0,0 @@
# frozen_string_literal: true

# Daily job that enqueues track creation for every user.
#
# Each user is processed from the end of their last track (or from their
# oldest point when no tracks exist yet) up to the given end_at time.
#
# Manual invocation examples:
#   Tracks::BulkCreatingJob.perform_later(start_at: 1.week.ago, end_at: Time.current)
#   Tracks::BulkCreatingJob.perform_later(user_ids: [1, 2, 3])
#   Tracks::BulkCreatingJob.perform_later(end_at: Time.current) # recommended
class Tracks::BulkCreatingJob < ApplicationJob
  queue_as :tracks
  sidekiq_options retry: false

  def perform(start_at: nil, end_at: 1.day.ago.end_of_day, user_ids: [])
    creator = Tracks::BulkTrackCreator.new(start_at: start_at, end_at: end_at, user_ids: user_ids)
    creator.call
  end
end

View file

@ -0,0 +1,45 @@
# frozen_string_literal: true

# Background job for bulk track generation.
#
# Regenerates a user's tracks from scratch via Tracks::Generator in :bulk
# mode, which cleans existing tracks and rebuilds them from points.
# Typical uses: initial generation after a data import, full recalculation
# after settings changes, or manual regeneration requested by a user.
#
# Parameters:
# - user_id:  the user whose tracks should be generated
# - start_at: optional lower bound for the processed time window
# - end_at:   optional upper bound for the processed time window
class Tracks::BulkGeneratorJob < ApplicationJob
  queue_as :default

  def perform(user_id, start_at: nil, end_at: nil)
    user = User.find(user_id)

    Rails.logger.info "Starting bulk track generation for user #{user_id}, " \
                      "start_at: #{start_at}, end_at: #{end_at}"

    Tracks::Generator.new(user, start_at: start_at, end_at: end_at, mode: :bulk).call

    Rails.logger.info "Completed bulk track generation for user #{user_id}"
  rescue ActiveRecord::RecordNotFound => e
    # Missing records cannot be fixed by retrying, so log and swallow.
    Rails.logger.error "Record not found in bulk track generation: #{e.message}"
  rescue StandardError => e
    Rails.logger.error "Error in bulk track generation for user #{user_id}: #{e.message}"
    Rails.logger.error e.backtrace.join("\n")
    raise # re-raise so the job backend's retry logic kicks in
  end
end

View file

@ -0,0 +1,36 @@
# frozen_string_literal: true

# Lightweight cleanup job that runs weekly to catch any missed track generation.
# This replaces the daily bulk creation job with a more targeted approach.
#
# Instead of processing all users daily, this job only processes users who have
# untracked points that are older than a threshold (e.g., 1 day), indicating
# they may have been missed by incremental processing.
#
# This provides a safety net while avoiding the overhead of daily bulk processing.
class Tracks::CleanupJob < ApplicationJob
  queue_as :tracks
  sidekiq_options retry: false

  # older_than: cutoff time; only points recorded before it are considered.
  def perform(older_than: 1.day.ago)
    users_with_old_untracked_points(older_than).find_each do |user|
      Rails.logger.info "Processing missed tracks for user #{user.id}"

      # Process only the old untracked points
      Tracks::Generator.new(
        user,
        end_at: older_than,
        mode: :incremental
      ).call
    end
  end

  private

  # Active users having at least two track-less points older than the cutoff
  # (fewer than two points can never form a track).
  # NOTE(review): the beginless range compares against older_than.to_i, which
  # assumes tracked_points.timestamp is an integer epoch column — confirm
  # against the schema.
  def users_with_old_untracked_points(older_than)
    User.active.joins(:tracked_points)
        .where(tracked_points: { track_id: nil, timestamp: ..older_than.to_i })
        .having('COUNT(tracked_points.id) >= 2') # Only users with enough points for tracks
        .group(:id)
  end
end

View file

@ -1,11 +1,30 @@
# frozen_string_literal: true
class Tracks::CreateJob < ApplicationJob
queue_as :default
queue_as :tracks
def perform(user_id, start_at: nil, end_at: nil, cleaning_strategy: :replace)
def perform(user_id, start_at: nil, end_at: nil, mode: :daily)
user = User.find(user_id)
tracks_created = Tracks::CreateFromPoints.new(user, start_at:, end_at:, cleaning_strategy:).call
# Translate mode parameter to Generator mode
generator_mode = case mode
when :daily then :daily
when :none then :incremental
else :bulk
end
# Count tracks before generation
tracks_before = user.tracks.count
Tracks::Generator.new(
user,
start_at: start_at,
end_at: end_at,
mode: generator_mode
).call
# Calculate tracks created
tracks_created = user.tracks.count - tracks_before
create_success_notification(user, tracks_created)
rescue StandardError => e

View file

@ -0,0 +1,12 @@
# frozen_string_literal: true

# Enqueued per newly created point to let Tracks::IncrementalProcessor
# decide whether incremental track generation should run for the owner.
class Tracks::IncrementalCheckJob < ApplicationJob
  queue_as :tracks

  def perform(user_id, point_id)
    owner = User.find(user_id)
    trigger_point = Point.find(point_id)

    Tracks::IncrementalProcessor.new(owner, trigger_point).call
  end
end

View file

@ -1,30 +0,0 @@
# frozen_string_literal: true

# Runs incremental track generation for one user and one day, retrying up
# to three times on failure.
class Tracks::IncrementalGeneratorJob < ApplicationJob
  queue_as :default
  sidekiq_options retry: 3

  def perform(user_id, day = nil, grace_period_minutes = 5)
    user = User.find(user_id)
    day = day ? Date.parse(day.to_s) : Date.current

    Rails.logger.info "Starting incremental track generation for user #{user.id}, day #{day}"

    generator(user, day, grace_period_minutes).call
  rescue StandardError => e
    ExceptionReporter.call(e, 'Incremental track generation failed')
    raise e
  end

  private

  # Generator wired with the incremental strategies: day-scoped point
  # loading, Redis buffering for unfinished segments, and no cleanup of
  # existing tracks.
  def generator(user, day, grace_period_minutes)
    @generator ||= Tracks::Generator.new(
      user,
      point_loader: Tracks::PointLoaders::IncrementalLoader.new(user, day),
      incomplete_segment_handler: Tracks::IncompleteSegmentHandlers::BufferHandler.new(user, day, grace_period_minutes),
      track_cleaner: Tracks::Cleaners::NoOpCleaner.new(user)
    )
  end
end

View file

@ -105,9 +105,6 @@ class Point < ApplicationRecord
end
def trigger_incremental_track_generation
point_date = Time.zone.at(timestamp).to_date
return if point_date < 1.day.ago.to_date
Tracks::IncrementalGeneratorJob.perform_later(user_id, point_date.to_s, 5)
Tracks::IncrementalCheckJob.perform_later(user.id, id)
end
end

View file

@ -7,7 +7,7 @@ class PointsLimitExceeded
def call
return false if DawarichSettings.self_hosted?
return true if @user.points.count >= points_limit
return true if @user.tracked_points.count >= points_limit
false
end

View file

@ -1,47 +0,0 @@
# frozen_string_literal: true

module Tracks
  # Enqueues Tracks::CreateJob for every active user (or a given subset),
  # covering the window from each user's last track up to end_at.
  class BulkTrackCreator
    def initialize(start_at: nil, end_at: 1.day.ago.end_of_day, user_ids: [])
      @start_at = start_at&.to_datetime
      @end_at = end_at&.to_datetime
      @user_ids = user_ids
    end

    def call
      users.find_each do |user|
        next if user.tracked_points.empty?

        window_start = start_at || start_time(user)
        next unless user.tracked_points.where(timestamp: window_start.to_i..end_at.to_i).exists?

        Tracks::CreateJob.perform_later(
          user.id,
          start_at: window_start,
          end_at: end_at,
          cleaning_strategy: :daily
        )
      end
    end

    private

    attr_reader :start_at, :end_at, :user_ids

    # All active users, optionally restricted to the requested ids.
    def users
      return User.active if user_ids.empty?

      User.active.where(id: user_ids)
    end

    # Where to resume for a user: after their latest track if any,
    # otherwise at their oldest point, otherwise the start of yesterday.
    def start_time(user)
      latest_track = user.tracks.order(end_at: :desc).first
      return latest_track.end_at if latest_track

      oldest_point = user.tracked_points.order(:timestamp).first
      oldest_point ? Time.zone.at(oldest_point.timestamp) : 1.day.ago.beginning_of_day
    end
  end
end

View file

@ -1,116 +0,0 @@
# frozen_string_literal: true

# Track cleaning strategy for daily track processing.
#
# Handles tracks that OVERLAP the given time window (not only those fully
# contained in it), so cross-day tracks are treated correctly and no
# points are orphaned:
# 1. Finds tracks overlapping the window
# 2. Detaches only the points that fall inside the window
# 3. Deletes tracks left empty (or with < 2 points, too few to be valid)
# 4. Shrinks the boundaries of tracks that extend beyond the window
#
# Example:
#   Tracks::Cleaners::DailyCleaner
#     .new(user, start_at: 1.day.ago.beginning_of_day, end_at: 1.day.ago.end_of_day)
#     .cleanup
module Tracks
  module Cleaners
    class DailyCleaner
      attr_reader :user, :start_at, :end_at

      # NOTE(review): start_at/end_at are used both via Time.zone.at and
      # via to_i, implying they are epoch seconds — confirm with callers.
      def initialize(user, start_at: nil, end_at: nil)
        @user = user
        @start_at = start_at
        @end_at = end_at
      end

      # Processes every overlapping track; no-op unless both window
      # boundaries are present.
      def cleanup
        return unless start_at.present? && end_at.present?

        overlapping_tracks = find_overlapping_tracks
        return if overlapping_tracks.empty?

        Rails.logger.info "Processing #{overlapping_tracks.count} overlapping tracks for user #{user.id} in time window #{start_at} to #{end_at}"

        overlapping_tracks.each do |track|
          process_overlapping_track(track)
        end
      end

      private

      def find_overlapping_tracks
        # A track overlaps if: track_start < window_end AND track_end > window_start
        user.tracks.where(
          '(start_at < ? AND end_at > ?)',
          Time.zone.at(end_at),
          Time.zone.at(start_at)
        )
      end

      def process_overlapping_track(track)
        # Points of this track that fall inside our time window
        points_in_window = track.points.where(
          'timestamp >= ? AND timestamp <= ?',
          start_at.to_i,
          end_at.to_i
        )

        if points_in_window.empty?
          Rails.logger.debug "Track #{track.id} has no points in time window, skipping"
          return
        end

        # BUGFIX: capture the count BEFORE detaching. After
        # update_all(track_id: nil) the relation no longer matches these
        # points, so counting afterwards always logged 0.
        removed_count = points_in_window.count
        points_in_window.update_all(track_id: nil)
        Rails.logger.debug "Removed #{removed_count} points from track #{track.id}"

        remaining_points_count = track.points.count

        if remaining_points_count.zero?
          # Track is now empty, delete it
          Rails.logger.debug "Track #{track.id} is now empty, deleting"
          track.destroy!
        elsif remaining_points_count < 2
          # Too few points to form a valid track: orphan them and delete it
          Rails.logger.debug "Track #{track.id} has insufficient points (#{remaining_points_count}), deleting"
          track.points.update_all(track_id: nil)
          track.destroy!
        else
          # Track still has valid points outside our window; fix boundaries
          Rails.logger.debug "Track #{track.id} still has #{remaining_points_count} points, updating boundaries"
          update_track_boundaries(track)
        end
      end

      # Resets start_at/end_at to the first/last remaining point.
      def update_track_boundaries(track)
        remaining_points = track.points.order(:timestamp)
        return if remaining_points.empty?

        track.update!(
          start_at: Time.zone.at(remaining_points.first.timestamp),
          end_at: Time.zone.at(remaining_points.last.timestamp)
        )
      end
    end
  end
end

View file

@ -1,16 +0,0 @@
# frozen_string_literal: true

module Tracks
  module Cleaners
    # Cleaning strategy that deliberately does nothing.
    #
    # Used for incremental processing, where existing tracks must be
    # preserved and only new tracks are appended.
    class NoOpCleaner
      def initialize(user)
        @user = user
      end

      # Intentionally a no-op; returns nil.
      def cleanup; end
    end
  end
end

View file

@ -1,69 +0,0 @@
# frozen_string_literal: true

# Track cleaning strategy for bulk track regeneration.
#
# This cleaner removes existing tracks before generating new ones,
# ensuring a clean slate for bulk processing without duplicate tracks.
#
# How it works:
# 1. Finds all existing tracks for the user within the specified time range
# 2. Detaches all points from these tracks (sets track_id to nil)
# 3. Destroys the existing track records
# 4. Allows the generator to create fresh tracks from the same points
#
# Used primarily for:
# - Bulk track regeneration after settings changes
# - Reprocessing historical data with updated algorithms
# - Ensuring consistency when tracks need to be rebuilt
#
# The cleaner respects optional time boundaries (start_at/end_at) to enable
# partial regeneration of tracks within specific time windows.
#
# This strategy is essential for bulk operations but should not be used
# for incremental processing where existing tracks should be preserved.
#
# Example usage:
#   cleaner = Tracks::Cleaners::ReplaceCleaner.new(user, start_at: 1.week.ago, end_at: Time.current)
#   cleaner.cleanup
module Tracks
  module Cleaners
    class ReplaceCleaner
      attr_reader :user, :start_at, :end_at

      def initialize(user, start_at: nil, end_at: nil)
        @user = user
        @start_at = start_at
        @end_at = end_at
      end

      # Detaches points from the matched tracks first so no point is
      # orphan-deleted along with its track, then destroys the tracks.
      def cleanup
        tracks_to_remove = find_tracks_to_remove
        if tracks_to_remove.any?
          Rails.logger.info "Removing #{tracks_to_remove.count} existing tracks for user #{user.id}"
          Point.where(track_id: tracks_to_remove.ids).update_all(track_id: nil)
          tracks_to_remove.destroy_all
        end
      end

      private

      # Containment filter: only tracks lying entirely inside the
      # [start_at, end_at] window are selected (tracks that merely overlap
      # the window are preserved).
      # NOTE(review): Time.zone.at implies start_at/end_at are epoch
      # seconds — confirm callers don't pass Time/DateTime objects here.
      def find_tracks_to_remove
        scope = user.tracks
        if start_at.present?
          scope = scope.where('start_at >= ?', Time.zone.at(start_at))
        end
        if end_at.present?
          scope = scope.where('end_at <= ?', Time.zone.at(end_at))
        end
        scope
      end
    end
  end
end

View file

@ -1,73 +0,0 @@
# frozen_string_literal: true

# Facade that wires Tracks::Generator with the strategy objects matching
# the requested cleaning_strategy (:replace, :daily or :none) and runs it.
class Tracks::CreateFromPoints
  include Tracks::Segmentation
  include Tracks::TrackBuilder

  attr_reader :user, :start_at, :end_at, :cleaning_strategy

  def initialize(user, start_at: nil, end_at: nil, cleaning_strategy: :replace)
    @user = user
    @start_at = start_at
    @end_at = end_at
    @cleaning_strategy = cleaning_strategy
  end

  # Builds and runs the generator; returns Tracks::Generator#call's result.
  def call
    generator = Tracks::Generator.new(
      user,
      point_loader: point_loader,
      incomplete_segment_handler: incomplete_segment_handler,
      track_cleaner: track_cleaner
    )

    generator.call
  end

  # Expose threshold properties for tests.
  # BUGFIX: was `...meters_between_routes.to_i || 500` — `to_i` never
  # returns nil, so the intended 500 m default was dead code and a missing
  # setting silently produced a threshold of 0.
  def distance_threshold_meters
    @distance_threshold_meters ||= (user.safe_settings.meters_between_routes || 500).to_i
  end

  # BUGFIX: same precedence issue; the 60-minute default now applies.
  def time_threshold_minutes
    @time_threshold_minutes ||= (user.safe_settings.minutes_between_routes || 60).to_i
  end

  private

  def point_loader
    @point_loader ||=
      Tracks::PointLoaders::BulkLoader.new(
        user, start_at: start_at, end_at: end_at
      )
  end

  def incomplete_segment_handler
    @incomplete_segment_handler ||=
      Tracks::IncompleteSegmentHandlers::IgnoreHandler.new(user)
  end

  # Picks the cleaner for the chosen strategy; unknown values fall back to
  # the :replace behavior.
  def track_cleaner
    @track_cleaner ||=
      case cleaning_strategy
      when :daily
        Tracks::Cleaners::DailyCleaner.new(user, start_at: start_at, end_at: end_at)
      when :none
        Tracks::Cleaners::NoOpCleaner.new(user)
      else # :replace (default)
        Tracks::Cleaners::ReplaceCleaner.new(user, start_at: start_at, end_at: end_at)
      end
  end

  # Legacy method for backward compatibility with tests
  # Delegates to segmentation module logic
  def should_start_new_track?(current_point, previous_point)
    should_start_new_segment?(current_point, previous_point)
  end

  # Legacy method for backward compatibility with tests
  # Delegates to segmentation module logic
  def calculate_distance_kilometers(point1, point2)
    calculate_distance_kilometers_between_points(point1, point2)
  end
end

View file

@ -1,108 +1,162 @@
# frozen_string_literal: true
# The core track generation engine that orchestrates the entire process of creating tracks from GPS points.
# Simplified track generation service that replaces the complex strategy pattern.
#
# This class uses a flexible strategy pattern to handle different track generation scenarios:
# - Bulk processing: Generate all tracks at once from existing points
# - Incremental processing: Generate tracks as new points arrive
# This service handles both bulk and incremental track generation using a unified
# approach with different modes:
#
# How it works:
# 1. Uses a PointLoader strategy to load points from the database
# 2. Applies segmentation logic to split points into track segments based on time/distance gaps
# 3. Determines which segments should be finalized into tracks vs buffered for later
# 4. Creates Track records from finalized segments with calculated statistics
# 5. Manages cleanup of existing tracks based on the chosen strategy
# - :bulk - Regenerates all tracks from scratch (replaces existing)
# - :incremental - Processes untracked points up to a specified end time
# - :daily - Processes tracks on a daily basis
#
# Strategy Components:
# - point_loader: Loads points from database (BulkLoader, IncrementalLoader)
# - incomplete_segment_handler: Handles segments that aren't ready to finalize (IgnoreHandler, BufferHandler)
# - track_cleaner: Manages existing tracks when regenerating (ReplaceCleaner, NoOpCleaner)
# The service maintains the same core logic as the original system but simplifies
# the architecture by removing the multiple strategy classes in favor of
# mode-based configuration.
#
# The class includes Tracks::Segmentation for splitting logic and Tracks::TrackBuilder for track creation.
# Distance and time thresholds are configurable per user via their settings.
# Key features:
# - Deterministic results (same algorithm for all modes)
# - Simple incremental processing without buffering complexity
# - Configurable time and distance thresholds from user settings
# - Automatic track statistics calculation
# - Proper handling of edge cases (empty points, incomplete segments)
#
# Example usage:
# generator = Tracks::Generator.new(
# user,
# point_loader: Tracks::PointLoaders::BulkLoader.new(user),
# incomplete_segment_handler: Tracks::IncompleteSegmentHandlers::IgnoreHandler.new(user),
# track_cleaner: Tracks::Cleaners::ReplaceCleaner.new(user)
# )
# tracks_created = generator.call
# Usage:
# # Bulk regeneration
# Tracks::Generator.new(user, mode: :bulk).call
#
module Tracks
class Generator
include Tracks::Segmentation
include Tracks::TrackBuilder
# # Incremental processing
# Tracks::Generator.new(user, mode: :incremental).call
#
# # Daily processing
# Tracks::Generator.new(user, start_at: Date.current, mode: :daily).call
#
class Tracks::Generator
include Tracks::Segmentation
include Tracks::TrackBuilder
attr_reader :user, :point_loader, :incomplete_segment_handler, :track_cleaner
attr_reader :user, :start_at, :end_at, :mode
def initialize(user, point_loader:, incomplete_segment_handler:, track_cleaner:)
@user = user
@point_loader = point_loader
@incomplete_segment_handler = incomplete_segment_handler
@track_cleaner = track_cleaner
end
def initialize(user, start_at: nil, end_at: nil, mode: :bulk)
@user = user
@start_at = start_at
@end_at = end_at
@mode = mode.to_sym
end
def call
Rails.logger.info "Starting track generation for user #{user.id}"
def call
clean_existing_tracks if should_clean_tracks?
tracks_created = 0
points = load_points
Rails.logger.debug "Generator: loaded #{points.size} points for user #{user.id} in #{mode} mode"
return if points.empty?
Point.transaction do
# Clean up existing tracks if needed
track_cleaner.cleanup
segments = split_points_into_segments(points)
Rails.logger.debug "Generator: created #{segments.size} segments"
# Load points using the configured strategy
points = point_loader.load_points
segments.each { |segment| create_track_from_segment(segment) }
if points.empty?
Rails.logger.info "No points to process for user #{user.id}"
return 0
end
Rails.logger.info "Generated #{segments.size} tracks for user #{user.id} in #{mode} mode"
end
Rails.logger.info "Processing #{points.size} points for user #{user.id}"
private
# Apply segmentation logic
segments = split_points_into_segments(points)
Rails.logger.info "Created #{segments.size} segments for user #{user.id}"
# Process each segment
segments.each do |segment_points|
next if segment_points.size < 2
if incomplete_segment_handler.should_finalize_segment?(segment_points)
# Create track from finalized segment
track = create_track_from_points(segment_points)
if track&.persisted?
tracks_created += 1
Rails.logger.debug "Created track #{track.id} with #{segment_points.size} points"
end
else
# Handle incomplete segment according to strategy
incomplete_segment_handler.handle_incomplete_segment(segment_points)
Rails.logger.debug "Stored #{segment_points.size} points as incomplete segment"
end
end
# Cleanup any processed buffered data
incomplete_segment_handler.cleanup_processed_data
end
Rails.logger.info "Completed track generation for user #{user.id}: #{tracks_created} tracks created"
tracks_created
end
private
# Required by Tracks::Segmentation module
def distance_threshold_meters
@distance_threshold_meters ||= user.safe_settings.meters_between_routes.to_i || 500
end
def time_threshold_minutes
@time_threshold_minutes ||= user.safe_settings.minutes_between_routes.to_i || 60
def should_clean_tracks?
case mode
when :bulk then true
when :daily then true
when :incremental then false
else false
end
end
def load_points
case mode
when :bulk then load_bulk_points
when :incremental then load_incremental_points
when :daily then load_daily_points
else
raise ArgumentError, "Unknown mode: #{mode}"
end
end
def load_bulk_points
scope = user.tracked_points.order(:timestamp)
scope = scope.where(timestamp: time_range) if time_range_defined?
scope
end
def load_incremental_points
# For incremental mode, we process untracked points
# If end_at is specified, only process points up to that time
scope = user.tracked_points.where(track_id: nil).order(:timestamp)
scope = scope.where(timestamp: ..end_at.to_i) if end_at.present?
scope
end
def load_daily_points
day_range = daily_time_range
user.tracked_points.where(timestamp: day_range).order(:timestamp)
end
def create_track_from_segment(segment)
Rails.logger.debug "Generator: processing segment with #{segment.size} points"
return unless segment.size >= 2
track = create_track_from_points(segment)
Rails.logger.debug "Generator: created track #{track&.id}"
track
end
def time_range_defined?
start_at.present? || end_at.present?
end
def time_range
return nil unless time_range_defined?
Time.at(start_at&.to_i)..Time.at(end_at&.to_i)
end
def daily_time_range
day = start_at&.to_date || Date.current
day.beginning_of_day.to_i..day.end_of_day.to_i
end
def incremental_mode?
mode == :incremental
end
def clean_existing_tracks
case mode
when :bulk
clean_bulk_tracks
when :daily
clean_daily_tracks
end
end
def clean_bulk_tracks
scope = user.tracks
scope = scope.where(start_at: time_range) if time_range_defined?
deleted_count = scope.delete_all
Rails.logger.info "Deleted #{deleted_count} existing tracks for user #{user.id}"
end
def clean_daily_tracks
day_range_times = daily_time_range.map { |timestamp| Time.at(timestamp) }
range = Range.new(day_range_times.first, day_range_times.last)
deleted_count = user.tracks.where(start_at: range).delete_all
Rails.logger.info "Deleted #{deleted_count} daily tracks for user #{user.id}"
end
# Threshold methods from safe_settings
def distance_threshold_meters
@distance_threshold_meters ||= user.safe_settings.meters_between_routes.to_i
end
def time_threshold_minutes
@time_threshold_minutes ||= user.safe_settings.minutes_between_routes.to_i
end
end

View file

@ -1,36 +0,0 @@
# frozen_string_literal: true

module Tracks
  module IncompleteSegmentHandlers
    # Buffers segments that are still "hot" — whose newest point is within
    # the grace period — into Redis so they can keep growing as points
    # arrive; older segments are finalized into tracks.
    class BufferHandler
      attr_reader :user, :day, :grace_period_minutes, :redis_buffer

      def initialize(user, day = nil, grace_period_minutes = 5)
        @user = user
        @day = day || Date.current
        @grace_period_minutes = grace_period_minutes
        @redis_buffer = Tracks::RedisBuffer.new(user.id, @day)
      end

      # A segment may become a track once its last point has aged past the
      # grace period; empty segments are never finalized.
      def should_finalize_segment?(segment_points)
        return false if segment_points.empty?

        grace_period_cutoff = Time.current - grace_period_minutes.minutes
        Time.zone.at(segment_points.last.timestamp) < grace_period_cutoff
      end

      # Parks a not-yet-final segment in the Redis buffer.
      def handle_incomplete_segment(segment_points)
        redis_buffer.store(segment_points)
        Rails.logger.debug "Stored #{segment_points.size} points in buffer for user #{user.id}, day #{day}"
      end

      # Drops the buffer once its contents have been processed.
      def cleanup_processed_data
        redis_buffer.clear
        Rails.logger.debug "Cleared buffer for user #{user.id}, day #{day}"
      end
    end
  end
end

View file

@ -1,48 +0,0 @@
# frozen_string_literal: true

# Incomplete segment handling strategy for bulk track generation.
#
# Every segment is finalized immediately — nothing is buffered. Suitable
# for bulk processing of historical data, where no segment will grow with
# newly arriving points. Not suitable for real-time processing.
#
# Example:
#   handler = Tracks::IncompleteSegmentHandlers::IgnoreHandler.new(user)
#   handler.should_finalize_segment?(segment_points) # => true
module Tracks
  module IncompleteSegmentHandlers
    class IgnoreHandler
      def initialize(user)
        @user = user
      end

      # Bulk processing always finalizes: every valid segment becomes a track.
      def should_finalize_segment?(_segment_points)
        true
      end

      # Incomplete segments are logged and otherwise dropped.
      def handle_incomplete_segment(segment_points)
        Rails.logger.debug "Ignoring incomplete segment with #{segment_points.size} points"
      end

      # Nothing is buffered, so there is nothing to clean up.
      def cleanup_processed_data; end
    end
  end
end

View file

@ -0,0 +1,97 @@
# frozen_string_literal: true

# This service analyzes new points as they're created and determines whether
# they should trigger incremental track generation based on time and distance
# thresholds defined in user settings.
#
# The key insight is that we should trigger track generation when there's a
# significant gap between the new point and the previous point, indicating
# the end of a journey and the start of a new one.
#
# Process:
# 1. Check if the new point should trigger processing (skip imported points)
# 2. Find the last point before the new point
# 3. Calculate time and distance differences
# 4. If thresholds are exceeded, trigger incremental generation
# 5. Set the end_at time to the previous point's timestamp for track finalization
#
# This ensures tracks are properly finalized when journeys end, not when they start.
#
# Usage:
#   # In Point model after_create_commit callback
#   Tracks::IncrementalProcessor.new(user, new_point).call
class Tracks::IncrementalProcessor
  attr_reader :user, :new_point, :previous_point

  def initialize(user, new_point)
    @user = user
    @new_point = new_point
    @previous_point = find_previous_point
  end

  # Enqueues Tracks::CreateJob (mode: :none) when a significant gap is
  # detected; otherwise does nothing and returns nil.
  def call
    return unless should_process?

    start_at = find_start_time
    end_at = find_end_time

    Tracks::CreateJob.perform_later(
      user.id,
      start_at: start_at,
      end_at: end_at,
      mode: :none
    )
  end

  private

  # Imported points never trigger processing; a user's very first point
  # always does (there is no previous point to compare against).
  def should_process?
    return false if new_point.import_id.present?
    return true unless previous_point

    exceeds_thresholds?(previous_point, new_point)
  end

  # The most recent point recorded strictly before the new point.
  def find_previous_point
    @previous_point ||=
      user.tracked_points
          .where('timestamp < ?', new_point.timestamp)
          .order(:timestamp)
          .last
  end

  # Resume after the user's latest finished track (nil when none exist).
  def find_start_time
    user.tracks.order(:end_at).last&.end_at
  end

  # Finalize up to the previous point — the journey ended there, not at
  # the newly arrived point.
  def find_end_time
    previous_point ? Time.at(previous_point.timestamp) : nil
  end

  # A gap counts when EITHER the time or the distance threshold is reached.
  def exceeds_thresholds?(previous_point, current_point)
    time_gap = time_difference_minutes(previous_point, current_point)
    distance_gap = distance_difference_meters(previous_point, current_point)

    time_exceeded = time_gap >= time_threshold_minutes
    distance_exceeded = distance_gap >= distance_threshold_meters

    time_exceeded || distance_exceeded
  end

  # Timestamps are epoch seconds; result is fractional minutes.
  def time_difference_minutes(point1, point2)
    (point2.timestamp - point1.timestamp) / 60.0
  end

  # NOTE(review): assumes Point#distance_to returns kilometers (hence the
  # * 1000 conversion to meters) — confirm against the Point model.
  def distance_difference_meters(point1, point2)
    point1.distance_to(point2) * 1000
  end

  def time_threshold_minutes
    @time_threshold_minutes ||= user.safe_settings.minutes_between_routes.to_i
  end

  def distance_threshold_meters
    @distance_threshold_meters ||= user.safe_settings.meters_between_routes.to_i
  end
end

View file

@ -1,54 +0,0 @@
# frozen_string_literal: true

# Point loading strategy for bulk track generation from existing GPS points.
#
# Loads every valid point for the user — optionally bounded by a
# start_at/end_at window — ordered by timestamp for sequential
# segmentation. Points lacking coordinates or timestamps are excluded
# upfront to keep downstream processing simple.
#
# Example:
#   Tracks::PointLoaders::BulkLoader
#     .new(user, start_at: 1.week.ago, end_at: Time.current)
#     .load_points
module Tracks
  module PointLoaders
    class BulkLoader
      attr_reader :user, :start_at, :end_at

      def initialize(user, start_at: nil, end_at: nil)
        @user = user
        @start_at = start_at
        @end_at = end_at
      end

      def load_points
        scope = Point.where(user: user)
                     .where.not(lonlat: nil)
                     .where.not(timestamp: nil)
        scope = scope.where('timestamp >= ?', start_at) if start_at.present?
        scope = scope.where('timestamp <= ?', end_at) if end_at.present?
        scope.order(:timestamp)
      end
    end
  end
end

View file

@ -1,72 +0,0 @@
# frozen_string_literal: true

module Tracks
  module PointLoaders
    # Point loading strategy for incremental (per-day) track generation.
    #
    # Combines points buffered in Redis from a previous run with new,
    # not-yet-tracked points from the database, deduplicated and sorted by
    # timestamp.
    class IncrementalLoader
      attr_reader :user, :day, :redis_buffer

      def initialize(user, day = nil)
        @user = user
        @day = day || Date.current
        @redis_buffer = Tracks::RedisBuffer.new(user.id, @day)
      end

      # Returns the merged, timestamp-ordered, deduplicated list of
      # buffered and new points for the configured day.
      def load_points
        # Get buffered points from Redis
        buffered_points = redis_buffer.retrieve

        # Find the last track for this day to determine where to start
        last_track = Track.last_for_day(user, day)

        # Load new points since last track
        new_points = load_new_points_since_last_track(last_track)

        # Combine buffered points with new points
        combined_points = merge_points(buffered_points, new_points)

        Rails.logger.debug "Loaded #{buffered_points.size} buffered points and #{new_points.size} new points for user #{user.id}"

        combined_points
      end

      private

      # Valid, track-less points: everything after the last track's end
      # when one exists, otherwise the whole configured day.
      def load_new_points_since_last_track(last_track)
        scope = user.points
                    .where.not(lonlat: nil)
                    .where.not(timestamp: nil)
                    .where(track_id: nil) # Only process points not already assigned to tracks

        if last_track
          scope = scope.where('timestamp > ?', last_track.end_at.to_i)
        else
          # If no last track, load all points for the day
          day_start = day.beginning_of_day.to_i
          day_end = day.end_of_day.to_i
          scope = scope.where('timestamp >= ? AND timestamp <= ?', day_start, day_end)
        end

        scope.order(:timestamp)
      end

      # Normalizes buffered entries (hashes from Redis, or Point objects)
      # into Point instances, merges them with the new points, sorts by
      # timestamp and deduplicates on (timestamp, lat, lon).
      def merge_points(buffered_points, new_points)
        # Convert buffered point hashes back to Point objects if needed
        buffered_point_objects = buffered_points.map do |point_data|
          # If it's already a Point object, use it directly
          if point_data.is_a?(Point)
            point_data
          else
            # Create a Point-like object from the hash.
            # NOTE(review): the rebuilt Point instances are unsaved and have
            # no id (it is stripped) — verify downstream code never tries to
            # persist or reload them.
            Point.new(point_data.except('id').symbolize_keys)
          end
        end

        # Combine and sort by timestamp
        all_points = (buffered_point_objects + new_points.to_a).sort_by(&:timestamp)

        # Remove duplicates based on timestamp and coordinates
        all_points.uniq { |point| [point.timestamp, point.lat, point.lon] }
      end
    end
  end
end

View file

@ -1,72 +0,0 @@
# frozen_string_literal: true

# Per-user, per-day buffer for GPS points awaiting track finalization.
# Despite the name, storage goes through Rails.cache rather than a raw
# Redis connection; entries expire automatically after BUFFER_EXPIRY.
class Tracks::RedisBuffer
  BUFFER_PREFIX = 'track_buffer'
  BUFFER_EXPIRY = 7.days

  attr_reader :user_id, :day

  def initialize(user_id, day)
    @user_id = user_id
    @day = day.is_a?(Date) ? day : Date.parse(day.to_s)
  end

  # Serializes the points and writes them under the day-scoped key.
  # Empty input is a no-op.
  def store(points)
    return if points.empty?

    Rails.cache.write(buffer_key, serialize_points(points), expires_in: BUFFER_EXPIRY)
    Rails.logger.debug "Stored #{points.size} points in buffer for user #{user_id}, day #{day}"
  end

  # Returns the buffered point hashes, or [] when absent or on error.
  def retrieve
    cached_data = Rails.cache.read(buffer_key)
    cached_data ? deserialize_points(cached_data) : []
  rescue StandardError => e
    Rails.logger.error "Failed to retrieve buffered points for user #{user_id}, day #{day}: #{e.message}"
    []
  end

  # Clear the buffer for the user/day combination
  def clear
    Rails.cache.delete(buffer_key)
    Rails.logger.debug "Cleared buffer for user #{user_id}, day #{day}"
  end

  def exists?
    Rails.cache.exist?(buffer_key)
  end

  private

  def buffer_key
    "#{BUFFER_PREFIX}:#{user_id}:#{day.strftime('%Y-%m-%d')}"
  end

  # Plain-hash snapshot of each point; ids are kept for traceability.
  def serialize_points(points)
    points.map do |point|
      {
        id: point.id,
        lonlat: point.lonlat.to_s,
        timestamp: point.timestamp,
        lat: point.lat,
        lon: point.lon,
        altitude: point.altitude,
        velocity: point.velocity,
        battery: point.battery,
        user_id: point.user_id
      }
    end
  end

  # Stored data is already a plain array of hashes.
  def deserialize_points(points_data)
    points_data || []
  end
end

View file

@ -68,8 +68,8 @@ module Tracks::Segmentation
return false if previous_point.nil?
# Check time threshold (convert minutes to seconds)
current_timestamp = point_timestamp(current_point)
previous_timestamp = point_timestamp(previous_point)
current_timestamp = current_point.timestamp
previous_timestamp = previous_point.timestamp
time_diff_seconds = current_timestamp - previous_timestamp
time_threshold_seconds = time_threshold_minutes.to_i * 60
@ -79,6 +79,7 @@ module Tracks::Segmentation
# Check distance threshold - convert km to meters to match frontend logic
distance_km = calculate_distance_kilometers_between_points(previous_point, current_point)
distance_meters = distance_km * 1000 # Convert km to meters
return true if distance_meters > distance_threshold_meters
false
@ -96,7 +97,7 @@ module Tracks::Segmentation
return false if segment_points.size < 2
last_point = segment_points.last
last_timestamp = point_timestamp(last_point)
last_timestamp = last_point.timestamp
current_time = Time.current.to_i
# Don't finalize if the last point is too recent (within grace period)
@ -106,30 +107,10 @@ module Tracks::Segmentation
time_since_last_point > grace_period_seconds
end
def point_timestamp(point)
if point.respond_to?(:timestamp)
# Point objects from database always have integer timestamps
point.timestamp
elsif point.is_a?(Hash)
# Hash might come from Redis buffer or test data
timestamp = point[:timestamp] || point['timestamp']
timestamp.to_i
else
raise ArgumentError, "Invalid point type: #{point.class}"
end
end
def point_coordinates(point)
if point.respond_to?(:lat) && point.respond_to?(:lon)
[point.lat, point.lon]
elsif point.is_a?(Hash)
[point[:lat] || point['lat'], point[:lon] || point['lon']]
else
raise ArgumentError, "Invalid point type: #{point.class}"
end
[point.lat, point.lon]
end
# These methods need to be implemented by the including class
def distance_threshold_meters
raise NotImplementedError, "Including class must implement distance_threshold_meters"
end

View file

@ -73,6 +73,7 @@ module Tracks::TrackBuilder
if track.save
Point.where(id: points.map(&:id)).update_all(track_id: track.id)
track
else
Rails.logger.error "Failed to create track for user #{user.id}: #{track.errors.full_messages.join(', ')}"
@ -82,7 +83,7 @@ module Tracks::TrackBuilder
end
def build_path(points)
Tracks::BuildPath.new(points.map(&:lonlat)).call
Tracks::BuildPath.new(points).call
end
def calculate_track_distance(points)

View file

@ -1,6 +1,6 @@
<p class="py-6">
<p class='py-2'>
You have used <%= number_with_delimiter(current_user.points.count) %> points of <%= number_with_delimiter(DawarichSettings::BASIC_PAID_PLAN_LIMIT) %> available.
You have used <%= number_with_delimiter(current_user.tracked_points.count) %> points of <%= number_with_delimiter(DawarichSettings::BASIC_PAID_PLAN_LIMIT) %> available.
</p>
<progress class="progress progress-primary w-1/2 h-5" value="<%= current_user.points.count %>" max="<%= DawarichSettings::BASIC_PAID_PLAN_LIMIT %>"></progress>
<progress class="progress progress-primary w-1/2 h-5" value="<%= current_user.tracked_points.count %>" max="<%= DawarichSettings::BASIC_PAID_PLAN_LIMIT %>"></progress>
</p>

View file

@ -30,9 +30,9 @@ cache_preheating_job:
class: "Cache::PreheatingJob"
queue: default
tracks_bulk_creating_job:
cron: "10 0 * * *" # every day at 00:10
class: "Tracks::BulkCreatingJob"
tracks_cleanup_job:
cron: "0 2 * * 0" # every Sunday at 02:00
class: "Tracks::CleanupJob"
queue: tracks
place_name_fetching_job:

View file

@ -20,7 +20,7 @@ class CreateTracksFromPoints < ActiveRecord::Migration[8.0]
user.id,
start_at: nil,
end_at: nil,
cleaning_strategy: :replace
mode: :daily
)
processed_users += 1

483
docs/TRACKS_OVERVIEW.md Normal file
View file

@ -0,0 +1,483 @@
# Dawarich Tracks Feature Overview
## Table of Contents
- [Introduction](#introduction)
- [Architecture Overview](#architecture-overview)
- [Core Components](#core-components)
- [Data Flow](#data-flow)
- [Configuration](#configuration)
- [Usage Examples](#usage-examples)
- [API Reference](#api-reference)
- [Development Guidelines](#development-guidelines)
## Introduction
The Dawarich Tracks feature automatically converts raw GPS points into meaningful movement tracks. It analyzes sequences of location points to identify distinct journeys, providing users with structured visualizations of their movement patterns.
### Key Features
- **Automatic Track Generation**: Converts GPS points into coherent movement tracks
- **Real-time Processing**: Incremental track generation as new points arrive
- **Configurable Thresholds**: User-customizable time and distance parameters
- **Multiple Generation Modes**: Bulk, incremental, and daily processing
- **Rich Statistics**: Distance, speed, elevation, and duration metrics
- **Live Updates**: Real-time track updates via WebSocket connections
## Architecture Overview
```mermaid
graph TB
A[GPS Points] --> B[Incremental Processor]
B --> C[Threshold Check]
C --> D{Exceeds Thresholds?}
D -->|Yes| E[Tracks Generator]
D -->|No| F[Skip Processing]
E --> G[Segmentation Engine]
G --> H[Track Builder]
H --> I[Database]
I --> J[Real-time Broadcasting]
J --> K[Frontend Updates]
```
## Core Components
### 1. Models
#### Track Model
```ruby
# app/models/track.rb
class Track < ApplicationRecord
belongs_to :user
has_many :points, dependent: :nullify
# Attributes
# start_at, end_at (DateTime)
# distance (Integer, meters)
# avg_speed (Float, km/h)
# duration (Integer, seconds)
# elevation_gain/loss/max/min (Integer, meters)
# original_path (PostGIS LineString)
end
```
#### Point Model
```ruby
# app/models/point.rb
class Point < ApplicationRecord
belongs_to :track, optional: true
belongs_to :user
# Triggers incremental track generation via background job
after_create_commit :trigger_incremental_track_generation
private
def trigger_incremental_track_generation
Tracks::IncrementalCheckJob.perform_later(user.id, id)
end
end
```
### 2. Services
#### Tracks::Generator
**Purpose**: Unified track generation service with multiple modes
```ruby
# Usage
Tracks::Generator.new(user, mode: :bulk).call
Tracks::Generator.new(user, mode: :incremental, end_at: Time.current).call
Tracks::Generator.new(user, mode: :daily, start_at: Date.current).call
```
**Modes**:
- `:bulk` - Regenerates all tracks from scratch (replaces existing)
- `:incremental` - Processes only untracked points up to specified time
- `:daily` - Processes tracks on daily basis with cleanup
#### Tracks::IncrementalProcessor
**Purpose**: Analyzes new points and triggers track generation when thresholds are exceeded
```ruby
# Automatically called when new points are created
Tracks::IncrementalProcessor.new(user, new_point).call
```
#### Tracks::Segmentation
**Purpose**: Core algorithm for splitting GPS points into meaningful segments
**Criteria**:
- **Time threshold**: Configurable minutes gap (default: 30 minutes)
- **Distance threshold**: Configurable meters jump (default: 500 meters)
- **Minimum segment size**: 2 points required for a valid track
#### Tracks::TrackBuilder
**Purpose**: Converts point arrays into Track records with calculated statistics
**Statistics Calculated**:
- **Distance**: Always stored in meters as integers
- **Duration**: Total time in seconds between first and last point
- **Average Speed**: Calculated in km/h regardless of user preference
- **Elevation Metrics**: Gain, loss, maximum, minimum in meters
### 3. Background Jobs
#### Tracks::IncrementalCheckJob
- **Purpose**: Lightweight job triggered by point creation
- **Queue**: `tracks` for dedicated processing
- **Trigger**: Automatically enqueued when non-import points are created
- **Function**: Checks thresholds and conditionally triggers track generation
#### Tracks::CreateJob
- **Purpose**: Main orchestration job for track creation
- **Features**: User notifications on success/failure
- **Incremental Usage**: Enqueued by IncrementalCheckJob when thresholds are exceeded
- **Parameters**: `user_id`, `start_at`, `end_at`, `mode`
#### Tracks::CleanupJob
- **Purpose**: Weekly cleanup of missed track generation
- **Schedule**: Runs weekly on Sunday at 02:00 via cron
- **Strategy**: Processes only users with old untracked points (1+ days old)
### 4. Real-time Features
#### TracksChannel (ActionCable)
```javascript
// Real-time track updates
consumer.subscriptions.create("TracksChannel", {
received(data) {
// Handle track created/updated/destroyed events
}
});
```
## Data Flow
### 1. Point Creation Flow
```
New Point Created → IncrementalCheckJob → Incremental Processor → Threshold Check →
(if exceeded) → CreateJob → Track Generation → Database Update →
User Notification → Real-time Broadcast → Frontend Update
```
### 2. Bulk Processing Flow
```
Scheduled Job → Load Historical Points → Segmentation →
Track Creation → Statistics Calculation → Database Batch Update
```
### 3. Incremental Processing Flow
```
New Point → IncrementalCheckJob → Find Previous Point → Calculate Time/Distance Gaps →
(if thresholds exceeded) → CreateJob(start_at: last_track_end, end_at: previous_point_time) →
Process Untracked Points → Create Tracks → User Notification
```
## Configuration
### User Settings
Tracks behavior is controlled by user-configurable settings in `Users::SafeSettings`:
```ruby
# Default values
{
'meters_between_routes' => 500, # Distance threshold
'minutes_between_routes' => 30, # Time threshold
'route_opacity' => 60, # Visual opacity
'distance_unit' => 'km' # Display unit (km/mi)
}
```
### Threshold Configuration
```ruby
# Time threshold: Gap longer than X minutes = new track
user.safe_settings.minutes_between_routes # default: 30
# Distance threshold: Jump larger than X meters = new track
user.safe_settings.meters_between_routes # default: 500
# Access in services
def time_threshold_minutes
user.safe_settings.minutes_between_routes.to_i
end
```
### Background Job Schedule
```yaml
# config/schedule.yml
tracks_cleanup_job:
cron: '0 2 * * 0' # Weekly on Sunday at 02:00
class: Tracks::CleanupJob
```
## Usage Examples
### 1. Manual Track Generation
```ruby
# Bulk regeneration (replaces all existing tracks)
Tracks::Generator.new(user, mode: :bulk).call
# Process specific date range
Tracks::Generator.new(
user,
start_at: 1.week.ago,
end_at: Time.current,
mode: :bulk
).call
# Daily processing
Tracks::Generator.new(
user,
start_at: Date.current,
mode: :daily
).call
```
### 2. Incremental Processing
```ruby
# Triggered automatically when points are created
point = Point.create!(
user: user,
timestamp: Time.current.to_i,
lonlat: 'POINT(-122.4194 37.7749)'
)
# → Automatically enqueues IncrementalCheckJob
# → Job checks thresholds and conditionally triggers track generation
```
### 3. Background Job Management
```ruby
# Enqueue bulk processing
Tracks::BulkGeneratorJob.perform_later(user.id)
# Enqueue incremental check (automatically triggered by point creation)
Tracks::IncrementalCheckJob.perform_later(user.id, point.id)
# Enqueue incremental processing (triggered by IncrementalCheckJob)
Tracks::CreateJob.perform_later(
user.id,
start_at: last_track_end,
end_at: previous_point_timestamp,
mode: :none
)
# Run cleanup for missed tracks
Tracks::CleanupJob.perform_later(older_than: 1.day.ago)
# Create tracks with notifications
Tracks::CreateJob.perform_later(user.id, start_at: nil, end_at: nil, mode: :bulk)
```
### 4. Frontend Integration
```javascript
// Initialize tracks on map
const tracksLayer = new TracksLayer(map, tracksData);
// Handle real-time updates
consumer.subscriptions.create("TracksChannel", {
received(data) {
switch(data.event) {
case 'created':
tracksLayer.addTrack(data.track);
break;
case 'updated':
tracksLayer.updateTrack(data.track);
break;
case 'destroyed':
tracksLayer.removeTrack(data.track.id);
break;
}
}
});
```
## API Reference
### Track Model API
```ruby
# Key methods
track.formatted_distance # Distance in user's preferred unit
track.distance_in_unit(unit) # Distance in specific unit
track.recalculate_path_and_distance! # Recalculate from points
# Scopes
Track.for_user(user)
Track.between_dates(start_date, end_date)
Track.last_for_day(user, date)
```
### TrackSerializer Output
```json
{
"id": 123,
"start_at": "2023-01-01T10:00:00Z",
"end_at": "2023-01-01T11:30:00Z",
"distance": 5000,
"avg_speed": 25.5,
"duration": 5400,
"elevation_gain": 150,
"elevation_loss": 100,
"elevation_max": 300,
"elevation_min": 200,
"path": "LINESTRING(...)"
}
```
### Service APIs
```ruby
# Generator API
generator = Tracks::Generator.new(user, options)
generator.call # Returns nil, tracks saved to database
# Processor API
processor = Tracks::IncrementalProcessor.new(user, point)
processor.call # May enqueue background job
# Segmentation API (via inclusion)
segments = split_points_into_segments(points)
should_start_new_segment?(current_point, previous_point)
```
## Development Guidelines
### 1. Adding New Generation Modes
```ruby
# In Tracks::Generator
def load_points
case mode
when :bulk
load_bulk_points
when :incremental
load_incremental_points
when :daily
load_daily_points
when :custom_mode # New mode
load_custom_points
end
end
def should_clean_tracks?
case mode
when :bulk, :daily then true
when :incremental, :custom_mode then false
end
end
```
### 2. Customizing Segmentation Logic
```ruby
# Override in including class
def should_start_new_segment?(current_point, previous_point)
# Custom logic here
super || custom_condition?(current_point, previous_point)
end
```
### 3. Testing Patterns
```ruby
# Test track generation
expect { generator.call }.to change(Track, :count).by(1)
# Test point callback
expect { point.save! }.to have_enqueued_job(Tracks::IncrementalCheckJob)
.with(user.id, point.id)
# Test incremental processing
expect(Tracks::CreateJob).to receive(:perform_later)
.with(user.id, start_at: anything, end_at: anything, mode: :none)
processor.call
# Test segmentation
segments = generator.send(:segment_points, points)
expect(segments.size).to eq(2)
```
### 4. Performance Considerations
- **Batch Processing**: Use `find_in_batches` for large datasets
- **Database Indexes**: Ensure proper indexing on `timestamp` and `track_id`
- **Memory Usage**: Process points in chunks for very large datasets
- **Asynchronous Processing**: Point creation is never blocked by track generation
- **Job Queue Management**: Monitor job queue performance for incremental processing
### 5. Error Handling
```ruby
# In services
begin
generator.call
rescue StandardError => e
Rails.logger.error "Track generation failed: #{e.message}"
# Handle gracefully
end
# In jobs
def perform(*args)
# Main logic
rescue ActiveRecord::RecordNotFound
# Don't retry for missing records
rescue StandardError => e
Rails.logger.error "Job failed: #{e.message}"
raise # Re-raise for retry logic
end
```
### 6. Monitoring and Debugging
```ruby
# Add logging
Rails.logger.info "Generated #{segments.size} tracks for user #{user.id}"
# Performance monitoring
Rails.logger.info "Track generation took #{duration}ms"
# Debug segmentation
Rails.logger.debug "Threshold check: time=#{time_gap}min, distance=#{distance_gap}m"
```
## Best Practices
1. **Data Consistency**: Always store distances in meters, convert only for display
2. **Threshold Configuration**: Make thresholds user-configurable for flexibility
3. **Error Handling**: Gracefully handle missing data and network issues
4. **Performance**: Use database queries efficiently, avoid N+1 queries
5. **Testing**: Test all modes and edge cases thoroughly
6. **Real-time Updates**: Use ActionCable for responsive user experience
7. **Background Processing**: Use appropriate queues for different job priorities
8. **Asynchronous Design**: Never block point creation with track generation logic
9. **Job Monitoring**: Monitor background job performance and failure rates
## Troubleshooting
### Common Issues
1. **Missing Tracks**: Check if points have `track_id: nil` for incremental processing
2. **Incorrect Thresholds**: Verify user settings configuration
3. **Job Failures**: Check background job logs for errors
4. **Real-time Updates**: Verify WebSocket connection and channel subscriptions
5. **Performance Issues**: Monitor database query performance and indexing
### Debugging Tools
```ruby
# Check track generation
user.tracked_points.where(track_id: nil).count # Untracked points
# Verify thresholds
user.safe_settings.minutes_between_routes
user.safe_settings.meters_between_routes
# Test segmentation
generator = Tracks::Generator.new(user, mode: :bulk)
segments = generator.send(:segment_points, points)
```
This overview provides a comprehensive understanding of the Dawarich Tracks feature, from high-level architecture to specific implementation details.

View file

@ -1,19 +0,0 @@
# frozen_string_literal: true
require 'rails_helper'
RSpec.describe Tracks::BulkCreatingJob, type: :job do
  describe '#perform' do
    let(:creator) { instance_double(Tracks::BulkTrackCreator) }

    it 'calls Tracks::BulkTrackCreator with the correct arguments' do
      # Stub construction with the exact keyword arguments the job forwards.
      allow(Tracks::BulkTrackCreator)
        .to receive(:new)
        .with(start_at: 'foo', end_at: 'bar', user_ids: [1, 2])
        .and_return(creator)
      expect(creator).to receive(:call)

      described_class.new.perform(start_at: 'foo', end_at: 'bar', user_ids: [1, 2])
    end
  end
end

View file

@ -0,0 +1,88 @@
# frozen_string_literal: true
require 'rails_helper'
RSpec.describe Tracks::CleanupJob, type: :job do
  let(:user) { create(:user) }

  describe '#perform' do
    context 'with old untracked points' do
      # Points older than the default threshold — these should trigger processing.
      let!(:old_points) do
        create_points_around(user: user, count: 1, base_lat: 20.0, timestamp: 2.days.ago.to_i)
        create_points_around(user: user, count: 1, base_lat: 20.0, timestamp: 1.day.ago.to_i)
      end
      # Recent points that fall inside the grace window and must be ignored.
      let!(:recent_points) do
        create_points_around(user: user, count: 2, base_lat: 20.0, timestamp: 1.hour.ago.to_i)
      end
      let(:generator) { instance_double(Tracks::Generator) }

      it 'processes only old untracked points' do
        expect(Tracks::Generator).to receive(:new)
          .and_return(generator)
        expect(generator).to receive(:call)

        described_class.new.perform(older_than: 1.day.ago)
      end

      it 'logs processing information' do
        allow(Tracks::Generator).to receive(:new).and_return(double(call: nil))
        expect(Rails.logger).to receive(:info).with(/Processing missed tracks for user #{user.id}/)

        described_class.new.perform(older_than: 1.day.ago)
      end
    end

    context 'with users having insufficient points' do
      # A lone point cannot form a track (minimum segment size is 2).
      let!(:single_point) do
        create_points_around(user: user, count: 1, base_lat: 20.0, timestamp: 2.days.ago.to_i)
      end

      it 'skips users with less than 2 points' do
        expect(Tracks::Generator).not_to receive(:new)

        described_class.new.perform(older_than: 1.day.ago)
      end
    end

    context 'with no old untracked points' do
      let(:track) { create(:track, user: user) }
      # All old points already belong to a track, so there is nothing to clean up.
      let!(:tracked_points) do
        create_points_around(user: user, count: 3, base_lat: 20.0, timestamp: 2.days.ago.to_i, track: track)
      end

      it 'does not process any users' do
        expect(Tracks::Generator).not_to receive(:new)

        described_class.new.perform(older_than: 1.day.ago)
      end
    end

    context 'with custom older_than parameter' do
      let!(:points) do
        create_points_around(user: user, count: 3, base_lat: 20.0, timestamp: 3.days.ago.to_i)
      end
      let(:generator) { instance_double(Tracks::Generator) }

      it 'uses custom threshold' do
        expect(Tracks::Generator).to receive(:new)
          .and_return(generator)
        expect(generator).to receive(:call)

        described_class.new.perform(older_than: 2.days.ago)
      end
    end
  end

  describe 'job configuration' do
    it 'uses tracks queue' do
      expect(described_class.queue_name).to eq('tracks')
    end

    it 'does not retry on failure' do
      expect(described_class.sidekiq_options_hash['retry']).to be false
    end
  end
end

View file

@ -6,26 +6,36 @@ RSpec.describe Tracks::CreateJob, type: :job do
let(:user) { create(:user) }
describe '#perform' do
let(:service_instance) { instance_double(Tracks::CreateFromPoints) }
let(:generator_instance) { instance_double(Tracks::Generator) }
let(:notification_service) { instance_double(Notifications::Create) }
before do
allow(Tracks::CreateFromPoints).to receive(:new).with(user, start_at: nil, end_at: nil, cleaning_strategy: :replace).and_return(service_instance)
allow(service_instance).to receive(:call).and_return(3)
allow(Tracks::Generator).to receive(:new).and_return(generator_instance)
allow(generator_instance).to receive(:call)
allow(Notifications::Create).to receive(:new).and_return(notification_service)
allow(notification_service).to receive(:call)
end
it 'calls the service and creates a notification' do
it 'calls the generator and creates a notification' do
# Mock the generator to actually create tracks
allow(generator_instance).to receive(:call) do
create_list(:track, 2, user: user)
end
described_class.new.perform(user.id)
expect(Tracks::CreateFromPoints).to have_received(:new).with(user, start_at: nil, end_at: nil, cleaning_strategy: :replace)
expect(service_instance).to have_received(:call)
expect(Tracks::Generator).to have_received(:new).with(
user,
start_at: nil,
end_at: nil,
mode: :daily
)
expect(generator_instance).to have_received(:call)
expect(Notifications::Create).to have_received(:new).with(
user: user,
kind: :info,
title: 'Tracks Generated',
content: 'Created 3 tracks from your location data. Check your tracks section to view them.'
content: 'Created 2 tracks from your location data. Check your tracks section to view them.'
)
expect(notification_service).to have_received(:call)
end
@ -33,38 +43,108 @@ RSpec.describe Tracks::CreateJob, type: :job do
context 'with custom parameters' do
let(:start_at) { 1.day.ago.beginning_of_day.to_i }
let(:end_at) { 1.day.ago.end_of_day.to_i }
let(:cleaning_strategy) { :daily }
let(:mode) { :daily }
before do
allow(Tracks::CreateFromPoints).to receive(:new).with(user, start_at: start_at, end_at: end_at, cleaning_strategy: cleaning_strategy).and_return(service_instance)
allow(service_instance).to receive(:call).and_return(2)
allow(Tracks::Generator).to receive(:new).and_return(generator_instance)
allow(generator_instance).to receive(:call)
allow(Notifications::Create).to receive(:new).and_return(notification_service)
allow(notification_service).to receive(:call)
end
it 'passes custom parameters to the service' do
described_class.new.perform(user.id, start_at: start_at, end_at: end_at, cleaning_strategy: cleaning_strategy)
it 'passes custom parameters to the generator' do
# Create some existing tracks and mock generator to create 1 more
create_list(:track, 5, user: user)
allow(generator_instance).to receive(:call) do
create(:track, user: user)
end
described_class.new.perform(user.id, start_at: start_at, end_at: end_at, mode: mode)
expect(Tracks::CreateFromPoints).to have_received(:new).with(user, start_at: start_at, end_at: end_at, cleaning_strategy: cleaning_strategy)
expect(service_instance).to have_received(:call)
expect(Tracks::Generator).to have_received(:new).with(
user,
start_at: start_at,
end_at: end_at,
mode: :daily
)
expect(generator_instance).to have_received(:call)
expect(Notifications::Create).to have_received(:new).with(
user: user,
kind: :info,
title: 'Tracks Generated',
content: 'Created 2 tracks from your location data. Check your tracks section to view them.'
content: 'Created 1 tracks from your location data. Check your tracks section to view them.'
)
expect(notification_service).to have_received(:call)
end
end
context 'when service raises an error' do
context 'with mode translation' do
before do
allow(Tracks::Generator).to receive(:new).and_return(generator_instance)
allow(generator_instance).to receive(:call) # No tracks created for mode tests
allow(Notifications::Create).to receive(:new).and_return(notification_service)
allow(notification_service).to receive(:call)
end
it 'translates :none to :incremental' do
described_class.new.perform(user.id, mode: :none)
expect(Tracks::Generator).to have_received(:new).with(
user,
start_at: nil,
end_at: nil,
mode: :incremental
)
expect(Notifications::Create).to have_received(:new).with(
user: user,
kind: :info,
title: 'Tracks Generated',
content: 'Created 0 tracks from your location data. Check your tracks section to view them.'
)
end
it 'translates :daily to :daily' do
described_class.new.perform(user.id, mode: :daily)
expect(Tracks::Generator).to have_received(:new).with(
user,
start_at: nil,
end_at: nil,
mode: :daily
)
expect(Notifications::Create).to have_received(:new).with(
user: user,
kind: :info,
title: 'Tracks Generated',
content: 'Created 0 tracks from your location data. Check your tracks section to view them.'
)
end
it 'translates other modes to :bulk' do
described_class.new.perform(user.id, mode: :replace)
expect(Tracks::Generator).to have_received(:new).with(
user,
start_at: nil,
end_at: nil,
mode: :bulk
)
expect(Notifications::Create).to have_received(:new).with(
user: user,
kind: :info,
title: 'Tracks Generated',
content: 'Created 0 tracks from your location data. Check your tracks section to view them.'
)
end
end
context 'when generator raises an error' do
let(:error_message) { 'Something went wrong' }
let(:service_instance) { instance_double(Tracks::CreateFromPoints) }
let(:notification_service) { instance_double(Notifications::Create) }
before do
allow(Tracks::CreateFromPoints).to receive(:new).with(user, start_at: nil, end_at: nil, cleaning_strategy: :replace).and_return(service_instance)
allow(service_instance).to receive(:call).and_raise(StandardError, error_message)
allow(Tracks::Generator).to receive(:new).and_return(generator_instance)
allow(generator_instance).to receive(:call).and_raise(StandardError, error_message)
allow(Notifications::Create).to receive(:new).and_return(notification_service)
allow(notification_service).to receive(:call)
end
@ -108,8 +188,8 @@ RSpec.describe Tracks::CreateJob, type: :job do
end
describe 'queue' do
it 'is queued on default queue' do
expect(described_class.new.queue_name).to eq('default')
it 'is queued on tracks queue' do
expect(described_class.new.queue_name).to eq('tracks')
end
end
end

View file

@ -0,0 +1,39 @@
# frozen_string_literal: true
require 'rails_helper'
RSpec.describe Tracks::IncrementalCheckJob, type: :job do
  let(:user) { create(:user) }
  let(:point) { create(:point, user: user) }

  describe '#perform' do
    context 'with valid parameters' do
      let(:processor_double) { instance_double(Tracks::IncrementalProcessor) }

      it 'calls the incremental processor' do
        # The job must build the processor with the resolved records and run it.
        expect(Tracks::IncrementalProcessor)
          .to receive(:new)
          .with(user, point)
          .and_return(processor_double)
        expect(processor_double).to receive(:call)

        described_class.new.perform(user.id, point.id)
      end
    end
  end

  describe 'job configuration' do
    it 'uses tracks queue' do
      expect(described_class.queue_name).to eq('tracks')
    end
  end

  describe 'integration with ActiveJob' do
    it 'enqueues the job' do
      expect { described_class.perform_later(user.id, point.id) }
        .to have_enqueued_job(described_class)
        .with(user.id, point.id)
    end
  end
end

View file

@ -127,8 +127,8 @@ RSpec.describe Point, type: :model do
end
let(:track) { create(:track) }
it 'enqueues Tracks::IncrementalGeneratorJob' do
expect { point.send(:trigger_incremental_track_generation) }.to have_enqueued_job(Tracks::IncrementalGeneratorJob).with(point.user_id, point.recorded_at.to_date.to_s, 5)
it 'enqueues Tracks::IncrementalCheckJob' do
expect { point.send(:trigger_incremental_track_generation) }.to have_enqueued_job(Tracks::IncrementalCheckJob).with(point.user_id, point.id)
end
end
end

View file

@ -24,7 +24,7 @@ RSpec.describe PointsLimitExceeded do
context 'when user points count is equal to the limit' do
before do
allow(user.points).to receive(:count).and_return(10)
allow(user.tracked_points).to receive(:count).and_return(10)
end
it { is_expected.to be true }
@ -32,7 +32,7 @@ RSpec.describe PointsLimitExceeded do
context 'when user points count exceeds the limit' do
before do
allow(user.points).to receive(:count).and_return(11)
allow(user.tracked_points).to receive(:count).and_return(11)
end
it { is_expected.to be true }
@ -40,7 +40,7 @@ RSpec.describe PointsLimitExceeded do
context 'when user points count is below the limit' do
before do
allow(user.points).to receive(:count).and_return(9)
allow(user.tracked_points).to receive(:count).and_return(9)
end
it { is_expected.to be false }

View file

@ -1,176 +0,0 @@
# frozen_string_literal: true
require 'rails_helper'
RSpec.describe Tracks::BulkTrackCreator do
describe '#call' do
let!(:active_user) { create(:user) }
let!(:inactive_user) { create(:user, :inactive) }
let!(:user_without_points) { create(:user) }
let(:start_at) { 1.day.ago.beginning_of_day }
let(:end_at) { 1.day.ago.end_of_day }
before do
# Create points for active user in the target timeframe
create(:point, user: active_user, timestamp: start_at.to_i + 1.hour.to_i)
create(:point, user: active_user, timestamp: start_at.to_i + 2.hours.to_i)
# Create points for inactive user in the target timeframe
create(:point, user: inactive_user, timestamp: start_at.to_i + 1.hour.to_i)
end
# With an explicit start_at the creator fans out one Tracks::CreateJob per
# eligible user, always with the :daily cleaning strategy.
context 'when explicit start_at is provided' do
  it 'schedules tracks creation jobs for active users with points in the timeframe' do
    expect {
      described_class.new(start_at:, end_at:).call
    }.to have_enqueued_job(Tracks::CreateJob).with(active_user.id, start_at:, end_at:, cleaning_strategy: :daily)
  end

  it 'does not schedule jobs for users without tracked points' do
    expect {
      described_class.new(start_at:, end_at:).call
    }.not_to have_enqueued_job(Tracks::CreateJob).with(user_without_points.id, start_at:, end_at:, cleaning_strategy: :daily)
  end

  it 'does not schedule jobs for users without points in the specified timeframe' do
    # This user has points, but they fall outside [start_at, end_at].
    user_with_old_points = create(:user)
    create(:point, user: user_with_old_points, timestamp: 2.days.ago.to_i)

    expect {
      described_class.new(start_at:, end_at:).call
    }.not_to have_enqueued_job(Tracks::CreateJob).with(user_with_old_points.id, start_at:, end_at:, cleaning_strategy: :daily)
  end
end
# user_ids acts as an allow-list: only the listed users get jobs scheduled.
context 'when specific user_ids are provided' do
  it 'only processes the specified users' do
    expect {
      described_class.new(start_at:, end_at:, user_ids: [active_user.id]).call
    }.to have_enqueued_job(Tracks::CreateJob).with(active_user.id, start_at:, end_at:, cleaning_strategy: :daily)
  end

  it 'does not process users not in the user_ids list' do
    # inactive_user has points in the window but is not listed.
    expect {
      described_class.new(start_at:, end_at:, user_ids: [active_user.id]).call
    }.not_to have_enqueued_job(Tracks::CreateJob).with(inactive_user.id, start_at:, end_at:, cleaning_strategy: :daily)
  end
end
# When no start_at is given, the creator derives a per-user start time:
# end of the last track, else the oldest tracked point's timestamp.
context 'with automatic start time determination' do
  let(:user_with_tracks) { create(:user) }
  let(:user_without_tracks) { create(:user) }
  let(:current_time) { Time.current }

  before do
    # Create some historical points and tracks for user_with_tracks
    create(:point, user: user_with_tracks, timestamp: 3.days.ago.to_i)
    create(:point, user: user_with_tracks, timestamp: 2.days.ago.to_i)

    # Create a track ending 1 day ago
    create(:track, user: user_with_tracks, end_at: 1.day.ago)

    # Create newer points after the last track
    create(:point, user: user_with_tracks, timestamp: 12.hours.ago.to_i)
    create(:point, user: user_with_tracks, timestamp: 6.hours.ago.to_i)

    # Create points for user without tracks
    create(:point, user: user_without_tracks, timestamp: 2.days.ago.to_i)
    create(:point, user: user_without_tracks, timestamp: 1.day.ago.to_i)
  end

  it 'starts from the end of the last track for users with existing tracks' do
    track_end_time = user_with_tracks.tracks.order(end_at: :desc).first.end_at

    expect {
      described_class.new(end_at: current_time, user_ids: [user_with_tracks.id]).call
    }.to have_enqueued_job(Tracks::CreateJob).with(
      user_with_tracks.id,
      start_at: track_end_time,
      end_at: current_time.to_datetime,
      cleaning_strategy: :daily
    )
  end

  it 'starts from the oldest point for users without tracks' do
    oldest_point_time = Time.zone.at(user_without_tracks.tracked_points.order(:timestamp).first.timestamp)

    expect {
      described_class.new(end_at: current_time, user_ids: [user_without_tracks.id]).call
    }.to have_enqueued_job(Tracks::CreateJob).with(
      user_without_tracks.id,
      start_at: oldest_point_time,
      end_at: current_time.to_datetime,
      cleaning_strategy: :daily
    )
  end

  it 'falls back to 1 day ago for users with no points' do
    # Users with no points at all are skipped entirely — no job enqueued.
    expect {
      described_class.new(end_at: current_time, user_ids: [user_without_points.id]).call
    }.not_to have_enqueued_job(Tracks::CreateJob).with(
      user_without_points.id,
      start_at: anything,
      end_at: anything,
      cleaning_strategy: :daily
    )
  end
end
# Default constructor arguments: end_at falls back to yesterday's end of day
# and the start time is derived from the user's data.
context 'with default parameters' do
  let(:user_with_recent_points) { create(:user) }

  before do
    # Create points within yesterday's timeframe
    create(:point, user: user_with_recent_points, timestamp: 1.day.ago.beginning_of_day.to_i + 2.hours.to_i)
    create(:point, user: user_with_recent_points, timestamp: 1.day.ago.beginning_of_day.to_i + 6.hours.to_i)
  end

  it 'uses automatic start time determination with yesterday as end_at' do
    oldest_point_time = Time.zone.at(user_with_recent_points.tracked_points.order(:timestamp).first.timestamp)

    expect {
      described_class.new(user_ids: [user_with_recent_points.id]).call
    }.to have_enqueued_job(Tracks::CreateJob).with(
      user_with_recent_points.id,
      start_at: oldest_point_time,
      end_at: 1.day.ago.end_of_day.to_datetime,
      cleaning_strategy: :daily
    )
  end
end
end
# Exercises the private #start_time helper, which decides where track
# creation should resume for a user:
#   1. the end of the most recent track, if any tracks exist;
#   2. otherwise the oldest tracked point's timestamp;
#   3. otherwise 1.day.ago.beginning_of_day as a fallback.
describe '#start_time' do
  let(:user) { create(:user) }
  let(:service) { described_class.new }

  context 'when user has tracks' do
    let!(:old_track) { create(:track, user: user, end_at: 3.days.ago) }
    let!(:recent_track) { create(:track, user: user, end_at: 1.day.ago) }

    it 'returns the end time of the most recent track' do
      result = service.send(:start_time, user)

      # be_within avoids sub-second flakiness between the in-memory Time
      # (nanosecond precision) and the value persisted by the database.
      expect(result).to be_within(1.second).of(recent_track.end_at)
    end
  end

  context 'when user has no tracks but has points' do
    let!(:old_point) { create(:point, user: user, timestamp: 5.days.ago.to_i) }
    let!(:recent_point) { create(:point, user: user, timestamp: 2.days.ago.to_i) }

    it 'returns the timestamp of the oldest point' do
      result = service.send(:start_time, user)

      # Timestamps are integer seconds, so exact equality is safe here.
      expect(result).to eq(Time.zone.at(old_point.timestamp))
    end
  end

  context 'when user has no tracks and no points' do
    it 'returns 1 day ago beginning of day' do
      result = service.send(:start_time, user)

      # `1.day.ago` is evaluated both in the service and here; the tolerance
      # keeps the assertion stable against sub-second drift between the two.
      expect(result).to be_within(1.second).of(1.day.ago.beginning_of_day)
    end
  end
end
end

View file

@ -1,95 +0,0 @@
# frozen_string_literal: true
require 'rails_helper'
RSpec.describe Tracks::Cleaners::DailyCleaner do
let(:user) { create(:user) }
let(:start_at) { 1.day.ago.beginning_of_day }
let(:end_at) { 1.day.ago.end_of_day }
let(:cleaner) { described_class.new(user, start_at: start_at.to_i, end_at: end_at.to_i) }
describe '#cleanup' do
# Tracks entirely outside the cleanup window must be left untouched.
context 'when there are no overlapping tracks' do
  before do
    # Create a track (and a point) that ends well before our window starts.
    track = create(:track, user: user, start_at: 2.days.ago, end_at: 2.days.ago + 1.hour)
    create(:point, user: user, track: track, timestamp: 2.days.ago.to_i)
  end

  it 'does not remove any tracks' do
    expect { cleaner.cleanup }.not_to change { user.tracks.count }
  end
end
# A track fully inside the window is deleted and its points detached.
context 'when a track is completely within the time window' do
  let!(:track) { create(:track, user: user, start_at: start_at + 1.hour, end_at: end_at - 1.hour) }
  let!(:point1) { create(:point, user: user, track: track, timestamp: (start_at + 1.hour).to_i) }
  let!(:point2) { create(:point, user: user, track: track, timestamp: (start_at + 2.hours).to_i) }

  it 'removes all points from the track and deletes it' do
    expect { cleaner.cleanup }.to change { user.tracks.count }.by(-1)
    # The points themselves survive but lose their track association.
    expect(point1.reload.track_id).to be_nil
    expect(point2.reload.track_id).to be_nil
  end
end
# A track straddling the window keeps its out-of-window points and is
# re-bounded to the remaining points instead of being deleted.
context 'when a track spans across the time window' do
  let!(:track) { create(:track, user: user, start_at: start_at - 1.hour, end_at: end_at + 1.hour) }
  let!(:point_before) { create(:point, user: user, track: track, timestamp: (start_at - 30.minutes).to_i) }
  let!(:point_during1) { create(:point, user: user, track: track, timestamp: (start_at + 1.hour).to_i) }
  let!(:point_during2) { create(:point, user: user, track: track, timestamp: (start_at + 2.hours).to_i) }
  let!(:point_after) { create(:point, user: user, track: track, timestamp: (end_at + 30.minutes).to_i) }

  it 'removes only points within the window and updates track boundaries' do
    expect { cleaner.cleanup }.not_to change { user.tracks.count }

    # Points outside window should remain attached
    expect(point_before.reload.track_id).to eq(track.id)
    expect(point_after.reload.track_id).to eq(track.id)

    # Points inside window should be detached
    expect(point_during1.reload.track_id).to be_nil
    expect(point_during2.reload.track_id).to be_nil

    # Track boundaries should be updated to the surviving points
    track.reload
    expect(track.start_at).to be_within(1.second).of(Time.zone.at(point_before.timestamp))
    expect(track.end_at).to be_within(1.second).of(Time.zone.at(point_after.timestamp))
  end
end
# If too few points would remain after cleanup, the whole track is dropped
# and its surviving points become orphans (track_id nil).
context 'when a track overlaps but has insufficient remaining points' do
  let!(:track) { create(:track, user: user, start_at: start_at - 1.hour, end_at: end_at + 1.hour) }
  let!(:point_before) { create(:point, user: user, track: track, timestamp: (start_at - 30.minutes).to_i) }
  let!(:point_during) { create(:point, user: user, track: track, timestamp: (start_at + 1.hour).to_i) }

  it 'removes the track entirely and orphans remaining points' do
    expect { cleaner.cleanup }.to change { user.tracks.count }.by(-1)
    expect(point_before.reload.track_id).to be_nil
    expect(point_during.reload.track_id).to be_nil
  end
end
# A track overlapping the window but with no points inside it must be left
# exactly as it was.
context 'when track has no points in the time window' do
  let!(:track) { create(:track, user: user, start_at: start_at - 2.hours, end_at: end_at + 2.hours) }
  let!(:point_before) { create(:point, user: user, track: track, timestamp: (start_at - 30.minutes).to_i) }
  let!(:point_after) { create(:point, user: user, track: track, timestamp: (end_at + 30.minutes).to_i) }

  it 'does not modify the track' do
    # Capture the persisted boundaries BEFORE cleanup. The previous version
    # compared `track.reload.start_at` against `track.start_at` of the same
    # already-reloaded object — a tautology that could never fail.
    original_start_at = track.start_at
    original_end_at = track.end_at

    expect { cleaner.cleanup }.not_to change { user.tracks.count }

    track.reload
    expect(track.start_at).to be_within(1.second).of(original_start_at)
    expect(track.end_at).to be_within(1.second).of(original_end_at)
  end
end
context 'without start_at and end_at' do
  let(:cleaner) { described_class.new(user) }

  it 'does not perform any cleanup' do
    # Without a window the cleaner is a no-op, even when tracks exist.
    create(:track, user: user)

    expect { cleaner.cleanup }.not_to change(user.tracks, :count)
  end
end
end
end

View file

@ -1,357 +0,0 @@
# frozen_string_literal: true
require 'rails_helper'
RSpec.describe Tracks::CreateFromPoints do
let(:user) { create(:user) }
let(:service) { described_class.new(user) }
# Construction wires thresholds from the user's safe_settings and picks a
# cleaning strategy (default :replace).
describe '#initialize' do
  it 'sets user and thresholds from user settings' do
    expect(service.user).to eq(user)
    expect(service.distance_threshold_meters).to eq(user.safe_settings.meters_between_routes.to_i)
    expect(service.time_threshold_minutes).to eq(user.safe_settings.minutes_between_routes.to_i)
  end

  it 'defaults to replace cleaning strategy' do
    expect(service.cleaning_strategy).to eq(:replace)
  end

  context 'with custom user settings' do
    before do
      # Settings are stored as a string-keyed hash on the user record.
      user.update!(settings: user.settings.merge({
        'meters_between_routes' => 1000,
        'minutes_between_routes' => 60
      }))
    end

    it 'uses custom settings' do
      service = described_class.new(user)
      expect(service.distance_threshold_meters).to eq(1000)
      expect(service.time_threshold_minutes).to eq(60)
    end
  end

  context 'with custom cleaning strategy' do
    it 'accepts daily cleaning strategy' do
      service = described_class.new(user, cleaning_strategy: :daily)
      expect(service.cleaning_strategy).to eq(:daily)
    end

    it 'accepts none cleaning strategy' do
      service = described_class.new(user, cleaning_strategy: :none)
      expect(service.cleaning_strategy).to eq(:none)
    end

    it 'accepts custom date range with cleaning strategy' do
      # start_at/end_at are passed and stored as epoch seconds.
      start_time = 1.day.ago.beginning_of_day.to_i
      end_time = 1.day.ago.end_of_day.to_i
      service = described_class.new(user, start_at: start_time, end_at: end_time, cleaning_strategy: :daily)
      expect(service.start_at).to eq(start_time)
      expect(service.end_at).to eq(end_time)
      expect(service.cleaning_strategy).to eq(:daily)
    end
  end
end
describe '#call' do
context 'with no points' do
  it 'returns 0 tracks created' do
    result = service.call
    expect(result).to eq(0)
  end
end
context 'with insufficient points' do
  # A single point can never form a track (two are required).
  let!(:single_point) { create(:point, user: user, timestamp: 1.hour.ago.to_i) }

  it 'returns 0 tracks created' do
    result = service.call
    expect(result).to eq(0)
  end
end
# Happy path: three close, consecutive points collapse into one track with
# derived duration, distance, speed and elevation statistics.
context 'with points that form a single track' do
  let(:base_time) { 1.hour.ago }
  let!(:points) do
    [
      create(:point, user: user, timestamp: base_time.to_i,
             lonlat: 'POINT(-74.0060 40.7128)', altitude: 10),
      create(:point, user: user, timestamp: (base_time + 5.minutes).to_i,
             lonlat: 'POINT(-74.0070 40.7130)', altitude: 15),
      create(:point, user: user, timestamp: (base_time + 10.minutes).to_i,
             lonlat: 'POINT(-74.0080 40.7132)', altitude: 20)
    ]
  end

  it 'creates one track' do
    expect { service.call }.to change(Track, :count).by(1)
  end

  it 'returns 1 track created' do
    expect(service.call).to eq(1)
  end

  it 'sets track attributes correctly' do
    service.call
    track = Track.last
    expect(track.user).to eq(user)
    expect(track.start_at).to be_within(1.second).of(base_time)
    expect(track.end_at).to be_within(1.second).of(base_time + 10.minutes)
    expect(track.duration).to eq(600) # 10 minutes in seconds
    expect(track.original_path).to be_present
    expect(track.distance).to be > 0
    expect(track.avg_speed).to be > 0
    # Altitudes rise monotonically 10 -> 15 -> 20, so no loss is recorded.
    expect(track.elevation_gain).to eq(10) # 20 - 10
    expect(track.elevation_loss).to eq(0)
    expect(track.elevation_max).to eq(20)
    expect(track.elevation_min).to eq(10)
  end

  it 'associates points with the track' do
    service.call
    track = Track.last
    expect(points.map(&:reload).map(&:track)).to all(eq(track))
  end
end
# A time gap larger than minutes_between_routes splits the point stream
# into two separate tracks.
context 'with points that should be split by time' do
  let(:base_time) { 2.hours.ago }
  let!(:points) do
    [
      # First track
      create(:point, user: user, timestamp: base_time.to_i,
             lonlat: 'POINT(-74.0060 40.7128)'),
      create(:point, user: user, timestamp: (base_time + 5.minutes).to_i,
             lonlat: 'POINT(-74.0070 40.7130)'),
      # Gap > time threshold (default 30 minutes)
      create(:point, user: user, timestamp: (base_time + 45.minutes).to_i,
             lonlat: 'POINT(-74.0080 40.7132)'),
      create(:point, user: user, timestamp: (base_time + 50.minutes).to_i,
             lonlat: 'POINT(-74.0090 40.7134)')
    ]
  end

  it 'creates two tracks' do
    expect { service.call }.to change(Track, :count).by(2)
  end

  it 'returns 2 tracks created' do
    expect(service.call).to eq(2)
  end
end
# A spatial jump larger than meters_between_routes splits the stream even
# when the points are close in time.
context 'with points that should be split by distance' do
  let(:base_time) { 1.hour.ago }
  let!(:points) do
    [
      # First track - close points
      create(:point, user: user, timestamp: base_time.to_i,
             lonlat: 'POINT(-74.0060 40.7128)'),
      create(:point, user: user, timestamp: (base_time + 1.minute).to_i,
             lonlat: 'POINT(-74.0061 40.7129)'),
      # Far point (> distance threshold, but within time threshold)
      create(:point, user: user, timestamp: (base_time + 2.minutes).to_i,
             lonlat: 'POINT(-74.0500 40.7500)'), # ~5km away
      create(:point, user: user, timestamp: (base_time + 3.minutes).to_i,
             lonlat: 'POINT(-74.0501 40.7501)')
    ]
  end

  it 'creates two tracks' do
    expect { service.call }.to change(Track, :count).by(2)
  end
end
# The default (:replace) strategy deletes all of the user's existing tracks
# before regenerating; :none leaves them alone.
context 'with existing tracks' do
  let!(:existing_track) { create(:track, user: user) }
  let!(:points) do
    [
      create(:point, user: user, timestamp: 1.hour.ago.to_i,
             lonlat: 'POINT(-74.0060 40.7128)'),
      create(:point, user: user, timestamp: 50.minutes.ago.to_i,
             lonlat: 'POINT(-74.0070 40.7130)')
    ]
  end

  it 'destroys existing tracks and creates new ones' do
    # Net change is zero because one track is removed and one created.
    expect { service.call }.to change(Track, :count).by(0) # -1 + 1
    expect(Track.exists?(existing_track.id)).to be false
  end

  context 'with none cleaning strategy' do
    let(:service) { described_class.new(user, cleaning_strategy: :none) }

    it 'preserves existing tracks and creates new ones' do
      expect { service.call }.to change(Track, :count).by(1) # +1, existing preserved
      expect(Track.exists?(existing_track.id)).to be true
    end
  end
end
# Smoke-tests each cleaning strategy end to end.
context 'with different cleaning strategies' do
  let!(:points) do
    [
      create(:point, user: user, timestamp: 1.hour.ago.to_i,
             lonlat: 'POINT(-74.0060 40.7128)'),
      create(:point, user: user, timestamp: 50.minutes.ago.to_i,
             lonlat: 'POINT(-74.0070 40.7130)')
    ]
  end

  it 'works with replace strategy (default)' do
    service = described_class.new(user, cleaning_strategy: :replace)
    expect { service.call }.to change(Track, :count).by(1)
  end

  it 'works with daily strategy' do
    # Create points within the daily range we're testing
    start_time = 1.day.ago.beginning_of_day.to_i
    end_time = 1.day.ago.end_of_day.to_i

    # Create test points within the daily range
    create(:point, user: user, timestamp: start_time + 1.hour.to_i,
           lonlat: 'POINT(-74.0060 40.7128)')
    create(:point, user: user, timestamp: start_time + 2.hours.to_i,
           lonlat: 'POINT(-74.0070 40.7130)')

    # Create an existing track that overlaps with our time window
    existing_track = create(:track, user: user,
                            start_at: Time.zone.at(start_time - 1.hour),
                            end_at: Time.zone.at(start_time + 30.minutes))

    service = described_class.new(user, start_at: start_time, end_at: end_time, cleaning_strategy: :daily)

    # Daily cleaning should handle existing tracks properly and create new ones
    expect { service.call }.to change(Track, :count).by(0) # existing cleaned and new created
  end

  it 'works with none strategy' do
    service = described_class.new(user, cleaning_strategy: :none)
    expect { service.call }.to change(Track, :count).by(1)
  end
end
# Elevation gain/loss accumulate over consecutive altitude deltas
# (100 -> 150 -> 120 here).
context 'with mixed elevation data' do
  let!(:points) do
    [
      create(:point, user: user, timestamp: 1.hour.ago.to_i,
             lonlat: 'POINT(-74.0060 40.7128)', altitude: 100),
      create(:point, user: user, timestamp: 50.minutes.ago.to_i,
             lonlat: 'POINT(-74.0070 40.7130)', altitude: 150),
      create(:point, user: user, timestamp: 40.minutes.ago.to_i,
             lonlat: 'POINT(-74.0080 40.7132)', altitude: 120)
    ]
  end

  it 'calculates elevation correctly' do
    service.call
    track = Track.last
    expect(track.elevation_gain).to eq(50) # 150 - 100
    expect(track.elevation_loss).to eq(30) # 150 - 120
    expect(track.elevation_max).to eq(150)
    expect(track.elevation_min).to eq(100)
  end
end
# Missing altitude data must fall back to zeroed elevation statistics
# rather than raising.
context 'with points missing altitude data' do
  let!(:points) do
    [
      create(:point, user: user, timestamp: 1.hour.ago.to_i,
             lonlat: 'POINT(-74.0060 40.7128)', altitude: nil),
      create(:point, user: user, timestamp: 50.minutes.ago.to_i,
             lonlat: 'POINT(-74.0070 40.7130)', altitude: nil)
    ]
  end

  it 'uses default elevation values' do
    service.call
    track = Track.last
    expect(track.elevation_gain).to eq(0)
    expect(track.elevation_loss).to eq(0)
    expect(track.elevation_max).to eq(0)
    expect(track.elevation_min).to eq(0)
  end
end
end
describe 'private methods' do
# Splitting predicate: a new track starts when either the time gap or the
# distance to the previous point exceeds the configured thresholds.
describe '#should_start_new_track?' do
  let(:point1) { build(:point, timestamp: 1.hour.ago.to_i, lonlat: 'POINT(-74.0060 40.7128)') }
  let(:point2) { build(:point, timestamp: 50.minutes.ago.to_i, lonlat: 'POINT(-74.0070 40.7130)') }

  it 'returns false when previous point is nil' do
    result = service.send(:should_start_new_track?, point1, nil)
    expect(result).to be false
  end

  it 'returns true when time threshold is exceeded' do
    # 31 minutes after point1 — just over the 30-minute default threshold
    later_point = build(:point, timestamp: 29.minutes.ago.to_i, lonlat: 'POINT(-74.0070 40.7130)')
    result = service.send(:should_start_new_track?, later_point, point1)
    expect(result).to be true
  end

  it 'returns true when distance threshold is exceeded' do
    # ~5 km from point1 (> 500 m default threshold), only 1 minute later
    far_point = build(:point, timestamp: 59.minutes.ago.to_i, lonlat: 'POINT(-74.0500 40.7500)')
    result = service.send(:should_start_new_track?, far_point, point1)
    expect(result).to be true
  end

  it 'returns false when both thresholds are not exceeded' do
    result = service.send(:should_start_new_track?, point2, point1)
    expect(result).to be false
  end
end
describe '#calculate_distance_kilometers' do
  let(:point1) { build(:point, lonlat: 'POINT(-74.0060 40.7128)') }
  let(:point2) { build(:point, lonlat: 'POINT(-74.0070 40.7130)') }

  it 'calculates distance between two points in kilometers' do
    # Only sanity-bounds are asserted; the exact value depends on the
    # underlying geodesic distance implementation.
    distance = service.send(:calculate_distance_kilometers, point1, point2)
    expect(distance).to be > 0
    expect(distance).to be < 0.2 # Should be small distance for close points (in km)
  end
end
describe '#calculate_average_speed' do
  # Speed is km/h computed from distance in meters and duration in seconds.
  it 'calculates speed correctly' do
    # 1000 meters in 100 seconds = 10 m/s = 36 km/h
    expect(service.send(:calculate_average_speed, 1000, 100)).to eq(36.0)
  end

  it 'returns 0 for zero duration' do
    expect(service.send(:calculate_average_speed, 1000, 0)).to eq(0.0)
  end

  it 'returns 0 for zero distance' do
    expect(service.send(:calculate_average_speed, 0, 100)).to eq(0.0)
  end
end
describe '#calculate_track_distance' do
  let(:points) do
    [
      build(:point, lonlat: 'POINT(-74.0060 40.7128)'),
      build(:point, lonlat: 'POINT(-74.0070 40.7130)')
    ]
  end

  it 'stores distance in meters by default' do
    # NOTE(review): 87 is the expected distance in meters for these two
    # fixtures — brittle if the distance backend/rounding changes; confirm.
    distance = service.send(:calculate_track_distance, points)
    expect(distance).to eq(87)
  end
end
end
end

View file

@ -4,253 +4,220 @@ require 'rails_helper'
RSpec.describe Tracks::Generator do
let(:user) { create(:user) }
let(:point_loader) { double('PointLoader') }
let(:incomplete_segment_handler) { double('IncompleteSegmentHandler') }
let(:track_cleaner) { double('Cleaner') }
let(:generator) do
described_class.new(
user,
point_loader: point_loader,
incomplete_segment_handler: incomplete_segment_handler,
track_cleaner: track_cleaner
)
end
let(:safe_settings) { user.safe_settings }
before do
allow_any_instance_of(Users::SafeSettings).to receive(:meters_between_routes).and_return(500)
allow_any_instance_of(Users::SafeSettings).to receive(:minutes_between_routes).and_return(60)
allow_any_instance_of(Users::SafeSettings).to receive(:distance_unit).and_return('km')
allow(user).to receive(:safe_settings).and_return(safe_settings)
end
describe '#call' do
context 'with no points to process' do
before do
allow(track_cleaner).to receive(:cleanup)
allow(point_loader).to receive(:load_points).and_return([])
context 'with bulk mode' do
let(:generator) { described_class.new(user, mode: :bulk) }
context 'with sufficient points' do
let!(:points) { create_points_around(user: user, count: 5, base_lat: 20.0) }
it 'generates tracks from all points' do
expect { generator.call }.to change(Track, :count).by(1)
end
it 'cleans existing tracks' do
existing_track = create(:track, user: user)
generator.call
expect(Track.exists?(existing_track.id)).to be false
end
it 'associates points with created tracks' do
generator.call
expect(points.map(&:reload).map(&:track)).to all(be_present)
end
end
it 'returns 0 tracks created' do
result = generator.call
expect(result).to eq(0)
context 'with insufficient points' do
let!(:points) { create_points_around(user: user, count: 1, base_lat: 20.0) }
it 'does not create tracks' do
expect { generator.call }.not_to change(Track, :count)
end
end
it 'does not call incomplete segment handler' do
expect(incomplete_segment_handler).not_to receive(:should_finalize_segment?)
expect(incomplete_segment_handler).not_to receive(:handle_incomplete_segment)
expect(incomplete_segment_handler).not_to receive(:cleanup_processed_data)
context 'with time range' do
let!(:old_points) { create_points_around(user: user, count: 3, base_lat: 20.0, timestamp: 2.days.ago.to_i) }
let!(:new_points) { create_points_around(user: user, count: 3, base_lat: 21.0, timestamp: 1.day.ago.to_i) }
generator.call
it 'only processes points within range' do
generator = described_class.new(
user,
start_at: 1.day.ago.beginning_of_day,
end_at: 1.day.ago.end_of_day,
mode: :bulk
)
generator.call
track = Track.last
expect(track.points.count).to eq(3)
end
end
end
context 'with points that create tracks' do
let!(:points) do
[
create(:point, user: user, lonlat: 'POINT(-74.0060 40.7128)', timestamp: 1.hour.ago.to_i, latitude: 40.7128, longitude: -74.0060),
create(:point, user: user, lonlat: 'POINT(-74.0050 40.7138)', timestamp: 30.minutes.ago.to_i, latitude: 40.7138, longitude: -74.0050),
create(:point, user: user, lonlat: 'POINT(-74.0040 40.7148)', timestamp: 10.minutes.ago.to_i, latitude: 40.7148, longitude: -74.0040)
]
end
context 'with incremental mode' do
let(:generator) { described_class.new(user, mode: :incremental) }
before do
allow(track_cleaner).to receive(:cleanup)
allow(point_loader).to receive(:load_points).and_return(points)
allow(incomplete_segment_handler).to receive(:should_finalize_segment?).and_return(true)
allow(incomplete_segment_handler).to receive(:cleanup_processed_data)
context 'with untracked points' do
let!(:points) { create_points_around(user: user, count: 3, base_lat: 22.0, track_id: nil) }
it 'processes untracked points' do
expect { generator.call }.to change(Track, :count).by(1)
end
it 'associates points with created tracks' do
generator.call
expect(points.map(&:reload).map(&:track)).to all(be_present)
end
end
it 'creates tracks from segments' do
expect { generator.call }.to change { Track.count }.by(1)
context 'with end_at specified' do
let!(:early_points) { create_points_around(user: user, count: 2, base_lat: 23.0, timestamp: 2.hours.ago.to_i) }
let!(:late_points) { create_points_around(user: user, count: 2, base_lat: 24.0, timestamp: 1.hour.ago.to_i) }
it 'only processes points up to end_at' do
generator = described_class.new(user, end_at: 1.5.hours.ago, mode: :incremental)
generator.call
expect(Track.count).to eq(1)
expect(Track.first.points.count).to eq(2)
end
end
it 'returns the number of tracks created' do
result = generator.call
expect(result).to eq(1)
end
context 'without existing tracks' do
let!(:points) { create_points_around(user: user, count: 3, base_lat: 25.0) }
it 'calls cleanup on processed data' do
expect(incomplete_segment_handler).to receive(:cleanup_processed_data)
generator.call
it 'does not clean existing tracks' do
existing_track = create(:track, user: user)
generator.call
expect(Track.exists?(existing_track.id)).to be true
end
end
it 'assigns points to the created track' do
generator.call
points.each(&:reload)
track_ids = points.map(&:track_id).uniq.compact
expect(track_ids.size).to eq(1)
end
end
context 'with incomplete segments' do
let!(:points) do
[
create(:point, user: user, lonlat: 'POINT(-74.0060 40.7128)', timestamp: 5.minutes.ago.to_i, latitude: 40.7128, longitude: -74.0060),
create(:point, user: user, lonlat: 'POINT(-74.0050 40.7138)', timestamp: 4.minutes.ago.to_i, latitude: 40.7138, longitude: -74.0050)
]
end
context 'with daily mode' do
let(:today) { Date.current }
let(:generator) { described_class.new(user, start_at: today, mode: :daily) }
before do
allow(track_cleaner).to receive(:cleanup)
allow(point_loader).to receive(:load_points).and_return(points)
allow(incomplete_segment_handler).to receive(:should_finalize_segment?).and_return(false)
allow(incomplete_segment_handler).to receive(:handle_incomplete_segment)
allow(incomplete_segment_handler).to receive(:cleanup_processed_data)
let!(:today_points) { create_points_around(user: user, count: 3, base_lat: 26.0, timestamp: today.beginning_of_day.to_i) }
let!(:yesterday_points) { create_points_around(user: user, count: 3, base_lat: 27.0, timestamp: 1.day.ago.to_i) }
it 'only processes points from specified day' do
generator.call
track = Track.last
expect(track.points.count).to eq(3)
end
it 'cleans existing tracks for the day' do
existing_track = create(:track, user: user, start_at: today.beginning_of_day)
generator.call
expect(Track.exists?(existing_track.id)).to be false
end
end
context 'with empty points' do
let(:generator) { described_class.new(user, mode: :bulk) }
it 'does not create tracks' do
expect { generator.call }.not_to change { Track.count }
end
it 'handles incomplete segments' do
expect(incomplete_segment_handler).to receive(:handle_incomplete_segment).with(points)
generator.call
end
it 'returns 0 tracks created' do
result = generator.call
expect(result).to eq(0)
expect { generator.call }.not_to change(Track, :count)
end
end
context 'with mixed complete and incomplete segments' do
let!(:old_points) do
[
create(:point, user: user, lonlat: 'POINT(-74.0060 40.7128)', timestamp: 2.hours.ago.to_i, latitude: 40.7128, longitude: -74.0060),
create(:point, user: user, lonlat: 'POINT(-74.0050 40.7138)', timestamp: 1.hour.ago.to_i, latitude: 40.7138, longitude: -74.0050)
]
end
let!(:recent_points) do
[
create(:point, user: user, lonlat: 'POINT(-74.0040 40.7148)', timestamp: 3.minutes.ago.to_i, latitude: 40.7148, longitude: -74.0040),
create(:point, user: user, lonlat: 'POINT(-74.0030 40.7158)', timestamp: 2.minutes.ago.to_i, latitude: 40.7158, longitude: -74.0030)
]
end
before do
allow(track_cleaner).to receive(:cleanup)
allow(point_loader).to receive(:load_points).and_return(old_points + recent_points)
# First segment (old points) should be finalized
# Second segment (recent points) should be incomplete
call_count = 0
allow(incomplete_segment_handler).to receive(:should_finalize_segment?) do |segment_points|
call_count += 1
call_count == 1 # Only finalize first segment
end
allow(incomplete_segment_handler).to receive(:handle_incomplete_segment)
allow(incomplete_segment_handler).to receive(:cleanup_processed_data)
end
it 'creates tracks for complete segments only' do
expect { generator.call }.to change { Track.count }.by(1)
end
it 'handles incomplete segments' do
# Note: The exact behavior depends on segmentation logic
# The important thing is that the method can be called without errors
generator.call
# Test passes if no exceptions are raised
expect(true).to be_truthy
end
it 'returns the correct number of tracks created' do
result = generator.call
expect(result).to eq(1)
end
end
context 'with insufficient points for track creation' do
let!(:single_point) do
[create(:point, user: user, lonlat: 'POINT(-74.0060 40.7128)', timestamp: 1.hour.ago.to_i, latitude: 40.7128, longitude: -74.0060)]
end
context 'with threshold configuration' do
let(:generator) { described_class.new(user, mode: :bulk) }
before do
allow(track_cleaner).to receive(:cleanup)
allow(point_loader).to receive(:load_points).and_return(single_point)
allow(incomplete_segment_handler).to receive(:should_finalize_segment?).and_return(true)
allow(incomplete_segment_handler).to receive(:cleanup_processed_data)
allow(safe_settings).to receive(:meters_between_routes).and_return(1000)
allow(safe_settings).to receive(:minutes_between_routes).and_return(90)
end
it 'does not create tracks with less than 2 points' do
expect { generator.call }.not_to change { Track.count }
end
it 'returns 0 tracks created' do
result = generator.call
expect(result).to eq(0)
it 'uses configured thresholds' do
expect(generator.send(:distance_threshold_meters)).to eq(1000)
expect(generator.send(:time_threshold_minutes)).to eq(90)
end
end
context 'error handling' do
before do
allow(track_cleaner).to receive(:cleanup)
allow(point_loader).to receive(:load_points).and_raise(StandardError, 'Point loading failed')
end
it 'propagates errors from point loading' do
expect { generator.call }.to raise_error(StandardError, 'Point loading failed')
context 'with invalid mode' do
it 'raises argument error' do
expect do
described_class.new(user, mode: :invalid).call
end.to raise_error(ArgumentError, /Unknown mode/)
end
end
end
describe 'strategy pattern integration' do
context 'with bulk processing strategies' do
let(:bulk_loader) { Tracks::PointLoaders::BulkLoader.new(user) }
let(:ignore_handler) { Tracks::IncompleteSegmentHandlers::IgnoreHandler.new(user) }
let(:replace_cleaner) { Tracks::Cleaners::ReplaceCleaner.new(user) }
describe 'segmentation behavior' do
let(:generator) { described_class.new(user, mode: :bulk) }
let(:bulk_generator) do
described_class.new(
user,
point_loader: bulk_loader,
incomplete_segment_handler: ignore_handler,
track_cleaner: replace_cleaner
)
context 'with points exceeding time threshold' do
let!(:points) do
[
create_points_around(user: user, count: 1, base_lat: 29.0, timestamp: 90.minutes.ago.to_i),
create_points_around(user: user, count: 1, base_lat: 29.0, timestamp: 60.minutes.ago.to_i),
# Gap exceeds threshold 👇👇👇
create_points_around(user: user, count: 1, base_lat: 29.0, timestamp: 10.minutes.ago.to_i),
create_points_around(user: user, count: 1, base_lat: 29.0, timestamp: Time.current.to_i)
]
end
let!(:existing_track) { create(:track, user: user) }
let!(:points) do
[
create(:point, user: user, lonlat: 'POINT(-74.0060 40.7128)', timestamp: 1.hour.ago.to_i, latitude: 40.7128, longitude: -74.0060),
create(:point, user: user, lonlat: 'POINT(-74.0050 40.7138)', timestamp: 30.minutes.ago.to_i, latitude: 40.7138, longitude: -74.0050)
]
end
it 'behaves like bulk processing' do
initial_count = Track.count
bulk_generator.call
# Bulk processing replaces existing tracks with new ones
# The final count depends on how many valid tracks can be created from the points
expect(Track.count).to be >= 0
end
end
context 'with incremental processing strategies' do
let(:incremental_loader) { Tracks::PointLoaders::IncrementalLoader.new(user) }
let(:buffer_handler) { Tracks::IncompleteSegmentHandlers::BufferHandler.new(user, Date.current, 5) }
let(:noop_cleaner) { Tracks::Cleaners::NoOpCleaner.new(user) }
let(:incremental_generator) do
described_class.new(
user,
point_loader: incremental_loader,
incomplete_segment_handler: buffer_handler,
track_cleaner: noop_cleaner
)
end
let!(:existing_track) { create(:track, user: user) }
before do
# Mock the incremental loader to return some points
allow(incremental_loader).to receive(:load_points).and_return([])
allow(safe_settings).to receive(:minutes_between_routes).and_return(45)
end
it 'behaves like incremental processing' do
expect { incremental_generator.call }.not_to change { Track.count }
it 'creates separate tracks for segments' do
expect { generator.call }.to change(Track, :count).by(2)
end
end
context 'with points exceeding distance threshold' do
let!(:points) do
[
create_points_around(user: user, count: 2, base_lat: 29.0, timestamp: 20.minutes.ago.to_i),
create_points_around(user: user, count: 2, base_lat: 29.0, timestamp: 15.minutes.ago.to_i),
# Large distance jump 👇👇👇
create_points_around(user: user, count: 2, base_lat: 28.0, timestamp: 10.minutes.ago.to_i),
create_points_around(user: user, count: 1, base_lat: 28.0, timestamp: Time.current.to_i)
]
end
before do
allow(safe_settings).to receive(:meters_between_routes).and_return(200)
end
it 'creates separate tracks for segments' do
expect { generator.call }.to change(Track, :count).by(2)
end
end
end
# Regression guard: bulk and incremental generation must converge on the
# same set of tracks for identical input points.
describe 'deterministic behavior' do
  let!(:points) { create_points_around(user: user, count: 10, base_lat: 28.0) }

  it 'produces same results for bulk and incremental modes' do
    # Generate tracks in bulk mode
    bulk_generator = described_class.new(user, mode: :bulk)
    bulk_generator.call
    bulk_tracks = user.tracks.order(:start_at).to_a

    # Clear tracks and generate incrementally
    user.tracks.destroy_all
    incremental_generator = described_class.new(user, mode: :incremental)
    incremental_generator.call
    incremental_tracks = user.tracks.order(:start_at).to_a

    # Should have same number of tracks
    expect(incremental_tracks.size).to eq(bulk_tracks.size)

    # Should have same track boundaries (allowing for small timing differences)
    bulk_tracks.zip(incremental_tracks).each do |bulk_track, incremental_track|
      expect(incremental_track.start_at).to be_within(1.second).of(bulk_track.start_at)
      expect(incremental_track.end_at).to be_within(1.second).of(bulk_track.end_at)
      expect(incremental_track.distance).to be_within(10).of(bulk_track.distance)
    end
  end
end

View file

@ -0,0 +1,249 @@
# frozen_string_literal: true
require 'rails_helper'
RSpec.describe Tracks::IncrementalProcessor do
let(:user) { create(:user) }
let(:safe_settings) { user.safe_settings }
before do
allow(user).to receive(:safe_settings).and_return(safe_settings)
allow(safe_settings).to receive(:minutes_between_routes).and_return(30)
allow(safe_settings).to receive(:meters_between_routes).and_return(500)
end
describe '#call' do
  context 'with imported points' do
    let(:imported_point) { create(:point, user: user, import: create(:import)) }
    let(:processor) { described_class.new(user, imported_point) }

    # Imported points are backfilled in bulk elsewhere; running the
    # incremental path per imported point would enqueue one job per point.
    it 'does not process imported points' do
      expect(Tracks::CreateJob).not_to receive(:perform_later)
      processor.call
    end
  end

  context 'with first point for user' do
    let(:new_point) { create(:point, user: user) }
    let(:processor) { described_class.new(user, new_point) }

    # With no prior point there is nothing to compare thresholds against,
    # so processing is kicked off unconditionally with open-ended bounds.
    it 'processes first point' do
      expect(Tracks::CreateJob).to receive(:perform_later)
        .with(user.id, start_at: nil, end_at: nil, mode: :none)
      processor.call
    end
  end

  context 'with thresholds exceeded' do
    let(:previous_point) { create(:point, user: user, timestamp: 1.hour.ago.to_i) }
    let(:new_point) { create(:point, user: user, timestamp: Time.current.to_i) }
    let(:processor) { described_class.new(user, new_point) }

    before do
      # Create previous point first
      previous_point
    end

    # 1 hour gap > 30-minute threshold configured in the top-level before block.
    it 'processes when time threshold exceeded' do
      expect(Tracks::CreateJob).to receive(:perform_later)
        .with(user.id, start_at: nil, end_at: Time.at(previous_point.timestamp), mode: :none)
      processor.call
    end
  end

  context 'with existing tracks' do
    let(:existing_track) { create(:track, user: user, end_at: 2.hours.ago) }
    let(:previous_point) { create(:point, user: user, timestamp: 1.hour.ago.to_i) }
    let(:new_point) { create(:point, user: user, timestamp: Time.current.to_i) }
    let(:processor) { described_class.new(user, new_point) }

    before do
      # Materialize records in chronological order before exercising the processor.
      existing_track
      previous_point
    end

    # Processing resumes where the last finished track left off.
    it 'uses existing track end time as start_at' do
      expect(Tracks::CreateJob).to receive(:perform_later)
        .with(user.id, start_at: existing_track.end_at, end_at: Time.at(previous_point.timestamp), mode: :none)
      processor.call
    end
  end

  context 'with distance threshold exceeded' do
    let(:previous_point) do
      create(:point, user: user, timestamp: 10.minutes.ago.to_i, lonlat: 'POINT(0 0)')
    end
    let(:new_point) do
      create(:point, user: user, timestamp: Time.current.to_i, lonlat: 'POINT(1 1)')
    end
    let(:processor) { described_class.new(user, new_point) }

    before do
      # Create previous point first
      previous_point
      # Mock distance calculation to exceed threshold
      allow_any_instance_of(Point).to receive(:distance_to).and_return(1.0) # 1 km = 1000m
    end

    # 1000 m > 500 m threshold even though the 10-minute gap is under the time limit.
    it 'processes when distance threshold exceeded' do
      expect(Tracks::CreateJob).to receive(:perform_later)
        .with(user.id, start_at: nil, end_at: Time.at(previous_point.timestamp), mode: :none)
      processor.call
    end
  end

  context 'with thresholds not exceeded' do
    let(:previous_point) { create(:point, user: user, timestamp: 10.minutes.ago.to_i) }
    let(:new_point) { create(:point, user: user, timestamp: Time.current.to_i) }
    let(:processor) { described_class.new(user, new_point) }

    before do
      # Create previous point first
      previous_point
      # Mock distance to be within threshold
      allow_any_instance_of(Point).to receive(:distance_to).and_return(0.1) # 100m
    end

    it 'does not process when thresholds not exceeded' do
      expect(Tracks::CreateJob).not_to receive(:perform_later)
      processor.call
    end
  end
end
# Exercises the private predicate directly via #send to pin its decision
# logic independently of job enqueueing.
describe '#should_process?' do
  let(:processor) { described_class.new(user, new_point) }

  context 'with imported point' do
    let(:new_point) { create(:point, user: user, import: create(:import)) }

    it 'returns false' do
      expect(processor.send(:should_process?)).to be false
    end
  end

  context 'with first point for user' do
    let(:new_point) { create(:point, user: user) }

    it 'returns true' do
      expect(processor.send(:should_process?)).to be true
    end
  end

  context 'with thresholds exceeded' do
    let(:previous_point) { create(:point, user: user, timestamp: 1.hour.ago.to_i) }
    let(:new_point) { create(:point, user: user, timestamp: Time.current.to_i) }

    before do
      previous_point # Create previous point
    end

    it 'returns true when time threshold exceeded' do
      expect(processor.send(:should_process?)).to be true
    end
  end

  context 'with thresholds not exceeded' do
    let(:previous_point) { create(:point, user: user, timestamp: 10.minutes.ago.to_i) }
    let(:new_point) { create(:point, user: user, timestamp: Time.current.to_i) }

    before do
      previous_point # Create previous point
      allow_any_instance_of(Point).to receive(:distance_to).and_return(0.1) # 100m
    end

    it 'returns false when thresholds not exceeded' do
      expect(processor.send(:should_process?)).to be false
    end
  end
end
# The split decision is an OR of the time and distance thresholds;
# Point#distance_to is stubbed in kilometres (converted to metres internally).
describe '#exceeds_thresholds?' do
  let(:processor) { described_class.new(user, new_point) }
  let(:previous_point) { create(:point, user: user, timestamp: 1.hour.ago.to_i) }
  let(:new_point) { create(:point, user: user, timestamp: Time.current.to_i) }

  context 'with time threshold exceeded' do
    before do
      # 60-minute gap between points vs a 30-minute limit.
      allow(safe_settings).to receive(:minutes_between_routes).and_return(30)
    end

    it 'returns true' do
      result = processor.send(:exceeds_thresholds?, previous_point, new_point)
      expect(result).to be true
    end
  end

  context 'with distance threshold exceeded' do
    before do
      allow(safe_settings).to receive(:minutes_between_routes).and_return(120) # 2 hours
      allow(safe_settings).to receive(:meters_between_routes).and_return(400)
      allow_any_instance_of(Point).to receive(:distance_to).and_return(0.5) # 500m
    end

    it 'returns true' do
      result = processor.send(:exceeds_thresholds?, previous_point, new_point)
      expect(result).to be true
    end
  end

  context 'with neither threshold exceeded' do
    before do
      allow(safe_settings).to receive(:minutes_between_routes).and_return(120) # 2 hours
      allow(safe_settings).to receive(:meters_between_routes).and_return(600)
      allow_any_instance_of(Point).to receive(:distance_to).and_return(0.1) # 100m
    end

    it 'returns false' do
      result = processor.send(:exceeds_thresholds?, previous_point, new_point)
      expect(result).to be false
    end
  end
end
describe '#time_difference_minutes' do
  let(:earlier_point) { create(:point, user: user, timestamp: 1.hour.ago.to_i) }
  let(:later_point) { create(:point, user: user, timestamp: Time.current.to_i) }
  let(:new_point) { later_point }
  let(:processor) { described_class.new(user, new_point) }

  it 'calculates time difference in minutes' do
    minutes = processor.send(:time_difference_minutes, earlier_point, later_point)
    # One hour apart; tolerance absorbs the wall-clock drift between lets.
    expect(minutes).to be_within(1).of(60)
  end
end
describe '#distance_difference_meters' do
  let(:origin_point) { create(:point, user: user) }
  let(:new_point) { create(:point, user: user) }
  let(:processor) { described_class.new(user, new_point) }

  before do
    # Point#distance_to reports kilometres; stub 1.5 km between the points.
    allow(origin_point).to receive(:distance_to).with(new_point).and_return(1.5)
  end

  it 'calculates distance difference in meters' do
    meters = processor.send(:distance_difference_meters, origin_point, new_point)
    # 1.5 km converted to metres.
    expect(meters).to eq(1500)
  end
end
# Confirms the processor reads its limits from the user's safe_settings
# rather than hard-coding them.
describe 'threshold configuration' do
  let(:processor) { described_class.new(user, create(:point, user: user)) }

  before do
    allow(safe_settings).to receive(:minutes_between_routes).and_return(45)
    allow(safe_settings).to receive(:meters_between_routes).and_return(750)
  end

  it 'uses configured time threshold' do
    expect(processor.send(:time_threshold_minutes)).to eq(45)
  end

  it 'uses configured distance threshold' do
    expect(processor.send(:distance_threshold_meters)).to eq(750)
  end
end
end

View file

@ -1,238 +0,0 @@
# frozen_string_literal: true
require 'rails_helper'
RSpec.describe Tracks::RedisBuffer do
let(:user_id) { 123 }
let(:day) { Date.current }
let(:buffer) { described_class.new(user_id, day) }
describe '#initialize' do
  it 'stores user_id and converts day to Date' do
    expect(buffer.user_id).to eq(user_id)
    expect(buffer.day).to eq(day)
    expect(buffer.day).to be_a(Date)
  end

  it 'handles string date input' do
    buffer = described_class.new(user_id, '2024-01-15')
    expect(buffer.day).to eq(Date.parse('2024-01-15'))
  end

  it 'handles Time input' do
    time = Time.current
    buffer = described_class.new(user_id, time)
    expect(buffer.day).to eq(time.to_date)
  end
end

describe '#store' do
  let(:user) { create(:user) }
  let!(:points) do
    [
      create(:point, user: user, lonlat: 'POINT(-74.0060 40.7128)', timestamp: 1.hour.ago.to_i),
      create(:point, user: user, lonlat: 'POINT(-74.0070 40.7130)', timestamp: 30.minutes.ago.to_i)
    ]
  end

  it 'stores points in Redis cache' do
    # Entries expire after a week so abandoned buffers do not accumulate.
    expect(Rails.cache).to receive(:write).with(
      "track_buffer:#{user_id}:#{day.strftime('%Y-%m-%d')}",
      anything,
      expires_in: 7.days
    )
    buffer.store(points)
  end

  it 'serializes points correctly' do
    buffer.store(points)
    stored_data = Rails.cache.read("track_buffer:#{user_id}:#{day.strftime('%Y-%m-%d')}")
    expect(stored_data).to be_an(Array)
    expect(stored_data.size).to eq(2)
    # Points are stored as plain hashes, not ActiveRecord objects.
    first_point = stored_data.first
    expect(first_point[:id]).to eq(points.first.id)
    expect(first_point[:timestamp]).to eq(points.first.timestamp)
    expect(first_point[:lat]).to eq(points.first.lat)
    expect(first_point[:lon]).to eq(points.first.lon)
    expect(first_point[:user_id]).to eq(points.first.user_id)
  end

  it 'does nothing when given empty array' do
    expect(Rails.cache).not_to receive(:write)
    buffer.store([])
  end

  it 'logs debug message when storing points' do
    expect(Rails.logger).to receive(:debug).with(
      "Stored 2 points in buffer for user #{user_id}, day #{day}"
    )
    buffer.store(points)
  end
end
describe '#retrieve' do
  context 'when buffer exists' do
    # Shape mirrors what #store serializes: an array of point-attribute hashes.
    let(:stored_data) do
      [
        {
          id: 1,
          lonlat: 'POINT(-74.0060 40.7128)',
          timestamp: 1.hour.ago.to_i,
          lat: 40.7128,
          lon: -74.0060,
          altitude: 100,
          velocity: 5.0,
          battery: 80,
          user_id: user_id
        },
        {
          id: 2,
          lonlat: 'POINT(-74.0070 40.7130)',
          timestamp: 30.minutes.ago.to_i,
          lat: 40.7130,
          lon: -74.0070,
          altitude: 105,
          velocity: 6.0,
          battery: 75,
          user_id: user_id
        }
      ]
    end

    before do
      Rails.cache.write(
        "track_buffer:#{user_id}:#{day.strftime('%Y-%m-%d')}",
        stored_data
      )
    end

    it 'returns the stored point data' do
      result = buffer.retrieve
      expect(result).to eq(stored_data)
      expect(result.size).to eq(2)
    end
  end

  context 'when buffer does not exist' do
    it 'returns empty array' do
      result = buffer.retrieve
      expect(result).to eq([])
    end
  end

  context 'when Redis read fails' do
    before do
      allow(Rails.cache).to receive(:read).and_raise(StandardError.new('Redis error'))
    end

    # Buffer loss is tolerable; a cache outage must not crash callers.
    it 'returns empty array and logs error' do
      expect(Rails.logger).to receive(:error).with(
        "Failed to retrieve buffered points for user #{user_id}, day #{day}: Redis error"
      )
      result = buffer.retrieve
      expect(result).to eq([])
    end
  end
end

describe '#clear' do
  before do
    Rails.cache.write(
      "track_buffer:#{user_id}:#{day.strftime('%Y-%m-%d')}",
      [{ id: 1, timestamp: 1.hour.ago.to_i }]
    )
  end

  it 'deletes the buffer from cache' do
    buffer.clear
    expect(Rails.cache.read("track_buffer:#{user_id}:#{day.strftime('%Y-%m-%d')}")).to be_nil
  end

  it 'logs debug message' do
    expect(Rails.logger).to receive(:debug).with(
      "Cleared buffer for user #{user_id}, day #{day}"
    )
    buffer.clear
  end
end

describe '#exists?' do
  context 'when buffer exists' do
    before do
      Rails.cache.write(
        "track_buffer:#{user_id}:#{day.strftime('%Y-%m-%d')}",
        [{ id: 1 }]
      )
    end

    it 'returns true' do
      expect(buffer.exists?).to be true
    end
  end

  context 'when buffer does not exist' do
    it 'returns false' do
      expect(buffer.exists?).to be false
    end
  end
end
describe 'buffer key generation' do
  it 'generates correct Redis key format' do
    expected_key = "track_buffer:#{user_id}:#{day.strftime('%Y-%m-%d')}"
    # Access private method for testing
    actual_key = buffer.send(:buffer_key)
    expect(actual_key).to eq(expected_key)
  end

  # Same calendar day must map to the same key regardless of the input type,
  # otherwise buffered points would be split across keys.
  it 'handles different date formats consistently' do
    date_as_string = '2024-03-15'
    date_as_date = Date.parse(date_as_string)
    buffer1 = described_class.new(user_id, date_as_string)
    buffer2 = described_class.new(user_id, date_as_date)
    expect(buffer1.send(:buffer_key)).to eq(buffer2.send(:buffer_key))
  end
end

# Full round trip: store -> exists? -> retrieve -> clear.
describe 'integration test' do
  let(:user) { create(:user) }
  let!(:points) do
    [
      create(:point, user: user, lonlat: 'POINT(-74.0060 40.7128)', timestamp: 2.hours.ago.to_i),
      create(:point, user: user, lonlat: 'POINT(-74.0070 40.7130)', timestamp: 1.hour.ago.to_i)
    ]
  end

  it 'stores and retrieves points correctly' do
    # Store points
    buffer.store(points)
    expect(buffer.exists?).to be true
    # Retrieve points
    retrieved_points = buffer.retrieve
    expect(retrieved_points.size).to eq(2)
    # Verify data integrity
    expect(retrieved_points.first[:id]).to eq(points.first.id)
    expect(retrieved_points.last[:id]).to eq(points.last.id)
    # Clear buffer
    buffer.clear
    expect(buffer.exists?).to be false
    expect(buffer.retrieve).to eq([])
  end
end
end

View file

@ -116,11 +116,11 @@ RSpec.describe Tracks::TrackBuilder do
it 'builds path using Tracks::BuildPath service' do
expect(Tracks::BuildPath).to receive(:new).with(
points.map(&:lonlat)
points
).and_call_original
result = builder.build_path(points)
expect(result).to respond_to(:as_text) # RGeo geometry object
expect(result).to respond_to(:as_text)
end
end

View file

@ -0,0 +1,20 @@
# frozen_string_literal: true
module PointHelpers
  # Builds `count` factory points for `user`, each step moving ~100 m
  # further north (0.0009 degrees of latitude per point) from the base
  # coordinates.
  #
  # `timestamp:` accepts three forms: nil (each point defaults to one
  # minute earlier per index), a fixed value applied to every point, or
  # a callable invoked with the point index.
  def create_points_around(user:, count:, base_lat: 20.0, base_lon: 10.0, timestamp: nil, **attrs)
    (0...count).map do |index|
      explicit_ts = timestamp.respond_to?(:call) ? timestamp.call(index) : timestamp
      create(
        :point,
        user: user,
        timestamp: explicit_ts || (Time.current - index.minutes).to_i,
        lonlat: "POINT(#{base_lon} #{base_lat + index * 0.0009})",
        **attrs
      )
    end
  end
end
RSpec.configure do |config|
  # Make the point-building helper available in every example group.
  config.include PointHelpers
end