Compare commits

...

7 commits

Author SHA1 Message Date
Eugene Burmakin
9d616c7957 Remove logging from tracks generator 2025-07-17 21:02:45 +02:00
Eugene Burmakin
7cdb7d2f21 Add some more tests to make sure points are properly cleaned up 2025-07-17 20:57:55 +02:00
Eugene Burmakin
dc8460a948 Fix tracks create job spec 2025-07-17 20:46:07 +02:00
Eugene Burmakin
91f4cf7c7a Fix range objects in generator 2025-07-17 20:36:21 +02:00
Eugene Burmakin
f5ef2ab9ef Fix potential issue with time range data types 2025-07-17 20:20:14 +02:00
Eugene Burmakin
1f5325d9bb Remove doc file 2025-07-17 19:22:50 +02:00
Eugene Burmakin
10777714b1 Clean up a bit 2025-07-17 19:19:50 +02:00
10 changed files with 133 additions and 536 deletions

View file

@@ -1,11 +1,6 @@
# frozen_string_literal: true
# Lightweight cleanup job that runs weekly to catch any missed track generation.
# This replaces the daily bulk creation job with a more targeted approach.
#
# Instead of processing all users daily, this job only processes users who have
# untracked points that are older than a threshold (e.g., 1 day), indicating
# they may have been missed by incremental processing.
#
# This provides a safety net while avoiding the overhead of daily bulk processing.
class Tracks::CleanupJob < ApplicationJob

View file

@@ -5,26 +5,21 @@ class Tracks::CreateJob < ApplicationJob
def perform(user_id, start_at: nil, end_at: nil, mode: :daily)
user = User.find(user_id)
# Translate mode parameter to Generator mode
generator_mode = case mode
when :daily then :daily
when :none then :incremental
else :bulk
end
# Count tracks before generation
tracks_before = user.tracks.count
Tracks::Generator.new(
# Generate tracks and get the count of tracks created
tracks_created = Tracks::Generator.new(
user,
start_at: start_at,
end_at: end_at,
mode: generator_mode
).call
# Calculate tracks created
tracks_created = user.tracks.count - tracks_before
create_success_notification(user, tracks_created)
rescue StandardError => e

View file

@@ -1,7 +1,5 @@
# frozen_string_literal: true
# Simplified track generation service that replaces the complex strategy pattern.
#
# This service handles both bulk and incremental track generation using a unified
# approach with different modes:
#
@@ -9,10 +7,6 @@
# - :incremental - Processes untracked points up to a specified end time
# - :daily - Processes tracks on a daily basis
#
# The service maintains the same core logic as the original system but simplifies
# the architecture by removing the multiple strategy classes in favor of
# mode-based configuration.
#
# Key features:
# - Deterministic results (same algorithm for all modes)
# - Simple incremental processing without buffering complexity
@@ -48,23 +42,26 @@ class Tracks::Generator
points = load_points
Rails.logger.debug "Generator: loaded #{points.size} points for user #{user.id} in #{mode} mode"
return if points.empty?
return 0 if points.empty?
segments = split_points_into_segments(points)
Rails.logger.debug "Generator: created #{segments.size} segments"
segments.each { |segment| create_track_from_segment(segment) }
tracks_created = 0
segments.each do |segment|
track = create_track_from_segment(segment)
tracks_created += 1 if track
end
Rails.logger.info "Generated #{segments.size} tracks for user #{user.id} in #{mode} mode"
Rails.logger.info "Generated #{tracks_created} tracks for user #{user.id} in #{mode} mode"
tracks_created
end
private
def should_clean_tracks?
case mode
when :bulk then true
when :daily then true
when :incremental then false
when :bulk, :daily then true
else false
end
end
@@ -81,7 +78,8 @@ class Tracks::Generator
def load_bulk_points
scope = user.tracked_points.order(:timestamp)
scope = scope.where(timestamp: time_range) if time_range_defined?
scope = scope.where(timestamp: timestamp_range) if time_range_defined?
scope
end
@@ -90,11 +88,13 @@ class Tracks::Generator
# If end_at is specified, only process points up to that time
scope = user.tracked_points.where(track_id: nil).order(:timestamp)
scope = scope.where(timestamp: ..end_at.to_i) if end_at.present?
scope
end
def load_daily_points
day_range = daily_time_range
user.tracked_points.where(timestamp: day_range).order(:timestamp)
end
@@ -114,7 +114,31 @@ class Tracks::Generator
def time_range
return nil unless time_range_defined?
Time.at(start_at&.to_i)..Time.at(end_at&.to_i)
start_time = start_at&.to_i
end_time = end_at&.to_i
if start_time && end_time
Time.zone.at(start_time)..Time.zone.at(end_time)
elsif start_time
Time.zone.at(start_time)..
elsif end_time
..Time.zone.at(end_time)
end
end
# Builds an integer (epoch-seconds) range over the configured window,
# suitable for querying the integer `timestamp` column directly.
#
# Returns:
# - nil when no time range is configured (or both bounds resolve to nil,
#   matching the previous implementation's implicit nil fall-through);
# - a beginless or endless Range when only one bound is present;
# - a closed Range when both bounds are present.
def timestamp_range
  return nil unless time_range_defined?

  first = start_at&.to_i
  last = end_at&.to_i
  # Preserve the original behavior of returning nil (not nil..nil)
  # when neither bound is usable.
  return nil if first.nil? && last.nil?

  # Range.new accepts nil bounds natively (beginless/endless ranges,
  # Ruby 2.7+ — the file already uses `..end_at.to_i` literals), which
  # replaces the previous three-way branch on bound presence.
  Range.new(first, last)
end
def daily_time_range
@@ -128,10 +152,10 @@ class Tracks::Generator
def clean_existing_tracks
case mode
when :bulk
clean_bulk_tracks
when :daily
clean_daily_tracks
when :bulk then clean_bulk_tracks
when :daily then clean_daily_tracks
else
raise ArgumentError, "Unknown mode: #{mode}"
end
end
@@ -139,16 +163,15 @@ class Tracks::Generator
scope = user.tracks
scope = scope.where(start_at: time_range) if time_range_defined?
deleted_count = scope.delete_all
Rails.logger.info "Deleted #{deleted_count} existing tracks for user #{user.id}"
scope.destroy_all
end
def clean_daily_tracks
day_range_times = daily_time_range.map { |timestamp| Time.at(timestamp) }
range = Range.new(day_range_times.first, day_range_times.last)
day_range = daily_time_range
range = Time.zone.at(day_range.begin)..Time.zone.at(day_range.end)
deleted_count = user.tracks.where(start_at: range).delete_all
Rails.logger.info "Deleted #{deleted_count} daily tracks for user #{user.id}"
scope = user.tracks.where(start_at: range)
scope.destroy_all
end
# Threshold methods from safe_settings

View file

@@ -66,7 +66,7 @@ class Tracks::IncrementalProcessor
end
def find_end_time
previous_point ? Time.at(previous_point.timestamp) : nil
previous_point ? Time.zone.at(previous_point.timestamp) : nil
end
def exceeds_thresholds?(previous_point, current_point)

View file

@@ -15,12 +15,11 @@ class CreateTracksFromPoints < ActiveRecord::Migration[8.0]
# Use explicit parameters for bulk historical processing:
# - No time limits (start_at: nil, end_at: nil) = process ALL historical data
# - Replace strategy = clean slate, removes any existing tracks first
Tracks::CreateJob.perform_later(
user.id,
start_at: nil,
end_at: nil,
mode: :daily
mode: :bulk
)
processed_users += 1

View file

@@ -1,480 +0,0 @@
# Dawarich Tracks Feature Overview
## Table of Contents
- [Introduction](#introduction)
- [Architecture Overview](#architecture-overview)
- [Core Components](#core-components)
- [Data Flow](#data-flow)
- [Configuration](#configuration)
- [Usage Examples](#usage-examples)
- [API Reference](#api-reference)
- [Development Guidelines](#development-guidelines)
## Introduction
The Dawarich Tracks feature automatically converts raw GPS points into meaningful movement tracks. It analyzes sequences of location points to identify distinct journeys, providing users with structured visualizations of their movement patterns.
### Key Features
- **Automatic Track Generation**: Converts GPS points into coherent movement tracks
- **Real-time Processing**: Incremental track generation as new points arrive
- **Configurable Thresholds**: User-customizable time and distance parameters
- **Multiple Generation Modes**: Bulk, incremental, and daily processing
- **Rich Statistics**: Distance, speed, elevation, and duration metrics
- **Live Updates**: Real-time track updates via WebSocket connections
## Architecture Overview
```mermaid
graph TB
A[GPS Points] --> B[Incremental Processor]
B --> C[Threshold Check]
C --> D{Exceeds Thresholds?}
D -->|Yes| E[Tracks Generator]
D -->|No| F[Skip Processing]
E --> G[Segmentation Engine]
G --> H[Track Builder]
H --> I[Database]
I --> J[Real-time Broadcasting]
J --> K[Frontend Updates]
```
## Core Components
### 1. Models
#### Track Model
```ruby
# app/models/track.rb
class Track < ApplicationRecord
belongs_to :user
has_many :points, dependent: :nullify
# Attributes
# start_at, end_at (DateTime)
# distance (Integer, meters)
# avg_speed (Float, km/h)
# duration (Integer, seconds)
# elevation_gain/loss/max/min (Integer, meters)
# original_path (PostGIS LineString)
end
```
#### Point Model
```ruby
# app/models/point.rb
class Point < ApplicationRecord
belongs_to :track, optional: true
belongs_to :user
# Triggers incremental track generation via background job
after_create_commit :trigger_incremental_track_generation
private
def trigger_incremental_track_generation
Tracks::IncrementalCheckJob.perform_later(user.id, id)
end
end
```
### 2. Services
#### Tracks::Generator
**Purpose**: Unified track generation service with multiple modes
```ruby
# Usage
Tracks::Generator.new(user, mode: :bulk).call
Tracks::Generator.new(user, mode: :incremental, end_at: Time.current).call
Tracks::Generator.new(user, mode: :daily, start_at: Date.current).call
```
**Modes**:
- `:bulk` - Regenerates all tracks from scratch (replaces existing)
- `:incremental` - Processes only untracked points up to specified time
- `:daily` - Processes tracks on daily basis with cleanup
#### Tracks::IncrementalProcessor
**Purpose**: Analyzes new points and triggers track generation when thresholds are exceeded
```ruby
# Automatically called when new points are created
Tracks::IncrementalProcessor.new(user, new_point).call
```
#### Tracks::Segmentation
**Purpose**: Core algorithm for splitting GPS points into meaningful segments
**Criteria**:
- **Time threshold**: Configurable minutes gap (default: 30 minutes)
- **Distance threshold**: Configurable meters jump (default: 500 meters)
- **Minimum segment size**: 2 points required for valid track
#### Tracks::TrackBuilder
**Purpose**: Converts point arrays into Track records with calculated statistics
**Statistics Calculated**:
- **Distance**: Always stored in meters as integers
- **Duration**: Total time in seconds between first and last point
- **Average Speed**: Calculated in km/h regardless of user preference
- **Elevation Metrics**: Gain, loss, maximum, minimum in meters
### 3. Background Jobs
#### Tracks::IncrementalCheckJob
- **Purpose**: Lightweight job triggered by point creation
- **Queue**: `tracks` for dedicated processing
- **Trigger**: Automatically enqueued when non-import points are created
- **Function**: Checks thresholds and conditionally triggers track generation
#### Tracks::CreateJob
- **Purpose**: Main orchestration job for track creation
- **Features**: User notifications on success/failure
- **Incremental Usage**: Enqueued by IncrementalCheckJob when thresholds are exceeded
- **Parameters**: `user_id`, `start_at`, `end_at`, `mode`
#### Tracks::CleanupJob
- **Purpose**: Weekly cleanup of missed track generation
- **Schedule**: Runs weekly on Sunday at 02:00 via cron
- **Strategy**: Processes only users with old untracked points (1+ days old)
### 4. Real-time Features
#### TracksChannel (ActionCable)
```javascript
// Real-time track updates
consumer.subscriptions.create("TracksChannel", {
received(data) {
// Handle track created/updated/destroyed events
}
});
```
## Data Flow
### 1. Point Creation Flow
```
New Point Created → IncrementalCheckJob → Incremental Processor → Threshold Check →
(if exceeded) → CreateJob → Track Generation → Database Update →
User Notification → Real-time Broadcast → Frontend Update
```
### 2. Bulk Processing Flow
```
Scheduled Job → Load Historical Points → Segmentation →
Track Creation → Statistics Calculation → Database Batch Update
```
### 3. Incremental Processing Flow
```
New Point → IncrementalCheckJob → Find Previous Point → Calculate Time/Distance Gaps →
(if thresholds exceeded) → CreateJob(start_at: last_track_end, end_at: previous_point_time) →
Process Untracked Points → Create Tracks → User Notification
```
## Configuration
### User Settings
Tracks behavior is controlled by user-configurable settings in `Users::SafeSettings`:
```ruby
# Default values
{
'meters_between_routes' => 500, # Distance threshold
'minutes_between_routes' => 30, # Time threshold
'route_opacity' => 60, # Visual opacity
'distance_unit' => 'km' # Display unit (km/mi)
}
```
### Threshold Configuration
```ruby
# Time threshold: Gap longer than X minutes = new track
user.safe_settings.minutes_between_routes # default: 30
# Distance threshold: Jump larger than X meters = new track
user.safe_settings.meters_between_routes # default: 500
# Access in services
def time_threshold_minutes
user.safe_settings.minutes_between_routes.to_i
end
```
### Background Job Schedule
```yaml
# config/schedule.yml
tracks_cleanup_job:
cron: '0 2 * * 0' # Weekly on Sunday at 02:00
class: Tracks::CleanupJob
```
## Usage Examples
### 1. Manual Track Generation
```ruby
# Bulk regeneration (replaces all existing tracks)
Tracks::Generator.new(user, mode: :bulk).call
# Process specific date range
Tracks::Generator.new(
user,
start_at: 1.week.ago,
end_at: Time.current,
mode: :bulk
).call
# Daily processing
Tracks::Generator.new(
user,
start_at: Date.current,
mode: :daily
).call
```
### 2. Incremental Processing
```ruby
# Triggered automatically when points are created
point = Point.create!(
user: user,
timestamp: Time.current.to_i,
lonlat: 'POINT(-122.4194 37.7749)'
)
# → Automatically enqueues IncrementalCheckJob
# → Job checks thresholds and conditionally triggers track generation
```
### 3. Background Job Management
```ruby
# Enqueue incremental check (automatically triggered by point creation)
Tracks::IncrementalCheckJob.perform_later(user.id, point.id)
# Enqueue incremental processing (triggered by IncrementalCheckJob)
Tracks::CreateJob.perform_later(
user.id,
start_at: last_track_end,
end_at: previous_point_timestamp,
mode: :none
)
# Run cleanup for missed tracks
Tracks::CleanupJob.perform_later(older_than: 1.day.ago)
# Create tracks with notifications
Tracks::CreateJob.perform_later(user.id, start_at: nil, end_at: nil, mode: :bulk)
```
### 4. Frontend Integration
```javascript
// Initialize tracks on map
const tracksLayer = new TracksLayer(map, tracksData);
// Handle real-time updates
consumer.subscriptions.create("TracksChannel", {
received(data) {
switch(data.event) {
case 'created':
tracksLayer.addTrack(data.track);
break;
case 'updated':
tracksLayer.updateTrack(data.track);
break;
case 'destroyed':
tracksLayer.removeTrack(data.track.id);
break;
}
}
});
```
## API Reference
### Track Model API
```ruby
# Key methods
track.formatted_distance # Distance in user's preferred unit
track.distance_in_unit(unit) # Distance in specific unit
track.recalculate_path_and_distance! # Recalculate from points
# Scopes
Track.for_user(user)
Track.between_dates(start_date, end_date)
Track.last_for_day(user, date)
```
### TrackSerializer Output
```json
{
"id": 123,
"start_at": "2023-01-01T10:00:00Z",
"end_at": "2023-01-01T11:30:00Z",
"distance": 5000,
"avg_speed": 25.5,
"duration": 5400,
"elevation_gain": 150,
"elevation_loss": 100,
"elevation_max": 300,
"elevation_min": 200,
"path": "LINESTRING(...)"
}
```
### Service APIs
```ruby
# Generator API
generator = Tracks::Generator.new(user, options)
generator.call # Returns nil, tracks saved to database
# Processor API
processor = Tracks::IncrementalProcessor.new(user, point)
processor.call # May enqueue background job
# Segmentation API (via inclusion)
segments = split_points_into_segments(points)
should_start_new_segment?(current_point, previous_point)
```
## Development Guidelines
### 1. Adding New Generation Modes
```ruby
# In Tracks::Generator
def load_points
case mode
when :bulk
load_bulk_points
when :incremental
load_incremental_points
when :daily
load_daily_points
when :custom_mode # New mode
load_custom_points
end
end
def should_clean_tracks?
case mode
when :bulk, :daily then true
when :incremental, :custom_mode then false
end
end
```
### 2. Customizing Segmentation Logic
```ruby
# Override in including class
def should_start_new_segment?(current_point, previous_point)
# Custom logic here
super || custom_condition?(current_point, previous_point)
end
```
### 3. Testing Patterns
```ruby
# Test track generation
expect { generator.call }.to change(Track, :count).by(1)
# Test point callback
expect { point.save! }.to have_enqueued_job(Tracks::IncrementalCheckJob)
.with(user.id, point.id)
# Test incremental processing
expect(Tracks::CreateJob).to receive(:perform_later)
.with(user.id, start_at: anything, end_at: anything, mode: :none)
processor.call
# Test segmentation
segments = generator.send(:split_points_into_segments, points)
expect(segments.size).to eq(2)
```
### 4. Performance Considerations
- **Batch Processing**: Use `find_in_batches` for large datasets
- **Database Indexes**: Ensure proper indexing on `timestamp` and `track_id`
- **Memory Usage**: Process points in chunks for very large datasets
- **Asynchronous Processing**: Point creation is never blocked by track generation
- **Job Queue Management**: Monitor job queue performance for incremental processing
### 5. Error Handling
```ruby
# In services
begin
generator.call
rescue StandardError => e
Rails.logger.error "Track generation failed: #{e.message}"
# Handle gracefully
end
# In jobs
def perform(*args)
# Main logic
rescue ActiveRecord::RecordNotFound
# Don't retry for missing records
rescue StandardError => e
Rails.logger.error "Job failed: #{e.message}"
raise # Re-raise for retry logic
end
```
### 6. Monitoring and Debugging
```ruby
# Add logging
Rails.logger.info "Generated #{segments.size} tracks for user #{user.id}"
# Performance monitoring
Rails.logger.info "Track generation took #{duration}ms"
# Debug segmentation
Rails.logger.debug "Threshold check: time=#{time_gap}min, distance=#{distance_gap}m"
```
## Best Practices
1. **Data Consistency**: Always store distances in meters, convert only for display
2. **Threshold Configuration**: Make thresholds user-configurable for flexibility
3. **Error Handling**: Gracefully handle missing data and network issues
4. **Performance**: Use database queries efficiently, avoid N+1 queries
5. **Testing**: Test all modes and edge cases thoroughly
6. **Real-time Updates**: Use ActionCable for responsive user experience
7. **Background Processing**: Use appropriate queues for different job priorities
8. **Asynchronous Design**: Never block point creation with track generation logic
9. **Job Monitoring**: Monitor background job performance and failure rates
## Troubleshooting
### Common Issues
1. **Missing Tracks**: Check if points have `track_id: nil` for incremental processing
2. **Incorrect Thresholds**: Verify user settings configuration
3. **Job Failures**: Check background job logs for errors
4. **Real-time Updates**: Verify WebSocket connection and channel subscriptions
5. **Performance Issues**: Monitor database query performance and indexing
### Debugging Tools
```ruby
# Check track generation
user.tracked_points.where(track_id: nil).count # Untracked points
# Verify thresholds
user.safe_settings.minutes_between_routes
user.safe_settings.meters_between_routes
# Test segmentation
generator = Tracks::Generator.new(user, mode: :bulk)
segments = generator.send(:split_points_into_segments, points)
```
This overview provides a comprehensive understanding of the Dawarich Tracks feature, from high-level architecture to specific implementation details.

View file

@@ -17,11 +17,9 @@ RSpec.describe Tracks::CreateJob, type: :job do
end
it 'calls the generator and creates a notification' do
# Mock the generator to actually create tracks
allow(generator_instance).to receive(:call) do
create_list(:track, 2, user: user)
end
# Mock the generator to return the count of tracks created
allow(generator_instance).to receive(:call).and_return(2)
described_class.new.perform(user.id)
expect(Tracks::Generator).to have_received(:new).with(
@@ -53,12 +51,9 @@ RSpec.describe Tracks::CreateJob, type: :job do
end
it 'passes custom parameters to the generator' do
# Create some existing tracks and mock generator to create 1 more
create_list(:track, 5, user: user)
allow(generator_instance).to receive(:call) do
create(:track, user: user)
end
# Mock generator to return the count of tracks created
allow(generator_instance).to receive(:call).and_return(1)
described_class.new.perform(user.id, start_at: start_at, end_at: end_at, mode: mode)
expect(Tracks::Generator).to have_received(:new).with(
@@ -87,6 +82,8 @@ RSpec.describe Tracks::CreateJob, type: :job do
end
it 'translates :none to :incremental' do
allow(generator_instance).to receive(:call).and_return(0)
described_class.new.perform(user.id, mode: :none)
expect(Tracks::Generator).to have_received(:new).with(
@@ -104,6 +101,8 @@ RSpec.describe Tracks::CreateJob, type: :job do
end
it 'translates :daily to :daily' do
allow(generator_instance).to receive(:call).and_return(0)
described_class.new.perform(user.id, mode: :daily)
expect(Tracks::Generator).to have_received(:new).with(
@@ -121,6 +120,8 @@ RSpec.describe Tracks::CreateJob, type: :job do
end
it 'translates other modes to :bulk' do
allow(generator_instance).to receive(:call).and_return(0)
described_class.new.perform(user.id, mode: :replace)
expect(Tracks::Generator).to have_received(:new).with(
@@ -185,6 +186,34 @@ RSpec.describe Tracks::CreateJob, type: :job do
expect(ExceptionReporter).to have_received(:call)
end
end
context 'when tracks are deleted and recreated' do
it 'returns the correct count of newly created tracks' do
# Create some existing tracks first
create_list(:track, 3, user: user)
# Mock the generator to simulate deleting existing tracks and creating new ones
# This should return the count of newly created tracks, not the difference
allow(generator_instance).to receive(:call).and_return(2)
described_class.new.perform(user.id, mode: :bulk)
expect(Tracks::Generator).to have_received(:new).with(
user,
start_at: nil,
end_at: nil,
mode: :bulk
)
expect(generator_instance).to have_received(:call)
expect(Notifications::Create).to have_received(:new).with(
user: user,
kind: :info,
title: 'Tracks Generated',
content: 'Created 2 tracks from your location data. Check your tracks section to view them.'
)
expect(notification_service).to have_received(:call)
end
end
end
describe 'queue' do

View file

@@ -31,6 +31,24 @@ RSpec.describe Tracks::Generator do
generator.call
expect(points.map(&:reload).map(&:track)).to all(be_present)
end
it 'properly handles point associations when cleaning existing tracks' do
# Create existing tracks with associated points
existing_track = create(:track, user: user)
existing_points = create_list(:point, 3, user: user, track: existing_track)
# Verify points are associated
expect(existing_points.map(&:reload).map(&:track_id)).to all(eq(existing_track.id))
# Run generator which should clean existing tracks and create new ones
generator.call
# Verify the old track is deleted
expect(Track.exists?(existing_track.id)).to be false
# Verify the points are no longer associated with the deleted track
expect(existing_points.map(&:reload).map(&:track_id)).to all(be_nil)
end
end
context 'with insufficient points' do
@@ -118,6 +136,24 @@ RSpec.describe Tracks::Generator do
generator.call
expect(Track.exists?(existing_track.id)).to be false
end
it 'properly handles point associations when cleaning daily tracks' do
# Create existing tracks with associated points for today
existing_track = create(:track, user: user, start_at: today.beginning_of_day)
existing_points = create_list(:point, 3, user: user, track: existing_track)
# Verify points are associated
expect(existing_points.map(&:reload).map(&:track_id)).to all(eq(existing_track.id))
# Run generator which should clean existing tracks for the day and create new ones
generator.call
# Verify the old track is deleted
expect(Track.exists?(existing_track.id)).to be false
# Verify the points are no longer associated with the deleted track
expect(existing_points.map(&:reload).map(&:track_id)).to all(be_nil)
end
end
context 'with empty points' do

View file

@@ -47,7 +47,7 @@ RSpec.describe Tracks::IncrementalProcessor do
it 'processes when time threshold exceeded' do
expect(Tracks::CreateJob).to receive(:perform_later)
.with(user.id, start_at: nil, end_at: Time.at(previous_point.timestamp), mode: :none)
.with(user.id, start_at: nil, end_at: Time.zone.at(previous_point.timestamp), mode: :none)
processor.call
end
end
@@ -65,7 +65,7 @@ RSpec.describe Tracks::IncrementalProcessor do
it 'uses existing track end time as start_at' do
expect(Tracks::CreateJob).to receive(:perform_later)
.with(user.id, start_at: existing_track.end_at, end_at: Time.at(previous_point.timestamp), mode: :none)
.with(user.id, start_at: existing_track.end_at, end_at: Time.zone.at(previous_point.timestamp), mode: :none)
processor.call
end
end
@@ -88,7 +88,7 @@ RSpec.describe Tracks::IncrementalProcessor do
it 'processes when distance threshold exceeded' do
expect(Tracks::CreateJob).to receive(:perform_later)
.with(user.id, start_at: nil, end_at: Time.at(previous_point.timestamp), mode: :none)
.with(user.id, start_at: nil, end_at: Time.zone.at(previous_point.timestamp), mode: :none)
processor.call
end
end

View file

@@ -9,7 +9,7 @@ RSpec.describe Users::ImportData, type: :service do
let(:import_directory) { Rails.root.join('tmp', "import_#{user.email.gsub(/[^0-9A-Za-z._-]/, '_')}_1234567890") }
before do
allow(Time).to receive(:current).and_return(Time.at(1234567890))
allow(Time).to receive(:current).and_return(Time.zone.at(1234567890))
allow(FileUtils).to receive(:mkdir_p)
allow(FileUtils).to receive(:rm_rf)
allow(File).to receive(:directory?).and_return(true)