Remove logs and unused code

This commit is contained in:
Eugene Burmakin 2025-08-29 13:59:46 +02:00
parent 289ce5dabb
commit 30fb51940c
11 changed files with 47 additions and 354 deletions

View file

@ -9,18 +9,13 @@ class Tracks::BoundaryResolverJob < ApplicationJob
@user = User.find(user_id)
@session_manager = Tracks::SessionManager.new(user_id, session_id)
Rails.logger.info "Starting boundary resolution for user #{user_id} (session: #{session_id})"
return unless session_exists_and_ready?
boundary_tracks_resolved = resolve_boundary_tracks
finalize_session(boundary_tracks_resolved)
Rails.logger.info "Boundary resolution completed for user #{user_id}: #{boundary_tracks_resolved} boundary tracks resolved"
rescue StandardError => e
ExceptionReporter.call(e, "Failed to resolve boundaries for user #{user_id}")
Rails.logger.error "Boundary resolution failed for user #{user_id}: #{e.message}"
mark_session_failed(e.message)
end
@ -30,14 +25,11 @@ class Tracks::BoundaryResolverJob < ApplicationJob
attr_reader :user, :session_manager
def session_exists_and_ready?
unless session_manager.session_exists?
Rails.logger.warn "Session #{session_manager.session_id} not found for user #{user.id}, skipping boundary resolution"
return false
end
return false unless session_manager.session_exists?
unless session_manager.all_chunks_completed?
Rails.logger.warn "Not all chunks completed for session #{session_manager.session_id}, rescheduling boundary resolution"
reschedule_boundary_resolution
return false
end
@ -54,7 +46,6 @@ class Tracks::BoundaryResolverJob < ApplicationJob
total_tracks = session_data['tracks_created'] + boundary_tracks_resolved
session_manager.mark_completed
create_success_notification(total_tracks)
end
def reschedule_boundary_resolution
@ -62,31 +53,9 @@ class Tracks::BoundaryResolverJob < ApplicationJob
delay = [30.seconds, 1.minute, 2.minutes, 5.minutes].sample
self.class.set(wait: delay).perform_later(user.id, session_manager.session_id)
Rails.logger.info "Rescheduled boundary resolution for user #{user.id} in #{delay} seconds"
end
def mark_session_failed(error_message)
session_manager.mark_failed(error_message)
create_error_notification(error_message)
end
def create_success_notification(tracks_created)
Notifications::Create.new(
user: user,
kind: :info,
title: 'Track Generation Complete',
content: "Generated #{tracks_created} tracks from your location data using parallel processing. Check your tracks section to view them."
).call
end
def create_error_notification(error_message)
return unless DawarichSettings.self_hosted?
Notifications::Create.new(
user: user,
kind: :error,
title: 'Track Generation Failed',
content: "Failed to complete track generation: #{error_message}"
).call
end
end

View file

@ -9,8 +9,6 @@ class Tracks::CleanupJob < ApplicationJob
def perform(older_than: 1.day.ago)
users_with_old_untracked_points(older_than).find_each do |user|
Rails.logger.info "Processing missed tracks for user #{user.id}"
# Process only the old untracked points
Tracks::Generator.new(
user,

View file

@ -6,34 +6,8 @@ class Tracks::CreateJob < ApplicationJob
def perform(user_id, start_at: nil, end_at: nil, mode: :daily)
user = User.find(user_id)
tracks_created = Tracks::Generator.new(user, start_at:, end_at:, mode:).call
create_success_notification(user, tracks_created)
Tracks::Generator.new(user, start_at:, end_at:, mode:).call
rescue StandardError => e
ExceptionReporter.call(e, 'Failed to create tracks for user')
create_error_notification(user, e)
end
private
def create_success_notification(user, tracks_created)
Notifications::Create.new(
user: user,
kind: :info,
title: 'Tracks Generated',
content: "Created #{tracks_created} tracks from your location data. Check your tracks section to view them."
).call
end
def create_error_notification(user, error)
return unless DawarichSettings.self_hosted?
Notifications::Create.new(
user: user,
kind: :error,
title: 'Track Generation Failed',
content: "Failed to generate tracks from your location data: #{error.message}"
).call
end
end

View file

@ -7,8 +7,6 @@ class Tracks::ParallelGeneratorJob < ApplicationJob
def perform(user_id, start_at: nil, end_at: nil, mode: :bulk, chunk_size: 1.day)
user = User.find(user_id)
Rails.logger.info "Starting parallel track generation for user #{user_id} (mode: #{mode})"
session = Tracks::ParallelGenerator.new(
user,
@ -17,43 +15,7 @@ class Tracks::ParallelGeneratorJob < ApplicationJob
mode: mode,
chunk_size: chunk_size
).call
if session && session != 0
Rails.logger.info "Parallel track generation initiated for user #{user_id} (session: #{session.session_id})"
else
Rails.logger.warn "No tracks to generate for user #{user_id} (no time chunks created)"
create_info_notification(user, 0)
end
rescue ActiveRecord::RecordNotFound => e
# Re-raise RecordNotFound as it indicates a programming error
raise
rescue StandardError => e
ExceptionReporter.call(e, 'Failed to start parallel track generation')
Rails.logger.error "Parallel track generation failed for user #{user_id}: #{e.message}"
create_error_notification(user, e) if user
end
private
def create_info_notification(user, tracks_created)
Notifications::Create.new(
user: user,
kind: :info,
title: 'Track Generation Complete',
content: "Generated #{tracks_created} tracks from your location data. Check your tracks section to view them."
).call
end
def create_error_notification(user, error)
return unless DawarichSettings.self_hosted?
Notifications::Create.new(
user: user,
kind: :error,
title: 'Track Generation Failed',
content: "Failed to generate tracks from your location data: #{error.message}"
).call
end
end
end

View file

@ -1,93 +0,0 @@
# frozen_string_literal: true
# Background job for cleaning up track generation sessions
# Handles expired sessions, stuck sessions, and general maintenance
class Tracks::SessionCleanupJob < ApplicationJob
  queue_as :maintenance

  # Entry point: runs the expired-session and stuck-session cleanup passes
  # and logs a summary. Errors are reported, never re-raised, so the
  # maintenance queue keeps draining.
  def perform
    Rails.logger.info "Starting track generation session cleanup"

    expired_cleaned = cleanup_expired_sessions
    stuck_cleaned = cleanup_stuck_sessions

    Rails.logger.info "Session cleanup completed: #{expired_cleaned} expired, #{stuck_cleaned} stuck sessions cleaned"
  rescue StandardError => e
    ExceptionReporter.call(e, 'Failed to cleanup track generation sessions')
    Rails.logger.error "Session cleanup failed: #{e.message}"
  end

  private

  # Rails cache expires entries via TTL on its own; this sweeps anything
  # the cache may have missed. Delegates to the session manager.
  def cleanup_expired_sessions
    Tracks::SessionManager.cleanup_expired_sessions
  end

  # Marks every detected stuck session as failed; returns how many were
  # successfully cleaned.
  def cleanup_stuck_sessions
    stuck_sessions = find_stuck_sessions
    return 0 if stuck_sessions.empty?

    Rails.logger.warn "Found #{stuck_sessions.size} stuck track generation sessions"

    # Enumerable#count with a block tallies only the successful cleanups.
    stuck_sessions.count { |session_info| cleanup_stuck_session(session_info) }
  end

  # Detection placeholder: Rails.cache offers no key scanning, so this
  # currently always returns an empty list.
  def find_stuck_sessions
    stuck_sessions = []
    threshold = 4.hours.ago # intended stuck cutoff; unused until detection exists

    # Sessions still in 'processing' state past the threshold would count as
    # stuck. A real implementation might:
    # 1. Store session keys in a separate tracking mechanism
    # 2. Use Redis directly for better key management
    # 3. Add session heartbeats for stuck detection
    stuck_sessions
  end

  # Marks one stuck session as failed and (when self-hosted) notifies the
  # owning user. Returns true on success, false when the session has no
  # data or any step raises.
  def cleanup_stuck_session(session_info)
    manager = Tracks::SessionManager.new(session_info[:user_id], session_info[:session_id])
    return false unless manager.get_session_data

    manager.mark_failed("Session stuck - cleaned up by maintenance job")

    if DawarichSettings.self_hosted?
      user = User.find_by(id: session_info[:user_id])
      notify_user_of_cleanup(user) if user
    end

    Rails.logger.info "Cleaned up stuck session #{session_info[:session_id]} for user #{session_info[:user_id]}"
    true
  rescue StandardError => e
    Rails.logger.error "Failed to cleanup stuck session #{session_info[:session_id]}: #{e.message}"
    false
  end

  # Best-effort warning notification; a delivery failure is logged and
  # swallowed so it never aborts the cleanup pass.
  def notify_user_of_cleanup(user)
    Notifications::Create.new(
      user: user,
      kind: :warning,
      title: 'Track Generation Interrupted',
      content: 'Your track generation process was interrupted and has been cleaned up. You may need to restart the generation manually.'
    ).call
  rescue StandardError => e
    Rails.logger.error "Failed to notify user #{user.id} about session cleanup: #{e.message}"
  end
end

View file

@ -13,18 +13,13 @@ class Tracks::TimeChunkProcessorJob < ApplicationJob
@session_manager = Tracks::SessionManager.new(user_id, session_id)
@chunk_data = chunk_data
Rails.logger.debug "Processing chunk #{chunk_data[:chunk_id]} for user #{user_id} (session: #{session_id})"
return unless session_exists?
tracks_created = process_chunk
update_session_progress(tracks_created)
Rails.logger.debug "Chunk #{chunk_data[:chunk_id]} processed: #{tracks_created} tracks created"
rescue StandardError => e
ExceptionReporter.call(e, "Failed to process time chunk for user #{user_id}")
Rails.logger.error "Chunk processing failed for user #{user_id}, chunk #{chunk_data[:chunk_id]}: #{e.message}"
mark_session_failed(e.message)
end
@ -117,8 +112,6 @@ class Tracks::TimeChunkProcessorJob < ApplicationJob
track
rescue StandardError => e
Rails.logger.error "Error calculating distance for track in chunk #{chunk_data[:chunk_id]}: #{e.message}"
Rails.logger.debug "Point details: #{points.map { |p| { id: p.id, lat: p.latitude, lon: p.longitude, timestamp: p.timestamp } }.inspect}"
nil
end
end

View file

@ -17,16 +17,11 @@ class Tracks::BoundaryDetector
boundary_candidates = find_boundary_track_candidates
return 0 if boundary_candidates.empty?
Rails.logger.debug "Found #{boundary_candidates.size} boundary track candidates for user #{user.id}"
resolved_count = 0
boundary_candidates.each do |group|
if merge_boundary_tracks(group)
resolved_count += 1
end
resolved_count += 1 if merge_boundary_tracks(group)
end
Rails.logger.info "Resolved #{resolved_count} boundary tracks for user #{user.id}"
resolved_count
end
@ -49,11 +44,11 @@ class Tracks::BoundaryDetector
recent_tracks.each do |track|
# Look for tracks that end close to where another begins
connected_tracks = find_connected_tracks(track, recent_tracks)
if connected_tracks.any?
# Create or extend a boundary group
existing_group = potential_groups.find { |group| group.include?(track) }
if existing_group
existing_group.concat(connected_tracks).uniq!
else
@ -71,60 +66,60 @@ class Tracks::BoundaryDetector
connected = []
track_end_time = track.end_at.to_i
track_start_time = track.start_at.to_i
# Look for tracks that start shortly after this one ends (within 30 minutes)
time_window = 30.minutes.to_i
all_tracks.each do |candidate|
next if candidate.id == track.id
candidate_start = candidate.start_at.to_i
candidate_end = candidate.end_at.to_i
# Check if tracks are temporally adjacent
if (candidate_start - track_end_time).abs <= time_window ||
(track_start_time - candidate_end).abs <= time_window
# Check if they're spatially connected
if tracks_spatially_connected?(track, candidate)
connected << candidate
end
end
end
connected
end
# Check if two tracks are spatially connected (endpoints are close)
def tracks_spatially_connected?(track1, track2)
return false unless track1.points.exists? && track2.points.exists?
# Get endpoints of both tracks
track1_start = track1.points.order(:timestamp).first
track1_end = track1.points.order(:timestamp).last
track2_start = track2.points.order(:timestamp).first
track2_end = track2.points.order(:timestamp).last
# Check various connection scenarios
connection_threshold = distance_threshold_meters
# Track1 end connects to Track2 start
return true if points_are_close?(track1_end, track2_start, connection_threshold)
# Track2 end connects to Track1 start
return true if points_are_close?(track2_end, track1_start, connection_threshold)
# Tracks overlap or are very close
return true if points_are_close?(track1_start, track2_start, connection_threshold) ||
points_are_close?(track1_end, track2_end, connection_threshold)
false
end
# Check if two points are within the specified distance
def points_are_close?(point1, point2, threshold_meters)
return false unless point1 && point2
distance_meters = point1.distance_to_geocoder(point2, :m)
distance_meters <= threshold_meters
end
@ -132,59 +127,52 @@ class Tracks::BoundaryDetector
# Validate that a group of tracks represents a legitimate boundary case
def valid_boundary_group?(group)
return false if group.size < 2
# Check that tracks are sequential in time
sorted_tracks = group.sort_by(&:start_at)
# Ensure no large time gaps that would indicate separate journeys
max_gap = 1.hour.to_i
sorted_tracks.each_cons(2) do |track1, track2|
time_gap = track2.start_at.to_i - track1.end_at.to_i
return false if time_gap > max_gap
end
true
end
# Merge a group of boundary tracks into a single track
def merge_boundary_tracks(track_group)
return false if track_group.size < 2
Rails.logger.debug "Merging #{track_group.size} boundary tracks for user #{user.id}"
# Sort tracks by start time
sorted_tracks = track_group.sort_by(&:start_at)
# Collect all points from all tracks
all_points = []
sorted_tracks.each do |track|
track_points = track.points.order(:timestamp).to_a
all_points.concat(track_points)
end
# Remove duplicates and sort by timestamp
unique_points = all_points.uniq(&:id).sort_by(&:timestamp)
return false if unique_points.size < 2
# Calculate merged track distance
merged_distance = Point.calculate_distance_for_array_geocoder(unique_points, :m)
# Create new merged track
merged_track = create_track_from_points(unique_points, merged_distance)
if merged_track
# Delete the original boundary tracks
sorted_tracks.each do |track|
Rails.logger.debug "Deleting boundary track #{track.id} (merged into #{merged_track.id})"
track.destroy
end
Rails.logger.info "Created merged boundary track #{merged_track.id} with #{unique_points.size} points"
sorted_tracks.each(&:destroy)
true
else
Rails.logger.warn "Failed to create merged boundary track for user #{user.id}"
false
end
end
@ -197,4 +185,4 @@ class Tracks::BoundaryDetector
def time_threshold_minutes
@time_threshold_minutes ||= user.safe_settings.minutes_between_routes.to_i
end
end
end

View file

@ -42,8 +42,6 @@ class Tracks::Generator
start_timestamp, end_timestamp = get_timestamp_range
Rails.logger.debug "Generator: querying points for user #{user.id} in #{mode} mode"
segments = Track.get_segments_with_points(
user.id,
start_timestamp,
@ -53,8 +51,6 @@ class Tracks::Generator
untracked_only: mode == :incremental
)
Rails.logger.debug "Generator: created #{segments.size} segments via SQL"
tracks_created = 0
segments.each do |segment|
@ -62,7 +58,6 @@ class Tracks::Generator
tracks_created += 1 if track
end
Rails.logger.info "Generated #{tracks_created} tracks for user #{user.id} in #{mode} mode"
tracks_created
end
@ -81,7 +76,7 @@ class Tracks::Generator
when :incremental then load_incremental_points
when :daily then load_daily_points
else
raise ArgumentError, "Unknown mode: #{mode}"
raise ArgumentError, "Tracks::Generator: Unknown mode: #{mode}"
end
end
@ -111,12 +106,9 @@ class Tracks::Generator
points = segment_data[:points]
pre_calculated_distance = segment_data[:pre_calculated_distance]
Rails.logger.debug "Generator: processing segment with #{points.size} points"
return unless points.size >= 2
track = create_track_from_points(points, pre_calculated_distance)
Rails.logger.debug "Generator: created track #{track&.id}"
track
create_track_from_points(points, pre_calculated_distance)
end
def time_range_defined?
@ -163,7 +155,7 @@ class Tracks::Generator
when :bulk then clean_bulk_tracks
when :daily then clean_daily_tracks
else
raise ArgumentError, "Unknown mode: #{mode}"
raise ArgumentError, "Tracks::Generator: Unknown mode: #{mode}"
end
end
@ -188,7 +180,7 @@ class Tracks::Generator
when :daily then daily_timestamp_range
when :incremental then incremental_timestamp_range
else
raise ArgumentError, "Unknown mode: #{mode}"
raise ArgumentError, "Tracks::Generator: Unknown mode: #{mode}"
end
end

View file

@ -34,7 +34,7 @@ class Tracks::ParallelGenerator
enqueue_boundary_resolver(session.session_id, time_chunks.size)
Rails.logger.info "Started parallel track generation for user #{user.id} with #{time_chunks.size} chunks (session: #{session.session_id})"
session
end
@ -54,7 +54,7 @@ class Tracks::ParallelGenerator
end_at: end_at,
chunk_size: chunk_size
)
chunker.call
end
@ -88,7 +88,7 @@ class Tracks::ParallelGenerator
def enqueue_boundary_resolver(session_id, chunk_count)
# Delay based on estimated processing time (30 seconds per chunk + buffer)
estimated_delay = [chunk_count * 30.seconds, 5.minutes].max
Tracks::BoundaryResolverJob.set(wait: estimated_delay).perform_later(
user.id,
session_id
@ -105,20 +105,14 @@ class Tracks::ParallelGenerator
end
def clean_bulk_tracks
scope = user.tracks
scope = scope.where(start_at: time_range) if time_range_defined?
Rails.logger.info "Cleaning #{scope.count} existing tracks for bulk regeneration (user: #{user.id})"
scope.destroy_all
user.tracks.where(start_at: time_range).destroy_all if time_range_defined?
end
def clean_daily_tracks
day_range = daily_time_range
range = Time.zone.at(day_range.begin)..Time.zone.at(day_range.end)
scope = user.tracks.where(start_at: range)
Rails.logger.info "Cleaning #{scope.count} existing tracks for daily regeneration (user: #{user.id})"
scope.destroy_all
user.tracks.where(start_at: range).destroy_all
end
def time_range_defined?
@ -178,4 +172,4 @@ class Tracks::ParallelGenerator
end
end
end
end
end

View file

@ -144,16 +144,14 @@ module Tracks::Segmentation
def calculate_km_distance_between_points_geocoder(point1, point2)
begin
distance = point1.distance_to_geocoder(point2, :km)
# Validate result
if !distance.finite? || distance < 0
Rails.logger.warn "Invalid distance calculated between points: #{point1.id} (#{point1.latitude}, #{point1.longitude}) and #{point2.id} (#{point2.latitude}, #{point2.longitude})"
return 0
end
distance
rescue StandardError => e
Rails.logger.error "Error calculating distance between points #{point1.id} and #{point2.id}: #{e.message}"
0
end
end

View file

@ -1,82 +0,0 @@
# frozen_string_literal: true
# Service for cleaning up track generation sessions and maintenance tasks
# Provides utilities for session management and cleanup operations
class Tracks::SessionCleanup
  class << self
    # Enqueue the periodic cleanup job.
    def schedule_cleanup
      Tracks::SessionCleanupJob.perform_later
    end

    # Best-effort removal of every session owned by a user (e.g. on user
    # deletion). Rails.cache keys cannot be enumerated, so this currently
    # relies on TTL expiry and always reports zero cleaned sessions.
    def cleanup_user_sessions(user_id)
      Rails.logger.info "Cleaning up all sessions for user #{user_id}"

      cleaned_count = 0
      # NOTE(review): with a separate index of active sessions this could
      # actually delete them; today it is a logged no-op.
      Rails.logger.info "Cleaned up #{cleaned_count} sessions for user #{user_id}"

      cleaned_count
    end

    # Force-remove one session. Returns true when it existed and was
    # cleaned, false when it was not found.
    def cleanup_session(user_id, session_id)
      manager = Tracks::SessionManager.new(user_id, session_id)

      unless manager.session_exists?
        Rails.logger.warn "Session #{session_id} not found for user #{user_id}"
        return false
      end

      manager.cleanup_session
      Rails.logger.info "Force cleaned session #{session_id} for user #{user_id}"
      true
    end

    # Monitoring snapshot. Rails.cache cannot be introspected, so every
    # counter is hard-coded to zero; only the timestamp is live.
    def session_statistics
      {
        total_sessions: 0, # Can't count easily with Rails.cache
        processing_sessions: 0,
        completed_sessions: 0,
        failed_sessions: 0,
        cleanup_performed_at: Time.current
      }
    end

    # Round-trips a throwaway session through the cache to verify the
    # session subsystem is usable. Never raises: failures fold into the
    # returned status hash.
    def health_check
      test_user_id = 'health_check_user'
      test_session = Tracks::SessionManager.create_for_user(test_user_id, { test: true })

      session_exists = test_session.session_exists?
      test_session.cleanup_session

      {
        status: session_exists ? 'healthy' : 'unhealthy',
        cache_accessible: true,
        timestamp: Time.current
      }
    rescue StandardError => e
      {
        status: 'unhealthy',
        cache_accessible: false,
        error: e.message,
        timestamp: Time.current
      }
    end
  end
end