Performance Profiling

Using profiling features effectively with Epochly.

Built-in Profiling

Enable Monitoring

import epochly
# Set to Level 0 for profiling without optimization
epochly.set_level(0)
# Or use the decorator
@epochly.optimize(level=0)
def profiled_function(data):
    return process(data)

Monitoring Context

import epochly
with epochly.monitoring_context() as metrics:
    result = my_function(data)
# Metrics are populated once the context exits
print(f"Duration: {metrics.get('duration_ms')} ms")
print(f"CPU time: {metrics.get('cpu_time_ms')} ms")
print(f"Memory: {metrics.get('memory_mb')} MB")

Benchmark Context

import epochly
with epochly.benchmark_context("my_algorithm") as results:
    output = algorithm(data)
# Automatically prints timing information

Accessing Metrics

Get Metrics Summary

import epochly
metrics = epochly.get_metrics()
# Timing metrics
print(f"Total calls: {metrics.get('total_calls')}")
print(f"Total time: {metrics.get('total_time_ms')} ms")
print(f"Mean time: {metrics.get('mean_time_ms')} ms")
# Resource metrics
print(f"Memory peak: {metrics.get('memory_peak_mb')} MB")
print(f"CPU utilization: {metrics.get('cpu_utilization_percent')}%")

Get Status

import epochly
status = epochly.get_status()
print(f"Enhancement level: {status['enhancement_level']}")
print(f"Enabled: {status['enabled']}")
print(f"JIT active: {status.get('jit_active', False)}")

Integration with cProfile

Using cProfile with Epochly

import cProfile
import pstats
import epochly
@epochly.optimize(level=0)  # Monitor but don't optimize
def profiled_work(data):
    return process(data)
# Profile with cProfile
profiler = cProfile.Profile()
profiler.enable()
for _ in range(100):
    profiled_work(test_data)
profiler.disable()
# Analyze results
stats = pstats.Stats(profiler)
stats.strip_dirs()
stats.sort_stats('cumulative')
stats.print_stats(20)

Saving Profile Results

import cProfile
import pstats
def profile_to_file(func, args, filename='profile.stats'):
    profiler = cProfile.Profile()
    profiler.enable()
    result = func(*args)
    profiler.disable()
    profiler.dump_stats(filename)
    return result
# Later: analyze with pstats
# python -m pstats profile.stats
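The saved file can also be inspected programmatically rather than through the interactive pstats shell; for example:

import pstats
stats = pstats.Stats('profile.stats')
stats.strip_dirs().sort_stats('cumulative').print_stats(10)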

Integration with line_profiler

Installation

pip install line_profiler

Usage

# Add @profile decorator (provided by kernprof)
@profile
def my_function(data):
    result = []                # Line timing
    for x in data:             # Line timing
        result.append(x ** 2)  # Line timing
    return result              # Line timing
# Run with kernprof
kernprof -l -v my_script.py

Combining with Epochly

import epochly
@profile
@epochly.optimize(level=0) # Monitor
def combined_profile(data):
# Both Epochly metrics and line-by-line profiling
return process(data)

Memory Profiling

Using memory_profiler

pip install memory_profiler

from memory_profiler import profile
@profile
def memory_heavy(n):
    data = [i ** 2 for i in range(n)]
    return sum(data)
memory_heavy(1000000)

Epochly Memory Metrics

import epochly
with epochly.monitoring_context() as metrics:
    result = memory_intensive_operation()
print(f"Memory allocated: {metrics.get('memory_allocated_mb')} MB")
print(f"Peak memory: {metrics.get('memory_peak_mb')} MB")

GPU Profiling

Epochly GPU Metrics

import epochly
with epochly.optimize_context(level=4):
    result = gpu_operation(large_array)
status = epochly.get_status()
print(f"GPU utilization: {status.get('gpu_utilization_percent')}%")
print(f"GPU memory used: {status.get('gpu_memory_mb')} MB")

NVIDIA Profiling Tools

# nvidia-smi for live monitoring
watch -n 1 nvidia-smi
# nsys for detailed profiling
nsys profile python my_script.py
# ncu for kernel analysis
ncu python my_script.py

Continuous Profiling

Production Monitoring

import epochly
import time
def continuous_profile():
    while True:
        metrics = epochly.get_metrics()
        # Log metrics
        log_metrics({
            'timestamp': time.time(),
            'total_calls': metrics.get('total_calls'),
            'mean_time_ms': metrics.get('mean_time_ms'),
            'memory_mb': metrics.get('memory_peak_mb')
        })
        time.sleep(60)  # Every minute
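Because continuous_profile blocks forever, a service would typically run it on a daemon thread so it stops with the process:

import threading
threading.Thread(target=continuous_profile, daemon=True).start()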

Alert on Regression

import epochly
def check_performance():
    metrics = epochly.get_metrics()
    baseline_ms = 100  # Expected baseline
    actual_ms = metrics.get('mean_time_ms', 0)
    if actual_ms > baseline_ms * 1.5:  # 50% regression
        alert(f"Performance regression: {actual_ms}ms > {baseline_ms}ms")

Profiling Best Practices

1. Profile Before Optimizing

import epochly
# Step 1: Profile
epochly.set_level(0)
run_workload()
analyze_results()
# Step 2: Choose level based on profile
epochly.set_level(appropriate_level)

2. Use Representative Data

# Bad: profiling with toy data
profile(small_test_data)
# Good: profiling with production-like data
profile(representative_production_data)

3. Profile Multiple Runs

import statistics
import epochly
times = []
for _ in range(100):
    with epochly.monitoring_context() as m:
        result = my_function(data)
    times.append(m.get('duration_ms'))
print(f"Mean: {statistics.mean(times):.2f}ms")
print(f"Stdev: {statistics.stdev(times):.2f}ms")
print(f"P95: {sorted(times)[int(len(times) * 0.95) - 1]:.2f}ms")  # nearest-rank P95

4. Profile in Production Context

# Profile with production constraints
# - Same hardware
# - Same data volumes
# - Same concurrency levels
# - Same memory constraints
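A hedged sketch of measuring under production-like concurrency, assuming a thread pool sized to match your production worker count (my_function and production_batches are placeholders, not part of the API):

from concurrent.futures import ThreadPoolExecutor
import epochly

with epochly.monitoring_context() as metrics:
    # Match the worker count your production service actually runs with
    with ThreadPoolExecutor(max_workers=8) as pool:
        results = list(pool.map(my_function, production_batches))
print(f"Duration under load: {metrics.get('duration_ms')} ms")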

Profiling Output Formats

JSON Export

import epochly
import json
metrics = epochly.get_metrics()
with open('metrics.json', 'w') as f:
    json.dump(metrics, f, indent=2)

Prometheus Format

# If Prometheus exporter is enabled
curl http://localhost:9090/metrics
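If no exporter is available in your deployment, a hand-rolled exposition is easy to sketch, assuming get_metrics() returns a flat dict of numeric values (the epochly_ metric prefix and the helper below are illustrative, not part of the API):

import epochly

def prometheus_text():
    # Emit each numeric metric as an unlabeled Prometheus gauge line
    metrics = epochly.get_metrics()
    lines = [f"epochly_{name} {value}"
             for name, value in metrics.items()
             if isinstance(value, (int, float))]
    return "\n".join(lines)

print(prometheus_text())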