Performance Profiling
Using profiling features effectively with Epochly.
Built-in Profiling
Enable Monitoring
import epochly

# Set to Level 0 for profiling without optimization
epochly.set_level(0)

# Or use decorator
@epochly.optimize(level=0)
def profiled_function(data):
    return process(data)
Monitoring Context
import epochly

with epochly.monitoring_context() as metrics:
    result = my_function(data)

print(f"Duration: {metrics.get('duration_ms')} ms")
print(f"CPU time: {metrics.get('cpu_time_ms')} ms")
print(f"Memory: {metrics.get('memory_mb')} MB")
Benchmark Context
import epochly

with epochly.benchmark_context("my_algorithm") as results:
    output = algorithm(data)
# Automatically prints timing information
Accessing Metrics
Get Metrics Summary
import epochly

metrics = epochly.get_metrics()

# Timing metrics
print(f"Total calls: {metrics.get('total_calls')}")
print(f"Total time: {metrics.get('total_time_ms')} ms")
print(f"Mean time: {metrics.get('mean_time_ms')} ms")

# Resource metrics
print(f"Memory peak: {metrics.get('memory_peak_mb')} MB")
print(f"CPU utilization: {metrics.get('cpu_utilization_percent')}%")
Get Status
import epochly

status = epochly.get_status()
print(f"Enhancement level: {status['enhancement_level']}")
print(f"Enabled: {status['enabled']}")
print(f"JIT active: {status.get('jit_active', False)}")
Integration with cProfile
Using cProfile with Epochly
import cProfile
import pstats

import epochly

@epochly.optimize(level=0)  # Monitor but don't optimize
def profiled_work(data):
    return process(data)

# Profile with cProfile
profiler = cProfile.Profile()
profiler.enable()

for _ in range(100):
    profiled_work(test_data)

profiler.disable()

# Analyze results
stats = pstats.Stats(profiler)
stats.strip_dirs()
stats.sort_stats('cumulative')
stats.print_stats(20)
Saving Profile Results
import cProfile
import pstats

def profile_to_file(func, args, filename='profile.stats'):
    """Run ``func(*args)`` under cProfile and dump stats to *filename*.

    Returns whatever ``func`` returns, so profiling can be dropped into
    an existing call site without changing its result.
    """
    profiler = cProfile.Profile()
    profiler.enable()
    result = func(*args)
    profiler.disable()
    profiler.dump_stats(filename)
    return result

# Later: analyze with pstats
# python -m pstats profile.stats
Integration with line_profiler
Installation
pip install line_profiler
Usage
# Add @profile decorator (provided by kernprof)
@profile
def my_function(data):
    result = []                # Line timing
    for x in data:             # Line timing
        result.append(x ** 2)  # Line timing
    return result              # Line timing
# Run with kernprof
kernprof -l -v my_script.py
Combining with Epochly
import epochly

@profile
@epochly.optimize(level=0)  # Monitor
def combined_profile(data):
    # Both Epochly metrics and line-by-line profiling
    return process(data)
Memory Profiling
Using memory_profiler
pip install memory_profiler
from memory_profiler import profile

@profile
def memory_heavy(n):
    data = [i ** 2 for i in range(n)]
    return sum(data)

memory_heavy(1000000)
Epochly Memory Metrics
import epochly

with epochly.monitoring_context() as metrics:
    result = memory_intensive_operation()

print(f"Memory allocated: {metrics.get('memory_allocated_mb')} MB")
print(f"Peak memory: {metrics.get('memory_peak_mb')} MB")
GPU Profiling
Epochly GPU Metrics
import epochly

with epochly.optimize_context(level=4):
    result = gpu_operation(large_array)

status = epochly.get_status()
print(f"GPU utilization: {status.get('gpu_utilization_percent')}%")
print(f"GPU memory used: {status.get('gpu_memory_mb')} MB")
NVIDIA Profiling Tools
# nvidia-smi for live monitoring
watch -n 1 nvidia-smi

# nsys for detailed profiling
nsys profile python my_script.py

# ncu for kernel analysis
ncu python my_script.py
Continuous Profiling
Production Monitoring
import epochly
import time

def continuous_profile():
    """Poll Epochly metrics once a minute and forward them to the log sink."""
    while True:
        metrics = epochly.get_metrics()

        # Log metrics
        log_metrics({
            'timestamp': time.time(),
            'total_calls': metrics.get('total_calls'),
            'mean_time_ms': metrics.get('mean_time_ms'),
            'memory_mb': metrics.get('memory_peak_mb'),
        })

        time.sleep(60)  # Every minute
Alert on Regression
import epochly

def check_performance():
    """Alert when mean call time regresses 50% past the expected baseline."""
    metrics = epochly.get_metrics()
    baseline_ms = 100  # Expected baseline
    actual_ms = metrics.get('mean_time_ms', 0)

    if actual_ms > baseline_ms * 1.5:  # 50% regression
        alert(f"Performance regression: {actual_ms}ms > {baseline_ms}ms")
Profiling Best Practices
1. Profile Before Optimizing
import epochly

# Step 1: Profile
epochly.set_level(0)
run_workload()
analyze_results()

# Step 2: Choose level based on profile
epochly.set_level(appropriate_level)
2. Use Representative Data
# Bad: profiling with toy data
profile(small_test_data)

# Good: profiling with production-like data
profile(representative_production_data)
3. Profile Multiple Runs
import epochly  # was missing: epochly is used below
import statistics

times = []
for _ in range(100):
    with epochly.monitoring_context() as m:
        result = my_function(data)
    times.append(m.get('duration_ms'))

print(f"Mean: {statistics.mean(times):.2f}ms")
print(f"Stdev: {statistics.stdev(times):.2f}ms")
# Nearest-rank P95 of 100 samples is the 95th value, i.e. 0-based index 94
# (index 95 would be the 96th value).
print(f"P95: {sorted(times)[94]:.2f}ms")
4. Profile in Production Context
# Profile with production constraints:
# - Same hardware
# - Same data volumes
# - Same concurrency levels
# - Same memory constraints
Profiling Output Formats
JSON Export
import epochly
import json

metrics = epochly.get_metrics()

with open('metrics.json', 'w') as f:
    json.dump(metrics, f, indent=2)
Prometheus Format
# If Prometheus exporter is enabled
curl http://localhost:9090/metrics