This documentation is automatically generated by online-judge-tools/verification-helper
import cp_library.__header__
"""
Timing and measurement utilities for benchmarks.
Separated from the main benchmark class to preserve single responsibility.
"""
import time
from typing import Any, Callable, Tuple
import cp_library.perf.__header__
from cp_library.perf.interfaces import TimerInterface
from cp_library.perf.checksum import result_checksum
class BenchmarkTimer(TimerInterface):
    """Handles timing and measurement of benchmark functions"""

    def __init__(self, iterations: int = 10, warmup: int = 2):
        self.iterations = iterations
        self.warmup = warmup

    def measure_time(self, func: Callable, data: Any, setup_func: Callable = None) -> Tuple[Any, float]:
        """Measure execution time with warmup and optional setup"""
        # Warmup runs
        for _ in range(self.warmup):
            try:
                if setup_func:
                    setup_data = setup_func(data)
                    func(setup_data)
                else:
                    func(data)
            except Exception:
                # If warmup fails, let the main measurement handle the error
                break
        # Actual measurement
        start = time.perf_counter()
        for _ in range(self.iterations):
            if setup_func:
                setup_data = setup_func(data)
                result = func(setup_data)
            else:
                result = func(data)
        elapsed_ms = (time.perf_counter() - start) * 1000 / self.iterations
        # Calculate checksum after timing with fallback for non-hashable types
        # This reduces overhead during the timed section
        checksum = result_checksum(result)
        return checksum, elapsed_ms
'''
╺━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╸
https://kobejean.github.io/cp-library
'''
"""
Timing and measurement utilities for benchmarks.
Separated from the main benchmark class to preserve single responsibility.
"""
import time
from typing import Any, Callable, Tuple
"""
Interfaces for the benchmark framework following SOLID principles.
"""
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, Optional, Union
from dataclasses import dataclass
@dataclass
class BenchmarkResult:
    """Immutable benchmark result value object"""
    operation: str
    size: int
    implementation: str
    time_ms: float
    correct: bool
    error: Optional[str] = None
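# Illustrative only (not part of the bundled source): a populated result record,
# as a benchmark orchestrator might emit it. All field values below are made up.
#   BenchmarkResult(operation="sort", size=1000, implementation="baseline",
#                   time_ms=1.25, correct=True)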
class TimerInterface(ABC):
    """Interface for timing implementations"""
    @abstractmethod
    def measure_time(self, func: Callable, data: Any, setup_func: Callable = None) -> tuple[Any, float]:
        """Measure execution time of a function"""
        pass

class PlotRenderer(ABC):
    """Interface for plot rendering implementations"""
    @abstractmethod
    def can_render(self) -> bool:
        """Check if this renderer is available"""
        pass
    @abstractmethod
    def create_plots(self, results: List[BenchmarkResult], config: Any) -> None:
        """Create plots from benchmark results"""
        pass

class ResultValidator(ABC):
    """Interface for result validation strategies"""
    @abstractmethod
    def validate(self, expected: Any, actual: Any) -> bool:
        """Validate benchmark result"""
        pass

class DataGenerator(ABC):
    """Interface for data generation strategies"""
    @abstractmethod
    def generate(self, size: int, operation: str) -> Any:
        """Generate test data for given size and operation"""
        pass

class OutputManager(ABC):
    """Interface for output management"""
    @abstractmethod
    def save_results(self, results: List[BenchmarkResult], config: Any) -> None:
        """Save benchmark results"""
        pass

class BenchmarkRegistry(ABC):
    """Interface for benchmark component registration"""
    @abstractmethod
    def register_implementation(self, name: str, func: Callable, operations: List[str]) -> None:
        """Register a benchmark implementation"""
        pass
    @abstractmethod
    def register_data_generator(self, name: str, generator: DataGenerator) -> None:
        """Register a data generator"""
        pass
    @abstractmethod
    def register_validator(self, operation: str, validator: ResultValidator) -> None:
        """Register a result validator"""
        pass
    @abstractmethod
    def register_setup(self, name: str, setup_func: Callable, operations: List[str]) -> None:
        """Register a setup function"""
        pass

class BenchmarkOrchestrator(ABC):
    """Interface for benchmark execution orchestration"""
    @abstractmethod
    def run_benchmarks(self, operations: List[str], sizes: List[int]) -> List[BenchmarkResult]:
        """Execute benchmarks and return results"""
        pass
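# A minimal sketch (not part of the library) of how one of the interfaces above
# could be satisfied: an equality-based ResultValidator. The class name is hypothetical.
class _ExactMatchValidator(ResultValidator):
    """Hypothetical validator that accepts a result only when it equals the expected value."""
    def validate(self, expected: Any, actual: Any) -> bool:
        # Plain equality; real validators might compare checksums or allow tolerances
        return expected == actual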
"""
Checksum utilities for benchmark validation.
Provides consistent ways to compute checksums across benchmarks.
"""
def update_checksum(current: int, value: int) -> int:
    """Update the running checksum with a single value using a base-31 rolling hash, masked to 32 bits."""
    return (current * 31 + value) & 0xFFFFFFFF
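# Worked example of how the rolling checksum accumulates (values are illustrative):
#   update_checksum(0, 5)   == (0 * 31 + 5)   & 0xFFFFFFFF == 5
#   update_checksum(5, 7)   == (5 * 31 + 7)   & 0xFFFFFFFF == 162
#   update_checksum(162, 2) == (162 * 31 + 2) & 0xFFFFFFFF == 5024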
def result_checksum(result: Any) -> Any:
    """
    Calculate checksum for benchmark result with fallback for non-hashable types.

    This function tries to create a consistent hash for any type of result,
    with intelligent fallbacks for common non-hashable types like lists, sets, and dicts.

    Args:
        result: The result to checksum (can be any type)

    Returns:
        Hash value if successful, original result if all fallbacks fail
    """
    # Try direct hash first (fastest path for hashable objects)
    try:
        return hash(result)
    except TypeError:
        pass
    # Try common fallback conversions for non-hashable types
    try:
        if isinstance(result, dict):
            # Convert dict to sorted tuple of items for consistent ordering
            return hash(tuple(sorted(result.items())))
        elif isinstance(result, set):
            # Convert set to sorted tuple for consistent ordering
            return hash(tuple(sorted(result)))
        elif _is_iterable_not_string(result):
            # Convert other iterables (lists, etc.) to tuple
            return hash(tuple(result))
        elif hasattr(result, '__dict__'):
            # Convert objects with attributes to tuple of sorted items
            return hash(tuple(sorted(result.__dict__.items())))
        else:
            # For other types, convert to string as last resort
            return hash(str(result))
    except (TypeError, RecursionError):
        # If all fallbacks fail, return the original result
        # The validation logic will handle it
        return result
def _is_iterable_not_string(obj: Any) -> bool:
    """
    Check if object is iterable but not a string or bytes.

    Uses both __iter__ and __getitem__ checks to catch more iterable types
    while excluding strings and bytes, which should be handled differently.
    """
    return (
        (hasattr(obj, '__iter__') or hasattr(obj, '__getitem__'))
        and not isinstance(obj, (str, bytes))
    )
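# Illustrative behaviour of the fallbacks above (not part of the library source):
#   result_checksum((1, 2, 3))        -> hash((1, 2, 3))               # directly hashable
#   result_checksum([1, 2, 3])        -> hash((1, 2, 3))               # list converted to tuple
#   result_checksum({"b": 2, "a": 1}) -> hash((("a", 1), ("b", 2)))    # dict items, sorted
#   result_checksum({3, 1, 2})        -> hash((1, 2, 3))               # set elements, sorted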
class BenchmarkTimer(TimerInterface):
    """Handles timing and measurement of benchmark functions"""

    def __init__(self, iterations: int = 10, warmup: int = 2):
        self.iterations = iterations
        self.warmup = warmup

    def measure_time(self, func: Callable, data: Any, setup_func: Callable = None) -> Tuple[Any, float]:
        """Measure execution time with warmup and optional setup"""
        # Warmup runs
        for _ in range(self.warmup):
            try:
                if setup_func:
                    setup_data = setup_func(data)
                    func(setup_data)
                else:
                    func(data)
            except Exception:
                # If warmup fails, let the main measurement handle the error
                break
        # Actual measurement
        start = time.perf_counter()
        for _ in range(self.iterations):
            if setup_func:
                setup_data = setup_func(data)
                result = func(setup_data)
            else:
                result = func(data)
        elapsed_ms = (time.perf_counter() - start) * 1000 / self.iterations
        # Calculate checksum after timing with fallback for non-hashable types
        # This reduces overhead during the timed section
        checksum = result_checksum(result)
        return checksum, elapsed_ms
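A minimal usage sketch (not taken from the library's own documentation): timing Python's built-in sorted over a random list, with a setup function that hands each measured call a fresh copy of the input so it always sorts unsorted data. The names timer, data, and copy_input are illustrative.

import random

def copy_input(data):
    # Hypothetical setup step: copy the input so each timed call starts from the same state
    return list(data)

timer = BenchmarkTimer(iterations=10, warmup=2)
data = [random.randint(0, 10**9) for _ in range(10**5)]
checksum, elapsed_ms = timer.measure_time(sorted, data, setup_func=copy_input)
print(f"sorted: {elapsed_ms:.3f} ms per iteration, checksum={checksum}")

Note that measure_time returns the checksum of the final iteration's result together with the mean wall-clock time per iteration, and that the setup call runs inside the timed loop, so its cost is included in the reported figure.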