cp-library

This documentation is automatically generated by online-judge-tools/verification-helper


:warning: cp_library/perf/output.py

Depends on

    cp_library/__header__.py
    cp_library/perf/__header__.py
    cp_library/perf/interfaces.py

Required by

Code

import cp_library.__header__
"""
Output management for benchmark results.
"""
import json
import time
from pathlib import Path
from typing import List, Any
import cp_library.perf.__header__
from cp_library.perf.interfaces import OutputManager, BenchmarkResult


class JSONOutputManager(OutputManager):
    """Output manager that saves results to JSON files"""
    
    def __init__(self, output_dir: str = "./output/benchmark_results"):
        self.output_dir = Path(output_dir)
    
    def save_results(self, results: List[BenchmarkResult], config: Any) -> None:
        """Save benchmark results to JSON"""
        self.output_dir.mkdir(parents=True, exist_ok=True)
        
        # Convert results to dictionaries for JSON serialization
        results_dict = [
            {
                'operation': r.operation,
                'size': r.size,
                'implementation': r.implementation,
                'time_ms': r.time_ms,
                'correct': r.correct,
                'error': r.error
            }
            for r in results
        ]
        
        # Use config name if available, otherwise default name
        name = getattr(config, 'name', 'benchmark') if config else 'benchmark'
        filename = self.output_dir / f"{name}_{int(time.time())}.json"
        
        with open(filename, 'w') as f:
            json.dump(results_dict, f, indent=2)
        
        print(f"\nResults saved to {filename}")


class NoOpOutputManager(OutputManager):
    """Output manager that does nothing (for testing or when output is disabled)"""
    
    def save_results(self, results: List[BenchmarkResult], config: Any) -> None:
        """No-op implementation"""
        pass
Bundled

'''
╺━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╸
             https://kobejean.github.io/cp-library               
'''
"""
Output management for benchmark results.
"""
import json
import time
from pathlib import Path
from typing import List, Any

"""
Interfaces for the benchmark framework following SOLID principles.
"""
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, Optional, Union
from dataclasses import dataclass

@dataclass(frozen=True)
class BenchmarkResult:
    """Immutable benchmark result value object"""
    operation: str
    size: int
    implementation: str
    time_ms: float
    correct: bool
    error: Optional[str] = None


class TimerInterface(ABC):
    """Interface for timing implementations"""
    
    @abstractmethod
    def measure_time(self, func: Callable, data: Any, setup_func: Callable = None) -> tuple[Any, float]:
        """Measure execution time of a function"""
        pass
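

# Illustrative sketch (not part of cp_library): a perf_counter-based timer. The returned
# float is assumed to be milliseconds, to match BenchmarkResult.time_ms, and setup_func
# is assumed to return the data the timed function should receive.
class PerfCounterTimer(TimerInterface):
    def measure_time(self, func: Callable, data: Any, setup_func: Callable = None) -> tuple[Any, float]:
        if setup_func is not None:
            data = setup_func(data)                       # optional per-run preparation
        start = time.perf_counter()
        result = func(data)
        return result, (time.perf_counter() - start) * 1000.0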


class PlotRenderer(ABC):
    """Interface for plot rendering implementations"""
    
    @abstractmethod
    def can_render(self) -> bool:
        """Check if this renderer is available"""
        pass
    
    @abstractmethod
    def create_plots(self, results: List[BenchmarkResult], config: Any) -> None:
        """Create plots from benchmark results"""
        pass
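

# Illustrative sketch (not part of cp_library): a renderer that reports itself available
# only when matplotlib can be imported; the plotting body is a minimal example, not the
# framework's actual renderer.
class MatplotlibRenderer(PlotRenderer):
    def can_render(self) -> bool:
        try:
            import matplotlib.pyplot  # noqa: F401
            return True
        except ImportError:
            return False

    def create_plots(self, results: List[BenchmarkResult], config: Any) -> None:
        import matplotlib.pyplot as plt
        # One line per implementation: time_ms against input size
        for impl in sorted({r.implementation for r in results}):
            points = sorted((r.size, r.time_ms) for r in results if r.implementation == impl)
            plt.plot([s for s, _ in points], [t for _, t in points], label=impl)
        plt.xlabel('size')
        plt.ylabel('time (ms)')
        plt.legend()
        plt.show()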


class ResultValidator(ABC):
    """Interface for result validation strategies"""
    
    @abstractmethod
    def validate(self, expected: Any, actual: Any) -> bool:
        """Validate benchmark result"""
        pass
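

# Illustrative sketch (not part of cp_library): the simplest validation strategy,
# exact equality between the expected and actual results.
class ExactMatchValidator(ResultValidator):
    def validate(self, expected: Any, actual: Any) -> bool:
        return expected == actual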


class DataGenerator(ABC):
    """Interface for data generation strategies"""
    
    @abstractmethod
    def generate(self, size: int, operation: str) -> Any:
        """Generate test data for given size and operation"""
        pass
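

# Illustrative sketch (not part of cp_library): random integer lists of the requested
# size; this particular generator ignores the operation name.
class RandomListGenerator(DataGenerator):
    def generate(self, size: int, operation: str) -> Any:
        import random
        return [random.randint(0, 10**9) for _ in range(size)]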


class OutputManager(ABC):
    """Interface for output management"""
    
    @abstractmethod
    def save_results(self, results: List[BenchmarkResult], config: Any) -> None:
        """Save benchmark results"""
        pass


class BenchmarkRegistry(ABC):
    """Interface for benchmark component registration"""
    
    @abstractmethod
    def register_implementation(self, name: str, func: Callable, operations: List[str]) -> None:
        """Register a benchmark implementation"""
        pass
    
    @abstractmethod
    def register_data_generator(self, name: str, generator: DataGenerator) -> None:
        """Register a data generator"""
        pass
    
    @abstractmethod
    def register_validator(self, operation: str, validator: ResultValidator) -> None:
        """Register a result validator"""
        pass
    
    @abstractmethod
    def register_setup(self, name: str, setup_func: Callable, operations: List[str]) -> None:
        """Register a setup function"""
        pass
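

# Illustrative sketch (not part of cp_library): a minimal dict-backed registry that
# simply stores what it is given; lookup and dispatch are left to the orchestrator.
class DictBenchmarkRegistry(BenchmarkRegistry):
    def __init__(self):
        self.implementations: Dict[str, tuple[Callable, List[str]]] = {}
        self.data_generators: Dict[str, DataGenerator] = {}
        self.validators: Dict[str, ResultValidator] = {}
        self.setups: Dict[str, tuple[Callable, List[str]]] = {}

    def register_implementation(self, name: str, func: Callable, operations: List[str]) -> None:
        self.implementations[name] = (func, operations)

    def register_data_generator(self, name: str, generator: DataGenerator) -> None:
        self.data_generators[name] = generator

    def register_validator(self, operation: str, validator: ResultValidator) -> None:
        self.validators[operation] = validator

    def register_setup(self, name: str, setup_func: Callable, operations: List[str]) -> None:
        self.setups[name] = (setup_func, operations)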


class BenchmarkOrchestrator(ABC):
    """Interface for benchmark execution orchestration"""
    
    @abstractmethod
    def run_benchmarks(self, operations: List[str], sizes: List[int]) -> List[BenchmarkResult]:
        """Execute benchmarks and return results"""
        pass
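

# Illustrative sketch (not part of cp_library): a tiny orchestrator wiring the interfaces
# above together. The constructor arguments and the "first implementation is the
# reference result" rule are assumptions, not part of the framework.
class SimpleOrchestrator(BenchmarkOrchestrator):
    def __init__(self, implementations: Dict[str, Callable], generator: DataGenerator,
                 validators: Dict[str, ResultValidator], timer: TimerInterface):
        self.implementations = implementations
        self.generator = generator
        self.validators = validators
        self.timer = timer

    def run_benchmarks(self, operations: List[str], sizes: List[int]) -> List[BenchmarkResult]:
        results = []
        for operation in operations:
            for size in sizes:
                data = self.generator.generate(size, operation)
                expected = None
                for name, func in self.implementations.items():
                    try:
                        actual, elapsed_ms = self.timer.measure_time(func, data)
                        if expected is None:
                            expected = actual  # first implementation serves as the reference
                        validator = self.validators.get(operation)
                        correct = validator.validate(expected, actual) if validator else True
                        results.append(BenchmarkResult(operation, size, name, elapsed_ms, correct))
                    except Exception as e:
                        results.append(BenchmarkResult(operation, size, name, 0.0, False, str(e)))
        return results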


class JSONOutputManager(OutputManager):
    """Output manager that saves results to JSON files"""
    
    def __init__(self, output_dir: str = "./output/benchmark_results"):
        self.output_dir = Path(output_dir)
    
    def save_results(self, results: List[BenchmarkResult], config: Any) -> None:
        """Save benchmark results to JSON"""
        self.output_dir.mkdir(parents=True, exist_ok=True)
        
        # Convert results to dictionaries for JSON serialization
        results_dict = [
            {
                'operation': r.operation,
                'size': r.size,
                'implementation': r.implementation,
                'time_ms': r.time_ms,
                'correct': r.correct,
                'error': r.error
            }
            for r in results
        ]
        
        # Use config name if available, otherwise default name
        name = getattr(config, 'name', 'benchmark') if config else 'benchmark'
        filename = self.output_dir / f"{name}_{int(time.time())}.json"
        
        with open(filename, 'w') as f:
            json.dump(results_dict, f, indent=2)
        
        print(f"\nResults saved to {filename}")


class NoOpOutputManager(OutputManager):
    """Output manager that does nothing (for testing or when output is disabled)"""
    
    def save_results(self, results: List[BenchmarkResult], config: Any) -> None:
        """No-op implementation"""
        pass
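
JSONOutputManager and NoOpOutputManager are interchangeable behind the OutputManager interface, so a benchmark run can turn file output on or off without touching the rest of the pipeline. A minimal usage sketch, assuming the classes above are importable; the SimpleNamespace config and the sample timings are purely illustrative, since save_results only reads the config's name attribute:

from types import SimpleNamespace

results = [
    BenchmarkResult(operation='sort', size=1000, implementation='builtin', time_ms=0.42, correct=True),
    BenchmarkResult(operation='sort', size=1000, implementation='merge_sort', time_ms=1.87, correct=True),
]
config = SimpleNamespace(name='sort_benchmark')

JSONOutputManager('./output/benchmark_results').save_results(results, config)  # writes sort_benchmark_<timestamp>.json
NoOpOutputManager().save_results(results, config)                              # discards the results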