cp-library

This documentation is automatically generated by online-judge-tools/verification-helper


:warning: perf/rank.py

Depends on

cp_library/perf/benchmark.py
cp_library/alg/iter/rank/irank_fn.py
cp_library/alg/iter/rank/irank_multi_fn.py

Code

#!/usr/bin/env python3
"""
Simple ranking benchmark using the new declarative framework.
Compares irank vs irank_multi performance across different data patterns.
"""

import random
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from cp_library.perf.benchmark import Benchmark, BenchmarkConfig
from cp_library.alg.iter.rank.irank_fn import irank
from cp_library.alg.iter.rank.irank_multi_fn import irank as irank_multi

# Configure benchmark
config = BenchmarkConfig(
    name="rank",
    sizes=[1000000, 100000, 10000, 1000, 100, 10, 1],  # Reverse order to warm up JIT
    operations=['construction', 'random', 'sorted', 'duplicates', 'reverse'],
    iterations=5,
    warmup=3,
    output_dir="./output/benchmark_results/rank"
)

# Create benchmark instance
benchmark = Benchmark(config)

# Data generator
@benchmark.data_generator("default")
def generate_rank_data(size: int, operation: str):
    """Generate ranking data in different patterns"""
    if operation == 'random':
        data = [random.randint(1, size) for _ in range(size)]
    elif operation == 'sorted':
        data = list(range(size))
    elif operation == 'duplicates':
        # Many duplicates (10% unique values)
        unique_count = max(1, size // 10)
        data = [random.randint(1, unique_count) for _ in range(size)]
    elif operation == 'reverse':
        data = list(range(size, 0, -1))
    else:
        raise ValueError(f"Unknown operation: {operation}")
    
    # Pre-initialize data for fair timing (exclude copy overhead)
    preinitialized = {
        'data_copy1': list(data),
        'data_copy2': list(data),
        'distinct': False
    }
    
    return {
        'data': data,
        'distinct': False,
        'size': size,
        'operation': operation,
        'preinitialized': preinitialized
    }

# Construction operation
@benchmark.implementation("irank", "construction")
def construction_irank(data):
    """Construct data copy for irank"""
    data_copy = list(data['data'])
    checksum = 0
    for x in data_copy:
        checksum ^= x
    return checksum

@benchmark.implementation("irank_multi", "construction")
def construction_irank_multi(data):
    """Construct data copy for irank_multi"""
    data_copy = list(data['data'])
    checksum = 0
    for x in data_copy:
        checksum ^= x
    return checksum

# Random operation
@benchmark.implementation("irank", "random")
def random_irank(data):
    """Standard irank implementation for random data"""
    pre = data['preinitialized']
    result = irank(pre['data_copy1'], distinct=pre['distinct'])
    checksum = 0
    for x in result:
        checksum ^= x
    return checksum

@benchmark.implementation("irank_multi", "random")
def random_irank_multi(data):
    """Multi-pass irank implementation for random data"""
    pre = data['preinitialized']
    result = irank_multi(pre['data_copy2'], distinct=pre['distinct'])
    checksum = 0
    for x in result:
        checksum ^= x
    return checksum

# Sorted operation
@benchmark.implementation("irank", "sorted")
def sorted_irank(data):
    """Standard irank implementation for sorted data"""
    pre = data['preinitialized']
    result = irank(pre['data_copy1'], distinct=pre['distinct'])
    checksum = 0
    for x in result:
        checksum ^= x
    return checksum

@benchmark.implementation("irank_multi", "sorted")
def sorted_irank_multi(data):
    """Multi-pass irank implementation for sorted data"""
    pre = data['preinitialized']
    result = irank_multi(pre['data_copy2'], distinct=pre['distinct'])
    checksum = 0
    for x in result:
        checksum ^= x
    return checksum

# Duplicates operation
@benchmark.implementation("irank", "duplicates")
def duplicates_irank(data):
    """Standard irank implementation for data with duplicates"""
    pre = data['preinitialized']
    result = irank(pre['data_copy1'], distinct=pre['distinct'])
    checksum = 0
    for x in result:
        checksum ^= x
    return checksum

@benchmark.implementation("irank_multi", "duplicates")
def duplicates_irank_multi(data):
    """Multi-pass irank implementation for data with duplicates"""
    pre = data['preinitialized']
    result = irank_multi(pre['data_copy2'], distinct=pre['distinct'])
    checksum = 0
    for x in result:
        checksum ^= x
    return checksum

# Reverse operation
@benchmark.implementation("irank", "reverse")
def reverse_irank(data):
    """Standard irank implementation for reverse data"""
    pre = data['preinitialized']
    result = irank(pre['data_copy1'], distinct=pre['distinct'])
    checksum = 0
    for x in result:
        checksum ^= x
    return checksum

@benchmark.implementation("irank_multi", "reverse")
def reverse_irank_multi(data):
    """Multi-pass irank implementation for reverse data"""
    pre = data['preinitialized']
    result = irank_multi(pre['data_copy2'], distinct=pre['distinct'])
    checksum = 0
    for x in result:
        checksum ^= x
    return checksum

# Additional benchmark with distinct=True
@benchmark.data_generator("distinct")
def generate_rank_data_distinct(size: int, operation: str):
    """Generate ranking data with distinct=True"""
    base_data = generate_rank_data(size, operation)
    base_data['distinct'] = True
    base_data['preinitialized']['distinct'] = True
    return base_data

def irank_distinct_implementation(data):
    """irank with distinct=True"""
    pre = data['preinitialized']
    result = irank(pre['data_copy1'], distinct=True)
    checksum = 0
    for x in result:
        checksum ^= x
    return checksum

def irank_multi_distinct_implementation(data):
    """irank_multi with distinct=True"""
    pre = data['preinitialized']
    result = irank_multi(pre['data_copy2'], distinct=True)
    checksum = 0
    for x in result:
        checksum ^= x
    return checksum

# Custom validator for rank results (now using XOR checksums)
@benchmark.validator("default")
def validate_rank_result(expected, actual):
    """Validate ranking results using XOR checksums"""
    try:
        # Compare XOR checksums directly
        return int(expected) == int(actual)
    except Exception:
        return False

if __name__ == "__main__":
    # Parse command line args and run appropriate mode
    runner = benchmark.parse_args()
    runner.run()

Code (bundled)

#!/usr/bin/env python3
"""
Simple ranking benchmark using the new declarative framework.
Compares irank vs irank_multi performance across different data patterns.
"""

import random
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

"""
Declarative benchmark framework with minimal boilerplate.

Features:
- Decorator-based benchmark registration
- Automatic data generation and validation
- Built-in timing with warmup
- Configurable operations and sizes
- JSON results and matplotlib plotting
"""

import time
import json
import statistics
import argparse
from typing import Dict, List, Any, Callable, Union
from dataclasses import dataclass
from pathlib import Path
from collections import defaultdict

@dataclass
class BenchmarkConfig:
    """Configuration for benchmark runs"""
    name: str
    sizes: List[int] = None
    operations: List[str] = None
    iterations: int = 10
    warmup: int = 2
    output_dir: str = "./output/benchmark_results"
    save_results: bool = True
    plot_results: bool = True
    plot_scale: str = "loglog"  # Options: "loglog", "linear", "semilogx", "semilogy"
    progressive: bool = True  # Show results operation by operation across sizes
    # Profiling mode
    profile_mode: bool = False
    profile_size: int = None
    profile_operation: str = None
    profile_implementation: str = None
    
    def __post_init__(self):
        if self.sizes is None:
            self.sizes = [100, 1000, 10000, 100000]
        if self.operations is None:
            self.operations = ['default']

class Benchmark:
    """Declarative benchmark framework using decorators"""
    
    def __init__(self, config: BenchmarkConfig):
        self.config = config
        self.data_generators = {}
        self.implementations = {}
        self.validators = {}
        self.setups = {}
        self.results = []
    
    def profile(self, operation: str = None, size: int = None, implementation: str = None):
        """Create a profiling version of this benchmark"""
        profile_config = BenchmarkConfig(
            name=f"{self.config.name}_profile",
            sizes=self.config.sizes,
            operations=self.config.operations,
            profile_mode=True,
            profile_operation=operation,
            profile_size=size,
            profile_implementation=implementation,
            save_results=False,
            plot_results=False
        )
        
        profile_benchmark = Benchmark(profile_config)
        profile_benchmark.data_generators = self.data_generators
        profile_benchmark.implementations = self.implementations
        profile_benchmark.validators = self.validators
        profile_benchmark.setups = self.setups
        
        return profile_benchmark
    
    def parse_args(self):
        """Parse command line arguments for profiling mode"""
        parser = argparse.ArgumentParser(
            description=f"Benchmark {self.config.name} with optional profiling mode",
            formatter_class=argparse.RawDescriptionHelpFormatter,
            epilog="""
Examples:
  # Normal benchmark mode
  python benchmark.py
  
  # Profile specific operation and implementation
  python benchmark.py --profile --operation random_access --implementation grid
  
  # Profile with specific size
  python benchmark.py --profile --size 1000000
  
  # Profile all implementations of an operation
  python benchmark.py --profile --operation construction
"""
        )
        
        parser.add_argument('--profile', action='store_true',
                          help='Run in profiling mode (minimal overhead for profilers)')
        parser.add_argument('--operation', type=str, 
                          help=f'Operation to profile. Options: {", ".join(self.config.operations)}')
        parser.add_argument('--size', type=int,
                          help=f'Size to profile. Options: {", ".join(map(str, self.config.sizes))}')
        parser.add_argument('--implementation', type=str,
                          help='Specific implementation to profile (default: all)')
        
        args = parser.parse_args()
        
        # If profile mode requested, return a profiling benchmark
        if args.profile:
            return self.profile(
                operation=args.operation,
                size=args.size,
                implementation=args.implementation
            )
        
        # Otherwise return self for normal mode
        return self
        
    def data_generator(self, name: str = "default"):
        """Decorator to register data generator"""
        def decorator(func):
            self.data_generators[name] = func
            return func
        return decorator
    
    def implementation(self, name: str, operations: Union[str, List[str]] = None):
        """Decorator to register implementation"""
        if operations is None:
            operations = ['default']
        elif isinstance(operations, str):
            operations = [operations]
            
        def decorator(func):
            for op in operations:
                if op not in self.implementations:
                    self.implementations[op] = {}
                self.implementations[op][name] = func
            return func
        return decorator
    
    def validator(self, operation: str = "default"):
        """Decorator to register custom validator"""
        def decorator(func):
            self.validators[operation] = func
            return func
        return decorator
    
    def setup(self, name: str, operations: Union[str, List[str]] = None):
        """Decorator to register setup function that runs before timing"""
        if operations is None:
            operations = ['default']
        elif isinstance(operations, str):
            operations = [operations]
            
        def decorator(func):
            for op in operations:
                if op not in self.setups:
                    self.setups[op] = {}
                self.setups[op][name] = func
            return func
        return decorator
    
    def measure_time(self, func: Callable, data: Any, setup_func: Callable = None) -> tuple[Any, float]:
        """Measure execution time with warmup and optional setup"""
        # Warmup runs
        for _ in range(self.config.warmup):
            try:
                if setup_func:
                    setup_data = setup_func(data)
                    func(setup_data)
                else:
                    func(data)
            except Exception:
                # If warmup fails, let the main measurement handle the error
                break
        
        # Actual measurement
        start = time.perf_counter()
        for _ in range(self.config.iterations):
            if setup_func:
                setup_data = setup_func(data)
                result = func(setup_data)
            else:
                result = func(data)
        elapsed_ms = (time.perf_counter() - start) * 1000 / self.config.iterations
        
        return result, elapsed_ms
    
    def validate_result(self, expected: Any, actual: Any, operation: str) -> bool:
        """Validate result using custom validator or default comparison"""
        if operation in self.validators:
            return self.validators[operation](expected, actual)
        return expected == actual
    
    def run(self):
        """Run all benchmarks"""
        if self.config.profile_mode:
            self._run_profile_mode()
        else:
            self._run_normal_mode()
    
    def _run_normal_mode(self):
        """Run normal benchmark mode"""
        print(f"Running {self.config.name}")
        print(f"Sizes: {self.config.sizes}")
        print(f"Operations: {self.config.operations}")
        print("="*80)
        
        # Always show progressive results: operation by operation across all sizes
        for operation in self.config.operations:
            for size in self.config.sizes:
                self._run_single(operation, size)
        
        # Save and plot results
        if self.config.save_results:
            self._save_results()
        
        if self.config.plot_results:
            self._plot_results()
        
        # Print summary
        self._print_summary()
    
    def _run_profile_mode(self):
        """Run profiling mode with minimal overhead for use with vmprof"""
        operation = self.config.profile_operation or self.config.operations[0]
        size = self.config.profile_size or max(self.config.sizes)
        impl_name = self.config.profile_implementation
        
        print(f"PROFILING MODE: {self.config.name}")
        print(f"Operation: {operation}, Size: {size}")
        if impl_name:
            print(f"Implementation: {impl_name}")
        print("="*80)
        print("Run with vmprof: vmprof --web " + ' '.join(sys.argv))
        print("="*80)
        
        # Generate test data
        generator = self.data_generators.get(operation, self.data_generators.get('default'))
        if not generator:
            raise ValueError(f"No data generator for operation: {operation}")
        
        test_data = generator(size, operation)
        
        # Get implementations
        impls = self.implementations.get(operation, {})
        if not impls:
            raise ValueError(f"No implementations for operation: {operation}")
        
        # Filter to specific implementation if requested
        if impl_name:
            if impl_name not in impls:
                raise ValueError(f"Implementation '{impl_name}' not found for operation '{operation}'")
            impls = {impl_name: impls[impl_name]}
        
        # Run with minimal overhead - no timing, no validation
        for name, func in impls.items():
            print(f"\nRunning {name}...")
            sys.stdout.flush()
            
            # Setup if needed
            setup_func = self.setups.get(operation, {}).get(name)
            if setup_func:
                data = setup_func(test_data)
            else:
                data = test_data
            
            # Run the actual function (this is what vmprof will profile)
            result = func(data)
            print(f"Completed {name}, result checksum: {result}")
            sys.stdout.flush()
    
    def _run_single(self, operation: str, size: int):
        """Run a single operation/size combination"""
        print(f"\nOperation: {operation}, Size: {size}")
        print("-" * 50)
        sys.stdout.flush()
        
        # Generate test data
        generator = self.data_generators.get(operation, 
                                           self.data_generators.get('default'))
        if not generator:
            raise ValueError(f"No data generator for operation: {operation}")
        
        test_data = generator(size, operation)
        
        # Get implementations for this operation
        impls = self.implementations.get(operation, {})
        if not impls:
            print(f"No implementations for operation: {operation}")
            return
        
        # Get setup functions for this operation
        setups = self.setups.get(operation, {})
        
        # Run reference implementation first
        ref_name, ref_impl = next(iter(impls.items()))
        ref_setup = setups.get(ref_name)
        expected_result, _ = self.measure_time(ref_impl, test_data, ref_setup)
        
        # Run all implementations
        for impl_name, impl_func in impls.items():
            try:
                setup_func = setups.get(impl_name)
                result, time_ms = self.measure_time(impl_func, test_data, setup_func)
                correct = self.validate_result(expected_result, result, operation)
                
                # Store result
                self.results.append({
                    'operation': operation,
                    'size': size,
                    'implementation': impl_name,
                    'time_ms': time_ms,
                    'correct': correct,
                    'error': None
                })
                
                status = "OK" if correct else "FAIL"
                print(f"  {impl_name:<20} {time_ms:>8.3f} ms  {status}")
                sys.stdout.flush()
                
            except Exception as e:
                self.results.append({
                    'operation': operation,
                    'size': size,
                    'implementation': impl_name,
                    'time_ms': float('inf'),
                    'correct': False,
                    'error': str(e)
                })
                print(f"  {impl_name:<20} ERROR: {str(e)[:40]}")
                sys.stdout.flush()
    
    def _save_results(self):
        """Save results to JSON"""
        output_dir = Path(self.config.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)
        
        filename = output_dir / f"{self.config.name}_{int(time.time())}.json"
        with open(filename, 'w') as f:
            json.dump(self.results, f, indent=2)
        print(f"\nResults saved to {filename}")
    
    def _plot_results(self):
        """Generate plots using matplotlib if available"""
        try:
            import matplotlib.pyplot as plt
            
            output_dir = Path(self.config.output_dir)
            output_dir.mkdir(parents=True, exist_ok=True)
            
            # Group and prepare data for plotting
            data_by_op = self._group_results_by_operation()
            
            # Create plots for each operation
            for operation, operation_data in data_by_op.items():
                self._create_performance_plot(plt, operation, operation_data, output_dir)
                
        except ImportError:
            print("Matplotlib not available - skipping plots")
        except Exception as e:
            print(f"Plotting failed: {e}")
    
    def _group_results_by_operation(self) -> Dict[str, Dict[int, List[Dict[str, Any]]]]:
        """Group results by operation and size for plotting"""
        data_by_op = defaultdict(lambda: defaultdict(list))
        for r in self.results:
            if r['time_ms'] != float('inf') and r['correct']:
                data_by_op[r['operation']][r['size']].append({
                    'implementation': r['implementation'],
                    'time_ms': r['time_ms']
                })
        return data_by_op
    
    def _create_performance_plot(self, plt, operation: str, operation_data: Dict[int, List[Dict[str, Any]]], output_dir: Path):
        """Create a performance plot for a single operation"""
        sizes = sorted(operation_data.keys())
        implementations = set()
        for size_data in operation_data.values():
            for entry in size_data:
                implementations.add(entry['implementation'])
        
        implementations = sorted(implementations)
        
        plt.figure(figsize=(10, 6))
        for impl in implementations:
            impl_times = []
            impl_sizes = []
            for size in sizes:
                times = [entry['time_ms'] for entry in operation_data[size] 
                        if entry['implementation'] == impl]
                if times:
                    impl_times.append(statistics.mean(times))
                    impl_sizes.append(size)
            
            if impl_times:
                plt.plot(impl_sizes, impl_times, 'o-', label=impl)
        
        plt.xlabel('Input Size')
        plt.ylabel('Time (ms)')
        plt.title(f'{self.config.name} - {operation} Operation')
        plt.legend()
        plt.grid(True, alpha=0.3)
        
        # Apply the configured scaling
        if self.config.plot_scale == "loglog":
            plt.loglog()
        elif self.config.plot_scale == "linear":
            pass  # Default linear scale
        elif self.config.plot_scale == "semilogx":
            plt.semilogx()
        elif self.config.plot_scale == "semilogy":
            plt.semilogy()
        else:
            # Default to loglog if invalid option
            plt.loglog()
        
        plot_file = output_dir / f"{self.config.name}_{operation}_performance.png"
        plt.savefig(plot_file, dpi=300, bbox_inches='tight')
        plt.close()
        print(f"Plot saved: {plot_file}")
    
    def _print_summary(self):
        """Print performance summary"""
        print("\n" + "="*80)
        print("PERFORMANCE SUMMARY")
        print("="*80)
        
        # Group by operation
        by_operation = defaultdict(lambda: defaultdict(list))
        for r in self.results:
            if r['error'] is None and r['time_ms'] != float('inf'):
                by_operation[r['operation']][r['implementation']].append(r['time_ms'])
        
        print(f"{'Operation':<15} {'Best Implementation':<20} {'Avg Time (ms)':<15} {'Speedup':<10}")
        print("-" * 70)
        
        for op, impl_times in sorted(by_operation.items()):
            # Calculate averages
            avg_times = [(impl, statistics.mean(times)) 
                        for impl, times in impl_times.items()]
            avg_times.sort(key=lambda x: x[1])
            
            if avg_times:
                best_impl, best_time = avg_times[0]
                worst_time = avg_times[-1][1]
                speedup = worst_time / best_time if best_time > 0 else 0
                
                print(f"{op:<15} {best_impl:<20} {best_time:<15.3f} {speedup:<10.1f}x")


'''
╺━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╸
             https://kobejean.github.io/cp-library               
'''




def irank(A: list[int], distinct = False):
    P = Packer(len(A)-1); V = P.enumerate(A); V.sort()
    if distinct:
        for r, ai in enumerate(V): a, i = P.dec(ai); A[i], V[r] = r, a
    elif V:
        r, p = -1, V[-1]+1 # start p at a value not in V so `if a != p` fires on the first element
        for ai in V:
            a, i = P.dec(ai)
            if a!=p: V[r:=r+1] = p = a
            A[i] = r
        del V[r+1:]
    return V
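# Example (illustrative): irank ranks A in place and returns the sorted values.
#   A = [30, 10, 20, 10]
#   V = irank(A)                 # A -> [2, 0, 1, 0]; V -> [10, 20, 30]
#   B = [30, 10, 20, 10]
#   W = irank(B, distinct=True)  # ties broken by index: B -> [3, 0, 2, 1]; W -> [10, 10, 20, 30]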



class Packer:
    __slots__ = 's', 'm'
    def __init__(P, mx: int): P.s = mx.bit_length(); P.m = (1 << P.s) - 1
    def enc(P, a: int, b: int): return a << P.s | b
    def dec(P, x: int) -> tuple[int, int]: return x >> P.s, x & P.m
    def enumerate(P, A, reverse=False): P.ienumerate(A:=list(A), reverse); return A
    def ienumerate(P, A, reverse=False):
        if reverse:
            for i,a in enumerate(A): A[i] = P.enc(-a, i)
        else:
            for i,a in enumerate(A): A[i] = P.enc(a, i)
    def indices(P, A: list[int]): P.iindices(A:=list(A)); return A
    def iindices(P, A):
        for i,a in enumerate(A): A[i] = P.m&a
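# Example (illustrative): Packer packs (value, index) into one int so a plain
# sort orders by value first, then by index.
#   p = Packer(7)     # indices up to 7 -> 3 low bits reserved for them
#   x = p.enc(5, 3)   # 5 << 3 | 3 == 43
#   p.dec(x)          # -> (5, 3)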


def max2(a, b):
    return a if a > b else b

# The multi-list variant is bound as irank_multi here (the benchmark imports it
# via `from ... import irank as irank_multi`), so it does not shadow irank above.
def irank_multi(*A: list[int], distinct = False):
    N = mxj = 0
    for Ai in A: N += len(Ai); mxj = max2(mxj, len(Ai))
    P = Packer3(len(A)-1, mxj); V = P.enumerate(A, N); V.sort()
    if distinct:
        for r,aij in enumerate(V):a,i,j=P.dec(aij);A[i][j],V[r]=r,a
    elif V:
        r, p = -1, V[-1]+1 # start p at a value not in V so `if a != p` fires on the first element
        for aij in V:
            a,i,j=P.dec(aij)
            if a!=p:V[r:=r+1]=p=a
            A[i][j]=r
        del V[r+1:]
    return V

class Packer3:
    def __init__(P, mxb: int, mxc: int):
        bb, bc = mxb.bit_length(), mxc.bit_length()
        P.mc, P.mb, P.sb, P.sa = (1<<bc)-1, (1<<bb)-1, bc, bc+bb
    def enc(P, a: int, b: int, c: int): return a << P.sa | b << P.sb | c
    def dec(P, x: int) -> tuple[int, int, int]: return x >> P.sa, (x >> P.sb) & P.mb, x & P.mc
    def enumerate(P, A, N, reverse=False): 
        V, k = [0]*N, 0
        if reverse:
            for i,Ai in enumerate(A):
                for j, a in enumerate(Ai):V[k]=P.enc(-a, i, j);k+=1
        else:
            for i,Ai in enumerate(A):
                for j, a in enumerate(Ai):V[k]=P.enc(a, i, j);k+=1
        return V
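# Example (illustrative): the multi-list variant ranks several lists against a
# shared value space.
#   A, B = [5, 1], [3, 3, 9]
#   V = irank_multi(A, B)   # A -> [2, 0]; B -> [1, 1, 3]; V -> [1, 3, 5, 9]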

# Configure benchmark
config = BenchmarkConfig(
    name="rank",
    sizes=[1000000, 100000, 10000, 1000, 100, 10, 1],  # Reverse order to warm up JIT
    operations=['construction', 'random', 'sorted', 'duplicates', 'reverse'],
    iterations=5,
    warmup=3,
    output_dir="./output/benchmark_results/rank"
)

# Create benchmark instance
benchmark = Benchmark(config)

# Data generator
@benchmark.data_generator("default")
def generate_rank_data(size: int, operation: str):
    """Generate ranking data in different patterns"""
    if operation == 'random':
        data = [random.randint(1, size) for _ in range(size)]
    elif operation == 'sorted':
        data = list(range(size))
    elif operation == 'duplicates':
        # Many duplicates (10% unique values)
        unique_count = max(1, size // 10)
        data = [random.randint(1, unique_count) for _ in range(size)]
    elif operation == 'reverse':
        data = list(range(size, 0, -1))
    else:
        raise ValueError(f"Unknown operation: {operation}")
    
    # Pre-initialize data for fair timing (exclude copy overhead)
    preinitialized = {
        'data_copy1': list(data),
        'data_copy2': list(data),
        'distinct': False
    }
    
    return {
        'data': data,
        'distinct': False,
        'size': size,
        'operation': operation,
        'preinitialized': preinitialized
    }

# Construction operation
@benchmark.implementation("irank", "construction")
def construction_irank(data):
    """Construct data copy for irank"""
    data_copy = list(data['data'])
    checksum = 0
    for x in data_copy:
        checksum ^= x
    return checksum

@benchmark.implementation("irank_multi", "construction")
def construction_irank_multi(data):
    """Construct data copy for irank_multi"""
    data_copy = list(data['data'])
    checksum = 0
    for x in data_copy:
        checksum ^= x
    return checksum

# Random operation
@benchmark.implementation("irank", "random")
def random_irank(data):
    """Standard irank implementation for random data"""
    pre = data['preinitialized']
    result = irank(pre['data_copy1'], distinct=pre['distinct'])
    checksum = 0
    for x in result:
        checksum ^= x
    return checksum

@benchmark.implementation("irank_multi", "random")
def random_irank_multi(data):
    """Multi-pass irank implementation for random data"""
    pre = data['preinitialized']
    result = irank_multi(pre['data_copy2'], distinct=pre['distinct'])
    checksum = 0
    for x in result:
        checksum ^= x
    return checksum

# Sorted operation
@benchmark.implementation("irank", "sorted")
def sorted_irank(data):
    """Standard irank implementation for sorted data"""
    pre = data['preinitialized']
    result = irank(pre['data_copy1'], distinct=pre['distinct'])
    checksum = 0
    for x in result:
        checksum ^= x
    return checksum

@benchmark.implementation("irank_multi", "sorted")
def sorted_irank_multi(data):
    """Multi-pass irank implementation for sorted data"""
    pre = data['preinitialized']
    result = irank_multi(pre['data_copy2'], distinct=pre['distinct'])
    checksum = 0
    for x in result:
        checksum ^= x
    return checksum

# Duplicates operation
@benchmark.implementation("irank", "duplicates")
def duplicates_irank(data):
    """Standard irank implementation for data with duplicates"""
    pre = data['preinitialized']
    result = irank(pre['data_copy1'], distinct=pre['distinct'])
    checksum = 0
    for x in result:
        checksum ^= x
    return checksum

@benchmark.implementation("irank_multi", "duplicates")
def duplicates_irank_multi(data):
    """Multi-pass irank implementation for data with duplicates"""
    pre = data['preinitialized']
    result = irank_multi(pre['data_copy2'], distinct=pre['distinct'])
    checksum = 0
    for x in result:
        checksum ^= x
    return checksum

# Reverse operation
@benchmark.implementation("irank", "reverse")
def reverse_irank(data):
    """Standard irank implementation for reverse data"""
    pre = data['preinitialized']
    result = irank(pre['data_copy1'], distinct=pre['distinct'])
    checksum = 0
    for x in result:
        checksum ^= x
    return checksum

@benchmark.implementation("irank_multi", "reverse")
def reverse_irank_multi(data):
    """Multi-pass irank implementation for reverse data"""
    pre = data['preinitialized']
    result = irank_multi(pre['data_copy2'], distinct=pre['distinct'])
    checksum = 0
    for x in result:
        checksum ^= x
    return checksum

# Additional benchmark with distinct=True
@benchmark.data_generator("distinct")
def generate_rank_data_distinct(size: int, operation: str):
    """Generate ranking data with distinct=True"""
    base_data = generate_rank_data(size, operation)
    base_data['distinct'] = True
    base_data['preinitialized']['distinct'] = True
    return base_data

def irank_distinct_implementation(data):
    """irank with distinct=True"""
    pre = data['preinitialized']
    result = irank(pre['data_copy1'], distinct=True)
    checksum = 0
    for x in result:
        checksum ^= x
    return checksum

def irank_multi_distinct_implementation(data):
    """irank_multi with distinct=True"""
    pre = data['preinitialized']
    result = irank_multi(pre['data_copy2'], distinct=True)
    checksum = 0
    for x in result:
        checksum ^= x
    return checksum

# Custom validator for rank results (now using XOR checksums)
@benchmark.validator("default")
def validate_rank_result(expected, actual):
    """Validate ranking results using XOR checksums"""
    try:
        # Compare XOR checksums directly
        return int(expected) == int(actual)
    except Exception:
        return False

if __name__ == "__main__":
    # Parse command line args and run appropriate mode
    runner = benchmark.parse_args()
    runner.run()