This documentation is automatically generated by online-judge-tools/verification-helper
#!/usr/bin/env python3
"""
Benchmark comparing view2 (dual-array view) vs tuple list.
Tests tuple access, iteration, sorting, modification, and append/pop operations.
"""
import random
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from cp_library.perf.benchmark import Benchmark, BenchmarkConfig
from cp_library.ds.view.view2_cls import view2
# Configure benchmark
config = BenchmarkConfig(
name="view2",
sizes=[1000000, 100000, 10000, 1000, 100], # Reverse order to warm up JIT
operations=['tuple_access', 'iteration', 'sorting', 'modification', 'append_pop'],
iterations=10,
warmup=3,
output_dir="./output/benchmark_results/view2"
)
# Create benchmark instance
benchmark = Benchmark(config)
# Data generator
@benchmark.data_generator("default")
def generate_view2_data(size: int, operation: str):
"""Generate test data for view2 operations"""
# Generate parallel arrays
A = [random.randint(1, 1000000) for _ in range(size)]
B = [random.randint(1, 1000000) for _ in range(size)]
# Create view2 covering full range
    # Bind to a name other than `view2`; reassigning `view2` here would shadow
    # the imported class and raise UnboundLocalError on the right-hand side.
    v2 = view2(A.copy(), B.copy(), 0, size)
# Create equivalent data structures
tuple_list = [(A[i], B[i]) for i in range(size)]
return {
        'view2': v2,
'tuple_list': tuple_list,
'A': A.copy(),
'B': B.copy(),
'size': size
}
# Setup functions for operations that modify data
@benchmark.setup("view2", ["modification", "sorting", "append_pop"])
def setup_view2_modify(data):
"""Setup function that copies view2 data before modification"""
new_data = data.copy()
A_copy = data['A'].copy()
B_copy = data['B'].copy()
new_data['view2'] = view2(A_copy, B_copy, 0, data['size'])
return new_data
@benchmark.setup("tuple_list", ["modification", "sorting", "append_pop"])
def setup_tuple_list_modify(data):
"""Setup function that copies tuple list before modification"""
new_data = data.copy()
new_data['tuple_list'] = data['tuple_list'].copy()
return new_data
# Tuple access operation
@benchmark.implementation("view2", "tuple_access")
def tuple_access_view2(data):
"""Access tuples using view2[i]"""
view2 = data['view2']
checksum = 0
for i in range(len(view2)):
a, b = view2[i]
checksum ^= a ^ b
return checksum
@benchmark.implementation("tuple_list", "tuple_access")
def tuple_access_tuple_list(data):
"""Access tuples using list[i]"""
tuple_list = data['tuple_list']
checksum = 0
for i in range(len(tuple_list)):
a, b = tuple_list[i]
checksum ^= a ^ b
return checksum
# Iteration operation
@benchmark.implementation("view2", "iteration")
def iteration_view2(data):
"""Iterate through view2 using for-in (no __iter__)"""
view2 = data['view2']
checksum = 0
for a, b in view2: # Uses __getitem__ with IndexError
checksum ^= a ^ b
return checksum
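# Illustrative sketch (hypothetical helper, not used by the benchmark): since
# view2 defines no __iter__, for-in falls back to Python's legacy sequence
# protocol, which behaves roughly like the generator below.
def _iterate_via_getitem(seq):
    """Yield seq[0], seq[1], ... until __getitem__ raises IndexError."""
    i = 0
    while True:
        try:
            yield seq[i]
        except IndexError:
            return
        i += 1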
@benchmark.implementation("tuple_list", "iteration")
def iteration_tuple_list(data):
"""Iterate through tuple list using for-in"""
tuple_list = data['tuple_list']
checksum = 0
for a, b in tuple_list:
checksum ^= a ^ b
return checksum
# Sorting operation
@benchmark.implementation("view2", "sorting")
def sorting_view2(data):
"""Sort view2 using isort_ranged"""
view2 = data['view2']
view2.sort() # Uses isort_ranged on view range
checksum = 0
for i in range(min(100, len(view2))):
a, b = view2[i]
checksum ^= a ^ b
return checksum
@benchmark.implementation("tuple_list", "sorting")
def sorting_tuple_list(data):
"""Sort tuple list by first element"""
tuple_list = data['tuple_list']
tuple_list.sort(key=lambda x: x[0])
checksum = 0
for i in range(min(100, len(tuple_list))):
a, b = tuple_list[i]
checksum ^= a ^ b
return checksum
# Modification operation
@benchmark.implementation("view2", "modification")
def modification_view2(data):
"""Modify view2 elements using __setitem__"""
view2 = data['view2']
checksum = 0
for i in range(len(view2)):
a, b = view2[i]
new_a = (a * 2) & 0xFFFFFFFF
new_b = (b * 3) & 0xFFFFFFFF
view2[i] = (new_a, new_b)
checksum ^= new_a ^ new_b
return checksum
@benchmark.implementation("tuple_list", "modification")
def modification_tuple_list(data):
"""Modify tuple list elements"""
tuple_list = data['tuple_list']
checksum = 0
for i in range(len(tuple_list)):
a, b = tuple_list[i]
new_a = (a * 2) & 0xFFFFFFFF
new_b = (b * 3) & 0xFFFFFFFF
tuple_list[i] = (new_a, new_b)
checksum ^= new_a ^ new_b
return checksum
# Append/pop operation
@benchmark.implementation("view2", "append_pop")
def append_pop_view2(data):
"""Test view2 append/pop operations"""
view2 = data['view2']
checksum = 0
operations = min(1000, len(view2) // 10)
# Pop from end and append back
for _ in range(operations):
a, b = view2.pop()
checksum ^= a ^ b
for i in range(operations):
view2.append((i + 1000, i + 2000))
checksum ^= (i + 1000) ^ (i + 2000)
return checksum
@benchmark.implementation("tuple_list", "append_pop")
def append_pop_tuple_list(data):
"""Test tuple list append/pop operations"""
tuple_list = data['tuple_list']
checksum = 0
operations = min(1000, len(tuple_list) // 10)
# Pop from end and append back
for _ in range(operations):
a, b = tuple_list.pop()
checksum ^= a ^ b
for i in range(operations):
tuple_list.append((i + 1000, i + 2000))
checksum ^= (i + 1000) ^ (i + 2000)
return checksum
if __name__ == "__main__":
# Parse command line args and run appropriate mode
runner = benchmark.parse_args()
runner.run()
#!/usr/bin/env python3
"""
Benchmark comparing view2 (dual-array view) vs tuple list.
Tests tuple access, iteration, sorting, modification, and append/pop operations.
"""
import random
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
"""
Declarative benchmark framework with minimal boilerplate.
Features:
- Decorator-based benchmark registration
- Automatic data generation and validation
- Built-in timing with warmup
- Configurable operations and sizes
- JSON results and matplotlib plotting
"""
import time
import json
import statistics
import argparse
from typing import Dict, List, Any, Callable, Union
from dataclasses import dataclass
from pathlib import Path
from collections import defaultdict
@dataclass
class BenchmarkConfig:
"""Configuration for benchmark runs"""
name: str
sizes: List[int] = None
operations: List[str] = None
iterations: int = 10
warmup: int = 2
output_dir: str = "./output/benchmark_results"
save_results: bool = True
plot_results: bool = True
plot_scale: str = "loglog" # Options: "loglog", "linear", "semilogx", "semilogy"
progressive: bool = True # Show results operation by operation across sizes
# Profiling mode
profile_mode: bool = False
profile_size: int = None
profile_operation: str = None
profile_implementation: str = None
def __post_init__(self):
if self.sizes is None:
self.sizes = [100, 1000, 10000, 100000]
if self.operations is None:
self.operations = ['default']
class Benchmark:
"""Declarative benchmark framework using decorators"""
def __init__(self, config: BenchmarkConfig):
self.config = config
self.data_generators = {}
self.implementations = {}
self.validators = {}
self.setups = {}
self.results = []
def profile(self, operation: str = None, size: int = None, implementation: str = None):
"""Create a profiling version of this benchmark"""
profile_config = BenchmarkConfig(
name=f"{self.config.name}_profile",
sizes=self.config.sizes,
operations=self.config.operations,
profile_mode=True,
profile_operation=operation,
profile_size=size,
profile_implementation=implementation,
save_results=False,
plot_results=False
)
profile_benchmark = Benchmark(profile_config)
profile_benchmark.data_generators = self.data_generators
profile_benchmark.implementations = self.implementations
profile_benchmark.validators = self.validators
profile_benchmark.setups = self.setups
return profile_benchmark
def parse_args(self):
"""Parse command line arguments for profiling mode"""
parser = argparse.ArgumentParser(
description=f"Benchmark {self.config.name} with optional profiling mode",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Examples:
# Normal benchmark mode
python benchmark.py
# Profile specific operation and implementation
python benchmark.py --profile --operation random_access --implementation grid
# Profile with specific size
python benchmark.py --profile --size 1000000
# Profile all implementations of an operation
python benchmark.py --profile --operation construction
"""
)
parser.add_argument('--profile', action='store_true',
help='Run in profiling mode (minimal overhead for profilers)')
parser.add_argument('--operation', type=str,
help=f'Operation to profile. Options: {", ".join(self.config.operations)}')
parser.add_argument('--size', type=int,
help=f'Size to profile. Options: {", ".join(map(str, self.config.sizes))}')
parser.add_argument('--implementation', type=str,
help='Specific implementation to profile (default: all)')
args = parser.parse_args()
# If profile mode requested, return a profiling benchmark
if args.profile:
return self.profile(
operation=args.operation,
size=args.size,
implementation=args.implementation
)
# Otherwise return self for normal mode
return self
def data_generator(self, name: str = "default"):
"""Decorator to register data generator"""
def decorator(func):
self.data_generators[name] = func
return func
return decorator
def implementation(self, name: str, operations: Union[str, List[str]] = None):
"""Decorator to register implementation"""
if operations is None:
operations = ['default']
elif isinstance(operations, str):
operations = [operations]
def decorator(func):
for op in operations:
if op not in self.implementations:
self.implementations[op] = {}
self.implementations[op][name] = func
return func
return decorator
def validator(self, operation: str = "default"):
"""Decorator to register custom validator"""
def decorator(func):
self.validators[operation] = func
return func
return decorator
def setup(self, name: str, operations: Union[str, List[str]] = None):
"""Decorator to register setup function that runs before timing"""
if operations is None:
operations = ['default']
elif isinstance(operations, str):
operations = [operations]
def decorator(func):
for op in operations:
if op not in self.setups:
self.setups[op] = {}
self.setups[op][name] = func
return func
return decorator
def measure_time(self, func: Callable, data: Any, setup_func: Callable = None) -> tuple[Any, float]:
"""Measure execution time with warmup and optional setup"""
# Warmup runs
for _ in range(self.config.warmup):
try:
if setup_func:
setup_data = setup_func(data)
func(setup_data)
else:
func(data)
except Exception:
# If warmup fails, let the main measurement handle the error
break
# Actual measurement
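        # Note: when a setup function is registered, it runs inside the timed
        # loop below, so its cost (e.g. copying the input lists) is included
        # in the reported per-iteration time.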
start = time.perf_counter()
for _ in range(self.config.iterations):
if setup_func:
setup_data = setup_func(data)
result = func(setup_data)
else:
result = func(data)
elapsed_ms = (time.perf_counter() - start) * 1000 / self.config.iterations
return result, elapsed_ms
def validate_result(self, expected: Any, actual: Any, operation: str) -> bool:
"""Validate result using custom validator or default comparison"""
if operation in self.validators:
return self.validators[operation](expected, actual)
return expected == actual
def run(self):
"""Run all benchmarks"""
if self.config.profile_mode:
self._run_profile_mode()
else:
self._run_normal_mode()
def _run_normal_mode(self):
"""Run normal benchmark mode"""
print(f"Running {self.config.name}")
print(f"Sizes: {self.config.sizes}")
print(f"Operations: {self.config.operations}")
print("="*80)
# Always show progressive results: operation by operation across all sizes
for operation in self.config.operations:
for size in self.config.sizes:
self._run_single(operation, size)
# Save and plot results
if self.config.save_results:
self._save_results()
if self.config.plot_results:
self._plot_results()
# Print summary
self._print_summary()
def _run_profile_mode(self):
"""Run profiling mode with minimal overhead for use with vmprof"""
operation = self.config.profile_operation or self.config.operations[0]
size = self.config.profile_size or max(self.config.sizes)
impl_name = self.config.profile_implementation
print(f"PROFILING MODE: {self.config.name}")
print(f"Operation: {operation}, Size: {size}")
if impl_name:
print(f"Implementation: {impl_name}")
print("="*80)
print("Run with vmprof: vmprof --web " + ' '.join(sys.argv))
print("="*80)
# Generate test data
generator = self.data_generators.get(operation, self.data_generators.get('default'))
if not generator:
raise ValueError(f"No data generator for operation: {operation}")
test_data = generator(size, operation)
# Get implementations
impls = self.implementations.get(operation, {})
if not impls:
raise ValueError(f"No implementations for operation: {operation}")
# Filter to specific implementation if requested
if impl_name:
if impl_name not in impls:
raise ValueError(f"Implementation '{impl_name}' not found for operation '{operation}'")
impls = {impl_name: impls[impl_name]}
# Run with minimal overhead - no timing, no validation
for name, func in impls.items():
print(f"\nRunning {name}...")
sys.stdout.flush()
# Setup if needed
setup_func = self.setups.get(operation, {}).get(name)
if setup_func:
data = setup_func(test_data)
else:
data = test_data
# Run the actual function (this is what vmprof will profile)
result = func(data)
print(f"Completed {name}, result checksum: {result}")
sys.stdout.flush()
def _run_single(self, operation: str, size: int):
"""Run a single operation/size combination"""
print(f"\nOperation: {operation}, Size: {size}")
print("-" * 50)
sys.stdout.flush()
# Generate test data
generator = self.data_generators.get(operation,
self.data_generators.get('default'))
if not generator:
raise ValueError(f"No data generator for operation: {operation}")
test_data = generator(size, operation)
# Get implementations for this operation
impls = self.implementations.get(operation, {})
if not impls:
print(f"No implementations for operation: {operation}")
return
# Get setup functions for this operation
setups = self.setups.get(operation, {})
# Run reference implementation first
ref_name, ref_impl = next(iter(impls.items()))
ref_setup = setups.get(ref_name)
expected_result, _ = self.measure_time(ref_impl, test_data, ref_setup)
# Run all implementations
for impl_name, impl_func in impls.items():
try:
setup_func = setups.get(impl_name)
result, time_ms = self.measure_time(impl_func, test_data, setup_func)
correct = self.validate_result(expected_result, result, operation)
# Store result
self.results.append({
'operation': operation,
'size': size,
'implementation': impl_name,
'time_ms': time_ms,
'correct': correct,
'error': None
})
status = "OK" if correct else "FAIL"
print(f" {impl_name:<20} {time_ms:>8.3f} ms {status}")
sys.stdout.flush()
except Exception as e:
self.results.append({
'operation': operation,
'size': size,
'implementation': impl_name,
'time_ms': float('inf'),
'correct': False,
'error': str(e)
})
print(f" {impl_name:<20} ERROR: {str(e)[:40]}")
sys.stdout.flush()
def _save_results(self):
"""Save results to JSON"""
output_dir = Path(self.config.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
filename = output_dir / f"{self.config.name}_{int(time.time())}.json"
with open(filename, 'w') as f:
json.dump(self.results, f, indent=2)
print(f"\nResults saved to {filename}")
def _plot_results(self):
"""Generate plots using matplotlib if available"""
try:
import matplotlib.pyplot as plt
output_dir = Path(self.config.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
# Group and prepare data for plotting
data_by_op = self._group_results_by_operation()
# Create plots for each operation
for operation, operation_data in data_by_op.items():
self._create_performance_plot(plt, operation, operation_data, output_dir)
except ImportError:
print("Matplotlib not available - skipping plots")
except Exception as e:
print(f"Plotting failed: {e}")
def _group_results_by_operation(self) -> Dict[str, Dict[int, List[Dict[str, Any]]]]:
"""Group results by operation and size for plotting"""
data_by_op = defaultdict(lambda: defaultdict(list))
for r in self.results:
if r['time_ms'] != float('inf') and r['correct']:
data_by_op[r['operation']][r['size']].append({
'implementation': r['implementation'],
'time_ms': r['time_ms']
})
return data_by_op
def _create_performance_plot(self, plt, operation: str, operation_data: Dict[int, List[Dict[str, Any]]], output_dir: Path):
"""Create a performance plot for a single operation"""
sizes = sorted(operation_data.keys())
implementations = set()
for size_data in operation_data.values():
for entry in size_data:
implementations.add(entry['implementation'])
implementations = sorted(implementations)
plt.figure(figsize=(10, 6))
for impl in implementations:
impl_times = []
impl_sizes = []
for size in sizes:
times = [entry['time_ms'] for entry in operation_data[size]
if entry['implementation'] == impl]
if times:
impl_times.append(statistics.mean(times))
impl_sizes.append(size)
if impl_times:
plt.plot(impl_sizes, impl_times, 'o-', label=impl)
plt.xlabel('Input Size')
plt.ylabel('Time (ms)')
plt.title(f'{self.config.name} - {operation} Operation')
plt.legend()
plt.grid(True, alpha=0.3)
# Apply the configured scaling
if self.config.plot_scale == "loglog":
plt.loglog()
elif self.config.plot_scale == "linear":
pass # Default linear scale
elif self.config.plot_scale == "semilogx":
plt.semilogx()
elif self.config.plot_scale == "semilogy":
plt.semilogy()
else:
# Default to loglog if invalid option
plt.loglog()
plot_file = output_dir / f"{self.config.name}_{operation}_performance.png"
plt.savefig(plot_file, dpi=300, bbox_inches='tight')
plt.close()
print(f"Plot saved: {plot_file}")
def _print_summary(self):
"""Print performance summary"""
print("\n" + "="*80)
print("PERFORMANCE SUMMARY")
print("="*80)
# Group by operation
by_operation = defaultdict(lambda: defaultdict(list))
for r in self.results:
if r['error'] is None and r['time_ms'] != float('inf'):
by_operation[r['operation']][r['implementation']].append(r['time_ms'])
print(f"{'Operation':<15} {'Best Implementation':<20} {'Avg Time (ms)':<15} {'Speedup':<10}")
print("-" * 70)
for op, impl_times in sorted(by_operation.items()):
# Calculate averages
avg_times = [(impl, statistics.mean(times))
for impl, times in impl_times.items()]
avg_times.sort(key=lambda x: x[1])
if avg_times:
best_impl, best_time = avg_times[0]
worst_time = avg_times[-1][1]
speedup = worst_time / best_time if best_time > 0 else 0
print(f"{op:<15} {best_impl:<20} {best_time:<15.3f} {speedup:<10.1f}x")
'''
╺━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╸
https://kobejean.github.io/cp-library
'''
def argsort_ranged(A: list[int], l: int, r: int, reverse=False):
P = Packer(r-l-1); I = [A[l+i] for i in range(r-l)]; P.ienumerate(I, reverse); I.sort()
for i in range(r-l): I[i] = (I[i] & P.m) + l
return I
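# Illustrative sketch (hypothetical helper, not used by the benchmark):
# argsort_ranged returns absolute indices into A that order the slice A[l:r].
def _argsort_ranged_example():
    A = [9, 3, 1, 2, 9]
    order = argsort_ranged(A, 1, 4)   # only indices 1..3 are considered
    assert order == [2, 3, 1]         # A[2]=1 <= A[3]=2 <= A[1]=3
    return order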
class Packer:
__slots__ = 's', 'm'
def __init__(P, mx: int): P.s = mx.bit_length(); P.m = (1 << P.s) - 1
def enc(P, a: int, b: int): return a << P.s | b
def dec(P, x: int) -> tuple[int, int]: return x >> P.s, x & P.m
def enumerate(P, A, reverse=False): P.ienumerate(A:=list(A), reverse); return A
def ienumerate(P, A, reverse=False):
if reverse:
for i,a in enumerate(A): A[i] = P.enc(-a, i)
else:
for i,a in enumerate(A): A[i] = P.enc(a, i)
def indices(P, A: list[int]): P.iindices(A:=list(A)); return A
def iindices(P, A):
for i,a in enumerate(A): A[i] = P.m&a
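# Illustrative sketch (hypothetical helper, not used by the benchmark): Packer
# packs a (value, index) pair into one int. With mx=10, s = (10).bit_length() = 4
# and m = 0b1111, so enc(3, 9) = 3 << 4 | 9 = 57 and dec(57) = (3, 9).
def _packer_example():
    P = Packer(10)
    x = P.enc(3, 9)
    assert x == 57 and P.dec(x) == (3, 9)
    return x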
def isort_ranged(*L: list, l: int, r: int, reverse=False):
n = r - l
order = argsort_ranged(L[0], l, r, reverse=reverse)
inv = [0] * n
# order contains indices in range [l, r), need to map to [0, n)
for i in range(n): inv[order[i]-l] = i
for i in range(n):
j = order[i] - l # j is in range [0, n)
for A in L: A[l+i], A[l+j] = A[l+j], A[l+i]
order[inv[i]], order[inv[j]] = order[inv[j]], order[inv[i]]
inv[i], inv[j] = inv[j], inv[i]
return L
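# Illustrative sketch (hypothetical helper, not used by the benchmark):
# isort_ranged sorts several parallel lists in place over the half-open range
# [l, r), keyed by the first list, so paired elements stay aligned.
def _isort_ranged_example():
    A = [9, 3, 1, 2, 9]
    B = ['a', 'b', 'c', 'd', 'e']
    isort_ranged(A, B, l=1, r=4)            # only positions 1..3 are sorted
    assert A == [9, 1, 2, 3, 9]
    assert B == ['a', 'c', 'd', 'b', 'e']
    return A, B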
from typing import Generic
from typing import TypeVar
_S = TypeVar('S')
_T = TypeVar('T')
_U = TypeVar('U')
class view2(Generic[_S, _T]):
__slots__ = 'A', 'B', 'l', 'r'
def __init__(V, A: list[_S], B: list[_T], l: int, r: int): V.A, V.B, V.l, V.r = A, B, l, r
def __len__(V): return V.r - V.l
def __getitem__(V, i: int):
if 0 <= i < V.r - V.l: return V.A[V.l+i], V.B[V.l+i]
else: raise IndexError
def __setitem__(V, i: int, v: tuple[_S, _T]): V.A[V.l+i], V.B[V.l+i] = v
    def __contains__(V, v: tuple[_S, _T]): raise NotImplementedError
def set_range(V, l: int, r: int): V.l, V.r = l, r
    def index(V, v: tuple[_S, _T]): raise NotImplementedError
def reverse(V):
l, r = V.l, V.r-1
while l < r: V.A[l], V.A[r] = V.A[r], V.A[l]; V.B[l], V.B[r] = V.B[r], V.B[l]; l += 1; r -= 1
def sort(V, reverse=False): isort_ranged(V.A, V.B, l=V.l, r=V.r, reverse=reverse)
def pop(V): V.r -= 1; return V.A[V.r], V.B[V.r]
def append(V, v: tuple[_S, _T]): V.A[V.r], V.B[V.r] = v; V.r += 1
def popleft(V): V.l += 1; return V.A[V.l-1], V.B[V.l-1]
    def appendleft(V, v: tuple[_S, _T]): V.l -= 1; V.A[V.l], V.B[V.l] = v
def validate(V): return 0 <= V.l <= V.r <= len(V.A)
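# Illustrative sketch (hypothetical helper, not used by the benchmark): view2
# exposes the window [l, r) over two parallel arrays as a sequence of (a, b)
# pairs; append grows r, so the backing arrays need spare capacity beyond the
# initial window.
def _view2_example():
    A = [5, 1, 3, 0]          # index 3 is spare capacity for append
    B = [50, 10, 30, 0]
    V = view2(A, B, 0, 3)
    assert V[1] == (1, 10)
    V.sort()                  # sorts A[0:3] and B[0:3] together, keyed on A
    assert A[:3] == [1, 3, 5] and B[:3] == [10, 30, 50]
    V.append((7, 70))         # writes into slot 3 and extends r to 4
    assert V.pop() == (7, 70)
    return A, B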
# Configure benchmark
config = BenchmarkConfig(
name="view2",
sizes=[1000000, 100000, 10000, 1000, 100], # Reverse order to warm up JIT
operations=['tuple_access', 'iteration', 'sorting', 'modification', 'append_pop'],
iterations=10,
warmup=3,
output_dir="./output/benchmark_results/view2"
)
# Create benchmark instance
benchmark = Benchmark(config)
# Data generator
@benchmark.data_generator("default")
def generate_view2_data(size: int, operation: str):
"""Generate test data for view2 operations"""
# Generate parallel arrays
A = [random.randint(1, 1000000) for _ in range(size)]
B = [random.randint(1, 1000000) for _ in range(size)]
# Create view2 covering full range
    # Bind to a name other than `view2`; reassigning `view2` here would shadow
    # the class defined above and raise UnboundLocalError on the right-hand side.
    v2 = view2(A.copy(), B.copy(), 0, size)
# Create equivalent data structures
tuple_list = [(A[i], B[i]) for i in range(size)]
return {
        'view2': v2,
'tuple_list': tuple_list,
'A': A.copy(),
'B': B.copy(),
'size': size
}
# Setup functions for operations that modify data
@benchmark.setup("view2", ["modification", "sorting", "append_pop"])
def setup_view2_modify(data):
"""Setup function that copies view2 data before modification"""
new_data = data.copy()
A_copy = data['A'].copy()
B_copy = data['B'].copy()
new_data['view2'] = view2(A_copy, B_copy, 0, data['size'])
return new_data
@benchmark.setup("tuple_list", ["modification", "sorting", "append_pop"])
def setup_tuple_list_modify(data):
"""Setup function that copies tuple list before modification"""
new_data = data.copy()
new_data['tuple_list'] = data['tuple_list'].copy()
return new_data
# Tuple access operation
@benchmark.implementation("view2", "tuple_access")
def tuple_access_view2(data):
"""Access tuples using view2[i]"""
view2 = data['view2']
checksum = 0
for i in range(len(view2)):
a, b = view2[i]
checksum ^= a ^ b
return checksum
@benchmark.implementation("tuple_list", "tuple_access")
def tuple_access_tuple_list(data):
"""Access tuples using list[i]"""
tuple_list = data['tuple_list']
checksum = 0
for i in range(len(tuple_list)):
a, b = tuple_list[i]
checksum ^= a ^ b
return checksum
# Iteration operation
@benchmark.implementation("view2", "iteration")
def iteration_view2(data):
"""Iterate through view2 using for-in (no __iter__)"""
view2 = data['view2']
checksum = 0
for a, b in view2: # Uses __getitem__ with IndexError
checksum ^= a ^ b
return checksum
@benchmark.implementation("tuple_list", "iteration")
def iteration_tuple_list(data):
"""Iterate through tuple list using for-in"""
tuple_list = data['tuple_list']
checksum = 0
for a, b in tuple_list:
checksum ^= a ^ b
return checksum
# Sorting operation
@benchmark.implementation("view2", "sorting")
def sorting_view2(data):
"""Sort view2 using isort_ranged"""
view2 = data['view2']
view2.sort() # Uses isort_ranged on view range
checksum = 0
for i in range(min(100, len(view2))):
a, b = view2[i]
checksum ^= a ^ b
return checksum
@benchmark.implementation("tuple_list", "sorting")
def sorting_tuple_list(data):
"""Sort tuple list by first element"""
tuple_list = data['tuple_list']
tuple_list.sort(key=lambda x: x[0])
checksum = 0
for i in range(min(100, len(tuple_list))):
a, b = tuple_list[i]
checksum ^= a ^ b
return checksum
# Modification operation
@benchmark.implementation("view2", "modification")
def modification_view2(data):
"""Modify view2 elements using __setitem__"""
view2 = data['view2']
checksum = 0
for i in range(len(view2)):
a, b = view2[i]
new_a = (a * 2) & 0xFFFFFFFF
new_b = (b * 3) & 0xFFFFFFFF
view2[i] = (new_a, new_b)
checksum ^= new_a ^ new_b
return checksum
@benchmark.implementation("tuple_list", "modification")
def modification_tuple_list(data):
"""Modify tuple list elements"""
tuple_list = data['tuple_list']
checksum = 0
for i in range(len(tuple_list)):
a, b = tuple_list[i]
new_a = (a * 2) & 0xFFFFFFFF
new_b = (b * 3) & 0xFFFFFFFF
tuple_list[i] = (new_a, new_b)
checksum ^= new_a ^ new_b
return checksum
# Append/pop operation
@benchmark.implementation("view2", "append_pop")
def append_pop_view2(data):
"""Test view2 append/pop operations"""
view2 = data['view2']
checksum = 0
operations = min(1000, len(view2) // 10)
# Pop from end and append back
for _ in range(operations):
a, b = view2.pop()
checksum ^= a ^ b
for i in range(operations):
view2.append((i + 1000, i + 2000))
checksum ^= (i + 1000) ^ (i + 2000)
return checksum
@benchmark.implementation("tuple_list", "append_pop")
def append_pop_tuple_list(data):
"""Test tuple list append/pop operations"""
tuple_list = data['tuple_list']
checksum = 0
operations = min(1000, len(tuple_list) // 10)
# Pop from end and append back
for _ in range(operations):
a, b = tuple_list.pop()
checksum ^= a ^ b
for i in range(operations):
tuple_list.append((i + 1000, i + 2000))
checksum ^= (i + 1000) ^ (i + 2000)
return checksum
if __name__ == "__main__":
# Parse command line args and run appropriate mode
runner = benchmark.parse_args()
runner.run()