Performance Optimizations
Optimization Goals
Reduce redundant API calls and improve response times by implementing intelligent caching for frequently accessed data.
Current Performance Profile
- list_models: ~958ms (due to permission enrichment for 80 models)
- search_records: 56-269ms depending on data size
- get_record: 21-32ms
- Connection setup: 66-186ms
Proposed Optimizations
1. Model Metadata Caching
Cache field definitions and permissions to avoid repeated API calls.
Implementation
# In caching.py or extend existing cache
import time
from typing import Any, Dict


class ModelMetadataCache:
    """Cache for model metadata with TTL support."""

    def __init__(self, ttl: int = 3600):  # 1 hour default
        self.cache = {}
        self.ttl = ttl
        self.field_cache = {}       # model -> fields info
        self.permission_cache = {}  # model -> permissions
        self.stats = {
            "hits": 0,
            "misses": 0,
            "evictions": 0
        }

    async def get_fields(self, model: str, connection) -> Dict[str, Dict]:
        """Get field definitions with caching."""
        cache_key = f"fields_{model}"

        # Check cache
        if cache_key in self.field_cache:
            entry = self.field_cache[cache_key]
            if time.time() - entry["timestamp"] < self.ttl:
                self.stats["hits"] += 1
                return entry["data"]
            else:
                # Expired
                del self.field_cache[cache_key]
                self.stats["evictions"] += 1

        # Cache miss - fetch from Odoo
        self.stats["misses"] += 1
        fields = await connection.fields_get(model)

        # Store in cache
        self.field_cache[cache_key] = {
            "data": fields,
            "timestamp": time.time()
        }
        return fields

    async def get_permissions(self, model: str, access_controller) -> Dict[str, bool]:
        """Get model permissions with caching."""
        cache_key = f"perms_{model}"

        if cache_key in self.permission_cache:
            entry = self.permission_cache[cache_key]
            if time.time() - entry["timestamp"] < self.ttl:
                self.stats["hits"] += 1
                return entry["data"]

        # Fetch permissions
        self.stats["misses"] += 1
        perms = await access_controller.check_model_access(model)

        self.permission_cache[cache_key] = {
            "data": perms,
            "timestamp": time.time()
        }
        return perms

    def invalidate(self, model: str = None):
        """Invalidate cache for specific model or all."""
        if model:
            self.field_cache.pop(f"fields_{model}", None)
            self.permission_cache.pop(f"perms_{model}", None)
        else:
            self.field_cache.clear()
            self.permission_cache.clear()

    def get_stats(self) -> Dict[str, Any]:
        """Get cache statistics."""
        total_requests = self.stats["hits"] + self.stats["misses"]
        hit_rate = (self.stats["hits"] / total_requests * 100) if total_requests > 0 else 0
        return {
            **self.stats,
            "hit_rate": f"{hit_rate:.1f}%",
            "total_requests": total_requests,
            "cache_size": len(self.field_cache) + len(self.permission_cache)
        }
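For illustration, the cache could be wired into the list_models enrichment path roughly like this; the helper name and the connection/access_controller objects below are placeholders, not existing APIs.
# Hypothetical usage sketch - enrich_model and its arguments are illustrative only
metadata_cache = ModelMetadataCache(ttl=3600)

async def enrich_model(model: str, connection, access_controller) -> dict:
    """Build the per-model payload for list_models from cached metadata."""
    fields = await metadata_cache.get_fields(model, connection)
    perms = await metadata_cache.get_permissions(model, access_controller)
    return {"model": model, "field_count": len(fields), "permissions": perms}

# After a schema change, drop that model's cached entries:
# metadata_cache.invalidate("res.partner")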
2. Connection Pooling Enhancement
Optimize connection reuse and health checks.
# In connection_pool.py
import asyncio


class ConnectionPool:
    """Enhanced connection pool with health monitoring."""

    def __init__(self, config: OdooConfig, pool_size: int = 5):
        self.config = config
        self.pool_size = pool_size
        self.connections = []
        self.available = asyncio.Queue(maxsize=pool_size)
        self.health_check_interval = 60  # seconds
        self._health_task = None

    async def initialize(self):
        """Initialize connection pool."""
        for _ in range(self.pool_size):
            conn = await self._create_connection()
            self.connections.append(conn)
            await self.available.put(conn)

        # Start health monitoring
        self._health_task = asyncio.create_task(self._health_monitor())

    async def acquire(self) -> OdooConnection:
        """Acquire connection from pool."""
        conn = await self.available.get()

        # Quick health check
        if not await self._is_healthy(conn):
            # Replace unhealthy connection
            await self._replace_connection(conn)
            conn = await self.available.get()

        return conn

    async def release(self, conn: OdooConnection):
        """Return connection to pool."""
        await self.available.put(conn)

    async def _health_monitor(self):
        """Periodic health checks."""
        while True:
            await asyncio.sleep(self.health_check_interval)

            # Check each connection
            for i, conn in enumerate(self.connections):
                if not await self._is_healthy(conn):
                    self.connections[i] = await self._create_connection()
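A possible acquire/release pattern around a single request, assuming the pool's private helpers (_create_connection, _is_healthy, _replace_connection) are implemented against the existing connection module; the function, model, and field names are placeholders.
# Hypothetical usage sketch - assumes `config` is an existing OdooConfig instance
async def fetch_partners(config):
    pool = ConnectionPool(config, pool_size=5)
    await pool.initialize()

    conn = await pool.acquire()
    try:
        # read() mirrors the connection.read() calls used elsewhere in this issue
        return await conn.read("res.partner", [1, 2, 3], ["name", "email"])
    finally:
        await pool.release(conn)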
3. Request Batching
Batch multiple operations for efficiency.
# In batching.py
import asyncio
from collections import defaultdict
from typing import Dict, List


class RequestBatcher:
    """Batch multiple requests for efficiency."""

    def __init__(self, connection, batch_size: int = 100, batch_timeout: float = 0.1):
        self.connection = connection  # connection used for the actual batched reads
        self.batch_size = batch_size
        self.batch_timeout = batch_timeout
        self.pending_reads = defaultdict(list)
        self.pending_searches = defaultdict(list)

    async def batch_read(
        self,
        model: str,
        record_ids: List[int],
        fields: List[str] = None
    ) -> List[Dict]:
        """Batch read requests."""
        # Group by model and fields
        key = (model, tuple(fields) if fields else None)

        # Add to pending
        future = asyncio.Future()
        self.pending_reads[key].append((record_ids, future))

        # Process if batch is full
        if len(self.pending_reads[key]) >= self.batch_size:
            await self._process_read_batch(key)
        else:
            # Schedule batch processing
            asyncio.create_task(self._delayed_process_read(key))

        return await future

    async def _delayed_process_read(self, key):
        """Flush the pending batch once batch_timeout has elapsed."""
        await asyncio.sleep(self.batch_timeout)
        await self._process_read_batch(key)

    async def _process_read_batch(self, key):
        """Process a batch of read requests."""
        model, fields = key
        batch = self.pending_reads.pop(key, [])
        if not batch:
            return

        # Collect all record IDs
        all_ids = []
        futures_map = {}
        for ids, future in batch:
            for record_id in ids:
                all_ids.append(record_id)
                futures_map.setdefault(record_id, []).append(future)

        # Single batched read
        try:
            results = await self.connection.read(model, all_ids, fields)

            # Distribute results
            result_map = {r["id"]: r for r in results}
            for record_id, futures in futures_map.items():
                result = result_map.get(record_id)
                for future in futures:
                    if not future.done():
                        if result:
                            future.set_result([result])
                        else:
                            future.set_exception(
                                Exception(f"Record {record_id} not found")
                            )
        except Exception as e:
            # Propagate error to all futures
            for futures in futures_map.values():
                for future in futures:
                    if not future.done():
                        future.set_exception(e)
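To show the intended effect - several concurrent single-record reads collapsing into one backend call - a usage sketch along these lines could be used; the model, fields, and the asyncio.gather driver are illustrative only.
# Hypothetical usage sketch
import asyncio

async def read_many(connection):
    batcher = RequestBatcher(connection, batch_size=100, batch_timeout=0.1)

    # Three callers issue reads "at the same time"; the batcher coalesces them
    # into a single connection.read() call once the batch fills or times out.
    results = await asyncio.gather(
        batcher.batch_read("res.partner", [1], ["name"]),
        batcher.batch_read("res.partner", [2], ["name"]),
        batcher.batch_read("res.partner", [3], ["name"]),
    )
    return results  # each entry is a one-element list of record dicts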
4. Smart Prefetching
Prefetch related data based on access patterns.
# In prefetching.py
from collections import defaultdict
from typing import Any, Dict, List


class SmartPrefetcher:
    """Prefetch related data based on patterns."""

    def __init__(self, connection):
        self.connection = connection  # connection used for the combined read
        self.access_patterns = defaultdict(lambda: defaultdict(int))
        self.prefetch_rules = {}

    def record_access(self, model: str, fields: List[str]):
        """Record field access patterns."""
        for field in fields:
            self.access_patterns[model][field] += 1

    def analyze_patterns(self, model: str) -> List[str]:
        """Analyze and suggest fields to prefetch."""
        if model not in self.access_patterns:
            return []

        # Get frequently accessed fields
        field_counts = self.access_patterns[model]
        total_accesses = sum(field_counts.values())

        # Fields accessed >50% of the time
        frequent_fields = [
            field for field, count in field_counts.items()
            if count / total_accesses > 0.5
        ]
        return frequent_fields

    async def prefetch_related(
        self,
        model: str,
        record_id: int,
        requested_fields: List[str]
    ) -> Dict[str, Any]:
        """Prefetch related data."""
        # Get suggested fields
        suggested = self.analyze_patterns(model)

        # Combine with requested
        all_fields = list(set(requested_fields + suggested))

        # Fetch all at once
        records = await self.connection.read(model, [record_id], all_fields)
        return records[0]
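A rough usage sketch, assuming a `connection` object is available; the model and field names are placeholders.
# Hypothetical usage sketch
async def get_partner(connection):
    prefetcher = SmartPrefetcher(connection)

    # Each served request records which fields were actually used...
    prefetcher.record_access("res.partner", ["name", "email"])
    prefetcher.record_access("res.partner", ["name"])

    # ...so "name" (accessed in most requests) is prefetched alongside the
    # explicitly requested "street" in a single read.
    return await prefetcher.prefetch_related("res.partner", 42, ["street"])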
5. Response Compression
Compress large responses to reduce bandwidth.
# In compression.py
import gzip
import json
from typing import Any, Dict, Tuple


class ResponseCompressor:
    """Compress large responses."""

    def __init__(self, threshold: int = 10240):  # 10KB
        self.threshold = threshold
        self.stats = {
            "compressed": 0,
            "uncompressed": 0,
            "bytes_saved": 0
        }

    def should_compress(self, data: Any) -> bool:
        """Check if response should be compressed."""
        json_str = json.dumps(data)
        return len(json_str.encode()) > self.threshold

    def compress(self, data: Any) -> Tuple[bytes, Dict[str, str]]:
        """Compress response data."""
        json_str = json.dumps(data)
        original_size = len(json_str.encode())

        compressed = gzip.compress(json_str.encode())
        compressed_size = len(compressed)

        self.stats["compressed"] += 1
        self.stats["bytes_saved"] += original_size - compressed_size

        return compressed, {
            "Content-Encoding": "gzip",
            "X-Original-Size": str(original_size),
            "X-Compressed-Size": str(compressed_size)
        }
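A possible call site, purely illustrative (the payload is a made-up example):
# Hypothetical usage sketch
compressor = ResponseCompressor(threshold=10240)

payload = {"records": [{"id": i, "name": f"Partner {i}"} for i in range(1000)]}

if compressor.should_compress(payload):
    body, headers = compressor.compress(payload)  # gzip bytes + size headers
else:
    body, headers = payload, {}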
Performance Metrics
Before Optimization
- Cold start: 1-2 seconds
- Repeated model access: 958ms each
- Large record fetch: 500ms+
Expected After Optimization
- Warm cache: <100ms for model info
- Batched reads: 30-50% reduction
- Compressed responses: 60-80% size reduction
Implementation Plan
- Phase 1: Model metadata caching (highest impact)
- Phase 2: Request batching for bulk operations
- Phase 3: Connection pooling improvements
- Phase 4: Smart prefetching based on patterns
- Phase 5: Response compression
Testing Requirements
- Cache hit rate > 80% in typical usage (see the test sketch after this list)
- No memory leaks from caching
- Correct cache invalidation
- Batching doesn't delay single requests
- Compression works with all clients
- Performance benchmarks show improvement
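A minimal pytest-style sketch for the hit-rate and invalidation points, assuming pytest-asyncio and the ModelMetadataCache from the caching sketch above; FakeConnection is a stand-in, not a real class.
# Hypothetical test sketch - FakeConnection is illustrative only
import pytest

class FakeConnection:
    def __init__(self):
        self.calls = 0

    async def fields_get(self, model):
        self.calls += 1
        return {"name": {"type": "char"}}

@pytest.mark.asyncio
async def test_field_cache_hit_rate_and_invalidation():
    cache = ModelMetadataCache(ttl=3600)
    conn = FakeConnection()

    for _ in range(10):
        await cache.get_fields("res.partner", conn)

    assert conn.calls == 1  # only the first call reaches Odoo
    stats = cache.get_stats()
    assert stats["hits"] == 9 and stats["misses"] == 1

    cache.invalidate("res.partner")
    await cache.get_fields("res.partner", conn)
    assert conn.calls == 2  # invalidation forces a refetch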
Success Metrics
- 50% reduction in list_models time (958ms → ~450ms)
- 80% cache hit rate for field definitions
- 30% reduction in average response time
- No increase in error rates
- Memory usage stays reasonable
Configuration
# In config.py
import os

PERFORMANCE_CONFIG = {
    'cache_ttl': int(os.getenv('ODOO_MCP_CACHE_TTL', '3600')),
    'enable_batching': os.getenv('ODOO_MCP_ENABLE_BATCHING', 'true').lower() == 'true',
    'batch_size': int(os.getenv('ODOO_MCP_BATCH_SIZE', '100')),
    'compression_threshold': int(os.getenv('ODOO_MCP_COMPRESSION_THRESHOLD', '10240')),
    'connection_pool_size': int(os.getenv('ODOO_MCP_POOL_SIZE', '5'))
}
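One way these settings could feed the components sketched earlier (purely illustrative; `config` and `connection` are assumed to exist elsewhere in the server):
# Hypothetical wiring sketch
metadata_cache = ModelMetadataCache(ttl=PERFORMANCE_CONFIG['cache_ttl'])
pool = ConnectionPool(config, pool_size=PERFORMANCE_CONFIG['connection_pool_size'])
compressor = ResponseCompressor(threshold=PERFORMANCE_CONFIG['compression_threshold'])

batcher = None
if PERFORMANCE_CONFIG['enable_batching']:
    batcher = RequestBatcher(connection, batch_size=PERFORMANCE_CONFIG['batch_size'])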
Priority
Low - Current performance is acceptable. These optimizations would improve user experience but aren't critical for functionality.