-
Notifications
You must be signed in to change notification settings - Fork 254
Description
Add Log File Support to MCP Gateway
Priority: Medium (Operations/Debugging)
Related: #138
Description:
MCP Gateway currently lacks file-based logging support. The test suite expects logs to be written to logs/mcpgateway.log
, but this functionality is not implemented. All logging currently goes to stdout/stderr only, making it difficult to debug issues in production, perform log analysis, or integrate with log management systems.
Current State:
- Test commands fail:
tail -n 20 logs/mcpgateway.log | grep -c ERROR           # fails: file doesn't exist
grep "Application startup complete" logs/mcpgateway.log  # fails: file doesn't exist
- Logs only available through console output
- No log rotation or retention policies
- Difficult to debug issues after they occur
Expected Behavior:
- Logs should be written to both console and file
- Log files should be created in a configurable directory
- Support for log rotation (size/time based)
- Different log levels for file vs console
- Structured logging format for easier parsing
Suggested Implementation:
-
Update logging configuration (
mcpgateway/utils/logging_config.py
"""Logging configuration helpers for MCP Gateway.

Provides plain-text console/file logging (``setup_logging``) and a JSON
structured-logging variant (``setup_structured_logging``) backed by
size-based rotation.
"""

import json
import logging
from logging.handlers import RotatingFileHandler
from pathlib import Path
from typing import Optional


def setup_logging(
    log_level: str = "INFO",
    log_file: Optional[str] = None,
    log_dir: str = "logs",
    max_bytes: int = 10_485_760,  # 10MB
    backup_count: int = 5,
    log_format: Optional[str] = None,
) -> None:
    """Configure the root logger with a console handler and an optional
    size-rotated file handler.

    Args:
        log_level: Logging level name, e.g. "INFO" or "DEBUG".
        log_file: File name inside ``log_dir``; file logging is skipped
            entirely when None.
        log_dir: Directory for log files; created if missing.
        max_bytes: Rotate the log file once it reaches this size.
        backup_count: Number of rotated backups to keep.
        log_format: Format string for the console handler; a sensible
            default is used when None.
    """
    level = getattr(logging, log_level.upper())

    if not log_format:
        log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"

    root_logger = logging.getLogger()
    root_logger.setLevel(level)
    # Drop handlers left over from a previous call so repeated setup
    # doesn't duplicate every log line.
    root_logger.handlers.clear()

    console_handler = logging.StreamHandler()
    console_handler.setLevel(level)
    console_handler.setFormatter(logging.Formatter(log_format))
    root_logger.addHandler(console_handler)

    if log_file:
        # Create the logs directory lazily, only when file logging is on.
        Path(log_dir).mkdir(parents=True, exist_ok=True)
        log_path = Path(log_dir) / log_file

        file_handler = RotatingFileHandler(
            log_path, maxBytes=max_bytes, backupCount=backup_count
        )
        file_handler.setLevel(level)
        # File format carries the source location for post-mortem debugging.
        file_handler.setFormatter(
            logging.Formatter(
                "%(asctime)s - %(name)s - %(levelname)s - "
                "[%(filename)s:%(lineno)d] - %(message)s"
            )
        )
        root_logger.addHandler(file_handler)
        # BUG FIX: the original emitted this message (referencing log_path)
        # unconditionally, raising NameError whenever log_file was None.
        logging.info("Logging initialized. Writing to: %s", log_path)
    else:
        logging.info("Logging initialized (console only).")


# LogRecord attributes already captured explicitly below; anything else on
# the record (i.e. caller-supplied extra={} fields) is passed through.
_RESERVED_RECORD_KEYS = frozenset({
    "name", "msg", "args", "created", "filename", "funcName",
    "levelname", "levelno", "lineno", "module",
    "exc_info", "exc_text", "stack_info",
})


class _JSONFormatter(logging.Formatter):
    """Render each log record as a single-line JSON object."""

    def format(self, record: logging.LogRecord) -> str:
        log_data = {
            "timestamp": self.formatTime(record, self.datefmt),
            "level": record.levelname,
            "logger": record.name,
            "message": record.getMessage(),
            "module": record.module,
            "function": record.funcName,
            "line": record.lineno,
        }
        if record.exc_info:
            log_data["exception"] = self.formatException(record.exc_info)
        for key, value in record.__dict__.items():
            if key not in _RESERVED_RECORD_KEYS:
                log_data[key] = value
        return json.dumps(log_data)


def setup_structured_logging(
    log_file: Optional[str] = None,
    log_dir: str = "logs",
    max_bytes: int = 10_485_760,
    backup_count: int = 5,
) -> None:
    """Attach a JSON-formatted, size-rotated file handler for production.

    Args:
        log_file: File name inside ``log_dir``; no-op when None.
        log_dir: Directory for log files; created if missing.
        max_bytes: Rotate the log file once it reaches this size
            (generalized from the previously hard-coded 10MB).
        backup_count: Number of rotated backups to keep.
    """
    if not log_file:
        return
    Path(log_dir).mkdir(parents=True, exist_ok=True)
    log_path = Path(log_dir) / log_file

    file_handler = RotatingFileHandler(
        log_path, maxBytes=max_bytes, backupCount=backup_count
    )
    file_handler.setFormatter(_JSONFormatter())
    logging.getLogger().addHandler(file_handler)
-
Add environment variables (
.env
):

# Logging Configuration
MCPGATEWAY_LOG_LEVEL=INFO
MCPGATEWAY_LOG_FILE=mcpgateway.log
MCPGATEWAY_LOG_DIR=logs
MCPGATEWAY_LOG_MAX_BYTES=10485760  # 10MB
MCPGATEWAY_LOG_BACKUP_COUNT=5
MCPGATEWAY_LOG_FORMAT=text  # text or json
-
Update application initialization (
mcpgateway/main.py
):from mcpgateway.utils.logging_config import setup_logging, setup_structured_logging def create_app(): # Load config log_level = os.getenv("MCPGATEWAY_LOG_LEVEL", "INFO") log_file = os.getenv("MCPGATEWAY_LOG_FILE", "mcpgateway.log") log_dir = os.getenv("MCPGATEWAY_LOG_DIR", "logs") log_format = os.getenv("MCPGATEWAY_LOG_FORMAT", "text") # Setup logging if log_format == "json": setup_structured_logging(log_file=log_file, log_dir=log_dir) else: setup_logging( log_level=log_level, log_file=log_file, log_dir=log_dir, max_bytes=int(os.getenv("MCPGATEWAY_LOG_MAX_BYTES", "10485760")), backup_count=int(os.getenv("MCPGATEWAY_LOG_BACKUP_COUNT", "5")) ) logger = logging.getLogger(__name__) logger.info("Application startup complete") # ... rest of app initialization
-
Add log management utilities:
# mcpgateway/utils/log_utils.py import os from pathlib import Path from datetime import datetime, timedelta def cleanup_old_logs(log_dir: str = "logs", days_to_keep: int = 7): """Remove log files older than specified days""" log_path = Path(log_dir) if not log_path.exists(): return cutoff_date = datetime.now() - timedelta(days=days_to_keep) for log_file in log_path.glob("*.log*"): if log_file.stat().st_mtime < cutoff_date.timestamp(): log_file.unlink() logging.info(f"Removed old log file: {log_file}") def get_log_stats(log_dir: str = "logs") -> dict: """Get statistics about log files""" log_path = Path(log_dir) if not log_path.exists(): return {"error": "Log directory not found"} total_size = 0 file_count = 0 for log_file in log_path.glob("*.log*"): total_size += log_file.stat().st_size file_count += 1 return { "total_size_mb": round(total_size / 1024 / 1024, 2), "file_count": file_count, "directory": str(log_path.absolute()) }
-
Update .gitignore:
# Logs
logs/
*.log
*.log.*
-
Add log viewing endpoint (optional):
@router.get("/admin/logs/tail") async def tail_logs( lines: int = Query(default=100, le=1000), level: Optional[str] = Query(default=None), current_user: dict = Depends(verify_admin_token) ): """Get recent log entries""" log_file = Path(os.getenv("MCPGATEWAY_LOG_DIR", "logs")) / os.getenv("MCPGATEWAY_LOG_FILE", "mcpgateway.log") if not log_file.exists(): raise HTTPException(status_code=404, detail="Log file not found") # Read last N lines with open(log_file, 'r') as f: lines_list = f.readlines()[-lines:] # Filter by level if specified if level: lines_list = [l for l in lines_list if f" {level.upper()} " in l] return {"logs": lines_list}
Benefits:
- Persistent logs for debugging and auditing
- Log rotation prevents disk space issues
- Structured logging option for log management systems
- Configurable via environment variables
- Supports both development (text) and production (JSON) formats
Testing Requirements:
- Verify log files are created on startup
- Test log rotation when size limit is reached
- Ensure all log levels work correctly
- Test that errors are properly logged to file
- Verify log directory is created if missing
- Test structured logging format
Acceptance Criteria:
- Logs written to logs/mcpgateway.log by default
- Log rotation implemented (size-based)
- Configurable via environment variables
- Both console and file logging work simultaneously
- Structured logging option available
- Existing tests pass with log files present
- No performance impact from file logging
Related Issues:
- Consider adding support for external logging services (CloudWatch, Stackdriver, etc.)
- May need log shipping for containerized deployments
- Consider adding request ID tracking for distributed tracing