初始化

This commit is contained in:
lhx
2025-12-12 10:57:31 +08:00
commit f8e85beba1
38 changed files with 2146 additions and 0 deletions

3
app/core/__init__.py Normal file
View File

@@ -0,0 +1,3 @@
from .config import settings
from .database import get_railway_db, get_tunnel_db, railway_engine, tunnel_engine
from .logging_config import setup_logging, get_logger

40
app/core/config.py Normal file
View File

@@ -0,0 +1,40 @@
from pydantic_settings import BaseSettings
from functools import lru_cache
class Settings(BaseSettings):
    """Application configuration, loaded from the environment / .env file."""

    # Application server configuration
    APP_HOST: str = "0.0.0.0"
    APP_PORT: int = 8000
    APP_DEBUG: bool = True

    # Railway database configuration (account tables)
    RAILWAY_DB_HOST: str = "localhost"
    RAILWAY_DB_PORT: int = 3306
    RAILWAY_DB_USER: str = "root"
    RAILWAY_DB_PASSWORD: str = ""
    RAILWAY_DB_NAME: str = "railway"

    # Tunnel database configuration (business-data tables)
    TUNNEL_DB_HOST: str = "localhost"
    TUNNEL_DB_PORT: int = 3306
    TUNNEL_DB_USER: str = "root"
    TUNNEL_DB_PASSWORD: str = ""
    TUNNEL_DB_NAME: str = "Tunnel"

    @staticmethod
    def _mysql_url(user: str, password: str, host: str, port: int, name: str) -> str:
        # PyMySQL driver with utf8mb4 so the full Unicode range round-trips.
        return f"mysql+pymysql://{user}:{password}@{host}:{port}/{name}?charset=utf8mb4"

    @property
    def RAILWAY_DATABASE_URL(self) -> str:
        """SQLAlchemy connection URL for the Railway database."""
        return self._mysql_url(
            self.RAILWAY_DB_USER,
            self.RAILWAY_DB_PASSWORD,
            self.RAILWAY_DB_HOST,
            self.RAILWAY_DB_PORT,
            self.RAILWAY_DB_NAME,
        )

    @property
    def TUNNEL_DATABASE_URL(self) -> str:
        """SQLAlchemy connection URL for the Tunnel database."""
        return self._mysql_url(
            self.TUNNEL_DB_USER,
            self.TUNNEL_DB_PASSWORD,
            self.TUNNEL_DB_HOST,
            self.TUNNEL_DB_PORT,
            self.TUNNEL_DB_NAME,
        )

    class Config:
        env_file = ".env"
        extra = "ignore"
@lru_cache
def get_settings() -> "Settings":
    """Return the process-wide Settings instance (cached after first call)."""
    return Settings()


# Module-level singleton used throughout the application.
settings = get_settings()

58
app/core/database.py Normal file
View File

@@ -0,0 +1,58 @@
from sqlalchemy import create_engine, text, MetaData
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import QueuePool
from .config import settings
# Connection-pool options shared by both engines; only pool sizing differs.
_COMMON_POOL_OPTS = dict(
    poolclass=QueuePool,
    pool_pre_ping=True,
    echo=False,
    pool_timeout=60,
    pool_recycle=3600,
    pool_reset_on_return='commit',
)

# Railway database engine (account tables).
railway_engine = create_engine(
    settings.RAILWAY_DATABASE_URL,
    pool_size=20,
    max_overflow=30,
    **_COMMON_POOL_OPTS,
)

# Tunnel database engine (business-data tables) — sized for heavier traffic.
tunnel_engine = create_engine(
    settings.TUNNEL_DATABASE_URL,
    pool_size=100,
    max_overflow=200,
    **_COMMON_POOL_OPTS,
)

# Session factories bound to their respective engines.
RailwaySessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=railway_engine)
TunnelSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=tunnel_engine)

# Separate declarative bases keep the two schemas' metadata apart.
RailwayBase = declarative_base()
TunnelBase = declarative_base()
def get_railway_db():
    """Dependency-injection generator: yield a Railway DB session, always closing it."""
    session = RailwaySessionLocal()
    try:
        yield session
    finally:
        session.close()
def get_tunnel_db():
    """Dependency-injection generator: yield a Tunnel DB session, always closing it."""
    session = TunnelSessionLocal()
    try:
        yield session
    finally:
        session.close()
def init_db():
    """Create all mapped tables on both databases (no-op for existing tables)."""
    for base, engine in ((RailwayBase, railway_engine), (TunnelBase, tunnel_engine)):
        base.metadata.create_all(bind=engine)

165
app/core/db_monitor.py Normal file
View File

@@ -0,0 +1,165 @@
"""数据库连接池监控模块"""
import time
import threading
from typing import Dict, Any
from datetime import datetime
from sqlalchemy import event
from sqlalchemy.engine import Engine
from .logging_config import get_logger
logger = get_logger(__name__)
db_logger = get_logger("sqlalchemy.engine")

# Process-wide monitoring counters, shared by every helper below.
# NOTE(review): mutated from multiple threads without a lock — assumed
# acceptable for best-effort monitoring; confirm if exact counts matter.
_pool_stats = {
    'total_connections': 0,
    'checked_in': 0,
    'checked_out': 0,
    'overflow': 0,
    'slow_queries': [],         # bounded to the most recent 1000 entries
    'connection_errors': [],    # bounded to the most recent 100 entries
    'peak_connections': 0,
    'last_reset': datetime.now(),
}

# Alerting configuration: usage threshold (%), slow-query cutoff (s),
# and minimum seconds between repeated alerts of the same kind.
_alert_thresholds = {
    'pool_usage_percent': 80,
    'slow_query_time': 5.0,
    'alert_cooldown': 300,
}

# Timestamp of the last alert emitted, keyed by alert kind.
_last_alerts = {}

# Engines whose pools are aggregated by get_pool_status().
_engines = []
def register_engine(engine):
    """Add *engine* to the list of engines whose pools are monitored."""
    _engines.append(engine)
def get_pool_status() -> Dict[str, Any]:
    """Aggregate connection-pool counters across every registered engine.

    Also updates the global peak-connections high-water mark as a side effect.
    """
    totals = {
        'total': 0,
        'checked_in': 0,
        'checked_out': 0,
        'overflow': 0,
        'usage_percent': 0,
    }
    for engine in _engines:
        pool = engine.pool
        if not hasattr(pool, 'size'):
            # Pool implementations without sizing info are skipped.
            continue
        totals['total'] += pool.size() if callable(pool.size) else pool.size
        for key, attr in (('checked_in', 'checkedin'),
                          ('checked_out', 'checkedout'),
                          ('overflow', 'overflow')):
            totals[key] += getattr(pool, attr)() if hasattr(pool, attr) else 0
    if totals['total'] > 0:
        totals['usage_percent'] = round((totals['checked_out'] / totals['total']) * 100, 2)
    if totals['checked_out'] > _pool_stats['peak_connections']:
        _pool_stats['peak_connections'] = totals['checked_out']
    return totals
def check_pool_alerts():
    """Emit a rate-limited warning when pool usage crosses the configured threshold."""
    now = time.time()
    stats = get_pool_status()
    if stats.get('usage_percent', 0) < _alert_thresholds['pool_usage_percent']:
        return
    # Cooldown: suppress repeat alerts of the same kind within the window.
    last = _last_alerts.get('pool_usage')
    if last is None or (now - last) > _alert_thresholds['alert_cooldown']:
        db_logger.warning(
            f"数据库连接池告警: 使用率 {stats['usage_percent']}% 超过阈值 "
            f"(已使用: {stats['checked_out']}/{stats['total']})"
        )
        _last_alerts['pool_usage'] = now
def log_slow_query(sql: str, duration: float):
    """Record a slow statement (SQL truncated to 200 chars), keeping the last 1000."""
    entry = {
        'sql': sql if len(sql) <= 200 else sql[:200],
        'duration': duration,
        'timestamp': time.time(),
    }
    queries = _pool_stats['slow_queries']
    queries.append(entry)
    if len(queries) > 1000:
        _pool_stats['slow_queries'] = queries[-1000:]
def log_connection_error(error: str):
    """Record a database connection error, keeping only the most recent 100."""
    errors = _pool_stats['connection_errors']
    errors.append({'error': error, 'timestamp': time.time()})
    if len(errors) > 100:
        _pool_stats['connection_errors'] = errors[-100:]
def get_monitoring_report() -> Dict[str, Any]:
    """Build a snapshot report: pool status plus 5-minute slow-query/error counts."""
    now = time.time()
    window = 300  # look-back window in seconds (5 minutes)
    slow_count = sum(1 for q in _pool_stats['slow_queries'] if (now - q['timestamp']) < window)
    error_count = sum(1 for e in _pool_stats['connection_errors'] if (now - e['timestamp']) < window)
    return {
        'timestamp': datetime.now().isoformat(),
        'pool_status': get_pool_status(),
        'peak_connections': _pool_stats['peak_connections'],
        'recent_5min': {
            'slow_queries_count': slow_count,
            'connection_errors_count': error_count,
        },
        'last_reset': _pool_stats['last_reset'].isoformat(),
    }
def monitoring_task():
    """Background loop: run alert checks every 30s, backing off to 60s after errors."""
    while True:
        try:
            check_pool_alerts()
        except Exception as exc:
            db_logger.error(f"数据库监控任务异常: {exc}")
            time.sleep(60)
        else:
            time.sleep(30)
def start_monitoring():
    """Register both application engines and launch the daemon monitoring thread."""
    # Local import avoids a circular dependency with .database.
    from .database import railway_engine, tunnel_engine
    for engine in (railway_engine, tunnel_engine):
        register_engine(engine)
    threading.Thread(target=monitoring_task, daemon=True).start()
    db_logger.info("数据库连接池监控已启动")
# SQL execution-time monitoring: per-cursor start times keyed by cursor id.
_query_start_times = {}


@event.listens_for(Engine, "before_cursor_execute")
def receive_before_cursor_execute(conn, cursor, statement, params, context, executemany):
    """Record the wall-clock start time of every statement execution."""
    _query_start_times[id(cursor)] = time.time()


@event.listens_for(Engine, "after_cursor_execute")
def receive_after_cursor_execute(conn, cursor, statement, params, context, executemany):
    """Measure statement duration; record and warn when it exceeds the slow-query threshold."""
    start_time = _query_start_times.pop(id(cursor), None)
    # Explicit `is not None`: the original truthiness test would silently drop
    # a timestamp of exactly 0.0, conflating "missing" with a falsy value.
    if start_time is not None:
        duration = time.time() - start_time
        if duration >= _alert_thresholds['slow_query_time']:
            log_slow_query(statement, duration)
            db_logger.warning(f"慢查询: {duration:.2f}s - {statement[:100]}...")


@event.listens_for(Engine, "handle_error")
def receive_handle_error(exception_context):
    """Record every engine-level error in the monitoring buffer and log it."""
    error_msg = str(exception_context.original_exception)
    log_connection_error(error_msg)
    db_logger.error(f"数据库错误: {error_msg}")
def log_pool_status():
    """Write the current aggregated connection-pool status to the database log."""
    snapshot = get_pool_status()
    db_logger.info(
        f"数据库连接池状态: 使用率 {snapshot['usage_percent']}% "
        f"(已用: {snapshot['checked_out']}, 空闲: {snapshot['checked_in']}, 总计: {snapshot['total']})"
    )

128
app/core/logging_config.py Normal file
View File

@@ -0,0 +1,128 @@
import logging
import logging.handlers
import os
from datetime import datetime
from pathlib import Path
def _timed_file_handler(path, level, formatter):
    """Build a midnight-rotating file handler keeping 30 days of backups."""
    handler = logging.handlers.TimedRotatingFileHandler(
        path,
        when='midnight',
        interval=1,
        backupCount=30,
        encoding='utf-8'
    )
    handler.setLevel(level)
    handler.setFormatter(formatter)
    return handler


def setup_logging():
    """Configure console and rotating-file logging for the whole application.

    Idempotent: existing handlers are removed from the root logger AND from
    the dedicated named loggers before new ones are attached. The original
    implementation only reset the root logger, so calling it twice stacked
    duplicate file handlers on the apscheduler/uvicorn/sqlalchemy loggers,
    duplicating every log line.
    """
    # All log files live under ./logs, created on first use.
    log_dir = Path("logs")
    log_dir.mkdir(exist_ok=True)

    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)

    # Remove previously-installed handlers (root + named loggers) so repeated
    # calls do not accumulate duplicates.
    for handler in root_logger.handlers[:]:
        root_logger.removeHandler(handler)
    for name in ('apscheduler', 'app.utils.scheduler', 'uvicorn.access', 'sqlalchemy.engine'):
        named = logging.getLogger(name)
        for handler in named.handlers[:]:
            named.removeHandler(handler)

    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )

    # 1. Console handler.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(formatter)
    root_logger.addHandler(console_handler)

    # 2. Application log file (rotated daily at midnight).
    app_log_file = log_dir / "app.log"
    root_logger.addHandler(_timed_file_handler(app_log_file, logging.INFO, formatter))

    # 3. Error-only log file.
    error_log_file = log_dir / "error.log"
    root_logger.addHandler(_timed_file_handler(error_log_file, logging.ERROR, formatter))

    # 4. Scheduler log, shared by APScheduler and the app's scheduler module.
    scheduler_log_file = log_dir / "scheduler.log"
    scheduler_handler = _timed_file_handler(scheduler_log_file, logging.INFO, formatter)
    for name in ('apscheduler', 'app.utils.scheduler'):
        scheduler_logger = logging.getLogger(name)
        scheduler_logger.addHandler(scheduler_handler)
        scheduler_logger.setLevel(logging.INFO)

    # 5. API access log (uvicorn), with a terser timestamp-only format.
    access_log_file = log_dir / "access.log"
    access_formatter = logging.Formatter(
        '%(asctime)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )
    access_handler = logging.handlers.TimedRotatingFileHandler(
        access_log_file,
        when='midnight',
        interval=1,
        backupCount=30,
        encoding='utf-8'
    )
    access_handler.setFormatter(access_formatter)
    uvicorn_access_logger = logging.getLogger("uvicorn.access")
    uvicorn_access_logger.addHandler(access_handler)
    uvicorn_access_logger.setLevel(logging.INFO)

    # 6. Database log: SQLAlchemy engine, warnings and errors only.
    db_log_file = log_dir / "database.log"
    sqlalchemy_logger = logging.getLogger('sqlalchemy.engine')
    sqlalchemy_logger.addHandler(_timed_file_handler(db_log_file, logging.INFO, formatter))
    sqlalchemy_logger.setLevel(logging.WARNING)

    print(f"日志系统已配置,日志文件保存在: {log_dir.absolute()}")
    print("日志文件:")
    print(f"  - 应用日志: {app_log_file}")
    print(f"  - 错误日志: {error_log_file}")
    print(f"  - 调度器日志: {scheduler_log_file}")
    print(f"  - 访问日志: {access_log_file}")
    print(f"  - 数据库日志: {db_log_file}")
def get_logger(name: "str | None" = None):
    """Return the logger called *name*, or this module's logger when name is falsy.

    The original annotation claimed ``str`` while defaulting to ``None``;
    corrected to an optional string (string annotation for pre-3.10 compat).
    """
    return logging.getLogger(name or __name__)