合并代码

This commit is contained in:
lhx
2026-01-20 00:39:46 +00:00
parent 9af4766b31
commit a7b7a786a5
4 changed files with 420 additions and 354 deletions

View File

@@ -1,354 +1,357 @@
from sqlalchemy.orm import Session from sqlalchemy.orm import Session
from typing import List, Optional, Dict, Any, Set, Tuple,Union from typing import List, Optional, Dict, Any, Set, Tuple,Union
from ..models.level_data import LevelData from ..models.level_data import LevelData
from ..models.daily import DailyData from ..models.daily import DailyData
from .base import BaseService from .base import BaseService
from ..models.settlement_data import SettlementData from ..models.settlement_data import SettlementData
from sqlalchemy import func, select, desc,over from sqlalchemy import func, select, desc,over
from sqlalchemy.orm import Session from sqlalchemy.orm import Session
import logging import logging
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
class DailyDataService(BaseService[DailyData]):
    """Service layer for DailyData records (daily settlement work items)."""

    def __init__(self):
        super().__init__(DailyData)

    def _dict_to_instance(self, data_dict: Dict) -> DailyData:
        """Convert a single dict into a DailyData instance, dropping unknown keys."""
        model_fields = [col.name for col in DailyData.__table__.columns]
        filtered_data = {k: v for k, v in data_dict.items() if k in model_fields}
        return DailyData(**filtered_data)

    def _ensure_instances(self, data: Union[List[Dict], List[DailyData]]) -> List[DailyData]:
        """Normalize input into a list of DailyData instances.

        Raises:
            TypeError: if *data* is not a list, or an element is neither a
                dict nor a DailyData instance.
        """
        if not isinstance(data, list):
            raise TypeError(f"输入必须是列表,而非 {type(data)}")
        instances = []
        for item in data:
            if isinstance(item, DailyData):
                instances.append(item)
            elif isinstance(item, dict):
                instances.append(self._dict_to_instance(item))
            else:
                raise TypeError(f"列表元素必须是 dict 或 DailyData 实例,而非 {type(item)}")
        return instances

    def batch_create_by_account_nyid(self, db: Session, data: Union[List[Dict], List[DailyData]]) -> List[DailyData]:
        """Bulk-insert daily records.

        Accepts either model instances or plain dicts (dicts are converted
        automatically). Duplicate detection on (account_id, NYID) is
        currently DISABLED by this merge — the old dedup query is kept
        below as commented-out code for reference.

        Args:
            db: database session
            data: list of DailyData instances or dicts

        Returns:
            The list of created (and refreshed) DailyData instances.
        """
        try:
            data_list = self._ensure_instances(data)
        except TypeError as e:
            logger.error(f"数据格式错误:{str(e)}")
            raise

        # Records missing either key cannot be created meaningfully.
        target_pairs: List[Tuple[int, int]] = [
            (item.account_id, item.NYID)
            for item in data_list
            if item.account_id is not None and item.NYID is not None
        ]
        if not target_pairs:
            logger.warning("批量创建失败:所有记录缺少 account_id 或 NYID")
            return []

        # Dedup disabled (kept for reference):
        # existing_pairs: Set[Tuple[int, int]] = {
        #     (item.account_id, item.NYID)
        #     for item in db.query(DailyData.account_id, DailyData.NYID)
        #     .filter(DailyData.account_id.in_([p[0] for p in target_pairs]),
        #             DailyData.NYID.in_([p[1] for p in target_pairs]))
        #     .all()
        # }

        to_create = [
            item for item in data_list
            # if (item.account_id, item.NYID) not in existing_pairs
        ]

        # ignored_count = len(data_list) - len(to_create)
        # if ignored_count > 0:
        #     logger.info(f"批量创建时忽略{ignored_count}条已存在记录account_id和NYID已存在")

        logger.info(f"批量创建 {to_create}")
        if not to_create:
            return []

        # add_all (instead of bulk_save_objects) keeps the objects tracked
        # by the session so they can be refreshed after commit.
        db.add_all(to_create)
        db.commit()
        for item in to_create:
            db.refresh(item)
        return to_create

    def get_nyid_by_point_id(
        self,
        db: Session,
        point_ids: Optional[List[int]] = None,
        max_num: int = 1
    ) -> List[List[dict]]:
        """Return, per point_id, the newest *max_num* settlement records.

        Rows are ranked within each point_id by NYID descending. The result
        is a list of sub-lists, one per requested point_id (empty sub-list
        when that point has no usable records), in the caller's order.
        """
        point_ids = point_ids or []
        if max_num <= 0:
            return []

        # Window function: number rows per point_id, newest NYID first.
        row_num = over(
            func.row_number(),
            partition_by=SettlementData.point_id,
            order_by=desc(SettlementData.NYID)
        ).label("row_num")

        model_columns = [getattr(SettlementData, col.name) for col in SettlementData.__table__.columns]

        # Only rows explicitly flagged usable (useflag set and non-zero).
        base_conditions = [
            SettlementData.useflag.isnot(None),
            SettlementData.useflag != 0
        ]
        if point_ids:
            base_conditions.append(SettlementData.point_id.in_(point_ids))

        subquery = (
            select(*model_columns, row_num)
            .where(*base_conditions)
            .subquery()
        )

        # Keep only the top max_num rows of each point_id.
        query = (
            select(subquery)
            .where(subquery.c.row_num <= max_num)
            .order_by(subquery.c.point_id, subquery.c.row_num)
        )

        results = db.execute(query).all()
        grouped: Dict[int, List[dict]] = {}  # point_id -> its record dicts
        field_names = [col.name for col in SettlementData.__table__.columns]

        for row in results:
            item_dict = {field: getattr(row, field) for field in field_names}
            try:
                # TypeError added: int(None) would otherwise escape the handler.
                pid = int(item_dict["point_id"])
            except (KeyError, ValueError, TypeError):
                continue  # skip malformed rows
            grouped.setdefault(pid, []).append(item_dict)

        # Preserve the caller's point_ids order; default to sorted ids.
        if not point_ids:
            point_ids = sorted(grouped.keys())
        return [grouped.get(pid, []) for pid in point_ids]

    def get_all_daily_data(
        self,
        db: Session,
        user_id: Optional[int] = None
    ) -> List[Dict[str, Any]]:
        """Fetch all DailyData rows as dicts, optionally filtered by user_id.

        Args:
            db: database session
            user_id: optional user id; when given, only that user's rows
                are returned.

        Returns:
            List of dicts containing every column of each matching row.
        """
        try:
            query = db.query(DailyData)
            if user_id is not None:
                query = query.filter(DailyData.user_id == user_id)
                logger.info(f"查询user_id={user_id}的所有日常数据")
            else:
                logger.info("查询所有日常数据")

            daily_records = query.all()

            # Materialize every column of every row into plain dicts.
            result = []
            for record in daily_records:
                record_dict = {
                    column.name: getattr(record, column.name)
                    for column in DailyData.__table__.columns
                }
                result.append(record_dict)

            logger.info(f"查询完成,共获取{len(result)}条日常数据")
            return result
        except Exception as e:
            logger.error(f"获取日常数据失败:{str(e)}", exc_info=True)
            # Bare raise preserves the original traceback (was `raise e`).
            raise

    def get_daily_data_by_account(
        self,
        db: Session,
        account_id: str,
        user_id: Optional[int] = None
    ) -> List[Dict[str, Any]]:
        """Fetch DailyData rows for one account, optionally also by user_id.

        Args:
            db: database session
            account_id: required account id (primary filter)
            user_id: optional additional user filter

        Returns:
            List of dicts containing every column of each matching row.
        """
        try:
            query = db.query(DailyData).filter(DailyData.account_id == account_id)
            if user_id is not None:
                query = query.filter(DailyData.user_id == user_id)
                logger.info(f"查询account_id={account_id}且user_id={user_id}的日常数据")
            else:
                logger.info(f"查询account_id={account_id}的所有日常数据")

            daily_records = query.all()

            # Materialize every column of every row into plain dicts.
            result = []
            for record in daily_records:
                record_dict = {
                    column.name: getattr(record, column.name)
                    for column in DailyData.__table__.columns
                }
                result.append(record_dict)

            logger.info(f"查询完成account_id={account_id}对应{len(result)}条日常数据")
            return result
        except Exception as e:
            logger.error(f"获取account_id={account_id}的日常数据失败:{str(e)}", exc_info=True)
            # Bare raise preserves the original traceback (was `raise e`).
            raise

    def create_daily_from_linecode(
        self,
        db: Session,
        linecode: str,
        account_id: Optional[int] = None
    ) -> List[DailyData]:
        """Generate daily records from a leveling-line code.

        Pipeline:
            1. Find level_data rows for *linecode* and take the max NYID.
            2. Load settlement_data rows for that NYID.
            3. Per settlement row, resolve point_id -> checkpoint -> section_id.
            4. Resolve section_id -> section_data -> account_id.
            5. Build DailyData rows and bulk-insert them.

        Args:
            db: database session
            linecode: leveling-line code
            account_id: optional account filter; when given, only matching
                sections produce records.

        Returns:
            The list of created DailyData records.

        Raises:
            ValueError: when no level data or settlement data matches.
        """
        try:
            logger.info(f"开始处理 linecode={linecode} 的 daily 数据生成请求")

            from ..models.level_data import LevelData
            from ..models.settlement_data import SettlementData
            from ..models.checkpoint import Checkpoint
            from ..models.section_data import SectionData

            # 1. All level rows for the line; newest NYID wins.
            level_data_list = db.query(LevelData)\
                .filter(LevelData.linecode == linecode)\
                .all()
            if not level_data_list:
                raise ValueError(f"未找到 linecode={linecode} 对应的水准数据")

            # NYID is stored as a string; compare numerically (non-digit -> 0).
            max_nyid = max(level_data_list, key=lambda x: int(x.NYID) if x.NYID.isdigit() else 0)
            target_nyid = max_nyid.NYID
            logger.info(f"找到最大 NYID: {target_nyid}")

            # 2. Settlement rows for that NYID.
            settlement_data_list = db.query(SettlementData)\
                .filter(SettlementData.NYID == target_nyid)\
                .all()
            if not settlement_data_list:
                raise ValueError(f"未找到 NYID={target_nyid} 对应的沉降数据")

            # 3. Build daily records, skipping rows with broken links.
            daily_records = []
            for settlement in settlement_data_list:
                # 3.1 point_id -> checkpoint (gives section_id).
                checkpoint = db.query(Checkpoint)\
                    .filter(Checkpoint.point_id == settlement.point_id)\
                    .first()
                if not checkpoint:
                    logger.warning(f"未找到 point_id={settlement.point_id} 对应的观测点,跳过该记录")
                    continue

                # 3.2 section_id -> section (gives account_id).
                section = db.query(SectionData)\
                    .filter(SectionData.section_id == checkpoint.section_id)\
                    .first()
                if not section:
                    logger.warning(f"未找到 section_id={checkpoint.section_id} 对应的断面,跳过该记录")
                    continue

                # 3.3 The section's account_id doubles as the daily user_id.
                user_id = int(section.account_id) if section.account_id else None
                if not user_id:
                    logger.warning(f"断面 section_id={checkpoint.section_id} 没有 account_id跳过该记录")
                    continue

                # Honor the optional account filter.
                if account_id is not None and user_id != account_id:
                    continue

                # 3.4 Assemble the daily record.
                daily_record = DailyData(
                    user_id=user_id,
                    account_id=user_id,
                    point_id=settlement.point_id,
                    NYID=settlement.NYID,
                    linecode=linecode,
                    section_id=checkpoint.section_id,
                    remaining=0,
                    is_all=0
                )
                daily_records.append(daily_record)

            if not daily_records:
                logger.warning("没有生成任何 daily 记录")
                return []

            # 4. Bulk insert via the shared batch helper.
            created_records = self.batch_create_by_account_nyid(db, daily_records)
            logger.info(f"成功生成 {len(created_records)} 条 daily 记录")
            return created_records
        except ValueError:
            raise
        except Exception as e:
            logger.error(f"生成 daily 数据失败:{str(e)}", exc_info=True)
            raise

View File

@@ -79,4 +79,4 @@ echo "当前运行的Docker容器:"
echo "$SUDO_PASSWORD" | sudo -S docker ps echo "$SUDO_PASSWORD" | sudo -S docker ps
echo "" echo ""
echo "=== 部署完成 ===" echo "=== 部署完成 ==="

36
fix_mysql.sh Normal file
View File

@@ -0,0 +1,36 @@
#!/bin/bash
# Reconfigure MySQL to listen on all interfaces so Docker containers can
# reach the host's MySQL through the docker0 bridge.
echo "=== 修复 MySQL 监听配置 ==="

MYSQL_CNF=/etc/mysql/mysql.conf.d/mysqld.cnf

# 1. Back up the config file; abort before modifying anything if it fails.
sudo cp "$MYSQL_CNF" "$MYSQL_CNF.backup.$(date +%Y%m%d_%H%M%S)" || { echo "❌ 备份失败,中止"; exit 1; }
echo "✅ 已备份配置文件"

# 2. Rewrite bind-address from loopback to all interfaces.
#    Dots are escaped so the pattern matches the literal IP 127.0.0.1
#    instead of any character.
sudo sed -i 's/^bind-address.*127\.0\.0\.1/bind-address = 0.0.0.0/' "$MYSQL_CNF"
echo "✅ 已修改 bind-address"

# 3. Show the resulting setting for eyeball verification.
echo ""
echo "修改后的配置:"
sudo grep "bind-address" "$MYSQL_CNF"
echo ""

# 4. Restart MySQL so the new bind-address takes effect.
echo "正在重启 MySQL..."
sudo systemctl restart mysql
sleep 2

# 5. Verify the listener.
echo ""
echo "验证 MySQL 监听状态:"
sudo netstat -tlnp | grep 3306
echo ""

# 6. Connection test from the docker0 bridge IP; skip when the bridge is absent.
DOCKER0_IP=$(ip addr show docker0 | grep -oP '(?<=inet\s)\d+(\.\d+){3}')
if [ -n "$DOCKER0_IP" ]; then
    echo "测试从本机连接 MySQL (IP: $DOCKER0_IP):"
    # NOTE(review): credentials are hard-coded; consider reading them from
    # the environment instead of committing them to the repository.
    mysql -h "$DOCKER0_IP" -u railway -p'Railway01.' -e "SELECT 'Connection OK' as status, DATABASE() as current_db, VERSION() as version;" 2>&1
else
    echo "❌ 无法获取 docker0 IP"
fi
echo ""
echo "=== 修复完成 ==="

27
test_connection.sh Normal file
View File

@@ -0,0 +1,27 @@
#!/bin/bash
# Diagnose host <-> container MySQL connectivity over the docker0 bridge.
echo "=== Docker 网络诊断 ==="
echo ""
echo "1. Docker0 网桥 IP:"
ip addr show docker0 2>/dev/null | grep -oP '(?<=inet\s)\d+(\.\d+){3}' || echo "❌ docker0 不存在"
echo ""
echo "2. MySQL 监听状态:"
sudo netstat -tlnp | grep 3306 || echo "❌ MySQL 未运行或未监听 3306"
echo ""
echo "3. MySQL 用户权限:"
# Keep stderr visible: with `2>/dev/null` the root password prompt was
# swallowed and the script appeared to hang at an invisible prompt.
mysql -u root -p -e "SELECT user, host FROM mysql.user WHERE user='railway';" || echo "❌ 无法查询(需要 root 密码)"
echo ""
echo "4. 测试从容器连接 MySQL:"
DOCKER0_IP=$(ip addr show docker0 | grep -oP '(?<=inet\s)\d+(\.\d+){3}')
if [ -n "$DOCKER0_IP" ]; then
    # Quote the IP so an odd value can never split the argument list.
    docker run --rm mysql:8.0 mysql -h "$DOCKER0_IP" -u railway -p'Railway01.' -e "SELECT 'OK' as status;" 2>&1
else
    echo "❌ 无法获取 docker0 IP"
fi
echo ""
echo "=== 诊断完成 ==="