feat: enhance character data handling in chapter parsing and LLM service

This commit is contained in:
2026-03-13 15:56:56 +08:00
parent 83841f503c
commit d1503b08cb
2 changed files with 20 additions and 5 deletions

View File

@@ -965,7 +965,10 @@ async def parse_one_chapter(project_id: int, chapter_id: int, user: User, db) ->
raise ValueError("No characters found. Please analyze the project first.")
char_map: dict[str, AudiobookCharacter] = {c.name: c for c in characters}
character_names = list(char_map.keys())
characters_data = [
{"name": c.name, "gender": c.gender or "未知", "description": c.description or ""}
for c in characters
]
label = chapter.title or f"{chapter.chapter_index + 1}"
ps.append_line(key, f"[{label}] 开始解析 ({len(chapter.source_text)} 字)")
@@ -998,7 +1001,7 @@ async def parse_one_chapter(project_id: int, chapter_id: int, user: User, db) ->
ps.append_token(key, token)
try:
-            segments_data = await llm.parse_chapter_segments(chunk, character_names, on_token=on_token, usage_callback=_log_parse_usage)
+            segments_data = await llm.parse_chapter_segments(chunk, characters_data, on_token=on_token, usage_callback=_log_parse_usage)
except Exception as e:
logger.warning(f"Chapter {chapter_id} chunk {i} failed: {e}")
ps.append_line(key, f"\n[回退] {e}")