feat(audiobook): implement log streaming for project status updates and enhance progress tracking
This commit is contained in:
@@ -13,6 +13,65 @@ class LLMService:
|
||||
self.api_key = api_key
|
||||
self.model = model
|
||||
|
||||
async def stream_chat(self, system_prompt: str, user_message: str, on_token=None) -> str:
    """Stream a chat completion from the OpenAI-compatible endpoint.

    Sends *system_prompt* / *user_message* as a streaming chat request and
    accumulates the per-chunk delta tokens into the full response text.

    Args:
        system_prompt: Content of the "system" role message.
        user_message: Content of the "user" role message.
        on_token: Optional callable invoked with each non-empty delta
            string as it arrives (used for live progress/log streaming).

    Returns:
        The concatenated response text (may be empty).

    Raises:
        httpx.HTTPStatusError: If the server answers with a non-200 status.
    """
    url = f"{self.base_url}/chat/completions"
    headers = {
        "Authorization": f"Bearer {self.api_key}",
        "Content-Type": "application/json",
    }
    payload = {
        "model": self.model,
        "messages": [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_message},
        ],
        "temperature": 0.3,
        "stream": True,
    }
    # Collect deltas in a list and join once at the end: repeated string
    # concatenation inside the receive loop is quadratic in response length.
    parts: list[str] = []
    timeout = httpx.Timeout(connect=10.0, read=90.0, write=10.0, pool=5.0)
    async with httpx.AsyncClient(timeout=timeout) as client:
        async with client.stream("POST", url, json=payload, headers=headers) as resp:
            if resp.status_code != 200:
                # Read the body first so the error log shows the server's message.
                body = await resp.aread()
                logger.error(f"LLM streaming error {resp.status_code}: {body}")
                resp.raise_for_status()
            async for line in resp.aiter_lines():
                # SSE data lines are "data:<json>"; the space after the colon
                # is optional per the SSE spec, so accept both forms.
                if not line.startswith("data:"):
                    continue
                data = line[5:].lstrip()
                if data.strip() == "[DONE]":
                    break
                # Narrow try: only the frame parsing is best-effort. The
                # original also wrapped on_token(), silently swallowing
                # KeyError/IndexError raised by the caller's callback.
                try:
                    chunk = json.loads(data)
                    delta = chunk["choices"][0]["delta"].get("content", "")
                except (json.JSONDecodeError, KeyError, IndexError):
                    # Ignore malformed or non-delta frames (e.g. keep-alives).
                    continue
                if delta:
                    parts.append(delta)
                    if on_token:
                        on_token(delta)
    return "".join(parts)
|
||||
async def stream_chat_json(self, system_prompt: str, user_message: str, on_token=None):
    """Stream a chat completion and parse the final text as JSON.

    Delegates to :meth:`stream_chat`, strips an optional surrounding
    markdown code fence (e.g. ```json ... ```), and returns the parsed
    JSON value.

    Args:
        system_prompt: Content of the "system" role message.
        user_message: Content of the "user" role message.
        on_token: Optional callback forwarded to :meth:`stream_chat`.

    Returns:
        The object produced by ``json.loads`` on the cleaned response.

    Raises:
        ValueError: If the model returned an empty response, or nothing
            remained after removing the markdown fence.
        json.JSONDecodeError: If the cleaned text is not valid JSON.
    """
    text = (await self.stream_chat(system_prompt, user_message, on_token)).strip()
    if not text:
        raise ValueError("LLM returned empty response")
    if text.startswith("```"):
        # Drop the opening fence line (e.g. "```json"), and the closing
        # fence line when present, keeping only the fenced body.
        body_lines = text.split("\n")[1:]
        if body_lines and body_lines[-1].strip().startswith("```"):
            body_lines.pop()
        text = "\n".join(body_lines).strip()
        if not text:
            raise ValueError("LLM returned empty JSON after stripping markdown")
    try:
        return json.loads(text)
    except json.JSONDecodeError:
        # Log a bounded prefix so oversized responses don't flood the log.
        logger.error(f"JSON parse failed. Raw (first 500): {text[:500]}")
        raise
|
||||
async def chat(self, system_prompt: str, user_message: str) -> str:
|
||||
url = f"{self.base_url}/chat/completions"
|
||||
headers = {
|
||||
@@ -56,7 +115,7 @@ class LLMService:
|
||||
logger.error(f"JSON parse failed. Raw response (first 500 chars): {raw[:500]}")
|
||||
raise
|
||||
|
||||
async def extract_characters(self, text: str) -> list[Dict]:
|
||||
async def extract_characters(self, text: str, on_token=None) -> list[Dict]:
|
||||
system_prompt = (
|
||||
"你是一个专业的小说分析助手兼声音导演。请分析给定的小说文本,提取所有出现的角色(包括旁白narrator)。\n"
|
||||
"对每个角色,instruct字段必须是详细的声音导演说明,需覆盖以下六个维度,每个维度单独一句,用换行分隔:\n"
|
||||
@@ -70,10 +129,10 @@ class LLMService:
|
||||
'{"characters": [{"name": "narrator", "description": "第三人称叙述者", "instruct": "音色信息:...\\n身份背景:...\\n年龄设定:...\\n外貌特征:...\\n性格特质:...\\n叙事风格:..."}, ...]}'
|
||||
)
|
||||
user_message = f"请分析以下小说文本并提取角色:\n\n{text[:30000]}"
|
||||
result = await self.chat_json(system_prompt, user_message)
|
||||
result = await self.stream_chat_json(system_prompt, user_message, on_token)
|
||||
return result.get("characters", [])
|
||||
|
||||
async def parse_chapter_segments(self, chapter_text: str, character_names: list[str]) -> list[Dict]:
|
||||
async def parse_chapter_segments(self, chapter_text: str, character_names: list[str], on_token=None) -> list[Dict]:
|
||||
names_str = "、".join(character_names)
|
||||
system_prompt = (
|
||||
"你是一个专业的有声书制作助手。请将给定的章节文本解析为对话片段列表。"
|
||||
@@ -83,7 +142,7 @@ class LLMService:
|
||||
'[{"character": "narrator", "text": "叙述文字"}, {"character": "角色名", "text": "对话内容"}, ...]'
|
||||
)
|
||||
user_message = f"请解析以下章节文本:\n\n{chapter_text}"
|
||||
result = await self.chat_json(system_prompt, user_message)
|
||||
result = await self.stream_chat_json(system_prompt, user_message, on_token)
|
||||
if isinstance(result, list):
|
||||
return result
|
||||
return []
|
||||
|
||||
Reference in New Issue
Block a user