diff --git a/qwen3-tts-backend/api/audiobook.py b/qwen3-tts-backend/api/audiobook.py
index 9ccd84c..7551fb6 100644
--- a/qwen3-tts-backend/api/audiobook.py
+++ b/qwen3-tts-backend/api/audiobook.py
@@ -117,7 +117,7 @@ async def create_project(
         title=data.title,
         source_type=data.source_type,
         source_text=data.source_text,
-        llm_model=current_user.llm_model,
+        llm_model=crud.get_system_setting(db, "llm_model"),
     )
     return _project_to_response(project)
 
@@ -150,7 +150,7 @@ async def upload_epub_project(
         title=title,
         source_type="epub",
         source_path=str(file_path),
-        llm_model=current_user.llm_model,
+        llm_model=crud.get_system_setting(db, "llm_model"),
     )
     return _project_to_response(project)
 
@@ -319,8 +319,16 @@ async def continue_script(
         raise HTTPException(status_code=400, detail=f"Project must be in 'ready' or 'done' state, current: {project.status}")
 
     from db.crud import get_system_setting
-    if not get_system_setting(db, "llm_api_key") or not get_system_setting(db, "llm_base_url") or not get_system_setting(db, "llm_model"):
-        raise HTTPException(status_code=400, detail="LLM config not set. Please configure LLM API key first.")
+    cfg = project.script_config or {}
+    if cfg.get("nsfw_mode"):
+        from db.crud import can_user_use_nsfw
+        if not can_user_use_nsfw(current_user):
+            raise HTTPException(status_code=403, detail="NSFW access not granted")
+        if not get_system_setting(db, "grok_api_key") or not get_system_setting(db, "grok_base_url"):
+            raise HTTPException(status_code=400, detail="Grok config not set. Please configure Grok API key first.")
+    else:
+        if not get_system_setting(db, "llm_api_key") or not get_system_setting(db, "llm_base_url") or not get_system_setting(db, "llm_model"):
+            raise HTTPException(status_code=400, detail="LLM config not set. Please configure LLM API key first.")
 
     from core.audiobook_service import continue_ai_script_chapters
     from core.database import SessionLocal
@@ -450,7 +458,8 @@ async def analyze_project(
     if project.status in ("analyzing", "generating", "parsing"):
         raise HTTPException(status_code=400, detail=f"Project is currently {project.status}, please wait")
 
-    if not current_user.llm_api_key or not current_user.llm_base_url or not current_user.llm_model:
+    from db.crud import get_system_setting
+    if not get_system_setting(db, "llm_api_key") or not get_system_setting(db, "llm_base_url") or not get_system_setting(db, "llm_model"):
         raise HTTPException(status_code=400, detail="LLM config not set. Please configure LLM API key first.")
 
     from core.audiobook_service import analyze_project as _analyze
@@ -585,7 +594,8 @@ async def parse_chapter(
     if chapter.status == "parsing":
         raise HTTPException(status_code=400, detail="Chapter is already being parsed")
 
-    if not current_user.llm_api_key or not current_user.llm_base_url or not current_user.llm_model:
+    from db.crud import get_system_setting
+    if not get_system_setting(db, "llm_api_key") or not get_system_setting(db, "llm_base_url") or not get_system_setting(db, "llm_model"):
         raise HTTPException(status_code=400, detail="LLM config not set")
 
     from core.audiobook_service import parse_one_chapter
@@ -616,7 +626,8 @@ async def parse_all_chapters_endpoint(
     if project.status not in ("ready", "generating", "done", "error"):
         raise HTTPException(status_code=400, detail=f"Project must be in 'ready' state, current: {project.status}")
 
-    if not current_user.llm_api_key or not current_user.llm_base_url or not current_user.llm_model:
+    from db.crud import get_system_setting
+    if not get_system_setting(db, "llm_api_key") or not get_system_setting(db, "llm_base_url") or not get_system_setting(db, "llm_model"):
         raise HTTPException(status_code=400, detail="LLM config not set")
 
     from core.audiobook_service import parse_all_chapters
@@ -648,7 +659,8 @@ async def process_all_endpoint(
     if project.status not in ("ready", "generating", "done", "error"):
         raise HTTPException(status_code=400, detail=f"Project must be in 'ready' state, current: {project.status}")
 
-    if not current_user.llm_api_key or not current_user.llm_base_url or not current_user.llm_model:
+    from db.crud import get_system_setting
+    if not get_system_setting(db, "llm_api_key") or not get_system_setting(db, "llm_base_url") or not get_system_setting(db, "llm_model"):
         raise HTTPException(status_code=400, detail="LLM config not set")
 
     from core.audiobook_service import process_all
@@ -972,9 +984,8 @@ async def download_project(
         Path(settings.OUTPUT_DIR) / "audiobook" / str(project_id) / "full.wav"
     )
 
-    if not Path(output_path).exists():
-        from core.audiobook_service import merge_audio_files
-        merge_audio_files(audio_paths, output_path)
+    from core.audiobook_service import merge_audio_files
+    merge_audio_files(audio_paths, output_path)
 
     filename = f"chapter_{chapter}.wav" if chapter is not None else f"{project.title}.wav"
     return FileResponse(output_path, media_type="audio/wav", filename=filename)
diff --git a/qwen3-tts-backend/core/audiobook_service.py b/qwen3-tts-backend/core/audiobook_service.py
index a236ffe..2bfbe86 100644
--- a/qwen3-tts-backend/core/audiobook_service.py
+++ b/qwen3-tts-backend/core/audiobook_service.py
@@ -777,7 +777,7 @@ async def analyze_project(project_id: int, user: User, db: Session, turbo: bool
     previews_dir.mkdir(parents=True, exist_ok=True)
 
     mode_label = "极速并发" if turbo else "顺序"
-    ps.append_line(key, f"\n[LLM] 模型:{user.llm_model},共 {n} 个采样段({mode_label}模式),正在分析角色...\n")
+    ps.append_line(key, f"\n[LLM] 模型:{crud.get_system_setting(db, 'llm_model')},共 {n} 个采样段({mode_label}模式),正在分析角色...\n")
     ps.append_line(key, "")
 
     def on_token(token: str) -> None:
@@ -1144,7 +1144,7 @@ async def generate_project(project_id: int, user: User, db: Session, chapter_ind
                 continue
 
             indextts2 = IndexTTS2Backend()
-            audio_bytes = await indextts2.generate(
+            await indextts2.generate(
                 text=seg.text,
                 spk_audio_prompt=ref_audio,
                 output_path=str(audio_path),
@@ -1152,9 +1152,6 @@ async def generate_project(project_id: int, user: User, db: Session, chapter_ind
                 emo_alpha=seg.emo_alpha if seg.emo_alpha is not None else 0.3,
             )
 
-            with open(audio_path, "wb") as f:
-                f.write(audio_bytes)
-
             crud.update_audiobook_segment_status(db, seg.id, "done", audio_path=str(audio_path))
             logger.info(f"Segment {seg.id} generated: {audio_path}")
 
@@ -1226,7 +1223,7 @@ async def generate_single_segment(segment_id: int, user: User, db: Session) -> N
         return
 
     indextts2 = IndexTTS2Backend()
-    audio_bytes = await indextts2.generate(
+    await indextts2.generate(
         text=seg.text,
         spk_audio_prompt=ref_audio,
         output_path=str(audio_path),
@@ -1234,9 +1231,6 @@ async def generate_single_segment(segment_id: int, user: User, db: Session) -> N
         emo_alpha=seg.emo_alpha if seg.emo_alpha is not None else 0.3,
     )
 
-    with open(audio_path, "wb") as f:
-        f.write(audio_bytes)
-
     crud.update_audiobook_segment_status(db, segment_id, "done", audio_path=str(audio_path))
     logger.info(f"Single segment {segment_id} generated: {audio_path}")
 
@@ -1277,7 +1271,7 @@ async def parse_all_chapters(project_id: int, user: User, db: Session, statuses:
 
     semaphore = asyncio.Semaphore(max_concurrent)
     logger.info(f"parse_all_chapters: project={project_id}, {len(pending)} chapters, concurrency={max_concurrent}")
-    key = f"project_{project_id}"
+    key = str(project_id)
     ps.append_line(key, f"\n[状态] 开启章节并发解析,共 {len(pending)} 章待处理,最大并发: {max_concurrent}...\n")
 
     async def parse_with_limit(chapter):