fix: Adjust chunk size in parse_one_chapter to 1500 and add enable_thinking parameter to LLMService methods

This commit is contained in:
2026-03-11 19:05:03 +08:00
parent 75aa310799
commit 4f0d9f5ed6
2 changed files with 4 additions and 1 deletion

View File

@@ -29,6 +29,7 @@ class LLMService:
"temperature": 0.3,
"max_tokens": max_tokens,
"stream": True,
"enable_thinking": False,
}
full_text = ""
timeout = httpx.Timeout(connect=10.0, read=90.0, write=10.0, pool=5.0)
@@ -87,6 +88,8 @@ class LLMService:
{"role": "user", "content": user_message},
],
"temperature": 0.3,
"max_tokens": 8192,
"enable_thinking": False,
}
timeout = httpx.Timeout(connect=10.0, read=90.0, write=10.0, pool=5.0)