diff --git a/prompt/llm.py b/prompt/llm.py
index 99b5433..0bfbb8a 100644
--- a/prompt/llm.py
+++ b/prompt/llm.py
@@ -13,8 +13,10 @@ SYSTEM_PROMPT = """你是一个网页生成助手。根据用户的需求描述
 5. 在已有代码基础上修改时,返回完整的修改后代码,不要只返回片段
 6. 由于任何外部链接都被屏蔽,使用纯 HTML、CSS 和 JS 实现功能,不要依赖外部库"""
 
+DEFAULT_MODEL = "deepseek-chat"
+
 # Models served by the ARK (Volcengine) endpoint
-ARK_MODELS = {"doubao-seed-2-0-mini-260215"}
+ARK_MODELS = {"doubao-seed-2-0-lite-260215"}
 
 
 def build_messages(history: list[dict]) -> list[dict]:
@@ -35,21 +37,20 @@ def _get_client(model: str) -> tuple[AsyncOpenAI, str]:
             ),
             model,
         )
-    resolved_model = model or settings.LLM_MODEL
     return (
         AsyncOpenAI(
             api_key=settings.LLM_API_KEY,
             base_url=settings.LLM_BASE_URL,
             timeout=120.0,
         ),
-        resolved_model,
+        model or DEFAULT_MODEL,
     )
 
 
 async def stream_chat(history: list[dict], model: str = ""):
     """Stream chat completion from the LLM. Yields content chunks."""
     messages = build_messages(history)
-    client, resolved_model = _get_client(model or settings.LLM_MODEL)
+    client, resolved_model = _get_client(model)
     async with client as c:
         stream = await c.chat.completions.create(
             model=resolved_model,