
llm

LLMSDK

Bases: PerplexityConfig, OpenAIConfig

Methods:

Name                   Description
get_search_result      Query Perplexity's online model and return the resulting chat completion.
get_oai_reply          Send a prompt (optionally with image URLs) to the configured OpenAI model.
get_oai_reply_stream   Stream the OpenAI reply as chat-completion chunks.

model

model: ChatModel = Field(
    default="gpt-4.1",
    title="LLM Model Selection",
    description="The OpenAI model to use for completions.",
    frozen=False,
    deprecated=False,
)

system_prompt

system_prompt: str = Field(
    default="\n        Role: I am set up as a knowledgeable assistant with a professional yet friendly tone, here to help you solve problems, provide accurate information, or create content together.\n        Conduct: I avoid giving false, self-contradictory, or unsupported answers, and if I don't know something, I say so directly or help you find the answer.\n        Interaction style: I am concise and direct, and when needed I proactively ask follow-up questions to clarify your goals, especially for technical or writing tasks.\n        ",
    title="System Prompt",
    description="The system prompt for the LLM.",
    frozen=False,
    deprecated=False,
)

client

client: AsyncOpenAI | AsyncAzureOpenAI

api_type

api_type: str = Field(
    default="openai",
    description="The OpenAI API type used when calling models (plain OpenAI or Azure OpenAI).",
    examples=["openai", "azure"],
    validation_alias=AliasChoices("OPENAI_API_TYPE"),
    frozen=False,
    deprecated=False,
)

base_url

base_url: str = Field(
    ...,
    description="The base URL used when calling OpenAI models.",
    examples=["https://api.openai.com/v1", "https://xxxx.openai.azure.com"],
    validation_alias=AliasChoices("OPENAI_BASE_URL", "AZURE_OPENAI_ENDPOINT"),
    frozen=False,
    deprecated=False,
)

api_key

api_key: str = Field(
    ...,
    description="The API key used when calling OpenAI models.",
    examples=["sk-proj-...", "141698ac..."],
    validation_alias=AliasChoices("OPENAI_API_KEY", "AZURE_OPENAI_API_KEY"),
    frozen=False,
    deprecated=False,
)

api_version

api_version: str = Field(
    default="2025-04-01-preview",
    description="The API version used when calling OpenAI models.",
    examples=["2025-04-01-preview"],
    validation_alias=AliasChoices("OPENAI_API_VERSION"),
    frozen=False,
    deprecated=False,
)

pplx_api_key

pplx_api_key: str = Field(
    ...,
    description="The Perplexity API key used when calling models.",
    examples=["pplx-..."],
    validation_alias=AliasChoices("PERPLEXITY_API_KEY"),
    frozen=False,
    deprecated=False,
)
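
The validation aliases above suggest the usual pydantic-settings pattern of reading credentials from the environment. A minimal sketch, assuming LLMSDK can be constructed once those variables are set (all values below are placeholders):

import os

# Placeholder values; the validation aliases above map these variables
# onto api_type, base_url, api_key, and pplx_api_key respectively.
os.environ["OPENAI_API_TYPE"] = "openai"
os.environ["OPENAI_BASE_URL"] = "https://api.openai.com/v1"
os.environ["OPENAI_API_KEY"] = "sk-proj-..."
os.environ["PERPLEXITY_API_KEY"] = "pplx-..."

from src.sdk.llm import LLMSDK  # module path taken from the source listings below

sdk = LLMSDK()

For Azure OpenAI, set AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_KEY, and OPENAI_API_VERSION instead, per the alias choices on base_url, api_key, and api_version.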

get_search_result

get_search_result(prompt: str) -> ChatCompletion
Source code in src/sdk/llm.py
async def get_search_result(self, prompt: str) -> ChatCompletion:
    # Perplexity exposes an OpenAI-compatible API, so AsyncOpenAI can be
    # pointed at its endpoint with the Perplexity key.
    client = AsyncOpenAI(api_key=self.pplx_api_key, base_url="https://api.perplexity.ai")
    response = await client.chat.completions.create(
        model="llama-3.1-sonar-large-128k-online",
        messages=[
            {
                "role": "system",
                "content": "You are an artificial intelligence assistant and you need to engage in a helpful, detailed, polite conversation with a user.",
            },
            {"role": "user", "content": prompt},
        ],
    )
    return response
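
A usage sketch, assuming a configured instance and an event loop (the prompt is illustrative):

import asyncio

async def main() -> None:
    sdk = LLMSDK()
    response = await sdk.get_search_result("What are the latest HTTP/3 deployments?")
    # ChatCompletion: the answer text lives on the first choice's message.
    print(response.choices[0].message.content)

asyncio.run(main())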

get_oai_reply

get_oai_reply(prompt: str, image_urls: Optional[list[str]] = None) -> ChatCompletion
Source code in src/sdk/llm.py
async def get_oai_reply(
    self, prompt: str, image_urls: Optional[list[str]] = None
) -> ChatCompletion:
    # Build the user content (text plus any image parts) before dispatching.
    content = await self._prepare_content(prompt, image_urls)
    completion = await self.client.chat.completions.create(
        model=self.model,
        messages=[
            {"role": "system", "content": self.system_prompt},
            {"role": "user", "content": content},
        ],
    )
    return completion
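
A sketch of a multimodal call, assuming _prepare_content turns image URLs into image content parts (the URL is hypothetical):

async def describe() -> None:
    sdk = LLMSDK()
    completion = await sdk.get_oai_reply(
        prompt="Describe this image in one sentence.",
        image_urls=["https://example.com/cat.png"],  # hypothetical URL
    )
    print(completion.choices[0].message.content)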

get_oai_reply_stream

get_oai_reply_stream(
    prompt: str, image_urls: Optional[list[str]] = None
) -> AsyncGenerator[ChatCompletionChunk, None]
Source code in src/sdk/llm.py
async def get_oai_reply_stream(
    self, prompt: str, image_urls: Optional[list[str]] = None
) -> AsyncGenerator[ChatCompletionChunk, None]:
    content = await self._prepare_content(prompt, image_urls)
    completion: AsyncStream[ChatCompletionChunk] = await self.client.chat.completions.create(
        model=self.model,
        messages=[
            {"role": "system", "content": self.system_prompt},
            {"role": "user", "content": content},
        ],
        stream=True,
    )
    # Some chunks carry no choices; skip them rather than yield empty frames.
    async for chunk in completion:
        if chunk.choices:
            yield chunk
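
Consuming the stream, assuming the standard chunk shape from openai-python (deltas without text, such as a role-only first chunk, are skipped):

async def stream_demo() -> None:
    sdk = LLMSDK()
    async for chunk in sdk.get_oai_reply_stream("Summarize what this SDK does."):
        delta = chunk.choices[0].delta
        if delta.content:  # first chunk may carry only the role, no text
            print(delta.content, end="", flush=True)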