diff --git a/llm_client/llm_api_client/google_client.py b/llm_client/llm_api_client/google_client.py
index 0e70521..6a11338 100644
--- a/llm_client/llm_api_client/google_client.py
+++ b/llm_client/llm_api_client/google_client.py
@@ -37,8 +37,8 @@ async def text_completion(self, prompt: str, model: Optional[str] = None, max_to
         model = model or self._default_model
         kwargs[PROMPT_KEY] = {TEXT_KEY: prompt}
         kwargs[MAX_TOKENS_KEY] = kwargs.pop(MAX_TOKENS_KEY, max_tokens)
-        if top_p or ("topP" in kwargs):
-            kwargs["topP"] = kwargs.pop("topP", top_p)
+        if top_p:
+            kwargs["topP"] = top_p
         kwargs["temperature"] = kwargs.pop("temperature", temperature)
         response = await self._session.post(self._base_url + model + ":" + COMPLETE_PATH,
                                             params=self._params,
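
A minimal sketch of the request-body construction after this change, for illustration only. The `build_body` helper and the values assigned to `PROMPT_KEY`, `TEXT_KEY`, and `MAX_TOKENS_KEY` are assumptions (they are not shown in the diff); the point is the new `top_p` behaviour: `"topP"` is set only when the caller passes the `top_p` argument, and a `"topP"` entry smuggled in via `**kwargs` is no longer consulted.

```python
from typing import Optional

# Assumed constant values; the real ones live elsewhere in google_client.py.
PROMPT_KEY = "prompt"
TEXT_KEY = "text"
MAX_TOKENS_KEY = "maxOutputTokens"


def build_body(prompt: str, max_tokens: Optional[int] = None,
               temperature: float = 0.0, top_p: Optional[float] = None,
               **kwargs) -> dict:
    """Hypothetical helper mirroring the kwargs handling in text_completion."""
    kwargs[PROMPT_KEY] = {TEXT_KEY: prompt}
    kwargs[MAX_TOKENS_KEY] = kwargs.pop(MAX_TOKENS_KEY, max_tokens)
    # New behaviour from this diff: only the explicit top_p argument sets "topP".
    if top_p:
        kwargs["topP"] = top_p
    kwargs["temperature"] = kwargs.pop("temperature", temperature)
    return kwargs


# Example: top_p controls the sampling parameter; a stray "topP" kwarg is ignored
# by the new branch (it simply passes through kwargs unchanged).
body = build_body("Hello", max_tokens=64, top_p=0.9)
assert body["topP"] == 0.9
```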