From 213ca8ef789d508a671ddcd0189ebb79d6491c17 Mon Sep 17 00:00:00 2001
From: uripeled2
Date: Sat, 1 Jul 2023 00:23:18 +0300
Subject: [PATCH 1/4] update anthropic version

---
 llm_client/__init__.py                        |  2 +-
 llm_client/llm_api_client/anthropic_client.py |  8 ++-
 pyproject.toml                                |  2 +-
 .../anthropic_client/conftest.py              | 22 +++++++-
 .../anthropic_client/test_anthropic_client.py | 50 +++++++++++++------
 5 files changed, 65 insertions(+), 19 deletions(-)

diff --git a/llm_client/__init__.py b/llm_client/__init__.py
index 3ef6e77..0a5c2d3 100644
--- a/llm_client/__init__.py
+++ b/llm_client/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "0.6.1"
+__version__ = "0.6.2"
 
 from llm_client.base_llm_client import BaseLLMClient
 
diff --git a/llm_client/llm_api_client/anthropic_client.py b/llm_client/llm_api_client/anthropic_client.py
index ee601cd..31a1070 100644
--- a/llm_client/llm_api_client/anthropic_client.py
+++ b/llm_client/llm_api_client/anthropic_client.py
@@ -1,6 +1,6 @@
 from typing import Optional
 
-from anthropic import count_tokens
+from anthropic import AsyncAnthropic
 
 from llm_client.llm_api_client.base_llm_api_client import BaseLLMAPIClient, LLMAPIClientConfig
 from llm_client.consts import PROMPT_KEY
@@ -10,6 +10,7 @@
 COMPLETIONS_KEY = "completion"
 AUTH_HEADER = "x-api-key"
 ACCEPT_HEADER = "Accept"
+VERSION_HEADER = "anthropic-version"
 ACCEPT_VALUE = "application/json"
 MAX_TOKENS_KEY = "max_tokens_to_sample"
 
@@ -19,6 +20,9 @@ def __init__(self, config: LLMAPIClientConfig):
         super().__init__(config)
         if self._base_url is None:
             self._base_url = BASE_URL
+        self._anthropic = AsyncAnthropic()
+        if self._headers.get(VERSION_HEADER) is None:
+            self._headers[VERSION_HEADER] = self._anthropic.default_headers[VERSION_HEADER]
         self._headers[ACCEPT_HEADER] = ACCEPT_VALUE
         self._headers[AUTH_HEADER] = self._api_key
 
@@ -40,4 +44,4 @@ async def text_completion(self, prompt: str, model: Optional[str] = None, max_to
         return [response_json[COMPLETIONS_KEY]]
 
     async def get_tokens_count(self, text: str, **kwargs) -> int:
-        return count_tokens(text)
+        return await self._anthropic.count_tokens(text)
diff --git a/pyproject.toml b/pyproject.toml
index ae39902..2eb4fd5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -43,7 +43,7 @@ huggingface = [
     "transformers >= 4.0.0"
 ]
 anthropic = [
-    "anthropic >= 0.2.0"
+    "anthropic >= 0.3.0"
 ]
 google = [
     "google-generativeai >= 0.1.0"
diff --git a/tests/llm_api_client/anthropic_client/conftest.py b/tests/llm_api_client/anthropic_client/conftest.py
index 1f04f49..f0b5463 100644
--- a/tests/llm_api_client/anthropic_client/conftest.py
+++ b/tests/llm_api_client/anthropic_client/conftest.py
@@ -1,7 +1,9 @@
+from unittest.mock import patch, AsyncMock
+
 import pytest
 
 from llm_client import AnthropicClient
-from llm_client.llm_api_client.anthropic_client import BASE_URL, COMPLETE_PATH
+from llm_client.llm_api_client.anthropic_client import BASE_URL, COMPLETE_PATH, VERSION_HEADER
 from llm_client.llm_api_client.base_llm_api_client import LLMAPIClientConfig
 
 
@@ -23,3 +25,21 @@ def llm_client(config):
     return AnthropicClient(config)
 
 
 @pytest.fixture
 def complete_url():
     return BASE_URL + COMPLETE_PATH
+
+
+@pytest.fixture
+def number_of_tokens():
+    return 10
+
+
+@pytest.fixture
+def anthropic_version():
+    return "2023-06-01"
+
+
+@pytest.fixture(autouse=True)
+def mock_anthropic(number_of_tokens, anthropic_version):
+    with patch("llm_client.llm_api_client.anthropic_client.AsyncAnthropic") as mock_anthropic:
+        mock_anthropic.return_value.count_tokens = AsyncMock(return_value=number_of_tokens)
+        mock_anthropic.return_value.default_headers = {VERSION_HEADER: anthropic_version}
+        yield mock_anthropic
diff --git a/tests/llm_api_client/anthropic_client/test_anthropic_client.py b/tests/llm_api_client/anthropic_client/test_anthropic_client.py
index d9706e2..3ecb218 100644
--- a/tests/llm_api_client/anthropic_client/test_anthropic_client.py
+++ b/tests/llm_api_client/anthropic_client/test_anthropic_client.py
@@ -1,11 +1,9 @@
-from unittest.mock import patch
-
 import pytest
 
 from llm_client import LLMAPIClientFactory, LLMAPIClientType, AnthropicClient
 from llm_client.consts import PROMPT_KEY, MODEL_KEY
 from llm_client.llm_api_client.anthropic_client import AUTH_HEADER, COMPLETIONS_KEY, MAX_TOKENS_KEY, ACCEPT_HEADER, \
-    ACCEPT_VALUE
+    ACCEPT_VALUE, VERSION_HEADER
 
 
 @pytest.mark.asyncio
@@ -18,18 +16,41 @@ async def test_get_llm_api_client__with_anthropic(config):
 
 
 @pytest.mark.asyncio
-async def test_text_completion__sanity(mock_aioresponse, llm_client, complete_url):
+async def test_text_completion__sanity(mock_aioresponse, llm_client, complete_url, anthropic_version):
+    mock_aioresponse.post(
+        complete_url,
+        payload={COMPLETIONS_KEY: "completion text"}
+    )
+
+    actual = await llm_client.text_completion(prompt="These are a few of my favorite", max_tokens=10)
+
+    assert actual == ["completion text"]
+    mock_aioresponse.assert_called_once_with(complete_url, method='POST',
+                                             headers={AUTH_HEADER: llm_client._api_key,
+                                                      ACCEPT_HEADER: ACCEPT_VALUE,
+                                                      VERSION_HEADER: anthropic_version},
+                                             json={PROMPT_KEY: 'These are a few of my favorite',
+                                                   MAX_TOKENS_KEY: 10, "temperature": 1,
+                                                   MODEL_KEY: llm_client._default_model},
+                                             raise_for_status=True)
+
+
+@pytest.mark.asyncio
+async def test_text_completion__with_version_header(mock_aioresponse, config, complete_url):
     mock_aioresponse.post(
         complete_url,
         payload={COMPLETIONS_KEY: "completion text"}
     )
+    config.headers[VERSION_HEADER] = "1.0.0"
+    llm_client = AnthropicClient(config)
 
     actual = await llm_client.text_completion(prompt="These are a few of my favorite", max_tokens=10)
 
     assert actual == ["completion text"]
     mock_aioresponse.assert_called_once_with(complete_url, method='POST',
                                              headers={AUTH_HEADER: llm_client._api_key,
-                                                      ACCEPT_HEADER: ACCEPT_VALUE},
+                                                      ACCEPT_HEADER: ACCEPT_VALUE,
+                                                      VERSION_HEADER: "1.0.0"},
                                              json={PROMPT_KEY: 'These are a few of my favorite',
                                                    MAX_TOKENS_KEY: 10, "temperature": 1,
                                                    MODEL_KEY: llm_client._default_model},
                                              raise_for_status=True)
@@ -43,7 +64,7 @@ async def test_text_completion__without_max_tokens_raise_value_error(mock_aiores
 
 
 @pytest.mark.asyncio
-async def test_text_completion__override_model(mock_aioresponse, llm_client, complete_url):
+async def test_text_completion__override_model(mock_aioresponse, llm_client, complete_url, anthropic_version):
     new_model_name = "claude-instant"
     mock_aioresponse.post(
         complete_url,
@@ -56,7 +77,8 @@ async def test_text_completion__override_model(mock_aioresponse, llm_client, com
     assert actual == ["completion text"]
     mock_aioresponse.assert_called_once_with(complete_url, method='POST',
                                              headers={AUTH_HEADER: llm_client._api_key,
-                                                      ACCEPT_HEADER: ACCEPT_VALUE},
+                                                      ACCEPT_HEADER: ACCEPT_VALUE,
+                                                      VERSION_HEADER: anthropic_version},
                                              json={PROMPT_KEY: 'These are a few of my favorite',
                                                    MAX_TOKENS_KEY: 10, "temperature": 1,
                                                    MODEL_KEY: new_model_name},
                                              raise_for_status=True)
@@ -64,7 +86,7 @@
 
 
 @pytest.mark.asyncio
-async def test_text_completion__with_kwargs(mock_aioresponse, llm_client, complete_url):
+async def test_text_completion__with_kwargs(mock_aioresponse, llm_client, complete_url, anthropic_version):
     mock_aioresponse.post(
         complete_url,
         payload={COMPLETIONS_KEY: "completion text"}
     )
@@ -75,7 +97,8 @@ async def test_text_completion__with_kwargs(mock_aioresponse, llm_client, comple
     assert actual == ["completion text"]
     mock_aioresponse.assert_called_once_with(complete_url, method='POST',
                                              headers={AUTH_HEADER: llm_client._api_key,
-                                                      ACCEPT_HEADER: ACCEPT_VALUE},
+                                                      ACCEPT_HEADER: ACCEPT_VALUE,
+                                                      VERSION_HEADER: anthropic_version},
                                              json={PROMPT_KEY: 'These are a few of my favorite',
                                                    MAX_TOKENS_KEY: 10,
                                                    MODEL_KEY: llm_client._default_model,
                                              raise_for_status=True)
@@ -84,9 +107,8 @@
 
 
 @pytest.mark.asyncio
-async def test_get_tokens_count__sanity(llm_client):
-    with patch("llm_client.llm_api_client.anthropic_client.count_tokens") as mock_count_tokens:
-        actual = await llm_client.get_tokens_count(text="These are a few of my favorite things!")
+async def test_get_tokens_count__sanity(llm_client, number_of_tokens, mock_anthropic):
+    actual = await llm_client.get_tokens_count(text="These are a few of my favorite things!")
 
-        assert actual == mock_count_tokens.return_value
-        mock_count_tokens.assert_called_once_with("These are a few of my favorite things!")
+    assert actual == 10
+    mock_anthropic.return_value.count_tokens.assert_awaited_once_with("These are a few of my favorite things!")

From 4370e01a663fb798ede8ed712df92f23e044f8bd Mon Sep 17 00:00:00 2001
From: uripeled2
Date: Sat, 1 Jul 2023 00:43:15 +0300
Subject: [PATCH 2/4] fix import in test_anthropic_client.py

---
 tests/llm_api_client/anthropic_client/test_anthropic_client.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tests/llm_api_client/anthropic_client/test_anthropic_client.py b/tests/llm_api_client/anthropic_client/test_anthropic_client.py
index 3ecb218..28c6829 100644
--- a/tests/llm_api_client/anthropic_client/test_anthropic_client.py
+++ b/tests/llm_api_client/anthropic_client/test_anthropic_client.py
@@ -1,6 +1,7 @@
 import pytest
 
-from llm_client import LLMAPIClientFactory, LLMAPIClientType, AnthropicClient
+from llm_client import LLMAPIClientFactory, LLMAPIClientType
+from llm_client.llm_api_client.anthropic_client import AnthropicClient
 from llm_client.consts import PROMPT_KEY, MODEL_KEY
 from llm_client.llm_api_client.anthropic_client import AUTH_HEADER, COMPLETIONS_KEY, MAX_TOKENS_KEY, ACCEPT_HEADER, \
     ACCEPT_VALUE, VERSION_HEADER

From a607e2675e06d8b9ddfb6d3e95d7dbc2014dda54 Mon Sep 17 00:00:00 2001
From: uripeled2
Date: Sat, 1 Jul 2023 00:45:28 +0300
Subject: [PATCH 3/4] fix import in test_anthropic_client.py

---
 tests/llm_api_client/anthropic_client/conftest.py              | 3 +--
 tests/llm_api_client/anthropic_client/test_anthropic_client.py | 3 +--
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/tests/llm_api_client/anthropic_client/conftest.py b/tests/llm_api_client/anthropic_client/conftest.py
index f0b5463..97e49cb 100644
--- a/tests/llm_api_client/anthropic_client/conftest.py
+++ b/tests/llm_api_client/anthropic_client/conftest.py
@@ -2,8 +2,7 @@
 
 import pytest
 
-from llm_client import AnthropicClient
-from llm_client.llm_api_client.anthropic_client import BASE_URL, COMPLETE_PATH, VERSION_HEADER
+from llm_client.llm_api_client.anthropic_client import BASE_URL, COMPLETE_PATH, VERSION_HEADER, AnthropicClient
 from llm_client.llm_api_client.base_llm_api_client import LLMAPIClientConfig
 
 
diff --git a/tests/llm_api_client/anthropic_client/test_anthropic_client.py b/tests/llm_api_client/anthropic_client/test_anthropic_client.py
index 28c6829..cdadf56 100644
--- a/tests/llm_api_client/anthropic_client/test_anthropic_client.py
+++ b/tests/llm_api_client/anthropic_client/test_anthropic_client.py
@@ -1,10 +1,9 @@
 import pytest
 
 from llm_client import LLMAPIClientFactory, LLMAPIClientType
-from llm_client.llm_api_client.anthropic_client import AnthropicClient
 from llm_client.consts import PROMPT_KEY, MODEL_KEY
 from llm_client.llm_api_client.anthropic_client import AUTH_HEADER, COMPLETIONS_KEY, MAX_TOKENS_KEY, ACCEPT_HEADER, \
-    ACCEPT_VALUE, VERSION_HEADER
+    ACCEPT_VALUE, VERSION_HEADER, AnthropicClient
 
 
 @pytest.mark.asyncio

From 170cf5e8c7fc9e67c314ef8776a9a62fca64df02 Mon Sep 17 00:00:00 2001
From: Uri Peled
Date: Sat, 1 Jul 2023 09:26:04 +0300
Subject: [PATCH 4/4] Update anthropic version

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 2eb4fd5..a739846 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -43,7 +43,7 @@ huggingface = [
     "transformers >= 4.0.0"
 ]
 anthropic = [
-    "anthropic >= 0.3.0"
+    "anthropic >= 0.3.2"
 ]
 google = [
     "google-generativeai >= 0.1.0"
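
For anyone who wants to sanity-check the series locally, here is a minimal usage sketch. It is illustrative only: it assumes the anthropic extra is installed at >= 0.3.2, that LLMAPIClientConfig takes an API key, an aiohttp session and an optional default_model (as in the project README), and the model name and prompts below are placeholders. Since PATCH 1 constructs AsyncAnthropic() with no arguments, the ANTHROPIC_API_KEY environment variable likely needs to be set as well.

import asyncio
import os

from aiohttp import ClientSession

from llm_client.llm_api_client.anthropic_client import AnthropicClient
from llm_client.llm_api_client.base_llm_api_client import LLMAPIClientConfig


async def main() -> None:
    # AsyncAnthropic() inside AnthropicClient.__init__ reads this env var too.
    api_key = os.environ["ANTHROPIC_API_KEY"]
    async with ClientSession() as session:
        # "claude-instant-1" is a placeholder model name, not mandated by the patches.
        config = LLMAPIClientConfig(api_key, session, default_model="claude-instant-1")
        client = AnthropicClient(config)
        # PATCH 1: the constructor copies the anthropic-version header from the
        # SDK's default_headers when the config did not supply one.
        print(await client.text_completion(prompt="\n\nHuman: Hello\n\nAssistant:", max_tokens=16))
        # PATCH 1: token counting is now delegated to AsyncAnthropic.count_tokens and awaited.
        print(await client.get_tokens_count(text="These are a few of my favorite things!"))


asyncio.run(main())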