Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Gemini chat wrapper fix #233

Merged
merged 35 commits into from
May 24, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
35 commits
Select commit Hold shift + click to select a range
7b754a9
added features to download models from the hugging face model hub/loa…
zyzhang1130 Apr 19, 2024
ea00db0
added customized hyperparameters specification
zyzhang1130 Apr 23, 2024
3e8c468
added docstring and made changes in accordance with the comments
zyzhang1130 Apr 25, 2024
10a9870
decoupled model loading and tokenizer loading. Now can load tokenizer…
zyzhang1130 Apr 25, 2024
5237356
removed unnecessary info in README
zyzhang1130 Apr 25, 2024
a6918eb
resolved all issues flagged by `pre-commit run`
zyzhang1130 Apr 25, 2024
b4f4f40
further removed info irrelevant to model loading and finetuning
zyzhang1130 Apr 25, 2024
e33b3de
Update huggingface_model.py
zyzhang1130 Apr 26, 2024
8023820
updated according to suggestions given
zyzhang1130 May 2, 2024
0a079b9
added updated README
zyzhang1130 May 2, 2024
a4d1f1b
updated README for two examples and tested on 3 model_type.
zyzhang1130 May 5, 2024
6b5410e
undo update to conversation_with_mentions README (created a dedicated…
zyzhang1130 May 6, 2024
6d10051
reverted changes made to conversation_with_RAG_agents\README.md
zyzhang1130 May 6, 2024
db27edd
resolved pre-commit related issues
zyzhang1130 May 6, 2024
b371226
resolved pre-commit related issues
zyzhang1130 May 6, 2024
7f3a012
resolved pre-commit related issues
zyzhang1130 May 6, 2024
15bf79a
resolve issues mentioned
zyzhang1130 May 8, 2024
9998e66
resolve issues raised
zyzhang1130 May 8, 2024
f6b46ed
resolve issues raised
zyzhang1130 May 8, 2024
6bf09f1
Update README.md
zyzhang1130 May 10, 2024
8d7e880
Update README.md
zyzhang1130 May 10, 2024
195ac69
Merge branch 'modelscope:main' into main
zyzhang1130 May 17, 2024
ddfc6f2
Update gemini_model.py
zyzhang1130 May 17, 2024
17918c4
decoupled `conversation_with_agent_with_finetuned_model` from GeminiC…
zyzhang1130 May 21, 2024
fe4d13e
Merge branch 'modelscope:main' into GeminiChatWrapper_fix
zyzhang1130 May 22, 2024
5dfbf00
revert unnecessary changes
zyzhang1130 May 22, 2024
7c3f84d
revert unnecessary changes
zyzhang1130 May 22, 2024
3c58098
Merge branch 'GeminiChatWrapper_fix' of https://github.com/zyzhang113…
zyzhang1130 May 22, 2024
56933fe
revert unnecessary change
zyzhang1130 May 22, 2024
cc568fe
revert unnecessary changes
zyzhang1130 May 22, 2024
d544878
revert unnecessary changes
zyzhang1130 May 22, 2024
202dac3
reformat
DavdGao May 24, 2024
8d24af3
Add detailed information for error report; Reformat
DavdGao May 24, 2024
a37a66e
Fix bug in unittest and reformat
DavdGao May 24, 2024
25d3f58
fix bug
DavdGao May 24, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
59 changes: 55 additions & 4 deletions src/agentscope/models/gemini_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,12 @@

try:
import google.generativeai as genai

# This package will be installed when the google-generativeai is installed
import google.ai.generativelanguage as glm
except ImportError:
genai = None
glm = None


class GeminiWrapperBase(ModelWrapperBase, ABC):
Expand Down Expand Up @@ -42,6 +46,13 @@ def __init__(
"""
super().__init__(config_name=config_name)

# Test if the required package is installed
if genai is None:
raise ImportError(
"The google-generativeai package is not installed, "
"please install it first.",
)

# Load the api_key from argument or environment variable
api_key = api_key or os.environ.get("GOOGLE_API_KEY")

Expand Down Expand Up @@ -149,7 +160,50 @@ def __call__(
**kwargs,
)

# step3: record the api invocation if needed
# step3: Check for candidates and handle accordingly
if (
not response.candidates[0].content
or not response.candidates[0].content.parts
or not response.candidates[0].content.parts[0].text
):
# If we cannot get the response text from the model
finish_reason = response.candidates[0].finish_reason
reasons = glm.Candidate.FinishReason

if finish_reason == reasons.STOP:
error_info = (
"Natural stop point of the model or provided stop "
"sequence."
)
elif finish_reason == reasons.MAX_TOKENS:
error_info = (
"The maximum number of tokens as specified in the request "
"was reached."
)
elif finish_reason == reasons.SAFETY:
error_info = (
"The candidate content was flagged for safety reasons."
)
elif finish_reason == reasons.RECITATION:
error_info = (
"The candidate content was flagged for recitation reasons."
)
elif finish_reason in [
reasons.FINISH_REASON_UNSPECIFIED,
reasons.OTHER,
]:
error_info = "Unknown error."
else:
error_info = "No information provided from Gemini API."

raise ValueError(
"The Google Gemini API failed to generate text response with "
f"the following finish reason: {error_info}\n"
f"YOUR INPUT: {contents}\n"
f"RAW RESPONSE FROM GEMINI API: {response}\n",
)

# step4: record the api invocation if needed
self._save_model_invocation(
arguments={
"contents": contents,
Expand All @@ -160,9 +214,6 @@ def __call__(
)

# step5: update monitor accordingly
# TODO: Up to 2024/03/11, the response from Gemini doesn't contain
# the detailed information about cost. Here we simply count
# the tokens manually.
token_prompt = self.model.count_tokens(contents).total_tokens
token_response = self.model.count_tokens(response.text).total_tokens
self.update_monitor(
Expand Down
20 changes: 20 additions & 0 deletions tests/gemini_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,11 +20,31 @@ def flush() -> None:
MonitorFactory.flush()


class DummyPart:
    """Stub for a single content part of a Gemini response candidate."""

    # Canned reply text that the tests expect the wrapper to surface.
    text = "Hello! How can I help you?"


class DummyContent:
    """Stub for the ``content`` field of a Gemini response candidate."""

    # Exactly one part carrying the canned reply text.
    parts = [DummyPart()]


class DummyCandidate:
    """Stub for one candidate entry in a Gemini API response."""

    # The candidate's content payload, mirroring the real API shape.
    content = DummyContent()


class DummyResponse:
    """Stub mimicking the response object returned by the Gemini API."""

    # Text of the generated reply, as exposed via ``response.text``.
    text = "Hello! How can I help you?"

    # Use an *instance*, not the class: the real API returns candidate
    # objects, and the siblings (DummyContent/DummyPart) are instantiated
    # too. The class-attribute form only worked by accidental attribute
    # lookup on the class object.
    candidates = [DummyCandidate()]

    def __str__(self) -> str:
        """Return a dict-style string representation of the response."""
        return str({"text": self.text})
Expand Down