From c8cf27b544d89eed87c6bcdb0299bc1cbebc1527 Mon Sep 17 00:00:00 2001
From: 6vision
Date: Fri, 13 Sep 2024 10:13:23 +0800
Subject: [PATCH 1/3] feat: support o1-preview and o1-mini model

---
 bot/chatgpt/chat_gpt_bot.py | 16 ++++++++++++----
 common/const.py             |  3 +++
 2 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/bot/chatgpt/chat_gpt_bot.py b/bot/chatgpt/chat_gpt_bot.py
index 241b3a7be..8ff334521 100644
--- a/bot/chatgpt/chat_gpt_bot.py
+++ b/bot/chatgpt/chat_gpt_bot.py
@@ -5,7 +5,7 @@
 import openai
 import openai.error
 import requests
-
+from common import const
 from bot.bot import Bot
 from bot.chatgpt.chat_gpt_session import ChatGPTSession
 from bot.openai.open_ai_image import OpenAIImage
@@ -15,7 +15,7 @@
 from common.log import logger
 from common.token_bucket import TokenBucket
 from config import conf, load_config
-
+from bot.baidu.baidu_wenxin_session import BaiduWenxinSession
 
 # OpenAI对话模型API (可用)
 class ChatGPTBot(Bot, OpenAIImage):
@@ -30,10 +30,13 @@ def __init__(self):
             openai.proxy = proxy
         if conf().get("rate_limit_chatgpt"):
             self.tb4chatgpt = TokenBucket(conf().get("rate_limit_chatgpt", 20))
-
+        conf_model = conf().get("model") or "gpt-3.5-turbo"
         self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo")
+        # o1相关模型不支持system prompt,暂时用文心模型的session
+        if conf_model == const.O1 or const.O1_MINI:
+            self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("model") or const.O1_MINI)
         self.args = {
-            "model": conf().get("model") or "gpt-3.5-turbo",  # 对话模型的名称
+            "model": conf_model,  # 对话模型的名称
             "temperature": conf().get("temperature", 0.9),  # 值在[0,1]之间,越大表示回复越具有不确定性
             # "max_tokens":4096,  # 回复最大的字符数
             "top_p": conf().get("top_p", 1),
@@ -42,6 +45,11 @@ def __init__(self):
             "request_timeout": conf().get("request_timeout", None),  # 请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
             "timeout": conf().get("request_timeout", None),  # 重试超时时间,在这个时间内,将会自动重试
         }
+        # o1相关模型固定了部分参数,暂时去掉
+        if conf_model == const.O1 or const.O1_MINI:
+            remove_keys = ["temperature", "top_p", "frequency_penalty", "presence_penalty"]
+            for key in remove_keys:
+                self.args.pop(key, None)  # 如果键不存在,使用 None 来避免抛出错误
 
     def reply(self, query, context=None):
         # acquire reply content
diff --git a/common/const.py b/common/const.py
index e2e6a00e1..9ce7b6742 100644
--- a/common/const.py
+++ b/common/const.py
@@ -38,6 +38,9 @@
 GPT4_06_13 = "gpt-4-0613"
 GPT4_32k_06_13 = "gpt-4-32k-0613"
 
+O1 = "o1-preview"
+O1_MINI = "o1-mini"
+
 WHISPER_1 = "whisper-1"
 TTS_1 = "tts-1"
 TTS_1_HD = "tts-1-hd"

From 5ef929dc42f8c41a5aff35792648dba0b3172460 Mon Sep 17 00:00:00 2001
From: 6vision
Date: Fri, 13 Sep 2024 10:21:38 +0800
Subject: [PATCH 2/3] o1 model support #model

---
 common/const.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/common/const.py b/common/const.py
index 9ce7b6742..db64f5f0c 100644
--- a/common/const.py
+++ b/common/const.py
@@ -62,7 +62,7 @@
 
 MODEL_LIST = [
               GPT35, GPT35_0125, GPT35_1106, "gpt-3.5-turbo-16k",
-              GPT_4o, GPT_4O_0806, GPT_4o_MINI, GPT4_TURBO, GPT4_TURBO_PREVIEW, GPT4_TURBO_01_25, GPT4_TURBO_11_06, GPT4, GPT4_32k, GPT4_06_13, GPT4_32k_06_13,
+              O1, O1_MINI, GPT_4o, GPT_4O_0806, GPT_4o_MINI, GPT4_TURBO, GPT4_TURBO_PREVIEW, GPT4_TURBO_01_25, GPT4_TURBO_11_06, GPT4, GPT4_32k, GPT4_06_13, GPT4_32k_06_13,
               WEN_XIN, WEN_XIN_4,
               XUNFEI, ZHIPU_AI, MOONSHOT, MiniMax,
               GEMINI, GEMINI_PRO, GEMINI_15_flash, GEMINI_15_PRO,

From c4f9acd5c587847fb355f88fcf7eb2212bbb0075 Mon Sep 17 00:00:00 2001
From: 6vision
Date: Fri, 13 Sep 2024 10:48:51 +0800
Subject: [PATCH 3/3] update

---
 bot/chatgpt/chat_gpt_bot.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/bot/chatgpt/chat_gpt_bot.py b/bot/chatgpt/chat_gpt_bot.py
index 8ff334521..154a4229b 100644
--- a/bot/chatgpt/chat_gpt_bot.py
+++ b/bot/chatgpt/chat_gpt_bot.py
@@ -33,8 +33,7 @@ def __init__(self):
         conf_model = conf().get("model") or "gpt-3.5-turbo"
         self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo")
         # o1相关模型不支持system prompt,暂时用文心模型的session
-        if conf_model == const.O1 or const.O1_MINI:
-            self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("model") or const.O1_MINI)
+
         self.args = {
             "model": conf_model,  # 对话模型的名称
             "temperature": conf().get("temperature", 0.9),  # 值在[0,1]之间,越大表示回复越具有不确定性
@@ -46,7 +45,8 @@ def __init__(self):
             "request_timeout": conf().get("request_timeout", None),  # 请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
             "timeout": conf().get("request_timeout", None),  # 重试超时时间,在这个时间内,将会自动重试
         }
         # o1相关模型固定了部分参数,暂时去掉
-        if conf_model == const.O1 or const.O1_MINI:
+        if conf_model in [const.O1, const.O1_MINI]:
+            self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("model") or const.O1_MINI)
             remove_keys = ["temperature", "top_p", "frequency_penalty", "presence_penalty"]
             for key in remove_keys:
                 self.args.pop(key, None)  # 如果键不存在,使用 None 来避免抛出错误
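
Notes on the series: PATCH 3/3 ("update") corrects the model check introduced in PATCH 1/3. The condition `conf_model == const.O1 or const.O1_MINI` is always truthy, because `const.O1_MINI` is a non-empty string, so every configured model would have taken the o1 code path; `conf_model in [const.O1, const.O1_MINI]` tests membership correctly. The same commit moves the BaiduWenxinSession swap inside that corrected check; per the in-code comment, the Wenxin session is reused because o1-preview/o1-mini do not accept a system prompt.

The standalone sketch below illustrates the resulting parameter handling. `build_chat_args` and the plain-dict config are illustrative stand-ins for the project's conf()/self.args wiring, not part of the repository's API; the model names mirror common/const.py.

# Minimal sketch of the o1 argument handling, assuming the model names from common/const.py.
O1 = "o1-preview"
O1_MINI = "o1-mini"


def build_chat_args(conf: dict) -> dict:
    """Build chat-completion kwargs, dropping the sampling parameters that
    o1-preview / o1-mini pin to fixed values."""
    model = conf.get("model") or "gpt-3.5-turbo"
    args = {
        "model": model,
        "temperature": conf.get("temperature", 0.9),
        "top_p": conf.get("top_p", 1),
        "frequency_penalty": conf.get("frequency_penalty", 0.0),
        "presence_penalty": conf.get("presence_penalty", 0.0),
    }
    if model in (O1, O1_MINI):
        # Same removal as the patch: these sampling keys are fixed for o1 models, so drop them.
        for key in ("temperature", "top_p", "frequency_penalty", "presence_penalty"):
            args.pop(key, None)  # pop with a default so a missing key does not raise
    return args


if __name__ == "__main__":
    print(build_chat_args({"model": "o1-mini", "temperature": 0.2}))
    # -> {'model': 'o1-mini'}
    print(build_chat_args({"model": "gpt-4o", "temperature": 0.2}))
    # -> keeps temperature, top_p, frequency_penalty and presence_penalty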