diff --git a/bot/chatgpt/chat_gpt_bot.py b/bot/chatgpt/chat_gpt_bot.py
index 241b3a7be..154a4229b 100644
--- a/bot/chatgpt/chat_gpt_bot.py
+++ b/bot/chatgpt/chat_gpt_bot.py
@@ -5,7 +5,7 @@
 import openai
 import openai.error
 import requests
-
+from common import const
 from bot.bot import Bot
 from bot.chatgpt.chat_gpt_session import ChatGPTSession
 from bot.openai.open_ai_image import OpenAIImage
@@ -15,7 +15,7 @@
 from common.log import logger
 from common.token_bucket import TokenBucket
 from config import conf, load_config
-
+from bot.baidu.baidu_wenxin_session import BaiduWenxinSession
 
 # OpenAI chat model API (available)
 class ChatGPTBot(Bot, OpenAIImage):
@@ -30,10 +30,12 @@ def __init__(self):
             openai.proxy = proxy
         if conf().get("rate_limit_chatgpt"):
             self.tb4chatgpt = TokenBucket(conf().get("rate_limit_chatgpt", 20))
-
+        conf_model = conf().get("model") or "gpt-3.5-turbo"
         self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo")
+        # o1 models do not support the system prompt; temporarily reuse the Wenxin model's session
+
         self.args = {
-            "model": conf().get("model") or "gpt-3.5-turbo",  # name of the chat model
+            "model": conf_model,  # name of the chat model
             "temperature": conf().get("temperature", 0.9),  # value in [0,1]; larger values make replies more random
             # "max_tokens":4096,  # maximum number of characters in the reply
             "top_p": conf().get("top_p", 1),
@@ -42,6 +44,12 @@ def __init__(self):
             "request_timeout": conf().get("request_timeout", None),  # request timeout; the OpenAI API defaults to 600, and hard questions usually need longer
             "timeout": conf().get("request_timeout", None),  # retry timeout; retries happen automatically within this window
         }
+        # o1 models fix some of these parameters, so drop them for now
+        if conf_model in [const.O1, const.O1_MINI]:
+            self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("model") or const.O1_MINI)
+            remove_keys = ["temperature", "top_p", "frequency_penalty", "presence_penalty"]
+            for key in remove_keys:
+                self.args.pop(key, None)  # default to None so a missing key does not raise
 
     def reply(self, query, context=None):
         # acquire reply content
diff --git a/common/const.py b/common/const.py
index e2e6a00e1..db64f5f0c 100644
--- a/common/const.py
+++ b/common/const.py
@@ -38,6 +38,9 @@
 GPT4_06_13 = "gpt-4-0613"
 GPT4_32k_06_13 = "gpt-4-32k-0613"
 
+O1 = "o1-preview"
+O1_MINI = "o1-mini"
+
 WHISPER_1 = "whisper-1"
 TTS_1 = "tts-1"
 TTS_1_HD = "tts-1-hd"
@@ -59,7 +62,7 @@ MODEL_LIST = [
               GPT35, GPT35_0125, GPT35_1106, "gpt-3.5-turbo-16k",
-              GPT_4o, GPT_4O_0806, GPT_4o_MINI, GPT4_TURBO, GPT4_TURBO_PREVIEW, GPT4_TURBO_01_25, GPT4_TURBO_11_06, GPT4, GPT4_32k, GPT4_06_13, GPT4_32k_06_13,
+              O1, O1_MINI, GPT_4o, GPT_4O_0806, GPT_4o_MINI, GPT4_TURBO, GPT4_TURBO_PREVIEW, GPT4_TURBO_01_25, GPT4_TURBO_11_06, GPT4, GPT4_32k, GPT4_06_13, GPT4_32k_06_13,
               WEN_XIN, WEN_XIN_4, XUNFEI, ZHIPU_AI, MOONSHOT, MiniMax, GEMINI, GEMINI_PRO, GEMINI_15_flash, GEMINI_15_PRO,
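
Reviewer note: the sketch below is illustrative only and is not part of the patch. It mirrors the o1 branch added in __init__ so the net effect is easy to see: for o1-preview / o1-mini the sampling parameters are never sent, while other models keep the original args. The build_args helper is a stand-in name, a plain dict replaces conf(), and the constants are copied from common/const.py; the real code also swaps ChatGPTSession for BaiduWenxinSession to avoid emitting a system message, which this sketch does not reproduce.

    # Standalone sketch of the parameter stripping added for o1 models.
    O1 = "o1-preview"
    O1_MINI = "o1-mini"

    def build_args(conf: dict) -> dict:
        model = conf.get("model") or "gpt-3.5-turbo"
        args = {
            "model": model,
            "temperature": conf.get("temperature", 0.9),
            "top_p": conf.get("top_p", 1),
            "frequency_penalty": conf.get("frequency_penalty", 0.0),
            "presence_penalty": conf.get("presence_penalty", 0.0),
            "request_timeout": conf.get("request_timeout"),
            "timeout": conf.get("request_timeout"),
        }
        if model in (O1, O1_MINI):
            # o1 models fix these parameters server-side, so omit them from the request
            for key in ("temperature", "top_p", "frequency_penalty", "presence_penalty"):
                args.pop(key, None)
        return args

    print(build_args({"model": O1_MINI}))
    # {'model': 'o1-mini', 'request_timeout': None, 'timeout': None}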