💄 style: update Ollama model 240421 (#2130)
MapleEve authored Apr 24, 2024
1 parent 31c16b9 commit e797af0
Showing 5 changed files with 55 additions and 57 deletions.
76 changes: 38 additions & 38 deletions src/config/modelProviders/ollama.ts
@@ -2,148 +2,148 @@ import { ModelProviderCard } from '@/types/llm';

const Ollama: ModelProviderCard = {
chatModels: [
{
displayName: 'Command R 35B',
enabled: true,
id: 'command-r',
tokens: 128_000,
},
{
displayName: 'Command R+ 104B (Q2_K)',
id: 'command-r-plus:104b-q2_K',
tokens: 128_000,
},
{
displayName: 'Gemma 7B',
enabled: true,
functionCall: false,
id: 'gemma',
tokens: 4000,
vision: false,
},
{
displayName: 'Gemma 2B',
enabled: true,
functionCall: false,
id: 'gemma:2b',
tokens: 4000,
vision: false,
},
{
displayName: 'Llama2 Chat 13B',
functionCall: false,
id: 'llama2:13b',
tokens: 4000,
vision: false,
},
{
displayName: 'Llama2 Chat 7B',
enabled: true,
functionCall: false,
id: 'llama2',
tokens: 4000,
vision: false,
},
{
displayName: 'Llama2 Chat 70B',
functionCall: false,
id: 'llama2:70b',
tokens: 4000,
vision: false,
},
{
displayName: 'Llama2 CN 13B',
functionCall: false,
id: 'llama2-chinese:13b',
tokens: 4000,
vision: false,
},
{
displayName: 'Llama2 CN 7B',
functionCall: false,
id: 'llama2-chinese',
tokens: 4000,
vision: false,
},
{
displayName: 'Llama3 8B',
enabled: true,
id: 'llama3',
tokens: 8000,
},
{
displayName: 'Llama3 70B',
id: 'llama3:70b',
tokens: 8000,
},
{
displayName: 'WizardLM 2 7B',
enabled: true,
id: 'wizardlm2',
tokens: 65_536,
},
{
displayName: 'WizardLM 2 8x22B',
id: 'wizardlm2:8x22b',
tokens: 65_536,
},
{
displayName: 'Code Llama 7B',
functionCall: false,
id: 'codellama',
tokens: 16_000,
vision: false,
},
{
displayName: 'Code Llama 34B',
functionCall: false,
id: 'codellama:34b',
tokens: 16_000,
vision: false,
},
{
displayName: 'Code Llama 70B',
functionCall: false,
id: 'codellama:70b',
tokens: 16_000,
vision: false,
},
{
displayName: 'Code Llama 7B (Python)',
functionCall: false,
id: 'codellama:python',
tokens: 16_000,
vision: false,
},
{
displayName: 'Mistral',
enabled: true,
functionCall: false,
id: 'mistral',
tokens: 4800,
vision: false,
},
{
displayName: 'Mixtral 8x7B',
enabled: true,
functionCall: false,
id: 'mixtral',
tokens: 32_000,
vision: false,
},
{
displayName: 'Mixtral 8x22B',
id: 'mixtral:8x22b',
tokens: 64_000,
},
{
displayName: 'Qwen Chat 4B',
functionCall: false,
id: 'qwen',
tokens: 32_768,
vision: false,
},
{
displayName: 'Qwen Chat 7B',
enabled: true,
functionCall: false,
id: 'qwen:7b',
tokens: 32_768,
vision: false,
},
{
displayName: 'Qwen Chat 14B',
functionCall: false,
id: 'qwen:14b',
tokens: 32_768,
vision: false,
},
{
displayName: 'Qwen Chat 72B',
functionCall: false,
id: 'qwen:72b',
tokens: 32_768,
vision: false,
},
{
displayName: 'LLaVA 7B',
functionCall: false,
id: 'llava',
tokens: 4000,
vision: true,
},
{
displayName: 'LLaVA 13B',
functionCall: false,
id: 'llava:13b',
tokens: 4000,
vision: true,
},
{
displayName: 'LLaVA 34B',
functionCall: false,
id: 'llava:34b',
tokens: 4000,
vision: true,
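For orientation, the fields these entries exercise imply a card shape roughly like the sketch below. This is inferred from the usage above, not the actual `ModelProviderCard` definition in `@/types/llm`; any field not shown in the hunk is deliberately left out.

```ts
// Inferred sketch only; the real definitions live in '@/types/llm'.
interface ChatModelCard {
  displayName?: string; // label shown in the model picker
  enabled?: boolean; // surfaced in the default model list when true
  functionCall?: boolean; // whether the model supports function/tool calling
  id: string; // Ollama model tag, e.g. 'llama3:70b'
  tokens?: number; // context window size
  vision?: boolean; // whether the model accepts image input
}

interface ModelProviderCard {
  chatModels: ChatModelCard[];
  // ...provider-level fields not visible in this hunk
}
```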
32 changes: 16 additions & 16 deletions src/libs/agent-runtime/zeroone/index.test.ts
@@ -50,7 +50,7 @@ describe('LobeZeroOneAI', () => {
// Act
const result = await instance.chat({
messages: [{ content: 'Hello', role: 'user' }],
- model: 'mistralai/mistral-7b-instruct:free',
+ model: 'yi-34b-chat-0205',
temperature: 0,
});

@@ -69,7 +69,7 @@ describe('LobeZeroOneAI', () => {
const result = await instance.chat({
max_tokens: 1024,
messages: [{ content: 'Hello', role: 'user' }],
- model: 'mistralai/mistral-7b-instruct:free',
+ model: 'yi-34b-chat-0205',
temperature: 0.7,
top_p: 1,
});
@@ -79,7 +79,7 @@
{
max_tokens: 1024,
messages: [{ content: 'Hello', role: 'user' }],
- model: 'mistralai/mistral-7b-instruct:free',
+ model: 'yi-34b-chat-0205',
temperature: 0.7,
top_p: 1,
},
@@ -89,7 +89,7 @@
});

describe('Error', () => {
- it('should return OpenRouterBizError with an openai error response when OpenAI.APIError is thrown', async () => {
+ it('should return ZeroOneBizError with an openai error response when OpenAI.APIError is thrown', async () => {
// Arrange
const apiError = new OpenAI.APIError(
400,
@@ -109,7 +109,7 @@
try {
await instance.chat({
messages: [{ content: 'Hello', role: 'user' }],
- model: 'mistralai/mistral-7b-instruct:free',
+ model: 'yi-34b-chat-0205',
temperature: 0,
});
} catch (e) {
@@ -125,15 +125,15 @@
}
});

- it('should throw AgentRuntimeError with InvalidOpenRouterAPIKey if no apiKey is provided', async () => {
+ it('should throw AgentRuntimeError with InvalidZeroOneAPIKey if no apiKey is provided', async () => {
try {
new LobeZeroOneAI({});
} catch (e) {
expect(e).toEqual({ errorType: invalidErrorType });
}
});

- it('should return OpenRouterBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
+ it('should return ZeroOneBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
// Arrange
const errorInfo = {
stack: 'abc',
@@ -149,7 +149,7 @@
try {
await instance.chat({
messages: [{ content: 'Hello', role: 'user' }],
- model: 'mistralai/mistral-7b-instruct:free',
+ model: 'yi-34b-chat-0205',
temperature: 0,
});
} catch (e) {
Expand All @@ -165,7 +165,7 @@ describe('LobeZeroOneAI', () => {
}
});

- it('should return OpenRouterBizError with a cause response with a desensitized URL', async () => {
+ it('should return ZeroOneBizError with a cause response with a desensitized URL', async () => {
// Arrange
const errorInfo = {
stack: 'abc',
@@ -185,7 +185,7 @@
try {
await instance.chat({
messages: [{ content: 'Hello', role: 'user' }],
- model: 'mistralai/mistral-7b-instruct:free',
+ model: 'yi-34b-chat-0205',
temperature: 0,
});
} catch (e) {
Expand All @@ -201,7 +201,7 @@ describe('LobeZeroOneAI', () => {
}
});

- it('should throw an InvalidOpenRouterAPIKey error type on 401 status code', async () => {
+ it('should throw an InvalidZeroOneAPIKey error type on 401 status code', async () => {
// Mock the API call to simulate a 401 error
const error = new Error('Unauthorized') as any;
error.status = 401;
@@ -210,7 +210,7 @@
try {
await instance.chat({
messages: [{ content: 'Hello', role: 'user' }],
- model: 'mistralai/mistral-7b-instruct:free',
+ model: 'yi-34b-chat-0205',
temperature: 0,
});
} catch (e) {
@@ -234,7 +234,7 @@
try {
await instance.chat({
messages: [{ content: 'Hello', role: 'user' }],
- model: 'mistralai/mistral-7b-instruct:free',
+ model: 'yi-34b-chat-0205',
temperature: 0,
});
} catch (e) {
@@ -265,7 +265,7 @@ describe('LobeZeroOneAI', () => {
id: 'chatcmpl-8xDx5AETP8mESQN7UB30GxTN2H1SO',
object: 'chat.completion.chunk',
created: 1709125675,
- model: 'mistralai/mistral-7b-instruct:free',
+ model: 'yi-34b-chat-0205',
system_fingerprint: 'fp_86156a94a0',
choices: [
{ index: 0, delta: { content: 'hello' }, logprobs: null, finish_reason: null },
@@ -287,7 +287,7 @@ describe('LobeZeroOneAI', () => {
const result = await instance.chat(
{
messages: [{ content: 'Hello', role: 'user' }],
- model: 'mistralai/mistral-7b-instruct:free',
+ model: 'yi-34b-chat-0205',
temperature: 0,
},
{ callback: mockCallback, headers: mockHeaders },
@@ -335,7 +335,7 @@ describe('LobeZeroOneAI', () => {
// Hypothetical test invocation; adjust to the actual setup as needed
await instance.chat({
messages: [{ content: 'Hello', role: 'user' }],
- model: 'mistralai/mistral-7b-instruct:free',
+ model: 'yi-34b-chat-0205',
temperature: 0,
});

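All sixteen renamed cases in this suite follow the same arrange/act/assert shape. Below is a minimal sketch of the error path, assuming a Vitest setup in which the runtime exposes its OpenAI client as `instance['client']`; the mock wiring and the `bizErrorType` constant are illustrative stand-ins, not the suite's exact fixtures.

```ts
// Illustrative sketch of the pattern above; setup names are assumed, not copied from the suite.
import OpenAI from 'openai';
import { expect, it, vi } from 'vitest';

import { LobeZeroOneAI } from './index';

const instance = new LobeZeroOneAI({ apiKey: 'test-key' });
const bizErrorType = 'ZeroOneBizError'; // stand-in for the suite's error-type constant

it('maps OpenAI.APIError to the provider biz error', async () => {
  // Arrange: force the mocked completions endpoint to reject with an APIError
  const apiError = new OpenAI.APIError(400, { error: 'Bad Request' }, 'Bad Request', undefined);
  vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);

  // Act + Assert: chat() should surface the mapped error rather than the raw SDK error
  try {
    await instance.chat({
      messages: [{ content: 'Hello', role: 'user' }],
      model: 'yi-34b-chat-0205',
      temperature: 0,
    });
  } catch (e) {
    expect((e as any).errorType).toBe(bizErrorType);
  }
});
```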
1 change: 0 additions & 1 deletion src/migrations/FromV3ToV4/fixtures/ollama-output-v4.json
@@ -47,7 +47,6 @@
{
"displayName": "LLaVA 7B",
"enabled": true,
"functionCall": false,
"id": "llava",
"tokens": 4000,
"vision": true
1 change: 0 additions & 1 deletion src/store/global/slices/settings/actions/llm.test.ts
@@ -110,7 +110,6 @@ describe('LLMSettingsSliceAction', () => {
// Assert that setModelProviderConfig was not called
expect(ollamaList?.chatModels.find((c) => c.id === 'llava')).toEqual({
displayName: 'LLaVA 7B',
- functionCall: false,
enabled: true,
id: 'llava',
tokens: 4000,
@@ -85,7 +85,7 @@ describe('modelProviderSelectors', () => {
});

describe('modelEnabledFiles', () => {
- it.skip('should return false if the model does not have file ability', () => {
+ it('should return false if the model does not have file ability', () => {
const enabledFiles = modelProviderSelectors.isModelEnabledFiles('gpt-4-vision-preview')(
useGlobalStore.getState(),
);
