e2e: init llamaindex e2e test (#697)
himself65 authored Apr 7, 2024
1 parent e85893a commit aac1ee3
Showing 18 changed files with 407 additions and 6 deletions.
19 changes: 19 additions & 0 deletions .github/workflows/test.yml
@@ -2,7 +2,26 @@ name: Run Tests

on: [push, pull_request]

+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 jobs:
+  e2e:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: pnpm/action-setup@v2
+      - name: Setup Node.js
+        uses: actions/setup-node@v4
+        with:
+          node-version-file: ".nvmrc"
+          cache: "pnpm"
+      - name: Install dependencies
+        run: pnpm install
+      - name: Run E2E Tests
+        run: pnpm run e2e
+
   test:
     strategy:
       fail-fast: false
1 change: 1 addition & 0 deletions package.json
@@ -9,6 +9,7 @@
"format:write": "prettier --ignore-unknown --write .",
"lint": "turbo run lint",
"prepare": "husky",
"e2e": "turbo run e2e",
"test": "turbo run test",
"type-check": "tsc -b --diagnostics",
"release": "pnpm run check-minor-version && pnpm run build:release && changeset publish",
1 change: 1 addition & 0 deletions packages/core/e2e/.gitignore
@@ -0,0 +1 @@
logs
38 changes: 38 additions & 0 deletions packages/core/e2e/README.md
@@ -0,0 +1,38 @@
# LlamaIndexTS Core E2E Tests

## Overview

We use the built-in Node.js test runner for the LlamaIndexTS Core E2E tests.

This setup supports:

- Running tests in parallel
- A pure Node.js environment
- Switching between the mock and the real LLM API
- Customizable test logic

## Usage

- Run with the mock register:

```shell
node --import tsx --import ./mock-register.js --test ./node/basic.e2e.ts
```

- Run without the mock register:

```shell
node --import tsx --test ./node/basic.e2e.ts
```

- Run a specific test by name pattern:

```shell
node --import tsx --import ./mock-register.js --test-name-pattern=agent --test ./node/basic.e2e.ts
```

- Run with debug logs:

```shell
CONSOLA_LEVEL=5 node --import tsx --import ./mock-register.js --test-name-pattern=agent --test ./node/basic.e2e.ts
```
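
For orientation, a minimal test file under `./node/` follows the shape below — a sketch distilled from `basic.e2e.ts` in this commit, not an additional file. With `mock-register.js` loaded, the `OpenAI` import resolves to the fixture mock rather than the real client.

```ts
import { ok } from "node:assert";
import { test } from "node:test";
import { OpenAI, Settings } from "llamaindex";

// Under mock-register.js, this OpenAI class resolves to
// ./fixtures/llm/open_ai.ts instead of the real API client.
Settings.llm = new OpenAI({ model: "gpt-3.5-turbo" });

test("llm.chat returns a string message", async () => {
  const response = await Settings.llm.chat({
    messages: [{ content: "Hello", role: "user" }],
  });
  ok(typeof response.message.content === "string");
});
```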
68 changes: 68 additions & 0 deletions packages/core/e2e/fixtures/llm/open_ai.ts
@@ -0,0 +1,68 @@
// Mock of the OpenAI LLM from llamaindex: chat() returns faker-generated
// text instead of calling the real API; complete() is not implemented.
import { faker } from "@faker-js/faker";
import type {
  ChatResponse,
  ChatResponseChunk,
  CompletionResponse,
  LLM,
  LLMChatParamsNonStreaming,
  LLMChatParamsStreaming,
  LLMCompletionParamsNonStreaming,
  LLMCompletionParamsStreaming,
} from "llamaindex/llm/types";

// Stubs matching the shape of the real llm module.
export function getOpenAISession() {
  return {};
}

export function isFunctionCallingModel() {
  return true;
}

export class OpenAI implements LLM {
  get metadata() {
    return {
      model: "mock-model",
      temperature: 0.1,
      topP: 1,
      contextWindow: 2048,
      tokenizer: undefined,
      isFunctionCallingModel: true,
    };
  }
  chat(
    params: LLMChatParamsStreaming<Record<string, unknown>>,
  ): Promise<AsyncIterable<ChatResponseChunk>>;
  chat(
    params: LLMChatParamsNonStreaming<Record<string, unknown>>,
  ): Promise<ChatResponse>;
  chat(
    params:
      | LLMChatParamsStreaming<Record<string, unknown>>
      | LLMChatParamsNonStreaming<Record<string, unknown>>,
  ): unknown {
    if (params.stream) {
      return {
        [Symbol.asyncIterator]: async function* () {
          yield {
            delta: faker.word.words(),
          } satisfies ChatResponseChunk;
        },
      };
    }
    return {
      message: {
        content: faker.lorem.paragraph(),
        role: "assistant",
      },
    } satisfies ChatResponse;
  }
  complete(
    params: LLMCompletionParamsStreaming,
  ): Promise<AsyncIterable<CompletionResponse>>;
  complete(
    params: LLMCompletionParamsNonStreaming,
  ): Promise<CompletionResponse>;
  async complete(params: unknown): Promise<unknown> {
    throw new Error("Method not implemented.");
  }
}
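
For context, a hypothetical consumer of this fixture (not part of the commit) would exercise both `chat` overloads like so, assuming an ESM module with top-level await:

```ts
import { OpenAI } from "./open_ai.js";

const llm = new OpenAI();

// Non-streaming overload: resolves to a single faker-generated message.
const response = await llm.chat({
  messages: [{ content: "Hello", role: "user" }],
});
console.log(response.message.content);

// Streaming overload: yields ChatResponseChunk objects with a string delta.
const stream = await llm.chat({
  stream: true,
  messages: [{ content: "Hello", role: "user" }],
});
for await (const chunk of stream) {
  console.log(chunk.delta);
}
```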
36 changes: 36 additions & 0 deletions packages/core/e2e/mock-module.js
@@ -0,0 +1,36 @@
/**
 * Module resolution hook: redirects modules resolved from the package's
 * dist directory to the corresponding fixture file, when one exists.
 */
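// Example: a request resolved to <pkg>/dist/llm/open_ai.js is redirected to
// ./fixtures/llm/open_ai.ts because that fixture file exists.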
import { stat } from "node:fs/promises";
import { join, relative } from "node:path";
import { fileURLToPath, pathToFileURL } from "node:url";
const packageDistDir = fileURLToPath(new URL("../dist", import.meta.url));
const fixturesDir = fileURLToPath(new URL("./fixtures", import.meta.url));

export async function resolve(specifier, context, nextResolve) {
  const result = await nextResolve(specifier, context);
  if (result.format === "builtin" || result.url.startsWith("node:")) {
    return result;
  }
  const targetUrl = fileURLToPath(result.url).replace(/\.js$/, ".ts");
  const relativePath = relative(packageDistDir, targetUrl);
  if (relativePath.startsWith(".") || relativePath.startsWith("/")) {
    return result;
  }
  const url = pathToFileURL(join(fixturesDir, relativePath)).toString();
  const exist = await stat(fileURLToPath(url))
    .then((stat) => stat.isFile())
    .catch((err) => {
      if (err.code === "ENOENT") {
        return false;
      }
      throw err;
    });
  if (!exist) {
    return result;
  }
  return {
    url,
    format: "module",
  };
}
3 changes: 3 additions & 0 deletions packages/core/e2e/mock-register.js
@@ -0,0 +1,3 @@
import { register } from "node:module";
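// Install ./mock-module.js as a module customization hook via Node's
// `module.register` API, so fixture files are swapped in at resolve time.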

register("./mock-module.js", import.meta.url);
139 changes: 139 additions & 0 deletions packages/core/e2e/node/basic.e2e.ts
@@ -0,0 +1,139 @@
/* eslint-disable @typescript-eslint/no-floating-promises */
import { consola } from "consola";
import {
  OpenAI,
  OpenAIAgent,
  Settings,
  type LLM,
  type LLMEndEvent,
  type LLMStartEvent,
} from "llamaindex";
import { ok } from "node:assert";
import type { WriteStream } from "node:fs";
import { createWriteStream } from "node:fs";
import { mkdir } from "node:fs/promises";
import { join } from "node:path";
import { after, before, beforeEach, describe, test } from "node:test";
import { inspect } from "node:util";

let llm: LLM;
let fsStream: WriteStream;

// Each run appends LLM start/end events to a timestamped log file under ../logs.
before(async () => {
  const logUrl = new URL(
    join(
      "..",
      "logs",
      `basic.e2e.${new Date().toISOString().replace(/:/g, "-").replace(/\./g, "-")}.log`,
    ),
    import.meta.url,
  );
  await mkdir(new URL(".", logUrl), { recursive: true });
  fsStream = createWriteStream(logUrl, {
    encoding: "utf-8",
  });
});

after(() => {
  fsStream.end();
});

beforeEach((s) => {
  fsStream.write("start: " + s.name + "\n");
});

const llmEventStartHandler = (event: LLMStartEvent) => {
  const { payload } = event.detail;
  fsStream.write(
    "llmEventStart: " +
      inspect(payload, {
        depth: Infinity,
      }) +
      "\n",
  );
};

const llmEventEndHandler = (event: LLMEndEvent) => {
  const { payload } = event.detail;
  fsStream.write(
    "llmEventEnd: " +
      inspect(payload, {
        depth: Infinity,
      }) +
      "\n",
  );
};

// Share one OpenAI instance across all tests; with mock-register.js loaded,
// this class resolves to the fixture mock instead of the real client.
before(() => {
  Settings.llm = new OpenAI({
    model: "gpt-3.5-turbo",
  });
  llm = Settings.llm;
  Settings.callbackManager.on("llm-start", llmEventStartHandler);
  Settings.callbackManager.on("llm-end", llmEventEndHandler);
});

after(() => {
  Settings.callbackManager.off("llm-start", llmEventStartHandler);
  Settings.callbackManager.off("llm-end", llmEventEndHandler);
});

describe("llm", () => {
  test("llm.chat", async () => {
    const response = await llm.chat({
      messages: [
        {
          content: "Hello",
          role: "user",
        },
      ],
    });
    consola.debug("response:", response);
    ok(typeof response.message.content === "string");
  });

  test("stream llm.chat", async () => {
    const iter = await llm.chat({
      stream: true,
      messages: [
        {
          content: "hello",
          role: "user",
        },
      ],
    });
    for await (const chunk of iter) {
      consola.debug("chunk:", chunk);
      ok(typeof chunk.delta === "string");
    }
  });
});

describe("agent", () => {
  test("agent.chat", async () => {
    const agent = new OpenAIAgent({
      tools: [
        {
          call: async () => {
            return "35 degrees and sunny in San Francisco";
          },
          metadata: {
            name: "Weather",
            description: "Get the weather",
            parameters: {
              type: "object",
              properties: {
                location: { type: "string" },
              },
              required: ["location"],
            },
          },
        },
      ],
    });
    const result = await agent.chat({
      message: "What is the weather in San Francisco?",
    });
    consola.debug("response:", result.response);
    ok(typeof result.response === "string");
  });
});
16 changes: 16 additions & 0 deletions packages/core/e2e/package.json
@@ -0,0 +1,16 @@
{
  "name": "@llamaindex/core-e2e",
  "private": true,
  "version": "0.0.2",
  "type": "module",
  "scripts": {
    "e2e": "node --import tsx --import ./mock-register.js --test ./node/*.e2e.ts",
    "e2e:nomock": "node --import tsx --test ./node/*.e2e.ts"
  },
  "devDependencies": {
    "@faker-js/faker": "^8.4.1",
    "consola": "^3.2.3",
    "llamaindex": "workspace:*",
    "tsx": "^4.7.2"
  }
}
23 changes: 23 additions & 0 deletions packages/core/e2e/tsconfig.json
@@ -0,0 +1,23 @@
{
  "extends": "../../../tsconfig.json",
  "compilerOptions": {
    "outDir": "./lib",
    "module": "node16",
    "moduleResolution": "node16",
    "target": "ESNext"
  },
  "include": [
    "./**/*.ts",
    "./mock-module.js",
    "./mock-register.js",
    "./fixtures"
  ],
  "references": [
    {
      "path": "../../core/tsconfig.json"
    },
    {
      "path": "../../env/tsconfig.json"
    }
  ]
}
10 changes: 9 additions & 1 deletion packages/core/src/agent/openai/base.ts
@@ -1,3 +1,4 @@
+import { Settings } from "../../Settings.js";
 import type { ChatMessage } from "../../llm/index.js";
 import { OpenAI } from "../../llm/index.js";
 import type { ObjectRetriever } from "../../objects/base.js";
@@ -32,7 +33,14 @@ export class OpenAIAgent extends AgentRunner {
     toolRetriever,
     systemPrompt,
   }: OpenAIAgentParams) {
-    llm = llm ?? new OpenAI({ model: "gpt-3.5-turbo-0613" });
+    if (!llm) {
+      if (Settings.llm instanceof OpenAI) {
+        llm = Settings.llm;
+      } else {
+        console.warn("No OpenAI model provided, creating a new one");
+        llm = new OpenAI({ model: "gpt-3.5-turbo-0613" });
+      }
+    }

     if (systemPrompt) {
       if (prefixMessages) {
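
The upshot of this change: when no `llm` is passed, `OpenAIAgent` now reuses `Settings.llm` if it is an `OpenAI` instance instead of silently constructing a new client. A rough sketch of the resulting behavior, using the public exports seen in `basic.e2e.ts`:

```ts
import { OpenAI, OpenAIAgent, Settings } from "llamaindex";

// Reused: Settings.llm is an OpenAI instance, so the agent adopts it.
Settings.llm = new OpenAI({ model: "gpt-3.5-turbo" });
const agent = new OpenAIAgent({ tools: [] });

// Fallback: if Settings.llm is unset or not an OpenAI, the constructor
// warns and creates a new OpenAI({ model: "gpt-3.5-turbo-0613" }).
```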