Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[OpenAI] Update samples #25941

Merged
merged 1 commit into from
May 23, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion sdk/openai/openai/samples-dev/chatCompletions.ts
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ export async function main() {
console.log("== Chat Completions Sample ==");

const client = new OpenAIClient(endpoint, new AzureKeyCredential(azureApiKey));
const deploymentId = "gpt-3.5-turbo";
const deploymentId = "gpt-35-turbo";
const result = await client.getChatCompletions(deploymentId, messages);

for (const choice of result.choices) {
Expand Down
2 changes: 1 addition & 1 deletion sdk/openai/openai/samples-dev/completions.ts
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ export async function main() {

const client = new OpenAIClient(endpoint, new AzureKeyCredential(azureApiKey));
const deploymentId = "text-davinci-003";
const result = await client.getCompletions(deploymentId, prompt);
const result = await client.getCompletions(deploymentId, prompt, { maxTokens: 128 });

for (const choice of result.choices) {
console.log(choice.text);
Expand Down
44 changes: 44 additions & 0 deletions sdk/openai/openai/samples-dev/completionsRest.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

/**
 * Demonstrates how to get completions for the provided prompt.
 *
 * @summary get completions.
 * @azsdk-weight 100
 */

import { AzureKeyCredential } from "@azure/core-auth";
import OpenAIClient, { isUnexpected } from "@azure/openai/rest";

// Load the .env file if it exists
import * as dotenv from "dotenv";
dotenv.config();

// You will need to set these environment variables or edit the following values
const endpoint = process.env["ENDPOINT"] || "<endpoint>";
const azureApiKey = process.env["AZURE_API_KEY"] || "<api key>";

const prompt = ["What is Azure OpenAI?"];

export async function main() {
  console.log("== Get completions Sample ==");

  // The REST-level client is created via the factory default export (no `new`).
  const client = OpenAIClient(endpoint, new AzureKeyCredential(azureApiKey));
  const deploymentId = "text-davinci-003";

  const response = await client
    .path("/deployments/{deploymentId}/completions", deploymentId)
    .post({ body: { prompt, max_tokens: 128 } });

  // Non-2xx responses surface here as error-shaped bodies; rethrow them.
  if (isUnexpected(response)) {
    throw response;
  }

  for (const { text } of response.body.choices) {
    console.log(text);
  }
}

main().catch((err) => {
  console.error("The sample encountered an error:", err);
});
4 changes: 2 additions & 2 deletions sdk/openai/openai/samples-dev/listChatCompletions.ts
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,8 @@ export async function main() {
console.log("== Streaming Chat Completions Sample ==");

const client = new OpenAIClient(endpoint, new AzureKeyCredential(azureApiKey));
const deploymentId = "gpt-3.5-turbo";
const events = await client.listChatCompletions(deploymentId, messages);
const deploymentId = "gpt-35-turbo";
const events = await client.listChatCompletions(deploymentId, messages, { maxTokens: 128 });

for await (const event of events) {
for (const choice of event.choices) {
Expand Down
2 changes: 1 addition & 1 deletion sdk/openai/openai/samples-dev/listCompletions.ts
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ export async function main() {

const client = new OpenAIClient(endpoint, new AzureKeyCredential(azureApiKey));
const deploymentId = "text-davinci-003";
const events = await client.listCompletions(deploymentId, prompt);
const events = await client.listCompletions(deploymentId, prompt, { maxTokens: 128 });

for await (const event of events) {
for (const choice of event.choices) {
Expand Down
2 changes: 1 addition & 1 deletion sdk/openai/openai/samples-dev/openAi.ts
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ export async function main() {

const client = new OpenAIClient(new OpenAIKeyCredential(openApiKey));
const model = "text-davinci-003";
const result = await client.getCompletions(model, prompt);
const result = await client.getCompletions(model, prompt, { maxTokens: 128 });

for (const choice of result.choices) {
console.log(choice.text);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ async function main() {
console.log("== Chat Completions Sample ==");

const client = new OpenAIClient(endpoint, new AzureKeyCredential(azureApiKey));
const deploymentId = "gpt-3.5-turbo";
const deploymentId = "gpt-35-turbo";
const result = await client.getChatCompletions(deploymentId, messages);

for (const choice of result.choices) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ async function main() {

const client = new OpenAIClient(endpoint, new AzureKeyCredential(azureApiKey));
const deploymentId = "text-davinci-003";
const result = await client.getCompletions(deploymentId, prompt);
const result = await client.getCompletions(deploymentId, prompt, { maxTokens: 128 });

for (const choice of result.choices) {
console.log(choice.text);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,8 +27,8 @@ async function main() {
console.log("== Streaming Chat Completions Sample ==");

const client = new OpenAIClient(endpoint, new AzureKeyCredential(azureApiKey));
const deploymentId = "gpt-3.5-turbo";
const events = await client.listChatCompletions(deploymentId, messages);
const deploymentId = "gpt-35-turbo";
const events = await client.listChatCompletions(deploymentId, messages, { maxTokens: 128 });

for await (const event of events) {
for (const choice of event.choices) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ async function main() {

const client = new OpenAIClient(endpoint, new AzureKeyCredential(azureApiKey));
const deploymentId = "text-davinci-003";
const events = await client.listCompletions(deploymentId, prompt);
const events = await client.listCompletions(deploymentId, prompt, { maxTokens: 128 });

for await (const event of events) {
for (const choice of event.choices) {
Expand Down
4 changes: 2 additions & 2 deletions sdk/openai/openai/samples/v1-beta/javascript/openAi.js
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
// Licensed under the MIT License.

/**
* Demonstrates how to get completions for the provided prompt using OpenAI API.
 * Demonstrates how to get completions for the provided prompt using the OpenAI hosted service.
*
* @summary get completions using the OpenAI API.
*/
Expand All @@ -24,7 +24,7 @@ async function main() {

const client = new OpenAIClient(new OpenAIKeyCredential(openApiKey));
const model = "text-davinci-003";
const result = await client.getCompletions(model, prompt);
const result = await client.getCompletions(model, prompt, { maxTokens: 128 });

for (const choice of result.choices) {
console.log(choice.text);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ export async function main() {
console.log("== Chat Completions Sample ==");

const client = new OpenAIClient(endpoint, new AzureKeyCredential(azureApiKey));
const deploymentId = "gpt-3.5-turbo";
const deploymentId = "gpt-35-turbo";
const result = await client.getChatCompletions(deploymentId, messages);

for (const choice of result.choices) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ export async function main() {

const client = new OpenAIClient(endpoint, new AzureKeyCredential(azureApiKey));
const deploymentId = "text-davinci-003";
const result = await client.getCompletions(deploymentId, prompt);
const result = await client.getCompletions(deploymentId, prompt, { maxTokens: 128 });

for (const choice of result.choices) {
console.log(choice.text);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,8 +28,8 @@ export async function main() {
console.log("== Streaming Chat Completions Sample ==");

const client = new OpenAIClient(endpoint, new AzureKeyCredential(azureApiKey));
const deploymentId = "gpt-3.5-turbo";
const events = await client.listChatCompletions(deploymentId, messages);
const deploymentId = "gpt-35-turbo";
const events = await client.listChatCompletions(deploymentId, messages, { maxTokens: 128 });

for await (const event of events) {
for (const choice of event.choices) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ export async function main() {

const client = new OpenAIClient(endpoint, new AzureKeyCredential(azureApiKey));
const deploymentId = "text-davinci-003";
const events = await client.listCompletions(deploymentId, prompt);
const events = await client.listCompletions(deploymentId, prompt, { maxTokens: 128 });

for await (const event of events) {
for (const choice of event.choices) {
Expand Down
4 changes: 2 additions & 2 deletions sdk/openai/openai/samples/v1-beta/typescript/src/openAi.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
// Licensed under the MIT License.

/**
* Demonstrates how to get completions for the provided prompt using OpenAI API.
 * Demonstrates how to get completions for the provided prompt using the OpenAI hosted service.
*
* @summary get completions using the OpenAI API.
*/
Expand All @@ -24,7 +24,7 @@ export async function main() {

const client = new OpenAIClient(new OpenAIKeyCredential(openApiKey));
const model = "text-davinci-003";
const result = await client.getCompletions(model, prompt);
const result = await client.getCompletions(model, prompt, { maxTokens: 128 });

for (const choice of result.choices) {
console.log(choice.text);
Expand Down