}}
}
@@ -79,7 +79,8 @@ to a shiny session, escaping shiny's reactivity.
\subsection{Arguments}{
\if{html}{\out{
}}
\describe{
-\item{\code{parsed_event}}{An already parsed server-sent event to append to the events field.}
+\item{\code{parsed_event}}{An already parsed server-sent event to append to the
+events field.}
}
\if{html}{\out{
}}
}
diff --git a/man/chat.Rd b/man/chat.Rd
index 9121f404..24c86afc 100644
--- a/man/chat.Rd
+++ b/man/chat.Rd
@@ -15,6 +15,7 @@ chat(
task = getOption("gptstudio.task", "coding"),
custom_prompt = NULL,
process_response = FALSE,
+ session = NULL,
...
)
}
@@ -62,6 +63,8 @@ response. If \code{TRUE}, the response will be passed to
\code{gptstudio_response_process()} for further processing. Defaults to \code{FALSE}.
Refer to \code{gptstudio_response_process()} for more details.}
+\item{session}{An optional parameter for a shiny session object.}
+
\item{...}{Reserved for future use.}
}
\value{
diff --git a/man/create_ide_matching_colors.Rd b/man/create_ide_matching_colors.Rd
index 33bd38e4..233ebfa9 100644
--- a/man/create_ide_matching_colors.Rd
+++ b/man/create_ide_matching_colors.Rd
@@ -4,7 +4,10 @@
\alias{create_ide_matching_colors}
\title{Chat message colors in RStudio}
\usage{
-create_ide_matching_colors(role, ide_colors = get_ide_theme_info())
+create_ide_matching_colors(
+ role = c("user", "assistant"),
+ ide_colors = get_ide_theme_info()
+)
}
\arguments{
\item{role}{The role of the message author}
diff --git a/man/create_tmp_job_script.Rd b/man/create_tmp_job_script.Rd
deleted file mode 100644
index b681cfdf..00000000
--- a/man/create_tmp_job_script.Rd
+++ /dev/null
@@ -1,39 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/addin_chatgpt.R
-\name{create_tmp_job_script}
-\alias{create_tmp_job_script}
-\title{Create a temporary job script}
-\usage{
-create_tmp_job_script(appDir, port, host)
-}
-\arguments{
-\item{appDir}{The application to run. Should be one of the following:
-\itemize{
-\item A directory containing \code{server.R}, plus, either \code{ui.R} or
-a \code{www} directory that contains the file \code{index.html}.
-\item A directory containing \code{app.R}.
-\item An \code{.R} file containing a Shiny application, ending with an
-expression that produces a Shiny app object.
-\item A list with \code{ui} and \code{server} components.
-\item A Shiny app object created by \code{\link[shiny:shinyApp]{shinyApp()}}.
-}}
-
-\item{port}{The TCP port that the application should listen on. If the
-\code{port} is not specified, and the \code{shiny.port} option is set (with
-\code{options(shiny.port = XX)}), then that port will be used. Otherwise,
-use a random port between 3000:8000, excluding ports that are blocked
-by Google Chrome for being considered unsafe: 3659, 4045, 5060,
-5061, 6000, 6566, 6665:6669 and 6697. Up to twenty random
-ports will be tried.}
-
-\item{host}{The IPv4 address that the application should listen on. Defaults
-to the \code{shiny.host} option, if set, or \code{"127.0.0.1"} if not. See
-Details.}
-}
-\value{
-A string containing the path of a temporary job script
-}
-\description{
-This function creates a temporary R script file that runs the Shiny
-application from the specified directory with the specified port and host.
-}
diff --git a/man/encode_image.Rd b/man/encode_image.Rd
new file mode 100644
index 00000000..bf55f48d
--- /dev/null
+++ b/man/encode_image.Rd
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/service-openai_api_calls.R
+\name{encode_image}
+\alias{encode_image}
+\title{Encode an image file to base64}
+\usage{
+encode_image(image_path)
+}
+\arguments{
+\item{image_path}{String containing the path to the image file}
+}
+\value{
+A base64 encoded string of the image
+}
+\description{
+Encode an image file to base64
+}
diff --git a/man/get_ide_theme_info.Rd b/man/get_ide_theme_info.Rd
index 74b3d251..1e07fa8b 100644
--- a/man/get_ide_theme_info.Rd
+++ b/man/get_ide_theme_info.Rd
@@ -2,16 +2,24 @@
% Please edit documentation in R/mod_app.R
\name{get_ide_theme_info}
\alias{get_ide_theme_info}
-\title{Get IDE theme information.}
+\title{Get IDE Theme Information}
\usage{
get_ide_theme_info()
}
\value{
-A list with three components:
-\item{is_dark}{A boolean indicating whether the current IDE theme is dark.}
-\item{bg}{The current IDE theme's background color.}
-\item{fg}{The current IDE theme's foreground color.}
+A list with the following components:
+\item{is_dark}{A logical indicating whether the current IDE theme is dark.}
+\item{bg}{A character string representing the background color of the IDE theme in hex format.}
+\item{fg}{A character string representing the foreground color of the IDE theme in hex format.}
+
+If RStudio is unavailable, returns the fallback theme details.
}
\description{
-This function returns a list with the current IDE theme's information.
+Retrieves the current RStudio IDE theme information including whether it is a dark theme,
+and the background and foreground colors in hexadecimal format.
+}
+\examples{
+theme_info <- get_ide_theme_info()
+print(theme_info)
+
}
diff --git a/man/gptstudio_chat.Rd b/man/gptstudio_chat.Rd
index 4eb70ec5..c89ad472 100644
--- a/man/gptstudio_chat.Rd
+++ b/man/gptstudio_chat.Rd
@@ -2,25 +2,36 @@
% Please edit documentation in R/addin_chatgpt.R
\name{gptstudio_chat}
\alias{gptstudio_chat}
-\title{Run Chat GPT
-Run the Chat GPT Shiny App as a background job and show it in the viewer pane}
+\title{Run GPTStudio Chat App}
\usage{
gptstudio_chat(host = getOption("shiny.host", "127.0.0.1"))
}
\arguments{
-\item{host}{The IPv4 address that the application should listen on. Defaults
-to the \code{shiny.host} option, if set, or \code{"127.0.0.1"} if not. See
-Details.}
+\item{host}{A character string specifying the host on which to run the app.
+Defaults to the value of \code{getOption("shiny.host", "127.0.0.1")}.}
}
\value{
-This function has no return value.
+This function does not return a value. It runs the Shiny app as a side effect.
}
\description{
-Run Chat GPT
-Run the Chat GPT Shiny App as a background job and show it in the viewer pane
+This function initializes and runs the Chat GPT Shiny App as a background job
+in RStudio and opens it in the viewer pane or browser window.
+}
+\details{
+The function performs the following steps:
+\enumerate{
+\item Verifies that RStudio API is available.
+\item Finds an available port for the Shiny app.
+\item Creates a temporary directory for the app files.
+\item Runs the app as a background job in RStudio.
+\item Opens the app in the RStudio viewer pane or browser window.
+}
+}
+\note{
+This function is designed to work within the RStudio IDE and requires
+the rstudioapi package.
}
\examples{
-# Call the function as an RStudio addin
\dontrun{
gptstudio_chat()
}
diff --git a/man/run_chatgpt_app.Rd b/man/gptstudio_run_chat_app.Rd
similarity index 85%
rename from man/run_chatgpt_app.Rd
rename to man/gptstudio_run_chat_app.Rd
index 557b92ec..91b824d6 100644
--- a/man/run_chatgpt_app.Rd
+++ b/man/gptstudio_run_chat_app.Rd
@@ -1,11 +1,12 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_chatgpt_app.R
-\name{run_chatgpt_app}
-\alias{run_chatgpt_app}
+\name{gptstudio_run_chat_app}
+\alias{gptstudio_run_chat_app}
\title{Run the ChatGPT app}
\usage{
-run_chatgpt_app(
+gptstudio_run_chat_app(
ide_colors = get_ide_theme_info(),
+ code_theme_url = get_highlightjs_theme(),
host = getOption("shiny.host", "127.0.0.1"),
port = getOption("shiny.port")
)
@@ -13,6 +14,8 @@ run_chatgpt_app(
\arguments{
\item{ide_colors}{List containing the colors of the IDE theme.}
+\item{code_theme_url}{URL to the highlight.js theme}
+
\item{host}{The IPv4 address that the application should listen on. Defaults
to the \code{shiny.host} option, if set, or \code{"127.0.0.1"} if not. See
Details.}
diff --git a/man/gptstudio_sitrep.Rd b/man/gptstudio_sitrep.Rd
index f9541066..ec88501c 100644
--- a/man/gptstudio_sitrep.Rd
+++ b/man/gptstudio_sitrep.Rd
@@ -19,7 +19,8 @@ This function prints out the current configuration settings for gptstudio and
checks API connections if verbose is TRUE.
}
\examples{
+\dontrun{
gptstudio_sitrep(verbose = FALSE) # Print basic settings, no API checks
gptstudio_sitrep() # Print settings and check API connections
-
+}
}
diff --git a/man/input_audio_clip.Rd b/man/input_audio_clip.Rd
new file mode 100644
index 00000000..461a2c29
--- /dev/null
+++ b/man/input_audio_clip.Rd
@@ -0,0 +1,53 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/record-audio.R
+\name{input_audio_clip}
+\alias{input_audio_clip}
+\title{An audio clip input control that records short audio clips from the
+microphone}
+\usage{
+input_audio_clip(
+ id,
+ record_label = "Record",
+ stop_label = "Stop",
+ reset_on_record = TRUE,
+ mime_type = NULL,
+ audio_bits_per_second = NULL,
+ show_mic_settings = TRUE,
+ ...
+)
+}
+\arguments{
+\item{id}{The input slot that will be used to access the value.}
+
+\item{record_label}{Display label for the "record" control, or NULL for no
+label. Default is 'Record'.}
+
+\item{stop_label}{Display label for the "stop" control, or NULL for no label.
+Default is 'Stop'.}
+
+\item{reset_on_record}{Whether to reset the audio clip input value when
+recording starts. If TRUE, the audio clip input value will become NULL at
+the moment the Record button is pressed; if FALSE, the value will not
+change until the user stops recording. Default is TRUE.}
+
+\item{mime_type}{The MIME type of the audio clip to record. By default, this
+is NULL, which means the browser will choose a suitable MIME type for audio
+recording. Common MIME types include 'audio/webm' and 'audio/mp4'.}
+
+\item{audio_bits_per_second}{The target audio bitrate in bits per second. By
+default, this is NULL, which means the browser will choose a suitable
+bitrate for audio recording. This is only a suggestion; the browser may
+choose a different bitrate.}
+
+\item{show_mic_settings}{Whether to show the microphone settings in the
+settings menu. Default is TRUE.}
+
+\item{...}{Additional parameters to pass to the underlying HTML tag.}
+}
+\value{
+An audio clip input control that can be added to a UI definition.
+}
+\description{
+An audio clip input control that records short audio clips from the
+microphone
+}
diff --git a/man/mod_app_ui.Rd b/man/mod_app_ui.Rd
index 7e7b8044..37c8ba3f 100644
--- a/man/mod_app_ui.Rd
+++ b/man/mod_app_ui.Rd
@@ -4,12 +4,18 @@
\alias{mod_app_ui}
\title{App UI}
\usage{
-mod_app_ui(id, ide_colors = get_ide_theme_info())
+mod_app_ui(
+ id,
+ ide_colors = get_ide_theme_info(),
+ code_theme_url = get_highlightjs_theme()
+)
}
\arguments{
\item{id}{id of the module}
\item{ide_colors}{List containing the colors of the IDE theme.}
+
+\item{code_theme_url}{URL to the highlight.js theme}
}
\description{
App UI
diff --git a/man/mod_chat_ui.Rd b/man/mod_chat_ui.Rd
index f9bc9ca5..5050d3ec 100644
--- a/man/mod_chat_ui.Rd
+++ b/man/mod_chat_ui.Rd
@@ -4,12 +4,18 @@
\alias{mod_chat_ui}
\title{Chat UI}
\usage{
-mod_chat_ui(id, translator = create_translator())
+mod_chat_ui(
+ id,
+ translator = create_translator(),
+ code_theme_url = get_highlightjs_theme()
+)
}
\arguments{
\item{id}{id of the module}
\item{translator}{A Translator from \code{shiny.i18n::Translator}}
+
+\item{code_theme_url}{URL to the highlight.js theme}
}
\description{
Chat UI
diff --git a/man/multimodal_dep.Rd b/man/multimodal_dep.Rd
new file mode 100644
index 00000000..974fb87b
--- /dev/null
+++ b/man/multimodal_dep.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/record-audio.R
+\name{multimodal_dep}
+\alias{multimodal_dep}
+\title{Create HTML dependency for multimodal component}
+\usage{
+multimodal_dep()
+}
+\description{
+Create HTML dependency for multimodal component
+}
diff --git a/man/open_bg_shinyapp.Rd b/man/open_bg_shinyapp.Rd
deleted file mode 100644
index 313b2994..00000000
--- a/man/open_bg_shinyapp.Rd
+++ /dev/null
@@ -1,22 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/addin_chatgpt.R
-\name{open_bg_shinyapp}
-\alias{open_bg_shinyapp}
-\title{Open browser to local Shiny app}
-\usage{
-open_bg_shinyapp(host, port)
-}
-\arguments{
-\item{host}{A character string representing the IP address or domain name of
-the server where the Shiny app is hosted.}
-
-\item{port}{An integer representing the port number on which the Shiny app is
-hosted.}
-}
-\value{
-None (opens the Shiny app in the viewer pane or browser window)
-}
-\description{
-This function takes in the host and port of a local Shiny app and opens the
-app in the default browser.
-}
diff --git a/man/parse_data_uri.Rd b/man/parse_data_uri.Rd
new file mode 100644
index 00000000..d93d273d
--- /dev/null
+++ b/man/parse_data_uri.Rd
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/api-transcribe-audio.R
+\name{parse_data_uri}
+\alias{parse_data_uri}
+\title{Parse a Data URI}
+\usage{
+parse_data_uri(data_uri)
+}
+\arguments{
+\item{data_uri}{A string. The data URI to parse.}
+}
+\value{
+A list with two elements: 'mime_type' and 'data'.
+}
+\description{
+This function parses a data URI and returns the MIME type and decoded data.
+}
diff --git a/man/query_openai_api.Rd b/man/query_api_openai.Rd
similarity index 91%
rename from man/query_openai_api.Rd
rename to man/query_api_openai.Rd
index d1e33175..a83e31df 100644
--- a/man/query_openai_api.Rd
+++ b/man/query_api_openai.Rd
@@ -1,10 +1,10 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/service-openai_api_calls.R
-\name{query_openai_api}
-\alias{query_openai_api}
+\name{query_api_openai}
+\alias{query_api_openai}
\title{A function that sends a request to the OpenAI API and returns the response.}
\usage{
-query_openai_api(
+query_api_openai(
task,
request_body,
openai_api_key = Sys.getenv("OPENAI_API_KEY")
diff --git a/man/random_port.Rd b/man/random_port.Rd
deleted file mode 100644
index 6e341606..00000000
--- a/man/random_port.Rd
+++ /dev/null
@@ -1,14 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/addin_chatgpt.R
-\name{random_port}
-\alias{random_port}
-\title{Generate a random safe port number}
-\usage{
-random_port()
-}
-\value{
-A single integer representing the randomly selected safe port number.
-}
-\description{
-This function generates a random port allowed by shiny::runApp.
-}
diff --git a/man/run_app_as_bg_job.Rd b/man/run_app_as_bg_job.Rd
deleted file mode 100644
index 93dfb0e4..00000000
--- a/man/run_app_as_bg_job.Rd
+++ /dev/null
@@ -1,42 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/addin_chatgpt.R
-\name{run_app_as_bg_job}
-\alias{run_app_as_bg_job}
-\title{Run an R Shiny app in the background}
-\usage{
-run_app_as_bg_job(appDir = ".", job_name, host, port)
-}
-\arguments{
-\item{appDir}{The application to run. Should be one of the following:
-\itemize{
-\item A directory containing \code{server.R}, plus, either \code{ui.R} or
-a \code{www} directory that contains the file \code{index.html}.
-\item A directory containing \code{app.R}.
-\item An \code{.R} file containing a Shiny application, ending with an
-expression that produces a Shiny app object.
-\item A list with \code{ui} and \code{server} components.
-\item A Shiny app object created by \code{\link[shiny:shinyApp]{shinyApp()}}.
-}}
-
-\item{job_name}{The name of the background job to be created}
-
-\item{host}{The IPv4 address that the application should listen on. Defaults
-to the \code{shiny.host} option, if set, or \code{"127.0.0.1"} if not. See
-Details.}
-
-\item{port}{The TCP port that the application should listen on. If the
-\code{port} is not specified, and the \code{shiny.port} option is set (with
-\code{options(shiny.port = XX)}), then that port will be used. Otherwise,
-use a random port between 3000:8000, excluding ports that are blocked
-by Google Chrome for being considered unsafe: 3659, 4045, 5060,
-5061, 6000, 6566, 6665:6669 and 6697. Up to twenty random
-ports will be tried.}
-}
-\value{
-This function returns nothing because is meant to run an app as a
-side effect.
-}
-\description{
-This function runs an R Shiny app as a background job using the specified
-directory, name, host, and port.
-}
diff --git a/man/stream_chat_completion.Rd b/man/stream_chat_completion.Rd
index 152cf9dd..25ed5736 100644
--- a/man/stream_chat_completion.Rd
+++ b/man/stream_chat_completion.Rd
@@ -5,8 +5,8 @@
\title{Stream Chat Completion}
\usage{
stream_chat_completion(
- messages = NULL,
- element_callback = cat,
+ messages = list(list(role = "user", content = "Hi there!")),
+ element_callback = openai_handler,
model = "gpt-4o-mini",
openai_api_key = Sys.getenv("OPENAI_API_KEY")
)
@@ -27,7 +27,7 @@ Please note that the OpenAI API key is sensitive information and should be
treated accordingly.}
}
\value{
-The same as \code{curl::curl_fetch_stream}
+The same as \code{httr2::req_perform_stream}
}
\description{
\code{stream_chat_completion} sends the prepared chat completion request to the
diff --git a/man/transcribe_audio.Rd b/man/transcribe_audio.Rd
new file mode 100644
index 00000000..5de9b3db
--- /dev/null
+++ b/man/transcribe_audio.Rd
@@ -0,0 +1,29 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/api-transcribe-audio.R
+\name{transcribe_audio}
+\alias{transcribe_audio}
+\title{Transcribe Audio from Data URI Using OpenAI's Whisper Model}
+\usage{
+transcribe_audio(audio_input, api_key = Sys.getenv("OPENAI_API_KEY"))
+}
+\arguments{
+\item{audio_input}{A string. The audio data in data URI format.}
+
+\item{api_key}{A string. Your OpenAI API key. Defaults to the OPENAI_API_KEY
+environment variable.}
+}
+\value{
+A string containing the transcribed text.
+}
+\description{
+This function takes an audio file in data URI format, converts it to WAV, and
+sends it to OpenAI's transcription API to get the transcribed text.
+}
+\examples{
+\dontrun{
+audio_uri <- "data:audio/webm;base64,SGVsbG8gV29ybGQ=" # Example data URI
+transcription <- transcribe_audio(audio_uri)
+print(transcription)
+}
+
+}
diff --git a/revdep/library.noindex/gptstudio/new/gptstudio/mod_app/app.R b/revdep/library.noindex/gptstudio/new/gptstudio/mod_app/app.R
index ba4f650e..00090d61 100644
--- a/revdep/library.noindex/gptstudio/new/gptstudio/mod_app/app.R
+++ b/revdep/library.noindex/gptstudio/new/gptstudio/mod_app/app.R
@@ -1 +1 @@
-gptstudio::run_chatgpt_app()
+gptstudio::gptstudio_run_chat_app()
diff --git a/revdep/library.noindex/gptstudio/old/gptstudio/mod_app/app.R b/revdep/library.noindex/gptstudio/old/gptstudio/mod_app/app.R
index ba4f650e..00090d61 100644
--- a/revdep/library.noindex/gptstudio/old/gptstudio/mod_app/app.R
+++ b/revdep/library.noindex/gptstudio/old/gptstudio/mod_app/app.R
@@ -1 +1 @@
-gptstudio::run_chatgpt_app()
+gptstudio::gptstudio_run_chat_app()
diff --git a/revdep/library.noindex/gptstudio/old/gptstudio/shiny/app.R b/revdep/library.noindex/gptstudio/old/gptstudio/shiny/app.R
index ec72302d..0a434a9e 100644
--- a/revdep/library.noindex/gptstudio/old/gptstudio/shiny/app.R
+++ b/revdep/library.noindex/gptstudio/old/gptstudio/shiny/app.R
@@ -16,7 +16,8 @@ chat_card <- bslib::card(
shiny::actionButton(
width = "100%",
inputId = "chat", label = "Chat",
- icon = shiny::icon("robot"), class = "btn-primary"
+ icon = bsicons::bs_icon("robot"),
+ class = "btn-primary"
),
shiny::br(), shiny::br(),
shiny::fluidRow(
@@ -34,7 +35,7 @@ chat_card <- bslib::card(
shiny::actionButton(
width = "100%",
inputId = "clear_history", label = "Clear History",
- icon = shiny::icon("eraser")
+ icon = bsicons::bs_icon("eraser")
),
)
)
diff --git a/tests/testthat/_snaps/addin-chatgpt.md b/tests/testthat/_snaps/addin-chatgpt.md
new file mode 100644
index 00000000..0e60c583
--- /dev/null
+++ b/tests/testthat/_snaps/addin-chatgpt.md
@@ -0,0 +1,12 @@
+# create_temp_app_file creates a valid R script
+
+ Code
+ content
+ Output
+ [1] "ide_colors <- list(editor_theme = \"textmate\", editor_theme_is_dark = FALSE)"
+ [2] " ui <- gptstudio:::mod_app_ui('app', ide_colors, 'https://cdn.jsdelivr.net/gh/highlightjs/cdn-release@11.10.0/build/styles/github-dark.min.css')"
+ [3] " server <- function(input, output, session) {"
+ [4] " gptstudio:::mod_app_server('app', ide_colors)"
+ [5] " }"
+ [6] " shiny::shinyApp(ui, server)"
+
diff --git a/tests/testthat/_snaps/api_skeletons.md b/tests/testthat/_snaps/api_skeletons.md
new file mode 100644
index 00000000..0b2fa2d4
--- /dev/null
+++ b/tests/testthat/_snaps/api_skeletons.md
@@ -0,0 +1,391 @@
+# multiplication works
+
+ Code
+ gptstudio_create_skeleton()
+ Output
+ $url
+ https://api.openai.com/v1/chat/completions
+
+ $api_key
+ [1] "a-fake-key"
+
+ $model
+ [1] "gpt-4o-mini"
+
+ $prompt
+ [1] "Name the top 5 packages in R."
+
+ $history
+ $history[[1]]
+ $history[[1]]$role
+ [1] "system"
+
+ $history[[1]]$content
+ [1] "You are an R chat assistant"
+
+
+
+ $stream
+ [1] TRUE
+
+ $extras
+ list()
+
+ attr(,"class")
+ [1] "gptstudio_request_openai" "gptstudio_request_skeleton"
+
+---
+
+ Code
+ gptstudio_create_skeleton(service = "anthropic")
+ Output
+ $url
+ [1] "https://api.anthropic.com/v1/complete"
+
+ $api_key
+ [1] "a-fake-key"
+
+ $model
+ [1] "gpt-4o-mini"
+
+ $prompt
+ [1] "Name the top 5 packages in R."
+
+ $history
+ $history[[1]]
+ $history[[1]]$role
+ [1] "system"
+
+ $history[[1]]$content
+ [1] "You are an R chat assistant"
+
+
+
+ $stream
+ [1] FALSE
+
+ $extras
+ list()
+
+ attr(,"class")
+ [1] "gptstudio_request_anthropic" "gptstudio_request_skeleton"
+
+---
+
+ Code
+ gptstudio_create_skeleton(service = "cohere")
+ Output
+ $url
+ [1] "https://api.cohere.ai/v1/chat"
+
+ $api_key
+ [1] "a-fake-key"
+
+ $model
+ [1] "gpt-4o-mini"
+
+ $prompt
+ [1] "Name the top 5 packages in R."
+
+ $history
+ $history[[1]]
+ $history[[1]]$role
+ [1] "system"
+
+ $history[[1]]$content
+ [1] "You are an R chat assistant"
+
+
+
+ $stream
+ [1] FALSE
+
+ $extras
+ list()
+
+ attr(,"class")
+ [1] "gptstudio_request_cohere" "gptstudio_request_skeleton"
+
+---
+
+ Code
+ gptstudio_create_skeleton(service = "google")
+ Output
+ $url
+ [1] "https://generativelanguage.googleapis.com/v1beta2/models/"
+
+ $api_key
+ [1] "a-fake-key"
+
+ $model
+ [1] "gpt-4o-mini"
+
+ $prompt
+ [1] "Name the top 5 packages in R."
+
+ $history
+ $history[[1]]
+ $history[[1]]$role
+ [1] "system"
+
+ $history[[1]]$content
+ [1] "You are an R chat assistant"
+
+
+
+ $stream
+ [1] FALSE
+
+ $extras
+ list()
+
+ attr(,"class")
+ [1] "gptstudio_request_google" "gptstudio_request_skeleton"
+
+---
+
+ Code
+ gptstudio_create_skeleton(service = "huggingface")
+ Output
+ $url
+ [1] "https://api-inference.huggingface.co/models"
+
+ $api_key
+ [1] "a-fake-key"
+
+ $model
+ [1] "gpt-4o-mini"
+
+ $prompt
+ [1] "Name the top 5 packages in R."
+
+ $history
+ $history[[1]]
+ $history[[1]]$role
+ [1] "system"
+
+ $history[[1]]$content
+ [1] "You are an R chat assistant"
+
+
+
+ $stream
+ [1] FALSE
+
+ $extras
+ list()
+
+ attr(,"class")
+ [1] "gptstudio_request_huggingface" "gptstudio_request_skeleton"
+
+---
+
+ Code
+ gptstudio_create_skeleton(service = "ollama")
+ Output
+ $url
+ [1] "JUST A PLACEHOLDER"
+
+ $api_key
+ [1] "JUST A PLACEHOLDER"
+
+ $model
+ [1] "gpt-4o-mini"
+
+ $prompt
+ [1] "Name the top 5 packages in R."
+
+ $history
+ $history[[1]]
+ $history[[1]]$role
+ [1] "system"
+
+ $history[[1]]$content
+ [1] "You are an R chat assistant"
+
+
+
+ $stream
+ [1] TRUE
+
+ $extras
+ list()
+
+ attr(,"class")
+ [1] "gptstudio_request_ollama" "gptstudio_request_skeleton"
+
+---
+
+ Code
+ gptstudio_create_skeleton(service = "openai")
+ Output
+ $url
+ https://api.openai.com/v1/chat/completions
+
+ $api_key
+ [1] "a-fake-key"
+
+ $model
+ [1] "gpt-4o-mini"
+
+ $prompt
+ [1] "Name the top 5 packages in R."
+
+ $history
+ $history[[1]]
+ $history[[1]]$role
+ [1] "system"
+
+ $history[[1]]$content
+ [1] "You are an R chat assistant"
+
+
+
+ $stream
+ [1] TRUE
+
+ $extras
+ list()
+
+ attr(,"class")
+ [1] "gptstudio_request_openai" "gptstudio_request_skeleton"
+
+---
+
+ Code
+ gptstudio_create_skeleton(service = "perplexity")
+ Output
+ $url
+ [1] "https://api.perplexity.ai/chat/completions"
+
+ $api_key
+ [1] "a-fake-key"
+
+ $model
+ [1] "gpt-4o-mini"
+
+ $prompt
+ [1] "Name the top 5 packages in R."
+
+ $history
+ $history[[1]]
+ $history[[1]]$role
+ [1] "system"
+
+ $history[[1]]$content
+ [1] "You are an R chat assistant"
+
+
+
+ $stream
+ [1] FALSE
+
+ $extras
+ list()
+
+ attr(,"class")
+ [1] "gptstudio_request_perplexity" "gptstudio_request_skeleton"
+
+---
+
+ Code
+ gptstudio_create_skeleton(service = "azure-openai")
+
+# new_gptstudio_request_skeleton_openai creates correct structure
+
+ Code
+ skeleton <- new_gptstudio_request_skeleton_openai(url = "https://api.openai.com/v1/chat/completions",
+ api_key = "test_key", model = "gpt-4-turbo-preview", prompt = "What is R?",
+ history = list(list(role = "system", content = "You are an R assistant")),
+ stream = TRUE, n = 1)
+ str(skeleton)
+ Output
+ List of 7
+ $ url : chr "https://api.openai.com/v1/chat/completions"
+ $ api_key: chr "test_key"
+ $ model : chr "gpt-4-turbo-preview"
+ $ prompt : chr "What is R?"
+ $ history:List of 1
+ ..$ :List of 2
+ .. ..$ role : chr "system"
+ .. ..$ content: chr "You are an R assistant"
+ $ stream : logi TRUE
+ $ extras : list()
+ - attr(*, "class")= chr [1:2] "gptstudio_request_openai" "gptstudio_request_skeleton"
+
+# new_gptstudio_request_skeleton_huggingface creates correct structure
+
+ Code
+ skeleton <- new_gptstudio_request_skeleton_huggingface(url = "https://api-inference.huggingface.co/models",
+ api_key = "test_key", model = "gpt2", prompt = "What is R?", history = list(
+ list(role = "system", content = "You are an R assistant")), stream = FALSE)
+ str(skeleton)
+ Output
+ List of 7
+ $ url : chr "https://api-inference.huggingface.co/models"
+ $ api_key: chr "test_key"
+ $ model : chr "gpt2"
+ $ prompt : chr "What is R?"
+ $ history:List of 1
+ ..$ :List of 2
+ .. ..$ role : chr "system"
+ .. ..$ content: chr "You are an R assistant"
+ $ stream : logi FALSE
+ $ extras : list()
+ - attr(*, "class")= chr [1:2] "gptstudio_request_huggingface" "gptstudio_request_skeleton"
+
+# validate_skeleton throws error for invalid URL
+
+ Code
+ validate_skeleton(url = 123, api_key = "valid_key", model = "test_model",
+ prompt = "What is R?", history = list(), stream = TRUE)
+ Condition
+ Error in `validate_skeleton()`:
+ ! `url` is not a valid character scalar. It is a .
+
+# validate_skeleton throws error for empty API key
+
+ Code
+ validate_skeleton(url = "https://api.example.com", api_key = "", model = "test_model",
+ prompt = "What is R?", history = list(), stream = TRUE)
+ Condition
+ Error in `validate_skeleton()`:
+ ! `api_key` is not a valid character scalar. It is a .
+
+# validate_skeleton throws error for empty model
+
+ Code
+ validate_skeleton(url = "https://api.example.com", api_key = "valid_key",
+ model = "", prompt = "What is R?", history = list(), stream = TRUE)
+ Condition
+ Error in `validate_skeleton()`:
+ ! `model` is not a valid character scalar. It is a .
+
+# validate_skeleton throws error for non-character prompt
+
+ Code
+ validate_skeleton(url = "https://api.example.com", api_key = "valid_key",
+ model = "test_model", prompt = list("not a string"), history = list(),
+ stream = TRUE)
+ Condition
+ Error in `validate_skeleton()`:
+ ! `prompt` is not a valid character scalar. It is a .
+
+# validate_skeleton throws error for invalid history
+
+ Code
+ validate_skeleton(url = "https://api.example.com", api_key = "valid_key",
+ model = "test_model", prompt = "What is R?", history = "not a list", stream = TRUE)
+ Condition
+ Error in `validate_skeleton()`:
+ ! `history` is not a valid list or NULL. It is a .
+
+# validate_skeleton throws error for non-boolean stream
+
+ Code
+ validate_skeleton(url = "https://api.example.com", api_key = "valid_key",
+ model = "test_model", prompt = "What is R?", history = list(), stream = "not a boolean")
+ Condition
+ Error in `validate_skeleton()`:
+ ! `stream` is not a valid boolean. It is a .
+
diff --git a/tests/testthat/_snaps/models.md b/tests/testthat/_snaps/models.md
index c44bfd9a..65c42258 100644
--- a/tests/testthat/_snaps/models.md
+++ b/tests/testthat/_snaps/models.md
@@ -15,9 +15,10 @@
Code
models
Output
- [1] "command-r" "command-nightly" "command-r-plus"
- [4] "c4ai-aya-23-35b" "command-light-nightly" "c4ai-aya-23-8b"
- [7] "command" "command-light"
+ [1] "c4ai-aya-23-35b" "command-r" "command-r-plus"
+ [4] "command-nightly" "command-light-nightly" "command"
+ [7] "command-r-08-2024" "command-r-plus-08-2024" "command-light"
+ [10] "c4ai-aya-23-8b"
# get_available_models works for google
@@ -30,8 +31,10 @@
[7] "gemini-1.0-pro-001" "gemini-1.0-pro-vision-latest"
[9] "gemini-pro-vision" "gemini-1.5-pro-latest"
[11] "gemini-1.5-pro-001" "gemini-1.5-pro"
- [13] "gemini-1.5-pro-exp-0801" "gemini-1.5-flash-latest"
- [15] "gemini-1.5-flash-001" "gemini-1.5-flash"
- [17] "gemini-1.5-flash-001-tuning" "embedding-001"
- [19] "text-embedding-004" "aqa"
+ [13] "gemini-1.5-pro-exp-0801" "gemini-1.5-pro-exp-0827"
+ [15] "gemini-1.5-flash-latest" "gemini-1.5-flash-001"
+ [17] "gemini-1.5-flash-001-tuning" "gemini-1.5-flash"
+ [19] "gemini-1.5-flash-exp-0827" "gemini-1.5-flash-8b-exp-0827"
+ [21] "embedding-001" "text-embedding-004"
+ [23] "aqa"
diff --git a/tests/testthat/test-addin-chatgpt.R b/tests/testthat/test-addin-chatgpt.R
index 785389d1..91bf8b04 100644
--- a/tests/testthat/test-addin-chatgpt.R
+++ b/tests/testthat/test-addin-chatgpt.R
@@ -1,15 +1,106 @@
-test_that("random_port() works", {
- set.seed(123)
+test_that("find_available_port returns a valid port", {
+ port <- find_available_port()
+ expect_true(port >= 3000 && port <= 8000)
+ expect_false(port %in% c(3659, 4045, 5060, 5061, 6000, 6566, 6665:6669, 6697))
+})
+
+test_that("create_temp_app_file creates a valid R script", {
+ mock_get_ide_theme_info <- function() {
+ list(
+ editor_theme = "textmate",
+ editor_theme_is_dark = FALSE
+ )
+ }
+
+ local_mocked_bindings(
+ get_ide_theme_info = mock_get_ide_theme_info,
+ .package = "gptstudio" # Specify the package explicitly
+ )
+
+ temp_file <- create_temp_app_file()
+
+ expect_true(file.exists(temp_file))
+ expect_true(grepl("\\.R$", temp_file))
+
+ content <- readLines(temp_file)
+ expect_snapshot(content)
+})
+
+test_that("run_app_background creates a job", {
+ mock_job_run_script <- function(...) NULL
+ mock_cli_alert_success <- function(...) NULL
- random_port() %>%
- expect_equal(5466)
+ with_mocked_bindings(
+ jobRunScript = mock_job_run_script,
+ .package = "rstudioapi",
+ {
+ with_mocked_bindings(
+ cli_alert_success = mock_cli_alert_success,
+ .package = "cli",
+ {
+ expect_no_error(run_app_background("test_dir", "test_job", "127.0.0.1", 3000))
+ }
+ )
+ }
+ )
})
-test_that("create_tmp_job_script() returns string", {
- create_tmp_job_script(
- appDir = system.file("shiny", package = "gptstudio"),
- host = "127.0.0.1",
- port = 3838
- ) %>%
- expect_type("character")
+test_that("open_app_in_viewer opens the app correctly", {
+ mock_translate_local_url <- function(...) "http://translated.url"
+ mock_viewer <- function(...) NULL
+ mock_cli_inform <- function(...) NULL
+ mock_cli_alert_info <- function(...) NULL
+ mock_wait_for_bg_app <- function(...) NULL
+
+ with_mocked_bindings(
+ translateLocalUrl = mock_translate_local_url,
+ viewer = mock_viewer,
+ .package = "rstudioapi",
+ {
+ with_mocked_bindings(
+ cli_inform = mock_cli_inform,
+ cli_alert_info = mock_cli_alert_info,
+ .package = "cli",
+ {
+ local_mocked_bindings(
+ wait_for_bg_app = mock_wait_for_bg_app
+ )
+
+ # Test for localhost
+ expect_no_error(open_app_in_viewer("127.0.0.1", 3000))
+
+ # Test for non-localhost
+ expect_no_error(open_app_in_viewer("192.168.1.100", 3000))
+ }
+ )
+ }
+ )
+})
+
+test_that("gptstudio_chat runs the app correctly", {
+ mock_verify_available <- function() NULL
+ mock_find_available_port <- function() 3000
+ mock_create_temp_app_dir <- function() "test_dir"
+ mock_run_app_background <- function(...) NULL
+ mock_open_app_in_viewer <- function(...) NULL
+ mock_version_availalbe <- function() list(mode = "desktop")
+
+ with_mocked_bindings(
+ verifyAvailable = mock_verify_available,
+ versionInfo = mock_version_availalbe,
+ .package = "rstudioapi",
+ {
+ local_mocked_bindings(
+ find_available_port = mock_find_available_port,
+ create_temp_app_dir = mock_create_temp_app_dir,
+ run_app_background = mock_run_app_background,
+ open_app_in_viewer = mock_open_app_in_viewer
+ )
+
+ expect_no_error(gptstudio_chat())
+
+ # Test with custom host
+ expect_no_error(gptstudio_chat(host = "192.168.1.100"))
+ }
+ )
})
diff --git a/tests/testthat/test-api-transcribe-audio.R b/tests/testthat/test-api-transcribe-audio.R
new file mode 100644
index 00000000..c6ac7ccb
--- /dev/null
+++ b/tests/testthat/test-api-transcribe-audio.R
@@ -0,0 +1,56 @@
+test_that("parse_data_uri correctly parses valid data URIs", {
+ # Test case 1: Simple data URI
+ uri1 <- "data:text/plain;base64,SGVsbG8gV29ybGQ="
+ result1 <- parse_data_uri(uri1)
+ expect_equal(result1$mime_type, "text/plain")
+ expect_equal(result1$data, charToRaw("Hello World"))
+
+ # Test case 2: Data URI with padding
+ uri2 <- "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAACklEQVR4nGMAAQAABQABDQottAAAAABJRU5ErkJggg==" # nolint
+ result2 <- parse_data_uri(uri2)
+ expect_equal(result2$mime_type, "image/png")
+ expect_true(length(result2$data) > 0)
+
+ # Test case 3: Data URI without padding
+ uri3 <- "data:audio/mp3;base64,AAAAHGZ0eXBNNEEgAAAAAE00QSBtcDQyaXNvbQ"
+ result3 <- parse_data_uri(uri3)
+ expect_equal(result3$mime_type, "audio/mp3")
+ expect_true(length(result3$data) > 0)
+})
+
+test_that("parse_data_uri handles invalid inputs correctly", {
+ # Test case 4: Invalid data URI format
+ expect_error(parse_data_uri("not a data uri"), "Invalid data URI format")
+
+ # Test case 5: Empty string
+ expect_error(parse_data_uri(""), "Invalid data URI format")
+
+ # Test case 6: NULL input
+ expect_error(parse_data_uri(NULL), "Invalid input: data_uri must be a single character string")
+
+ # Test case 7: Non-character input
+ expect_error(parse_data_uri(123), "Invalid input: data_uri must be a single character string")
+
+ # Test case 8: Character vector with length > 1
+ expect_error(
+ parse_data_uri(c(
+ "data:text/plain;base64,SGVsbG8=",
+ "data:text/plain;base64,V29ybGQ="
+ )),
+ "Invalid input: data_uri must be a single character string"
+ )
+})
+
+test_that("parse_data_uri handles edge cases", {
+ # Test case 9: Data URI with empty data
+ uri9 <- "data:text/plain;base64,"
+ result9 <- parse_data_uri(uri9)
+ expect_equal(result9$mime_type, "text/plain")
+ expect_equal(result9$data, raw(0))
+
+ # Test case 10: Data URI with special characters in MIME type
+ uri10 <- "data:application/x-custom+xml;base64,PGhlbGxvPndvcmxkPC9oZWxsbz4="
+ result10 <- parse_data_uri(uri10)
+ expect_equal(result10$mime_type, "application/x-custom+xml")
+  expect_equal(result10$data, charToRaw("<hello>world</hello>"))
+})
diff --git a/tests/testthat/test-api_skeletons.R b/tests/testthat/test-api_skeletons.R
new file mode 100644
index 00000000..7f5f0b06
--- /dev/null
+++ b/tests/testthat/test-api_skeletons.R
@@ -0,0 +1,288 @@
+withr::local_envvar(
+ list(
+ "OPENAI_API_KEY" = "a-fake-key",
+ "ANTHROPIC_API_KEY" = "a-fake-key",
+ "HF_API_KEY" = "a-fake-key",
+ "GOOGLE_API_KEY" = "a-fake-key",
+ "AZURE_OPENAI_API_KEY" = "a-fake-key",
+ "PERPLEXITY_API_KEY" = "a-fake-key",
+ "COHERE_API_KEY" = "a-fake-key"
+ )
+)
+
+test_that("multiplication works", {
+ config <- yaml::read_yaml(system.file("rstudio/config.yml",
+ package = "gptstudio"
+ ))
+ set_user_options(config)
+
+ withr::with_envvar(
+ new = c(
+ "OPENAI_API_KEY" = "a-fake-key",
+ "ANTHROPIC_API_KEY" = "a-fake-key",
+ "HF_API_KEY" = "a-fake-key",
+ "GOOGLE_API_KEY" = "a-fake-key",
+ "AZURE_OPENAI_API_KEY" = "a-fake-key",
+ "PERPLEXITY_API_KEY" = "a-fake-key",
+ "COHERE_API_KEY" = "a-fake-key",
+ "OLLAMA_HOST" = "JUST A PLACEHOLDER"
+ ),
+ {
+ expect_snapshot(gptstudio_create_skeleton())
+ expect_snapshot(gptstudio_create_skeleton(service = "anthropic"))
+ expect_snapshot(gptstudio_create_skeleton(service = "cohere"))
+ expect_snapshot(gptstudio_create_skeleton(service = "google"))
+ expect_snapshot(gptstudio_create_skeleton(service = "huggingface"))
+ expect_snapshot(gptstudio_create_skeleton(service = "ollama"))
+ expect_snapshot(gptstudio_create_skeleton(service = "openai"))
+ expect_snapshot(gptstudio_create_skeleton(service = "perplexity"))
+ expect_snapshot(gptstudio_create_skeleton(service = "azure-openai"))
+ }
+ )
+})
+
+
+test_that("gptstudio_create_skeleton creates correct skeleton for OpenAI", {
+ skeleton <- gptstudio_create_skeleton(
+ service = "openai",
+ prompt = "What is R?",
+ model = "gpt-4-turbo-preview"
+ )
+
+ expect_s3_class(skeleton, "gptstudio_request_openai")
+ expect_equal(skeleton$model, "gpt-4-turbo-preview")
+ expect_equal(skeleton$prompt, "What is R?")
+ expect_true(skeleton$stream)
+})
+
+test_that("gptstudio_create_skeleton creates correct skeleton for Hugging Face", {
+ skeleton <- gptstudio_create_skeleton(
+ service = "huggingface",
+ prompt = "What is R?",
+ model = "gpt2"
+ )
+
+ expect_s3_class(skeleton, "gptstudio_request_huggingface")
+ expect_equal(skeleton$model, "gpt2")
+ expect_equal(skeleton$prompt, "What is R?")
+ expect_false(skeleton$stream)
+})
+
+test_that("gptstudio_create_skeleton creates correct skeleton for Anthropic", {
+ skeleton <- gptstudio_create_skeleton(
+ service = "anthropic",
+ prompt = "What is R?",
+ model = "claude-3-5-sonnet-20240620"
+ )
+
+ expect_s3_class(skeleton, "gptstudio_request_anthropic")
+ expect_equal(skeleton$model, "claude-3-5-sonnet-20240620")
+ expect_equal(skeleton$prompt, "What is R?")
+ expect_false(skeleton$stream)
+})
+
+test_that("gptstudio_create_skeleton creates correct skeleton for Cohere", {
+ skeleton <- gptstudio_create_skeleton(
+ service = "cohere",
+ prompt = "What is R?",
+ model = "command"
+ )
+
+ expect_s3_class(skeleton, "gptstudio_request_cohere")
+ expect_equal(skeleton$model, "command")
+ expect_equal(skeleton$prompt, "What is R?")
+ expect_false(skeleton$stream)
+})
+
+test_that("new_gptstudio_request_skeleton_openai creates correct structure", {
+ expect_snapshot({
+ skeleton <- new_gptstudio_request_skeleton_openai(
+ url = "https://api.openai.com/v1/chat/completions",
+ api_key = "test_key",
+ model = "gpt-4-turbo-preview",
+ prompt = "What is R?",
+ history = list(list(role = "system", content = "You are an R assistant")),
+ stream = TRUE,
+ n = 1
+ )
+ str(skeleton)
+ })
+})
+
+test_that("new_gptstudio_request_skeleton_huggingface creates correct structure", {
+ expect_snapshot({
+ skeleton <- new_gptstudio_request_skeleton_huggingface(
+ url = "https://api-inference.huggingface.co/models",
+ api_key = "test_key",
+ model = "gpt2",
+ prompt = "What is R?",
+ history = list(list(role = "system", content = "You are an R assistant")),
+ stream = FALSE
+ )
+ str(skeleton)
+ })
+})
+
+
+library(testthat)
+library(gptstudio)
+
+# Tests for new_gpstudio_request_skeleton
+test_that("new_gpstudio_request_skeleton creates correct structure with valid inputs", {
+ result <- new_gpstudio_request_skeleton(
+ url = "https://api.example.com",
+ api_key = "valid_key",
+ model = "test_model",
+ prompt = "What is R?",
+ history = list(list(role = "system", content = "You are an R assistant")),
+ stream = TRUE,
+ extra_param = "value"
+ )
+
+ expect_s3_class(result, "gptstudio_request_skeleton")
+ expect_equal(result$url, "https://api.example.com")
+ expect_equal(result$api_key, "valid_key")
+ expect_equal(result$model, "test_model")
+ expect_equal(result$prompt, "What is R?")
+ expect_equal(result$history, list(list(role = "system", content = "You are an R assistant")))
+ expect_true(result$stream)
+ expect_equal(result$extras, list(extra_param = "value"))
+})
+
+test_that("new_gpstudio_request_skeleton handles NULL history", {
+ result <- new_gpstudio_request_skeleton(
+ url = "https://api.example.com",
+ api_key = "valid_key",
+ model = "test_model",
+ prompt = "What is R?",
+ history = NULL,
+ stream = FALSE
+ )
+
+ expect_null(result$history)
+})
+
+test_that("new_gpstudio_request_skeleton adds custom class", {
+ result <- new_gpstudio_request_skeleton(
+ url = "https://api.example.com",
+ api_key = "valid_key",
+ model = "test_model",
+ prompt = "What is R?",
+ history = list(),
+ stream = TRUE,
+ class = "custom_class"
+ )
+
+ expect_s3_class(result, c("custom_class", "gptstudio_request_skeleton"))
+})
+
+# Tests for validate_skeleton
+test_that("validate_skeleton passes with valid inputs", {
+ expect_silent(
+ validate_skeleton(
+ url = "https://api.example.com",
+ api_key = "valid_key",
+ model = "test_model",
+ prompt = "What is R?",
+ history = list(list(role = "system", content = "You are an R assistant")),
+ stream = TRUE
+ )
+ )
+})
+
+test_that("validate_skeleton handles NULL history", {
+ expect_silent(
+ validate_skeleton(
+ url = "https://api.example.com",
+ api_key = "valid_key",
+ model = "test_model",
+ prompt = "What is R?",
+ history = NULL,
+ stream = TRUE
+ )
+ )
+})
+
+test_that("validate_skeleton throws error for invalid URL", {
+ expect_snapshot(
+ validate_skeleton(
+ url = 123,
+ api_key = "valid_key",
+ model = "test_model",
+ prompt = "What is R?",
+ history = list(),
+ stream = TRUE
+ ),
+ error = TRUE
+ )
+})
+
+test_that("validate_skeleton throws error for empty API key", {
+ expect_snapshot(
+ validate_skeleton(
+ url = "https://api.example.com",
+ api_key = "",
+ model = "test_model",
+ prompt = "What is R?",
+ history = list(),
+ stream = TRUE
+ ),
+ error = TRUE
+ )
+})
+
+test_that("validate_skeleton throws error for empty model", {
+ expect_snapshot(
+ validate_skeleton(
+ url = "https://api.example.com",
+ api_key = "valid_key",
+ model = "",
+ prompt = "What is R?",
+ history = list(),
+ stream = TRUE
+ ),
+ error = TRUE
+ )
+})
+
+test_that("validate_skeleton throws error for non-character prompt", {
+ expect_snapshot(
+ validate_skeleton(
+ url = "https://api.example.com",
+ api_key = "valid_key",
+ model = "test_model",
+ prompt = list("not a string"),
+ history = list(),
+ stream = TRUE
+ ),
+ error = TRUE
+ )
+})
+
+test_that("validate_skeleton throws error for invalid history", {
+ expect_snapshot(
+ validate_skeleton(
+ url = "https://api.example.com",
+ api_key = "valid_key",
+ model = "test_model",
+ prompt = "What is R?",
+ history = "not a list",
+ stream = TRUE
+ ),
+ error = TRUE
+ )
+})
+
+test_that("validate_skeleton throws error for non-boolean stream", {
+ expect_snapshot(
+ validate_skeleton(
+ url = "https://api.example.com",
+ api_key = "valid_key",
+ model = "test_model",
+ prompt = "What is R?",
+ history = list(),
+ stream = "not a boolean"
+ ),
+ error = TRUE
+ )
+})
diff --git a/tests/testthat/test-models.R b/tests/testthat/test-models.R
index 868cabb1..bb815e4f 100644
--- a/tests/testthat/test-models.R
+++ b/tests/testthat/test-models.R
@@ -55,7 +55,8 @@ test_that("get_available_models works for perplexity", {
test_that("get_available_models works for ollama", {
with_mocked_bindings(
`ollama_is_available` = mock_ollama_is_available,
- `ollama_list` = mock_ollama_list, {
+ `ollama_list` = mock_ollama_list,
+ {
service <- "ollama"
models <- get_available_models(service)
expect_equal(models, c("ollama-3.5", "ollama-3", "ollama-2"))
diff --git a/tests/testthat/test-service-azure_openai.R b/tests/testthat/test-service-azure_openai.R
index e64d4c0f..c78192f9 100644
--- a/tests/testthat/test-service-azure_openai.R
+++ b/tests/testthat/test-service-azure_openai.R
@@ -41,8 +41,10 @@ test_that("request_base_azure_openai constructs correct request", {
}
mock_req_headers <- function(req, ...) {
- req$headers <- list("api-key" = "test_token",
- "Content-Type" = "application/json")
+ req$headers <- list(
+ "api-key" = "test_token",
+ "Content-Type" = "application/json"
+ )
req
}
@@ -64,9 +66,11 @@ test_that("request_base_azure_openai constructs correct request", {
api_version = "test_version"
)
- expect_equal(result$url, "https://test.openai.azure.com/openai/deployments/test_deployment/test_task?api-version=test_version") #nolint
- expect_equal(result$headers, list("api-key" = "test_token",
- "Content-Type" = "application/json"))
+ expect_equal(result$url, "https://test.openai.azure.com/openai/deployments/test_deployment/test_task?api-version=test_version") # nolint
+ expect_equal(result$headers, list(
+ "api-key" = "test_token",
+ "Content-Type" = "application/json"
+ ))
}
)
})
@@ -74,12 +78,14 @@ test_that("request_base_azure_openai constructs correct request", {
test_that("query_api_azure_openai handles successful response", {
mock_request_base <- function(...) {
structure(list(url = "https://test.openai.azure.com", headers = list()),
- class = "httr2_request")
+ class = "httr2_request"
+ )
}
mock_req_perform <- function(req) {
structure(list(status_code = 200, body = '{"result": "success"}'),
- class = "httr2_response")
+ class = "httr2_response"
+ )
}
mock_resp_body_json <- function(resp) list(result = "success")
@@ -110,12 +116,14 @@ test_that("query_api_azure_openai handles successful response", {
test_that("query_api_azure_openai handles error response", {
mock_request_base <- function(...) {
structure(list(url = "https://test.openai.azure.com", headers = list()),
- class = "httr2_request")
+ class = "httr2_request"
+ )
}
mock_req_perform <- function(req) {
structure(list(status_code = 400, body = '{"error": "Bad Request"}'),
- class = "httr2_response")
+ class = "httr2_response"
+ )
}
local_mocked_bindings(
diff --git a/tests/testthat/test-service-openai_streaming.R b/tests/testthat/test-service-openai_streaming.R
index bdfc58db..f81f3f8f 100644
--- a/tests/testthat/test-service-openai_streaming.R
+++ b/tests/testthat/test-service-openai_streaming.R
@@ -1,5 +1,4 @@
test_that("OpenaiStreamParser works with different kinds of data values", {
-
openai_parser <- function(sse) {
parser <- OpenaiStreamParser$new()
parser$parse_sse(sse)
@@ -16,5 +15,4 @@ test_that("OpenaiStreamParser works with different kinds of data values", {
expect_type(openai_parser(event2), "list")
expect_type(openai_parser(event3), "list")
expect_type(openai_parser(event4), "list")
-
})