add history for google
JamesHWade committed Sep 3, 2024
1 parent 784063b commit 4ee82f1
Showing 9 changed files with 123 additions and 277 deletions.
2 changes: 1 addition & 1 deletion NAMESPACE
@@ -35,10 +35,10 @@ S3method(list_available_models,openai)
S3method(list_available_models,perplexity)
export(chat)
export(create_chat_azure_openai)
export(create_chat_google)
export(create_chat_ollama)
export(create_chat_openai)
export(create_completion_anthropic)
export(create_completion_google)
export(create_completion_huggingface)
export(get_available_endpoints)
export(get_available_models)
32 changes: 22 additions & 10 deletions R/api_perform_request.R
@@ -69,18 +69,30 @@ gptstudio_request_perform.gptstudio_request_huggingface <-
}

#' @export
gptstudio_request_perform.gptstudio_request_google <-
function(skeleton, ...) {
response <- create_completion_google(prompt = skeleton$prompt)
structure(
list(
skeleton = skeleton,
response = response
),
class = "gptstudio_response_google"
)
gptstudio_request_perform.gptstudio_request_google <- function(skeleton, ...) {
skeleton$history <- chat_history_append(
history = skeleton$history,
role = "user",
name = "user_message",
content = skeleton$prompt
)

if (getOption("gptstudio.read_docs")) {
skeleton$history <- add_docs_messages_to_history(skeleton$history)
}

response <- create_chat_google(prompt = skeleton$history,
model = skeleton$model)

structure(
list(
skeleton = skeleton,
response = response
),
class = "gptstudio_response_google"
)
}
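To make the new flow concrete, here is a minimal sketch of what this method now does with a request skeleton before calling create_chat_google(). The skeleton fields and the exact behaviour of chat_history_append() are assumptions inferred from this diff rather than the package's internals:

# Hypothetical skeleton, for illustration only
skeleton <- list(
  prompt  = "Explain S3 dispatch in R",
  model   = "gemini-pro",
  history = list(list(role = "system", content = "You are a helpful R assistant."))
)

# Assumed effect of chat_history_append(): add the new user turn to the history
skeleton$history <- c(
  skeleton$history,
  list(list(role = "user", content = skeleton$prompt))
)

# The method then sends the whole history, not just the latest prompt:
# response <- create_chat_google(prompt = skeleton$history, model = skeleton$model)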

#' @export
gptstudio_request_perform.gptstudio_request_anthropic <- function(skeleton,
shiny_session = NULL,
50 changes: 21 additions & 29 deletions R/api_process_response.R
@@ -19,8 +19,7 @@ gptstudio_response_process <- function(skeleton, ...) {
}

#' @export
gptstudio_response_process.gptstudio_response_openai <-
function(skeleton, ...) {
gptstudio_response_process.gptstudio_response_openai <- function(skeleton, ...) {
last_response <- skeleton$response

[GitHub Actions / lint] file=R/api_process_response.R, line=23, col=4, [indentation_linter] Indentation should be 2 spaces but is 4 spaces.
skeleton <- skeleton$skeleton

@@ -41,8 +40,7 @@ gptstudio_response_process.gptstudio_response_openai <-
}

#' @export
gptstudio_response_process.gptstudio_response_huggingface <-
function(skeleton, ...) {
gptstudio_response_process.gptstudio_response_huggingface <- function(skeleton, ...) {
response <- skeleton$response

[GitHub Actions / lint] file=R/api_process_response.R, line=44, col=4, [indentation_linter] Indentation should be 2 spaces but is 4 spaces.
skeleton <- skeleton$skeleton
last_response <- response[[1]]$generated_text
@@ -65,8 +63,7 @@ gptstudio_response_process.gptstudio_response_huggingface <-
}

#' @export
gptstudio_response_process.gptstudio_response_anthropic <-
function(skeleton, ...) {
gptstudio_response_process.gptstudio_response_anthropic <- function(skeleton, ...) {
last_response <- skeleton$response

[GitHub Actions / lint] file=R/api_process_response.R, line=67, col=4, [indentation_linter] Indentation should be 2 spaces but is 4 spaces.
skeleton <- skeleton$skeleton

@@ -87,31 +84,27 @@ gptstudio_response_process.gptstudio_response_anthropic <-
}

#' @export
gptstudio_response_process.gptstudio_response_google <-
function(skeleton, ...) {
response <- skeleton$response
skeleton <- skeleton$skeleton
gptstudio_response_process.gptstudio_response_google <- function(skeleton, ...) {
last_response <- skeleton$response
skeleton <- skeleton$skeleton

new_history <- c(
skeleton$history,
list(
list(role = "user", content = skeleton$prompt),
list(role = "assistant", content = response)
)
)
new_history <- chat_history_append(
history = skeleton$history,
role = "assistant",
content = last_response
)

skeleton$history <- new_history
skeleton$prompt <- NULL # remove the last prompt
class(skeleton) <- c(
"gptstudio_request_skeleton",
"gptstudio_request_google"
)
skeleton
}
skeleton$history <- new_history
skeleton$prompt <- NULL # remove the last prompt
class(skeleton) <- c(
"gptstudio_request_skeleton",
"gptstudio_request_google"
)
skeleton
}
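Taken together with the perform method above, this gives a simple turn loop: each user prompt is appended to the history before the request, and each model reply is appended here before the object is converted back into a request skeleton. A hedged sketch of the expected history after one turn (structure assumed from this diff):

# Illustration only: what skeleton$history is expected to contain after one
# question/answer cycle through the google request and response methods
history_after_one_turn <- list(
  list(role = "user",      content = "tell me a joke"),
  list(role = "assistant", content = "<model reply text>")
)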

#' @export
gptstudio_response_process.gptstudio_response_azure_openai <-
function(skeleton, ...) {
gptstudio_response_process.gptstudio_response_azure_openai <- function(skeleton, ...) {
last_response <- skeleton$response
skeleton <- skeleton$skeleton

@@ -153,8 +146,7 @@ gptstudio_response_process.gptstudio_response_ollama <- function(skeleton, ...)
}

#' @export
gptstudio_response_process.gptstudio_response_perplexity <-
function(skeleton, ...) {
gptstudio_response_process.gptstudio_response_perplexity <- function(skeleton, ...) {
response <- skeleton$response
skeleton <- skeleton$skeleton

3 changes: 1 addition & 2 deletions R/gptstudio-sitrep.R
@@ -64,7 +64,7 @@ check_api_connection_google <- function(service, api_key) {
request_body <-
list(contents = list(list(parts = list(list(text = "Hello there")))))

response <- request_base_google(model = "gemini-pro", key = api_key) |>
response <- request_base_google(model = "gemini-pro", api_key = api_key) |>
req_body_json(data = request_body) |>
req_error(is_error = function(resp) FALSE) |>
req_perform()
@@ -74,7 +74,6 @@ check_api_connection_google <- function(service, api_key) {

#' @inheritParams check_api_connection_openai
check_api_connection_azure_openai <- function(service, api_key) {
""
api_check <- check_api_key(service, api_key)
if (rlang::is_false(api_check)) {
return(invisible(NULL))
146 changes: 68 additions & 78 deletions R/service-google.R
@@ -1,106 +1,75 @@
#' Base for a request to the Google AI Studio API
#'
#' This function sends a request to a specific Google AI Studio API endpoint and
#' authenticates with an API key.
#'
#' @param model character string specifying a Google AI Studio API model
#' @param key String containing a Google AI Studio API key. Defaults to the
#' GOOGLE_API_KEY environmental variable if not specified.
#' @return An httr2 request object
request_base_google <- function(model, key = Sys.getenv("GOOGLE_API_KEY")) {
url <- glue::glue(
"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent"
)

request(url) |>
req_url_query(key = key)
}


#' A function that sends a request to the Google AI Studio API and returns the
#' response.
#'
#' @param model A character string that specifies the model to send to the API.
#' @param request_body A list that contains the parameters for the task.
#' @param key String containing a Google AI Studio API key. Defaults
#' to the GOOGLE_API_KEY environmental variable if not specified.
#'
#' @return The response from the API.
#'
query_api_google <- function(model,
request_body,
key = Sys.getenv("GOOGLE_API_KEY")) {
response <- request_base_google(model, key) |>
req_body_json(data = request_body) |>
req_retry(max_tries = 3) |>
req_error(is_error = function(resp) FALSE) |>
req_perform()

# error handling
if (resp_is_error(response)) {
status <- resp_status(response) # nolint
description <- resp_status_desc(response) # nolint

cli::cli_abort(message = c(
"x" = "Google AI Studio API request failed. Error {status} - {description}",
"i" = "Visit the Google AI Studio API documentation for more details"
))
}

response |>
resp_body_json()
}

#' Generate text completions using Google AI Studio's API
#'
#' @param prompt The prompt for generating completions
#' @param model The model to use for generating text. By default, the
#' function will try to use "text-bison-001"
#' @param key The API key for accessing Google AI Studio's API. By default, the
#' function will try to use the `GOOGLE_API_KEY` environment variable.
#' @param model The model to use for generating text. By default, the function
#' will try to use "gemini-pro"
#' @param api_key The API key for accessing Google AI Studio's API. By default,
#' the function will try to use the `GOOGLE_API_KEY` environment variable.
#'
#' @return A character string containing the text generated by the model.
#' @examples
#' \dontrun{
#' create_completion_google(
#' create_chat_google(
#' prompt = "Write a story about a magic backpack",
#' temperature = 1.0,
#' candidate_count = 3
#' )
#' }
#' @export
create_completion_google <- function(prompt,
model = "gemini-pro",
key = Sys.getenv("GOOGLE_API_KEY")) {
# Constructing the request body as per the API documentation
create_chat_google <- function(prompt = list(list(role = "user", content = "tell me a joke")),
model = "gemini-pro",
api_key = Sys.getenv("GOOGLE_API_KEY")) {

messages <- openai_to_google_format(prompt)

request_body <- list(
contents = list(
list(
parts = list(
list(
text = prompt
)
)
)
)
# system_instruction = messages$system_instruction,
contents = messages$contents
)

response <- query_api_google(model = model, request_body = request_body, key = key)
query_api_google(model = model,
request_body = request_body,
api_key = api_key)
}

request_base_google <- function(model,
api_key = Sys.getenv("GOOGLE_API_KEY")) {
request("https://generativelanguage.googleapis.com/v1beta/models") |>
req_url_path_append(glue("{model}:generateContent")) |>
req_url_query(key = api_key)
}
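For a quick sanity check of the endpoint this helper targets (the key below is a placeholder, and the exact URL string is an assumption about how httr2 assembles the path and query):

req <- request_base_google(model = "gemini-pro", api_key = "MY_TEST_KEY")
# req$url should resolve to roughly:
# https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=MY_TEST_KEY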

query_api_google <- function(request_body,
api_key = Sys.getenv("GOOGLE_API_KEY"),
model) {
resp <-
request_base_google(model = model, api_key = api_key) |>
req_body_json(data = request_body, auto_unbox = TRUE) |>
req_retry(max_tries = 3) |>
req_error(is_error = function(resp) FALSE) |>
req_perform()

if (resp_is_error(resp)) {
status <- resp_status(resp) # nolint
description <- resp_status_desc(resp) # nolint

# Assuming the response structure follows the API documentation example, parsing it accordingly.
# Please adjust if the actual API response has a different structure.
purrr::map_chr(response$candidates, ~ .x$content$parts[[1]]$text)
cli::cli_abort(c(
"x" = "Google AI Studio API request failed. Error {status} - {description}",
"i" = "Visit the Google AI Studio API documentation for more details"
))
}
results <- resp |> resp_body_json()
results$candidates[[1]]$content$parts[[1]]$text
}
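The final extraction assumes the usual generateContent response shape; below is a simplified sketch of the parsed body it expects (real responses carry additional fields such as safety ratings and usage metadata):

results <- list(
  candidates = list(
    list(
      content = list(
        role  = "model",
        parts = list(list(text = "Why don't scientists trust atoms? ..."))
      )
    )
  )
)
results$candidates[[1]]$content$parts[[1]]$text
#> "Why don't scientists trust atoms? ..."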

get_available_models_google <- function(key = Sys.getenv("GOOGLE_API_KEY")) {
get_available_models_google <- function(api_key = Sys.getenv("GOOGLE_API_KEY")) {
response <-
request("https://generativelanguage.googleapis.com/v1beta") |>
req_url_path_append("models") |>
req_url_query(key = key) |>
req_url_query(key = api_key) |>
req_perform()

# error handling
if (resp_is_error(response)) {
status <- resp_status(response) # nolint
description <- resp_status_desc(response) # nolint
@@ -118,3 +87,24 @@ get_available_models_google <- function(key = Sys.getenv("GOOGLE_API_KEY")) {
models$name |>
stringr::str_remove("models/")
}

openai_to_google_format <- function(openai_messages) {
google_format <- list(contents = list())

for (message in openai_messages) {
role <- message$role
content <- message$content

if (role == "system") {
google_format$system_instruction <- list(parts = list(text = content))
} else if (role %in% c("user", "assistant")) {
google_role <- ifelse(role == "user", "user", "model")
google_format$contents <- c(google_format$contents,
list(list(
role = google_role,
parts = list(list(text = content))
)))
}
}
invisible(google_format)
}
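An illustrative mapping through the converter above: system messages become a single system_instruction, user/assistant turns become contents entries with roles user/model, and any other roles are dropped:

msgs <- list(
  list(role = "system",    content = "You are concise."),
  list(role = "user",      content = "Hi"),
  list(role = "assistant", content = "Hello! How can I help?")
)
google <- openai_to_google_format(msgs)
# google$system_instruction: list(parts = list(text = "You are concise."))
# google$contents[[1]]:      role "user",  parts = list(list(text = "Hi"))
# google$contents[[2]]:      role "model", parts = list(list(text = "Hello! How can I help?"))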
20 changes: 10 additions & 10 deletions man/create_completion_google.Rd → man/create_chat_google.Rd
