Skip to content

Commit

Permalink
Merge pull request #85 from JamesHWade/ghost-writer
Browse files Browse the repository at this point in the history
Ghost writer
  • Loading branch information
JamesHWade authored Mar 29, 2024
2 parents 686a07c + 335b03f commit 3b784a0
Show file tree
Hide file tree
Showing 16 changed files with 178 additions and 83 deletions.
2 changes: 1 addition & 1 deletion DESCRIPTION
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
Package: gpttools
Title: Extensions and Tools for gptstudio
Version: 0.0.8.9015
Version: 0.0.8.9016
Authors@R:
person("James", "Wade", , "[email protected]", role = c("aut", "cre"),
comment = c(ORCID = "0000-0002-9740-1905"))
Expand Down
2 changes: 2 additions & 0 deletions NAMESPACE
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ export(extract_code_chunks)
export(get_selection)
export(get_transformer_model)
export(ghost_chat)
export(ghost_writer)
export(gpt_sitrep)
export(gpttools_index_all_scraped_data)
export(ingest_pdf)
Expand All @@ -41,6 +42,7 @@ export(set_user_config)
export(suggest_unit_test_addin)
export(transcribe_audio)
import(cli)
import(httr2)
import(rlang)
importFrom(glue,glue)
importFrom(graphics,text)
Expand Down
13 changes: 13 additions & 0 deletions R/addin_ghost_writer.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
#' Writing suggestions
#'
#' RStudio addin wrapper around [ghost_writer()]. Streams prose (not code)
#' suggestions from the configured chat service into the active source
#' document at the cursor position.
#'
#' @return Called for its side effects of inserting text into the document
#'   and emitting status alerts.
#' @export
ghost_writer_addin <- function() {
  cli::cli_alert_info("Attempting to add suggestion")
  ghost_writer(
    service = getOption("gpttools.service", "openai"),
    stream = TRUE,
    where = "source"
  )
  # Fixed message: this addin inserts writing suggestions, not code
  # (the prior text was copied from the code-copilot addin).
  cli::cli_alert_info("Done adding writing suggestion")
}
40 changes: 40 additions & 0 deletions R/chat.R
Original file line number Diff line number Diff line change
Expand Up @@ -165,6 +165,46 @@ ghost_chat <- function(service = getOption("gpttools.service", "openai"),
)
}

#' Writing Assistant
#'
#' Streams writing suggestions from the selected chat service directly into
#' the document at the cursor. The text above and below the cursor (gathered
#' by `get_cursor_context()`) is embedded in the prompt, and the model is
#' told to continue from the `[[start_here]]` placeholder.
#'
#' @inheritParams chat
#' @export
ghost_writer <- function(service = getOption("gpttools.service", "openai"),
                         stream = TRUE,
                         where = "source") {
  # NOTE(review): `stream` is accepted but never forwarded to stream_chat();
  # confirm whether it should be passed through or dropped from the signature.
  context <- get_cursor_context()
  instructions <- glue::glue(
    "You are an expert writing assistant that provides brief suggestions and
    improvements directly into the text. Your response will go directly into the
    document. You should only provide text or comments related to the writing.
    Do not add any code. You are given context above and below the current
    cursor position.
    Here is an example:
    The quick brown fox jumps over the lazy dog. The dog, startled by the
    fox's sudden movement, [[start_here]] barks loudly and chases after the fox.
    The fox, being much quicker and more agile, easily outmaneuvers the dog and
    disappears into the dense forest.
    Your response begins at the placeholder [[start_here]].
    Here is the context:
    {context$above}
    {context$below}"
  )
  stream_chat(
    prompt = instructions,
    service = service,
    r = NULL,
    output_id = NULL,
    where = where
  )
}

get_cursor_context <- function(context_lines = 20,
placeholder = "[[start_here]]") {
doc <- rstudioapi::getSourceEditorContext()
Expand Down
18 changes: 9 additions & 9 deletions R/gpt-query.R
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ query_openai <- function(task = "chat/completions",
model = "gpt-3.5-turbo") {
arg_match(task, c("chat/completions", "embeddings"))

req <- httr2::request(base_url)
req <- request(base_url)

if (task == "chat/completions") {
body <- list(
Expand All @@ -73,16 +73,16 @@ query_openai <- function(task = "chat/completions",

resp <-
req |>
httr2::req_url_path_append(task) |>
httr2::req_user_agent("gpttools: https://github.com/jameshwade/gpttools") |>
httr2::req_headers(
req_url_path_append(task) |>
req_user_agent("gpttools: https://github.com/jameshwade/gpttools") |>
req_headers(
"Authorization" = glue("Bearer {api_key}"),
"Content-Type" = "application/json"
) |>
httr2::req_body_json(body) |>
httr2::req_retry() |>
httr2::req_throttle(4) |>
httr2::req_perform()
req_body_json(body) |>
req_retry() |>
req_throttle(4) |>
req_perform()

resp |> httr2::resp_body_json(simplifyVector = TRUE)
resp |> resp_body_json(simplifyVector = TRUE)
}
2 changes: 1 addition & 1 deletion R/gpttools-package.R
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,9 @@
#' @import cli
#' @import rlang
#' @importFrom glue glue
#' @import httr2
#' @importFrom utils globalVariables head installed.packages old.packages
#' packageDescription packageVersion
#' @importFrom graphics text
#' @importFrom glue glue
## usethis namespace: end
NULL
12 changes: 7 additions & 5 deletions R/harvest-docs.R
Original file line number Diff line number Diff line change
Expand Up @@ -20,15 +20,17 @@ check_url <- function(url) {
status <-
try(
{
httr2::request(url) |>
httr2::req_error(is_error = \(resp) FALSE) |>
httr2::req_perform() |>
httr2::resp_status()
request(url) |>
req_error(is_error = \(resp) FALSE) |>
req_perform() |>
resp_status()
},
TRUE
)

if (inherits(status, "try-error")) return(invisible(FALSE))
if (inherits(status, "try-error")) {
return(invisible(FALSE))
}
status
}

Expand Down
37 changes: 20 additions & 17 deletions R/stream-anthropic.R
Original file line number Diff line number Diff line change
@@ -1,32 +1,35 @@
stream_chat_anthropic <- function(prompt,
element_callback = create_handler("anthropic"),
model = "claude-2",
model = "claude-3-sonnet-20240229",
key = Sys.getenv("ANTHROPIC_API_KEY")) {
request_body <- list(
prompt = glue::glue("\n\nHuman: {prompt}\n\nAssistant:"),
model = model,
max_tokens_to_sample = 256,
max_tokens = 1024,
messages = list(
list(role = "user", content = prompt)
),
stream = TRUE
)

response <-
httr2::request("https://api.anthropic.com/v1/complete") |>
httr2::req_headers(
`accept` = "application/json",
`anthropic-version` = "2023-06-01",
`content-type` = "application/json",
`x-api-key` = key
request("https://api.anthropic.com/v1/messages") |>
req_headers(
"anthropic-version" = "2023-06-01",
"content-type" = "application/json",
"x-api-key" = key
) |>
httr2::req_method("POST") |>
httr2::req_body_json(data = request_body) |>
httr2::req_retry(max_tries = 3) |>
httr2::req_error(is_error = function(resp) FALSE) |>
httr2::req_perform_stream(callback = element_callback, buffer_kb = 0.01)
req_body_json(data = request_body) |>
req_retry(max_tries = 3) |>
req_error(is_error = function(resp) FALSE) |>
req_perform_stream(
callback = element_callback,
buffer_kb = 0.01
)

# error handling
if (httr2::resp_is_error(response)) {
status <- httr2::resp_status(response)
description <- httr2::resp_status_desc(response)
if (resp_is_error(response)) {
status <- resp_status(response)
description <- resp_status_desc(response)

cli::cli_abort(message = c(
"x" = "Anthropic API request failed. Error {status} - {description}",
Expand Down
22 changes: 11 additions & 11 deletions R/stream-azure-openai.R
Original file line number Diff line number Diff line change
Expand Up @@ -16,27 +16,27 @@ stream_chat_azure_openai <- function(prompt = NULL,


response <-
httr2::request(Sys.getenv("AZURE_OPENAI_ENDPOINT")) |>
httr2::req_url_path_append("openai/deployments") |>
httr2::req_url_path_append(Sys.getenv("AZURE_OPENAI_DEPLOYMENT_NAME")) |>
httr2::req_url_path_append(Sys.getenv("AZURE_OPENAI_TASK")) |>
httr2::req_url_query("api-version" = Sys.getenv("AZURE_OPENAI_API_VERSION")) |>
httr2::req_headers(
request(Sys.getenv("AZURE_OPENAI_ENDPOINT")) |>
req_url_path_append("openai/deployments") |>
req_url_path_append(Sys.getenv("AZURE_OPENAI_DEPLOYMENT_NAME")) |>
req_url_path_append(Sys.getenv("AZURE_OPENAI_TASK")) |>
req_url_query("api-version" = Sys.getenv("AZURE_OPENAI_API_VERSION")) |>
req_headers(
"api-key" = Sys.getenv("AZURE_OPENAI_KEY"),
"Content-Type" = "application/json"
)

if (use_token) {
token <- retrieve_azure_token()
response <- response |> httr2::req_auth_bearer_token(token = token)
response <- response |> req_auth_bearer_token(token = token)
}

response <-
response |>
httr2::req_body_json(data = body) |>
httr2::req_retry(max_tries = 3) |>
httr2::req_error(is_error = function(resp) FALSE) |>
httr2::req_perform_stream(
req_body_json(data = body) |>
req_retry(max_tries = 3) |>
req_error(is_error = function(resp) FALSE) |>
req_perform_stream(
callback = element_callback,
buffer_kb = 0.01
)
Expand Down
4 changes: 2 additions & 2 deletions R/stream-chat.R
Original file line number Diff line number Diff line change
Expand Up @@ -108,8 +108,8 @@ get_stream_pattern <- function(service) {
pluck <- c("choices", "delta", "content")
},
"anthropic" = {
pattern <- "\\{\"type\":\"completion\",.*\"log_id\":\"compl_[^\"]*\"\\}"
pluck <- "completion"
pattern <- "\\{\"type\":\"content_block_delta\",.*\\}.*\\}"
pluck <- c("delta", "text")
},
"perplexity" = {
pattern <- '\\{"id".*?\\}\\}\\]\\}'
Expand Down
20 changes: 10 additions & 10 deletions R/stream-cohere.R
Original file line number Diff line number Diff line change
Expand Up @@ -9,21 +9,21 @@ stream_chat_cohere <- function(prompt,
)

response <-
httr2::request("https://api.cohere.ai/v1/chat") |>
httr2::req_headers(
request("https://api.cohere.ai/v1/chat") |>
req_headers(
`accept` = "application/json",
`Authorization` = paste("Bearer", key),
`content-type` = "application/json"
) |>
httr2::req_method("POST") |>
httr2::req_body_json(data = request_body) |>
httr2::req_retry(max_tries = 3) |>
httr2::req_error(is_error = function(resp) FALSE) |>
httr2::req_perform_stream(callback = element_callback, buffer_kb = 0.01)
req_method("POST") |>
req_body_json(data = request_body) |>
req_retry(max_tries = 3) |>
req_error(is_error = function(resp) FALSE) |>
req_perform_stream(callback = element_callback, buffer_kb = 0.01)

if (httr2::resp_is_error(response)) {
status <- httr2::resp_status(response)
description <- httr2::resp_status_desc(response)
if (resp_is_error(response)) {
status <- resp_status(response)
description <- resp_status_desc(response)

cli::cli_abort(message = c(
"x" = glue::glue("Cohere API request failed. Error {status} - {description}"),
Expand Down
23 changes: 12 additions & 11 deletions R/stream-ollama.R
Original file line number Diff line number Diff line change
Expand Up @@ -10,15 +10,16 @@ stream_chat_ollama <- function(prompt,
# ollama_is_available()
url <- Sys.getenv("OLLAMA_HOST", "http://localhost:11434")
response <-
httr2::request(url) |>
httr2::req_url_path_append("api") |>
httr2::req_url_path_append("generate") |>
httr2::req_body_json(data = body) |>
httr2::req_perform_stream(callback = element_callback, buffer_kb = 0.01)
request(url) |>
req_url_path_append("v1") |>
req_url_path_append("api") |>
req_url_path_append("generate") |>
req_body_json(data = body) |>
req_perform_stream(callback = element_callback, buffer_kb = 0.01)

if (httr2::resp_is_error(response)) {
status <- httr2::resp_status(response)
description <- httr2::resp_status_desc(response)
if (resp_is_error(response)) {
status <- resp_status(response)
description <- resp_status_desc(response)

cli::cli_abort(message = c(
"x" = glue::glue("Ollama API request failed. Error {status} - {description}"),
Expand All @@ -29,14 +30,14 @@ stream_chat_ollama <- function(prompt,

ollama_is_available <- function(verbose = FALSE) {
request <- Sys.getenv("OLLAMA_HOST", "http://localhost:11434") |>
httr2::request()
request()

check_value <- logical(1)

rlang::try_fetch(
{
response <- httr2::req_perform(request) |>
httr2::resp_body_string()
response <- req_perform(request) |>
resp_body_string()

if (verbose) cli::cli_alert_success(response)
check_value <- TRUE
Expand Down
12 changes: 6 additions & 6 deletions R/stream-openai.R
Original file line number Diff line number Diff line change
Expand Up @@ -18,12 +18,12 @@ stream_chat_openai <- function(prompt = NULL,
)

response <-
httr2::request("https://api.openai.com/v1/chat/completions") |>
httr2::req_auth_bearer_token(token = openai_api_key) |>
httr2::req_body_json(data = body) |>
httr2::req_retry(max_tries = 3) |>
httr2::req_error(is_error = function(resp) FALSE) |>
httr2::req_perform_stream(
request("https://api.openai.com/v1/chat/completions") |>
req_auth_bearer_token(token = openai_api_key) |>
req_body_json(data = body) |>
req_retry(max_tries = 3) |>
req_error(is_error = function(resp) FALSE) |>
req_perform_stream(
callback = element_callback,
buffer_kb = 0.01
)
Expand Down
19 changes: 9 additions & 10 deletions R/stream-perplexity.R
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
stream_chat_perplexity <- function(prompt,
element_callback = create_handler("perplexity"),
model = getOption("gpttools.model", "pplx-7b-chat"),
model = getOption("gpttools.model", "sonar-small-chat"),
api_key = Sys.getenv("PERPLEXITY_API_KEY")) {
request_body <- list(
model = model,
Expand All @@ -11,20 +11,19 @@ stream_chat_perplexity <- function(prompt,
)

response <-
httr2::request("https://api.perplexity.ai/chat/completions") |>
httr2::req_method("POST") |>
httr2::req_headers(
request("https://api.perplexity.ai/chat/completions") |>
req_headers(
accept = "application/json",
"Content-Type" = "application/json",
Authorization = paste("Bearer", api_key)
) |>
httr2::req_body_json(data = request_body) |>
httr2::req_retry(max_tries = 3) |>
httr2::req_perform_stream(callback = element_callback, buffer_kb = 0.01)
req_body_json(data = request_body) |>
req_retry(max_tries = 3) |>
req_perform_stream(callback = element_callback, buffer_kb = 0.01)

if (httr2::resp_is_error(response)) {
status <- httr2::resp_status(response)
description <- httr2::resp_status_desc(response)
if (resp_is_error(response)) {
status <- resp_status(response)
description <- resp_status_desc(response)
stop("Perplexity API request failed with error ", status, ": ", description, call. = FALSE)
}
}
5 changes: 5 additions & 0 deletions inst/rstudio/addins.dcf
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,11 @@ Description: Generate code suggestions in source docs like .R, .Rmd, and .qmd
Binding: copilot_addin
Interactive: true

Name: Ghost Writer
Description: Generate prose suggestions in docs like .Rmd and .qmd
Binding: ghost_writer_addin
Interactive: true

Name: Settings for gpttools
Description: Customize gpttools settings
Binding: launch_settings
Expand Down
Loading

0 comments on commit 3b784a0

Please sign in to comment.