From 5a3b2d810b5e553d610c54ff635efc77f9f575b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aleksandar=20Toma=C5=A1evi=C4=87?= Date: Thu, 7 Dec 2023 11:35:08 -0500 Subject: [PATCH] Add video_scores function and update image_scores function --- NAMESPACE | 9 +++++++ R/video_scores.R | 8 +++--- man/image_scores.Rd | 4 +-- man/video_scores.Rd | 60 +++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 74 insertions(+), 7 deletions(-) create mode 100644 man/video_scores.Rd diff --git a/NAMESPACE b/NAMESPACE index c171f96..9abc8e3 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -8,12 +8,21 @@ export(setup_miniconda) export(setup_modules) export(simulate_video) export(transformer_scores) +export(video_scores) +import(reticulate) importFrom(Matrix,bdiag) +importFrom(base,file.remove) +importFrom(base,paste0) importFrom(dplyr,left_join) +importFrom(reticulate,py_module_available) importFrom(reticulate,source_python) +importFrom(reticulate,use_condaenv) importFrom(stats,aggregate) +importFrom(stats,grep) importFrom(stats,na.omit) importFrom(utils,data) +importFrom(utils,dir.create) +importFrom(utils,dir.exists) importFrom(utils,install.packages) importFrom(utils,installed.packages) importFrom(utils,packageDescription) diff --git a/R/video_scores.R b/R/video_scores.R index 67af6a1..370d4d9 100644 --- a/R/video_scores.R +++ b/R/video_scores.R @@ -49,10 +49,8 @@ video_scores <- function(video, classes, nframes=100, setup_modules() } ################################################################ - # source_python(system.file("python", "image.py", package = "transforEmotion")) - # source_python(system.file("python", "image.py", package = "transforEmotion")) - reticulate::source_python("inst/python/image.py") - reticulate::source_python("inst/python/video.py") + source_python(system.file("python", "image.py", package = "transforEmotion")) + source_python(system.file("python", "video.py", package = "transforEmotion")) if (!grepl("youtu", video)){ stop("You need to provide a YouTube 
video URL.") } @@ -84,4 +82,4 @@ video_scores <- function(video, classes, nframes=100, file.remove(list.files(save_dir, pattern = ".jpg")) } return(result) -} \ No newline at end of file +} diff --git a/man/image_scores.Rd b/man/image_scores.Rd index 7f5948b..ee78eea 100644 --- a/man/image_scores.Rd +++ b/man/image_scores.Rd @@ -4,10 +4,10 @@ \alias{image_scores} \title{Calculate image scores based on OpenAI CLIP model} \usage{ -image_scores(image_file, classes, face_selection = "largest") +image_scores(image, classes, face_selection = "largest") } \arguments{ -\item{image_file}{The path to the image file or URL of the image.} +\item{image}{The path to the image file or URL of the image.} \item{classes}{A character vector of classes to classify the image into.} diff --git a/man/video_scores.Rd b/man/video_scores.Rd new file mode 100644 index 0000000..6777ada --- /dev/null +++ b/man/video_scores.Rd @@ -0,0 +1,60 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/video_scores.R +\name{video_scores} +\alias{video_scores} +\title{Run FER on YouTube video} +\usage{ +video_scores( + video, + classes, + nframes = 100, + face_selection = "largest", + cut = FALSE, + start = 0, + end = 60, + uniform = FALSE, + ffreq = 15, + save_video = FALSE, + save_frames = FALSE, + save_dir = "temp/", + video_name = "temp" +) +} +\arguments{ +\item{video}{The URL of the YouTube video to analyze.} + +\item{classes}{A character vector specifying the classes to analyze.} + +\item{nframes}{The number of frames to analyze in the video. Default is 100.} + +\item{face_selection}{The method for selecting faces in the video. Options are "largest", "left", or "right". Default is "largest".} + +\item{cut}{Logical indicating whether to cut the video to a specific time range. Default is FALSE.} + +\item{start}{The start time of the video range to analyze. Default is 0.} + +\item{end}{The end time of the video range to analyze. 
Default is 60.} +\item{uniform}{Logical indicating whether to uniformly sample frames from the video. Default is FALSE.} +\item{ffreq}{The frame frequency for sampling frames from the video. Default is 15.} +\item{save_video}{Logical indicating whether to save the analyzed video. Default is FALSE.} +\item{save_frames}{Logical indicating whether to save the analyzed frames. Default is FALSE.} +\item{save_dir}{The directory to save the analyzed frames. Default is "temp/".} +\item{video_name}{The name of the analyzed video. Default is "temp".} +} +\value{ +A result object containing the analyzed video scores. +} +\description{ +This function retrieves FER scores for a specific number of frames extracted from a YouTube video. It uses Python libraries for facial recognition and emotion detection in text, images, and videos. +} +\examples{ +# Not run: +result <- video_scores("https://www.youtube.com/watch?v=dQw4w9WgXcQ", c("happy", "sad"), nframes = 200, face_selection = "left", cut = TRUE, start = 30, end = 90, uniform = TRUE, ffreq = 10, save_video = TRUE, save_frames = TRUE, save_dir = "output/", video_name = "analysis") + +}