From b8a632fc126e7fab59e2faa3e773ec529edd73d4 Mon Sep 17 00:00:00 2001 From: gilbertocamara Date: Sat, 28 Oct 2023 14:29:48 -0300 Subject: [PATCH] fix documentation problems --- R/sits_accuracy.R | 2 +- R/sits_active_learning.R | 39 ++++++------ R/sits_classify.R | 64 +++++++++---------- R/sits_cluster.R | 19 +++--- R/sits_combine_predictions.R | 4 -- R/sits_config.R | 2 +- R/sits_cube.R | 103 ++++++++++++++----------------- R/sits_cube_copy.R | 5 +- R/sits_filters.R | 2 +- R/sits_get_data.R | 25 +++----- R/sits_label_classification.R | 1 + R/sits_lighttae.R | 11 ++-- R/sits_machine_learning.R | 5 +- R/sits_merge.R | 2 - R/sits_model_export.R | 2 +- R/sits_mosaic.R | 5 +- R/sits_plot.R | 49 ++++++--------- R/sits_predictors.R | 2 +- R/sits_segmentation.R | 34 +++++----- R/sits_select.R | 4 +- R/sits_summary.R | 3 + R/sits_tuning.R | 10 ++- man/plot.Rd | 39 +++++------- man/plot.som_map.Rd | 10 ++- man/sits_accuracy.Rd | 2 +- man/sits_classify.Rd | 60 +++++++++--------- man/sits_cluster_clean.Rd | 5 +- man/sits_cluster_dendro.Rd | 12 ++-- man/sits_cluster_frequency.Rd | 2 +- man/sits_combine_predictions.Rd | 4 -- man/sits_confidence_sampling.Rd | 19 +++--- man/sits_config.Rd | 2 +- man/sits_cube.Rd | 89 ++++++++++++-------------- man/sits_filter.Rd | 2 +- man/sits_get_data.Rd | 25 +++----- man/sits_label_classification.Rd | 2 + man/sits_lighttae.Rd | 14 ++--- man/sits_model_export.Rd | 2 +- man/sits_mosaic.Rd | 5 +- man/sits_predictors.Rd | 2 +- man/sits_rfor.Rd | 6 +- man/sits_segment.Rd | 25 ++++---- man/sits_select.Rd | 4 +- man/sits_slic.Rd | 10 +-- man/sits_tuning.Rd | 6 +- man/sits_tuning_hparams.Rd | 4 +- man/sits_uncertainty_sampling.Rd | 20 +++--- man/summary.raster_cube.Rd | 2 + man/summary.sits.Rd | 2 + 49 files changed, 356 insertions(+), 412 deletions(-) diff --git a/R/sits_accuracy.R b/R/sits_accuracy.R index ce51028cf..cfcb0a43e 100644 --- a/R/sits_accuracy.R +++ b/R/sits_accuracy.R @@ -4,7 +4,7 @@ #' @author Alber Sanchez, \email{alber.ipia@@inpe.br} 
#' @description This function calculates the accuracy of the classification #' result. For a set of time series, it creates a confusion matrix and then -#' calculates the resulting statistics using the R package "caret". The time +#' calculates the resulting statistics using package \code{caret}. The time #' series needs to be classified using \code{\link[sits]{sits_classify}}. #' #' Classified images are generated using \code{\link[sits]{sits_classify}} diff --git a/R/sits_active_learning.R b/R/sits_active_learning.R index c7204f80f..19ac00ce4 100644 --- a/R/sits_active_learning.R +++ b/R/sits_active_learning.R @@ -13,20 +13,20 @@ #' These points don't have labels and need be manually labelled by experts #' and then used to increase the classification's training set. #' -#' This function is best used in the following context -#' \itemize{ -#' \item{1. }{Select an initial set of samples.} -#' \item{2. }{Train a machine learning model.} -#' \item{3. }{Build a data cube and classify it using the model.} -#' \item{4. }{Run a Bayesian smoothing in the resulting probability cube.} -#' \item{5. }{Create an uncertainty cube.} -#' \item{6. }{Perform uncertainty sampling.} -#' } +#' This function is best used in the following context: +#' 1. Select an initial set of samples. +#' 2. Train a machine learning model. +#' 3. Build a data cube and classify it using the model. +#' 4. Run a Bayesian smoothing in the resulting probability cube. +#' 5. Create an uncertainty cube. +#' 6. Perform uncertainty sampling. +#' #' The Bayesian smoothing procedure will reduce the classification outliers #' and thus increase the likelihood that the resulting pixels with high #' uncertainty have meaningful information. #' -#' @param uncert_cube An uncertainty cube. See \code{sits_uncertainty}. +#' @param uncert_cube An uncertainty cube. +#' See \code{\link[sits]{sits_uncertainty}}. #' @param n Number of suggested points. #' @param min_uncert Minimum uncertainty value to select a sample. 
#' @param sampling_window Window size for collecting points (in pixels). @@ -158,21 +158,20 @@ sits_uncertainty_sampling <- function(uncert_cube, #' this label compared to all others. The algorithm also considers a #' minimum distance between new labels, to minimize spatial autocorrelation #' effects. +#' This function is best used in the following context: +#' 1. Select an initial set of samples. +#' 2. Train a machine learning model. +#' 3. Build a data cube and classify it using the model. +#' 4. Run a Bayesian smoothing in the resulting probability cube. +#' 5. Perform confidence sampling. #' -#' This function is best used in the following context -#' \itemize{ -#' \item{1. }{Select an initial set of samples.} -#' \item{2. }{Train a machine learning model.} -#' \item{3. }{Build a data cube and classify it using the model.} -#' \item{4. }{Run a Bayesian smoothing in the resulting probability cube.} -#' \item{5. }{Create an uncertainty cube.} -#' \item{6. }{Perform confidence sampling.} -#' } #' The Bayesian smoothing procedure will reduce the classification outliers #' and thus increase the likelihood that the resulting pixels with provide #' good quality samples for each class. #' -#' @param probs_cube A probability cube. See \code{sits_classify}. +#' @param probs_cube A smoothed probability cube. +#' See \code{\link[sits]{sits_classify}} and +#' \code{\link[sits]{sits_smooth}}. #' @param n Number of suggested points per class. #' @param min_margin Minimum margin of confidence to select a sample #' @param sampling_window Window size for collecting points (in pixels). 
diff --git a/R/sits_classify.R b/R/sits_classify.R index 843c013ae..fe3fe60d9 100644 --- a/R/sits_classify.R +++ b/R/sits_classify.R @@ -1,7 +1,5 @@ #' @title Classify time series or data cubes -#' #' @name sits_classify -#' #' @author Rolf Simoes, \email{rolf.simoes@@inpe.br} #' @author Gilberto Camara, \email{gilberto.camara@@inpe.br} #' @@ -10,16 +8,13 @@ #' a trained model prediction model created by \code{\link[sits]{sits_train}}. #' #' SITS supports the following models: -#' \itemize{ -#' \item{support vector machines: } {see \code{\link[sits]{sits_svm}}} -#' \item{random forests: } {see \code{\link[sits]{sits_rfor}}} -#' \item{extreme gradient boosting: } {see \code{\link[sits]{sits_xgboost}}} -#' \item{multi-layer perceptrons: } {see \code{\link[sits]{sits_mlp}}} -#' \item{1D CNN: } {see \code{\link[sits]{sits_tempcnn}}} -#' \item{deep residual networks:}{see \code{\link[sits]{sits_resnet}}} -#' \item{self-attention encoders:}{see \code{\link[sits]{sits_lighttae}}} -#' } -#' +#' (a) support vector machines: \code{\link[sits]{sits_svm}}; +#' (b) random forests: \code{\link[sits]{sits_rfor}}; +#' (c) extreme gradient boosting: \code{\link[sits]{sits_xgboost}}; +#' (d) multi-layer perceptrons: \code{\link[sits]{sits_mlp}}; +#' (e) 1D CNN: \code{\link[sits]{sits_tempcnn}}; +#' (f) deep residual networks: \code{\link[sits]{sits_resnet}}; +#' (g) self-attention encoders: \code{\link[sits]{sits_lighttae}}. #' #' @param data Data cube (tibble of class "raster_cube") #' @param ml_model R model trained by \code{\link[sits]{sits_train}} @@ -40,7 +35,7 @@ #' (integer, min = 1, max = 16384). #' @param multicores Number of cores to be used for classification #' (integer, min = 1, max = 2048). -#' @param gpu_memory Memory available in GPU (default = NULL) +#' @param gpu_memory Memory available in GPU in GB (default = 16) #' @param n_sam_pol Number of time series per segment to be classified #' (integer, min = 10, max = 50). 
#' @param output_dir Valid directory for output file. @@ -56,30 +51,31 @@ #' (tibble of class "probs_cube"). #' #' @note -#' The "roi" parameter defines a region of interest. It can be +#' The \code{roi} parameter defines a region of interest. It can be #' an sf_object, a shapefile, or a bounding box vector with -#' named XY values ("xmin", "xmax", "ymin", "ymax") or -#' named lat/long values ("lon_min", "lat_min", "lon_max", "lat_max") +#' named XY values (\code{xmin}, \code{xmax}, \code{ymin}, \code{ymax}) or +#' named lat/long values (\code{lon_min}, \code{lon_max}, +#' \code{lat_min}, \code{lat_max}) #' -#' The "filter_fn" parameter specifies a smoothing filter to be applied to -#' time series for reducing noise. Currently, options include -#' Savitzky-Golay (see \code{\link[sits]{sits_sgolay}}) and Whittaker -#' (see \code{\link[sits]{sits_whittaker}}). +#' Parameter \code{filter_fn} specifies a smoothing filter +#' to be applied to each time series for reducing noise. Currently, options +#' are Savitzky-Golay (see \code{\link[sits]{sits_sgolay}}) and Whittaker +#' (see \code{\link[sits]{sits_whittaker}}) filters. #' -#' The "memsize" and "multicores" parameters are used for multiprocessing. -#' The "multicores" parameter defines the number of cores used for -#' processing. The "memsize" parameter controls the amount of memory -#' available for classification. We recommend using a 4:1 relation between -#' "memsize" and "multicores". +#' Parameter \code{memsize} controls the amount of memory available +#' for classification, while \code{multicores} defines the number of cores +#' used for processing. We recommend using as much memory as possible. +#' +#' When using a GPU for deep learning, \code{gpu_memory} indicates the +#' memory available in the graphics card. 
#' #' For classifying vector data cubes created by -#' \code{\link[sits]{sits_segment}}, two parameters can be used: -#' \code{n_sam_pol}, which is the number of time series to be classified -#' per segment. +#' \code{\link[sits]{sits_segment}}, +#' \code{n_sam_pol} controls the number of time series to be +#' classified per segment. #' -#' @note -#' Please refer to the sits documentation available in -#' for detailed examples. +#' Please refer to the sits documentation available in +#' for detailed examples. #' @examples #' if (sits_run_examples()) { #' # Example of classification of a time series #' @@ -165,7 +161,7 @@ sits_classify.sits <- function(data, ..., filter_fn = NULL, multicores = 2L, - gpu_memory = NULL, + gpu_memory = 16, progress = TRUE) { # Pre-conditions data <- .check_samples_ts(data) @@ -197,7 +193,7 @@ sits_classify.raster_cube <- function(data, end_date = NULL, memsize = 8L, multicores = 2L, - gpu_memory = NULL, + gpu_memory = 16, output_dir, version = "v1", verbose = FALSE, @@ -350,7 +346,7 @@ sits_classify.segs_cube <- function(data, end_date = NULL, memsize = 8L, multicores = 2L, - gpu_memory = NULL, + gpu_memory = 16, output_dir, version = "v1", n_sam_pol = 40, diff --git a/R/sits_cluster.R b/R/sits_cluster.R index 2839d59eb..61998506f 100644 --- a/R/sits_cluster.R +++ b/R/sits_cluster.R @@ -6,18 +6,20 @@ #' sits. They provide support from creating a dendrogram and using it for #' cleaning samples. #' -#' \code{sits_cluster_dendro()} takes a tibble containing time series and +#' \code{\link[sits]{sits_cluster_dendro}()} takes a tibble with time series and #' produces a sits tibble with an added "cluster" column. The function first #' calculates a dendrogram and obtains a validity index for best clustering #' using the adjusted Rand Index. After cutting the dendrogram using the chosen #' validity index, it assigns a cluster to each sample. 
#' -#' \code{sits_cluster_frequency()} computes the contingency table between labels +#' \code{\link[sits]{sits_cluster_frequency}()} computes the contingency +#' table between labels #' and clusters and produces a matrix. -#' It needs as input a tibble produced by \code{sits_cluster_dendro()}. +#' Its input is a tibble produced by \code{\link[sits]{sits_cluster_dendro}()}. #' -#' \code{sits_cluster_clean()} takes a tibble with time series -#' that has an additional `cluster` produced by \code{sits_cluster_dendro()} +#' \code{\link[sits]{sits_cluster_clean}()} takes a tibble with time series +#' that has an additional `cluster` produced by +#' \code{\link[sits]{sits_cluster_dendro}()} #' and removes labels that are minority in each cluster. #' #' @references "dtwclust" package (https://CRAN.R-project.org/package=dtwclust) @@ -155,7 +157,7 @@ sits_cluster_dendro.default <- function(samples, ...) { #' @author Rolf Simoes, \email{rolf.simoes@@inpe.br} #' @param samples Tibble with input set of time series with additional #' cluster information produced -#' by \code{sits::sits_cluster_dendro}. +#' by \code{\link[sits]{sits_cluster_dendro}}. #' @return A matrix containing frequencies #' of labels in clusters. #' @examples @@ -185,12 +187,13 @@ sits_cluster_frequency <- function(samples) { #' @name sits_cluster_clean #' @author Rolf Simoes, \email{rolf.simoes@@inpe.br} #' @description Takes a tibble with time series -#' that has an additional `cluster` produced by \code{sits_cluster_dendro()} +#' that has an additional `cluster` produced by +#' \code{\link[sits]{sits_cluster_dendro}()} #' and removes labels that are minority in each cluster. 
#' #' @param samples Tibble with set of time series with additional #' cluster information produced -#' by \code{sits::sits_cluster_dendro()} (class "sits") +#' by \code{\link[sits]{sits_cluster_dendro}()} #' @return Tibble with time series (class "sits") #' @examples #' if (sits_run_examples()) { diff --git a/R/sits_combine_predictions.R b/R/sits_combine_predictions.R index 3780c76f4..4cad9497c 100644 --- a/R/sits_combine_predictions.R +++ b/R/sits_combine_predictions.R @@ -29,10 +29,6 @@ #' The supported types of ensemble predictors are 'average' and #' 'uncertainty'. #' -#' @note -#' Please refer to the sits documentation available in -#' for detailed examples. -#' #' @examples #' if (sits_run_examples()) { #' # create a data cube from local files diff --git a/R/sits_config.R b/R/sits_config.R index a4d17de53..eb073419e 100644 --- a/R/sits_config.R +++ b/R/sits_config.R @@ -21,7 +21,7 @@ #' \code{SITS_CONFIG_USER_FILE} or as parameter to this function. #' #' To see the key entries and contents of the current configuration values, -#' use \code{sits_config_show()}. +#' use \code{\link[sits]{sits_config_show}()}. #' #' @return Called for side effects #' diff --git a/R/sits_cube.R b/R/sits_cube.R index 2c5b274d0..d3b5096b6 100755 --- a/R/sits_cube.R +++ b/R/sits_cube.R @@ -4,20 +4,10 @@ #' @description Creates a data cube based on spatial and temporal restrictions #' in collections available in cloud services or local repositories. 
#' The following cloud providers are supported, based on the STAC protocol: -#' \itemize{ -#' \item{\code{"AWS"}: }{Amazon Web Services (AWS), -#' see https://registry.opendata.aws/ } -#' \item{\code{"BDC"}: }{Brazil Data Cube (BDC), -#' see http://brazildatacube.org/} -#' \item{\code{"DEAFRICA"}: }{Digital Earth Africa, -#' see https://www.digitalearthafrica.org/} -#' \item{\code{"MPC"}: }{Microsoft Planetary Computer, -#' see https://planetarycomputer.microsoft.com/} -#' \item{\code{"USGS"}:}{USGS LANDSAT collection, -#' see https://registry.opendata.aws/usgs-landsat/} -#' } -#' -#' Data cubes can also be created using local files (see details). +#' Amazon Web Services (AWS), Brazil Data Cube (BDC), +#' Digital Earth Africa (DEAFRICA), Microsoft Planetary Computer (MPC), +#' Nasa Harmonized Landsat/Sentinel (HLS), USGS Landsat (USGS), and +#' Swiss Data Cube (SDC). Data cubes can also be created using local files. #' #' #' @param source Data source (one of \code{"AWS"}, \code{"BDC"}, @@ -65,40 +55,41 @@ #' @param progress Logical: show a progress bar? #' @return A \code{tibble} describing the contents of a data cube. #' -#' @details +#' @note{ #' To create cubes from cloud providers, users need to inform: -#' \itemize{ -#' \item{\code{source}: }{One of \code{"AWS"}, \code{"BDC"}, \code{"DEAFRICA"}, -#' \code{"MPC"}, \code{"USGS"}, \code{"SDC"} and \code{"HLS"}}. -#' \item{\code{collection}: }{Use \code{sits_list_collections()} to see which -#' collections are supported.} -#' \item{\code{tiles}: }{A set of tiles defined according to the collection -#' tiling grid.} -#' \item{\code{roi}: }{Region of interest in WGS84 coordinates.} +#' \enumerate{ +#' \item \code{source}: One of "AWS", "BDC", "DEAFRICA", "HLS", "MPC", +#' "SDC" or "USGS"; +#' \item \code{collection}: Collection available in the cloud provider. 
+#' Use \code{sits_list_collections()} to see which +#' collections are supported; +#' \item \code{tiles}: A set of tiles defined according to the collection +#' tiling grid; +#' \item \code{roi}: Region of interest. Either +#' a named \code{vector} (\code{"lon_min"}, \code{"lat_min"}, +#' \code{"lon_max"}, \code{"lat_max"}) in WGS84, a \code{sfc} +#' or \code{sf} object from sf package in WGS84 projection. #' } #' Either \code{tiles} or \code{roi} must be informed. #' The parameters \code{bands}, \code{start_date}, and #' \code{end_date} are optional for cubes created from cloud providers. -#' #' The \code{roi} parameter allows a selection of an area of interest, -#' either using a named \code{vector} (\code{"lon_min"}, \code{"lat_min"}, -#' \code{"lon_max"}, \code{"lat_max"}) in WGS84, a \code{sfc} or \code{sf} -#' object from sf package in WGS84 projection. +#' #' GeoJSON geometries (RFC 7946) and shapefiles should be converted to #' \code{sf} objects before being used to define a region of interest. #' This parameter does not crop a region; it only selects images that #' intersect the \code{roi}. #' #' To create a cube from local files, users need to inform: -#' \itemize{ -#' \item{\code{source}:} {Provider from where the data has been -#' downloaded (e.g, "BDC", "MPC").} -#' \item{\code{collection}:}{Collection where the data has been extracted from. -#' (e.g., "SENTINEL-2-L2A" for the Sentinel-2 MPC collection level 2A).} -#' \item{\code{data_dir}: }{Local directory where images are stored.} -#' \item{\code{parse_info}: }{Parsing information for files (see below). -#' Default is \code{c("X1", "X2", "tile", "band", "date")}.} -#' \item{\code{delim}: }{Delimiter character for parsing files (see below). -#' Default is \code{"_"}.} +#' \enumerate{ +#' \item \code{source}: Provider from where the data has been downloaded +#' (e.g, "BDC"); +#' \item \code{collection}: Collection where the data has been extracted from. 
+#' (e.g., "SENTINEL-2-L2A" for the Sentinel-2 MPC collection level 2A); +#' \item \code{data_dir}: Local directory where images are stored. +#' \item \code{parse_info}: Parsing information for files. +#' Default is \code{c("X1", "X2", "tile", "band", "date")}. +#' \item \code{delim}: Delimiter character for parsing files. +#' Default is \code{"_"}. #' } #' #' To create a cube from local files, all images should have @@ -115,42 +106,44 @@ #' #' It is also possible to create result cubes for these are local files #' produced by classification or post-classification algorithms. In -#' this case, there are more parameters that are required (see below) and the -#' parameter \code{parse_info} is specified differently. +#' this case, more parameters that are required (see below). The +#' parameter \code{parse_info} is specified differently, as follows: #' -#' \itemize{ -#' \item{\code{band}: }{The band name is associated to the type of result. Use +#' \enumerate{ +#' \item \code{band}: Band name associated to the type of result. Use #' \code{"probs"}, for probability cubes produced by \code{sits_classify()}; #' \code{"bayes"}, for smoothed cubes produced by \code{sits_smooth()}; -#' \code{"entropy"} when using \code{sits_uncertainty()}, or \code{"class"} -#' for cubes produced by \code{sits_label_classification()}.} -#' \item{\code{labels}: }{Labels associated to the classification results.} -#' \item{\code{parse_info}: }{File name parsing information +#' \code{"segments"}, for vector cubes produced by \code{sits_segment()}; +#' \code{"entropy"} when using \code{sits_uncertainty()}, and \code{"class"} +#' for cubes produced by \code{sits_label_classification()}; +#' \item \code{labels}: Labels associated to the classification results; +#' \item \code{parse_info}: File name parsing information #' to deduce the values of "tile", "start_date", "end_date" from #' the file name. Default is c("X1", "X2", "tile", "start_date", #' "end_date", "band"). 
Unlike non-classified image files, #' cubes with results have both -#' "start_date" and "end_date".} +#' "start_date" and "end_date". #' } -#' @note In MPC, sits can access are two open data collections: +#' +#' In MPC, sits can access are two open data collections: #' \code{"SENTINEL-2-L2A"} for Sentinel-2/2A images, and #' \code{"LANDSAT-C2-L2"} for the Landsat-4/5/7/8/9 collection. #' (requester-pays) and \code{"SENTINEL-S2-L2A-COGS"} (open data). #' -#' @note Sentinel-2/2A level 2A files in MPC are organized by sensor +#' Sentinel-2/2A level 2A files in MPC are organized by sensor #' resolution. The bands in 10m resolution are \code{"B02"}, \code{"B03"}, #' \code{"B04"}, and \code{"B08"}. The 20m bands are \code{"B05"}, #' \code{"B06"}, \code{"B07"}, \code{"B8A"}, \code{"B11"}, and \code{"B12"}. #' Bands \code{"B01"} and \code{"B09"} are available at 60m resolution. #' The \code{"CLOUD"} band is also available. #' -#' @note All Landsat-4/5/7/8/9 images in MPC have bands with 30 meter +#' All Landsat-4/5/7/8/9 images in MPC have bands with 30 meter #' resolution. To account for differences between the different sensors, #' Landsat bands in this collection have been renamed \code{"BLUE"}, #' \code{"GREEN"}, \code{"RED"}, \code{"NIR08"}, \code{"SWIR16"} #' and \code{"SWIR22"}. The \code{"CLOUD"} band is also available. #' -#' @note In AWS, there are two types of collections: open data and +#' In AWS, there are two types of collections: open data and #' requester-pays. Currently, \code{sits} supports collection #' \code{"SENTINEL-2-L2A"} (open data) and LANDSAT-C2-L2 (requester-pays). #' There is no need to provide AWS credentials to access open data @@ -162,25 +155,25 @@ #' AWS_SECRET_ACCESS_KEY = #' )} #' -#' @note Sentinel-2/2A level 2A files in AWS are organized by sensor +#' Sentinel-2/2A level 2A files in AWS are organized by sensor #' resolution. The AWS bands in 10m resolution are \code{"B02"}, \code{"B03"}, #' \code{"B04"}, and \code{"B08"}. 
The 20m bands are \code{"B05"}, #' \code{"B06"}, \code{"B07"}, \code{"B8A"}, \code{"B11"}, and \code{"B12"}. #' Bands \code{"B01"} and \code{"B09"} are available at 60m resolution. #' -#' @note For DEAFRICA, sits currently works with collections \code{"S2_L2A"} +#' For DEAFRICA, sits currently works with collections \code{"S2_L2A"} #' for Sentinel-2 level 2A and \code{"LS8_SR"} for Landsat-8 ARD collection. #' (open data). These collections are located in Africa #' (Capetown) for faster access to African users. No payment for access #' is required. #' -#' @note For USGS, sits currently works with collection +#' For USGS, sits currently works with collection #' \code{"LANDSAT-C2L2-SR"}, which corresponds to Landsat #' Collection 2 Level-2 surface reflectance data, covering #' Landsat-8 dataset. This collection is requester-pays and #' requires payment for accessing. #' -#' @note All BDC collections are regularized. +#' All BDC collections are regularized. #' BDC users need to provide their credentials using environment #' variables. To create your credentials, please see #' . @@ -191,7 +184,7 @@ #' Sys.setenv( #' BDC_ACCESS_KEY = #' )} -#' +#' } #' @examples #' if (sits_run_examples()) { #' # --- Access to the Brazil Data Cube diff --git a/R/sits_cube_copy.R b/R/sits_cube_copy.R index 12d1c74ab..015c1a164 100644 --- a/R/sits_cube_copy.R +++ b/R/sits_cube_copy.R @@ -1,4 +1,7 @@ -#' Copy the images of a cube to a local directory +#' +#' @title Copy the images of a cube to a local directory +#' @name sits_cube_copy +#' @description #' #' This function downloads the images of a cube in parallel. #' A region of interest (\code{roi}) can be provided to crop diff --git a/R/sits_filters.R b/R/sits_filters.R index f9abf625a..4ee18b191 100644 --- a/R/sits_filters.R +++ b/R/sits_filters.R @@ -6,7 +6,7 @@ #' #' @description #' Applies a filter to all bands, using a filter function -#' such as `sits_whittaker()` or `sits_sgolay()`. +#' such as sits_whittaker() or sits_sgolay(). 
#' @examples #' if (sits_run_examples()) { #' # Retrieve a time series with values of NDVI diff --git a/R/sits_get_data.R b/R/sits_get_data.R index bfba87b0e..825743acc 100644 --- a/R/sits_get_data.R +++ b/R/sits_get_data.R @@ -9,19 +9,17 @@ #' #' @note #' There are four ways of specifying data to be retrieved using the -#' "samples" parameter: -#' \itemize{ -#' \item{CSV file: }{Provide a CSV file with columns -#' "longitude", "latitude", "start_date", "end_date" and "label" for -#' each sample} -#' \item{SHP file: }{Provide a shapefile in POINT or POLYGON geometry +#' \code{samples} parameter: +#' (a) CSV file: a CSV file with columns +#' \code{longitude}, \code{latitude}, +#' \code{start_date}, \code{end_date} and \code{label} for each sample; +#' (b) SHP file: a shapefile in POINT or POLYGON geometry #' containing the location of the samples and an attribute to be -#' used as label. Also, provide start and end date for the time series.} -#' \item{sits object: }{A sits tibble.} -#' \item{sf object: }{An "sf" object with POINT or POLYGON geometry.} -#' \item{data.frame: }{A data.frame with with mandatory columns -#' "longitude", "latitude".} -#' } +#' used as label. Also, provide start and end date for the time series; +#' (c) sits object: A sits tibble; +#' (d) sf object: An \code{\link[sf]{sf}} object with POINT or POLYGON geometry; +#' (e) data.frame: A data.frame with mandatory columns +#' \code{longitude} and \code{latitude}. # #' @param cube Data cube from where data is to be retrieved. #' (tibble of class "raster_cube"). @@ -56,9 +54,6 @@ #' @return A tibble of class "sits" with set of time series #' . #' -#' @note -#' Please refer to the sits documentation available in -#' for detailed examples. 
#' #' @examples #' if (sits_run_examples()) { diff --git a/R/sits_label_classification.R b/R/sits_label_classification.R index 7241c8214..1e491274f 100644 --- a/R/sits_label_classification.R +++ b/R/sits_label_classification.R @@ -2,6 +2,7 @@ #' #' @name sits_label_classification #' @author Rolf Simoes, \email{rolf.simoes@@inpe.br} +#' @author Felipe Souza, \email{felipe.souza@@inpe.br} #' #' @description Takes a set of classified raster layers with probabilities, #' and label them based on the maximum probability for each pixel. diff --git a/R/sits_lighttae.R b/R/sits_lighttae.R index 3ad9dc095..dd5245430 100644 --- a/R/sits_lighttae.R +++ b/R/sits_lighttae.R @@ -1,9 +1,9 @@ #' @title Train a model using Lightweight Temporal Self-Attention Encoder #' @name sits_lighttae #' -#' @author Charlotte Pelletier, \email{charlotte.pelletier@@univ-ubs.fr} #' @author Gilberto Camara, \email{gilberto.camara@@inpe.br} #' @author Rolf Simoes, \email{rolf.simoes@@inpe.br} +#' @author Charlotte Pelletier, \email{charlotte.pelletier@@univ-ubs.fr} #' #' @description Implementation of Light Temporal Attention Encoder (L-TAE) #' for satellite image time series @@ -50,10 +50,10 @@ #' to be used as validation data. #' @param optimizer Optimizer function to be used. #' @param opt_hparams Hyperparameters for optimizer: -#' lr : Learning rate of the optimizer -#' eps: Term added to the denominator +#' \code{lr} : Learning rate of the optimizer +#' \code{eps}: Term added to the denominator #' to improve numerical stability. -#' weight_decay: L2 regularization +#' \code{weight_decay}: L2 regularization rate. #' @param lr_decay_epochs Number of epochs to reduce learning rate. #' @param lr_decay_rate Decay factor for reducing learning rate. #' @param patience Number of epochs without improvements until @@ -64,9 +64,6 @@ #' #' @return A fitted model to be used for classification of data cubes. #' -#' @note -#' Please refer to the sits documentation available in -#' for detailed examples. 
#' #' @examples #' if (sits_run_examples()) { diff --git a/R/sits_machine_learning.R b/R/sits_machine_learning.R index f3ec0277a..28c1068a9 100644 --- a/R/sits_machine_learning.R +++ b/R/sits_machine_learning.R @@ -6,7 +6,7 @@ #' @author Gilberto Camara, \email{gilberto.camara@@inpe.br} #' #' @description Use Random Forest algorithm to classify samples. -#' This function is a front-end to the "randomForest" package. +#' This function is a front-end to the \code{randomForest} package. #' Please refer to the documentation in that package for more details. #' #' @param samples Time series with the training samples @@ -23,9 +23,6 @@ #' to `randomForest::randomForest` function. #' @return Model fitted to input data #' (to be passed to \code{\link[sits]{sits_classify}}). -#' @note -#' Please refer to the sits documentation available in -#' for detailed examples. #' #' @examples #' if (sits_run_examples()) { diff --git a/R/sits_merge.R b/R/sits_merge.R index 9efe2fef8..e23de8cba 100644 --- a/R/sits_merge.R +++ b/R/sits_merge.R @@ -1,7 +1,5 @@ #' @title Merge two data sets (time series or cubes) -#' #' @name sits_merge -#' #' @author Gilberto Camara, \email{gilberto.camara@@inpe.br} #' #' @description To merge two series, we consider that they contain different diff --git a/R/sits_model_export.R b/R/sits_model_export.R index 44e25ab06..306057c65 100644 --- a/R/sits_model_export.R +++ b/R/sits_model_export.R @@ -4,7 +4,7 @@ #' #' @description Given a trained machine learning or deep learning model, #' exports the model as an object for further exploration outside the -#' "sits" package +#' \code{sits} package. #' #' @param ml_model A trained machine learning model #' diff --git a/R/sits_mosaic.R b/R/sits_mosaic.R index 6b8614033..4702fa480 100644 --- a/R/sits_mosaic.R +++ b/R/sits_mosaic.R @@ -30,8 +30,9 @@ #' @note #' The "roi" parameter defines a region of interest. 
It can be #' an sf_object, a shapefile, or a bounding box vector with -#' named XY values ("xmin", "xmax", "ymin", "ymax") or -#' named lat/long values ("lon_min", "lat_min", "lon_max", "lat_max") +#' named XY values (\code{xmin}, \code{xmax}, \code{ymin}, \code{ymax}) or +#' named lat/long values (\code{lon_min}, \code{lon_max}, +#' \code{lat_min}, \code{lat_max}). #' #' The user should specify the crs of the mosaic since in many cases the #' input images will be in different coordinate systems. For example, diff --git a/R/sits_plot.R b/R/sits_plot.R index a990e3d23..74fde1d67 100644 --- a/R/sits_plot.R +++ b/R/sits_plot.R @@ -4,32 +4,23 @@ #' @author Gilberto Camara, \email{gilberto.camara@@inpe.br} #' @description This is a generic function. Parameters depend on the specific #' type of input. See each function description for the -#' required parameters: +#' required parameters. #' \itemize{ -#' \item{sits tibble: } {see \code{\link{plot.sits}}} -#' \item{patterns: } {see \code{\link{plot.patterns}}} -#' \item{SOM map: } {see \code{\link{plot.som_map}}} -#' \item{SOM evaluate cluster: } {see \code{\link{plot.som_evaluate_cluster}}} -#' \item{classified time series: } {see \code{\link{plot.predicted}}} -#' \item{raster cube: } {see \code{\link{plot.raster_cube}}} -#' \item{random forest model:} {see \code{\link{plot.rfor_model}}} -#' \item{xgboost model:} {see \code{\link{plot.xgb_model}}} -#' \item{torch ML model: } {see \code{\link{plot.torch_model}}} -#' \item{classification probabilities: }{see \code{\link{plot.probs_cube}}} -#' \item{model uncertainty: } {see \code{\link{plot.uncertainty_cube}}} -#' \item{classified image: } {see \code{\link{plot.class_cube}}} +#' \item sits tibble: see \code{\link{plot.sits}} +#' \item patterns: see \code{\link{plot.patterns}} +#' \item SOM map: see \code{\link{plot.som_map}} +#' \item SOM evaluate cluster: see \code{\link{plot.som_evaluate_cluster}} +#' \item classified time series: see \code{\link{plot.predicted}} +#' 
\item raster cube: see \code{\link{plot.raster_cube}} +#' \item vector cube: see \code{\link{plot.vector_cube}} +#' \item random forest model: see \code{\link{plot.rfor_model}} +#' \item xgboost model: see \code{\link{plot.xgb_model}} +#' \item torch ML model: see \code{\link{plot.torch_model}} +#' \item classification probabilities: see \code{\link{plot.probs_cube}} +#' \item model uncertainty: see \code{\link{plot.uncertainty_cube}} +#' \item classified cube: see \code{\link{plot.class_cube}} +#' \item classified vector cube: see \code{\link{plot.class_vector_cube}} #' } -#' In the case of time series, the plot function produces different plots -#' based on the input data: -#' \itemize{ -#' \item{"all years": }{Plot all samples from the same location together} -#' \item{"together": }{Plot all samples of the same band and label together} -#' } -#' The plot function makes an educated guess of what plot is required -#' based on the input data. If the input data has less than 30 samples or -#' the \code{together} parameter is FALSE, it will plot only one randomly -#' chosen sample. If the \code{together} parameter is set to TRUE or -#' there are more than 30 samples, it will plot all samples. #' #' @param x Object of class "sits". #' @param y Ignored. @@ -1303,12 +1294,10 @@ plot.som_evaluate_cluster <- function(x, y, ..., #' @title Plot a SOM map #' @name plot.som_map #' @author Gilberto Camara, \email{gilberto.camara@@inpe.br} -#' @description plots a SOM map generated by "sits_som_map" -#' The plot function produces different plots based on the input data: -#' \itemize{ -#' \item{"codes": }{Plot the vector weight for in each neuron.} -#' \item{"mapping": }{Shows where samples are mapped.} -#' } +#' @description plots a SOM map generated by "sits_som_map". +#' The plot function produces different plots based on the input data. +#' If type is "codes", plots the vector weight for each neuron. +#' If type is "mapping", shows where samples are mapped. 
#'
#' @param x Object of class "som_map".
#' @param y Ignored.
diff --git a/R/sits_predictors.R b/R/sits_predictors.R
index e7d3bcae9..386a00f95 100644
--- a/R/sits_predictors.R
+++ b/R/sits_predictors.R
@@ -4,7 +4,7 @@
#' @description Predictors are X-Y values required for machine learning
#' algorithms, organized as a data table where each row corresponds
#' to a training sample. The first two columns of the predictors table
-#' are categorical ("label_id" and "label"). The other columns are
+#' are categorical (\code{label_id} and \code{label}). The other columns are
#' the values of each band and time, organized first by band and then by time.
#'
#' @param samples Time series in sits format (tibble of class "sits")
diff --git a/R/sits_segmentation.R b/R/sits_segmentation.R
index 7e99ad9ed..dbed32ded 100644
--- a/R/sits_segmentation.R
+++ b/R/sits_segmentation.R
@@ -11,18 +11,20 @@
#' "seg_fn" to each tile.
#'
#' Segmentation uses the following steps:
-#' \itemize{
-#' \item{create a regular data cube}
-#' \item{use \code{\link[sits]{sits_segment}} to obtain a vector data cube
-#' with polygons that define the boundary of the segments.}
-#' \item{use \code{\link[sits]{sits_classify}} to classify the
-#' time series associated to the segments, and obtain the probability
-#' for each class.}
-#' \item{use \code{\link[sits]{sits_label_classification}} to label the
-#' vector probability cube.}
-#' \item{use \code{\link[sits]{plot}} or \code{\link[sits]{sits_view}}
-#' to display the results.}
-#' }
+#' \enumerate{
+#' \item Create a regular data cube with \code{\link[sits]{sits_cube}} and
+#' \code{\link[sits]{sits_regularize}};
+#' \item Run \code{\link[sits]{sits_segment}} to obtain a vector data cube
+#' with polygons that define the boundary of the segments;
+#' \item Classify the time series associated to the segments
+#' with \code{\link[sits]{sits_classify}}, to obtain
+#' a vector probability cube;
+#' \item Use 
\code{\link[sits]{sits_label_classification}} to label the +#' vector probability cube; +#' \item Display the results with \code{\link[sits]{plot}} or +#' \code{\link[sits]{sits_view}}. +#'} +#' #' #' @param cube Regular data cube #' @param seg_fn Function to apply the segmentation @@ -157,7 +159,7 @@ sits_segment <- function(cube, #' @author Felipe Carvalho, \email{felipe.carvalho@@inpe.br} #' #' @description -#' Apply a segmentation on a data cube based on the "supercells" package. +#' Apply a segmentation on a data cube based on the \code{supercells} package. #' This is an adaptation and extension to remote sensing data of the #' SLIC superpixels algorithm proposed by Achanta et al. (2012). #' See references for more details. @@ -167,11 +169,11 @@ sits_segment <- function(cube, #' supercells' centers. #' @param compactness A compactness value. Larger values cause clusters to #' be more compact/even (square). -#' @param dist_fun Distance function. Currently implemented: "euclidean", -#' "jsd", "dtw", and any distance function from the +#' @param dist_fun Distance function. Currently implemented: +#' \code{euclidean, jsd, dtw}, +#' and any distance function from the #' \code{philentropy} package. #' See \code{philentropy::getDistMethods()}. -#' Default: "dtw" #' @param avg_fun Averaging function to calculate the values #' of the supercells' centers. #' Accepts any fitting R function diff --git a/R/sits_select.R b/R/sits_select.R index ebdef5d52..d01d5e2ce 100644 --- a/R/sits_select.R +++ b/R/sits_select.R @@ -2,7 +2,7 @@ #' @name sits_select #' @author Rolf Simoes, \email{rolf.simoes@@inpe.br} #' -#' @param data Tibble (class "sits" or class "raster_cube"). +#' @param data Tibble with time series or data cube. #' @param bands Character vector with the names of the bands. #' @param start_date Date in YYYY-MM-DD format: start date to be filtered. #' @param end_date Date in YYYY-MM-DD format: end date to be filtered. 
@@ -13,7 +13,7 @@ #' @description Filter only the selected bands and dates #' from a set of time series or froam a data cube. #' -#' @return Tibble of class "sits" or class "raster_cube". +#' @return Tibble with time series or data cube. #' #' @examples #' # Retrieve a set of time series with 2 classes diff --git a/R/sits_summary.R b/R/sits_summary.R index 52f910dc4..95525782b 100644 --- a/R/sits_summary.R +++ b/R/sits_summary.R @@ -2,6 +2,7 @@ #' @method summary sits #' @name summary.sits #' @author Gilberto Camara, \email{gilberto.camara@@inpe.br} +#' @author Felipe Souza, \email{felipe.souza@@inpe.br} #' @description This is a generic function. Parameters depend on the specific #' type of input. #' @@ -112,6 +113,7 @@ summary.sits_area_accuracy <- function(object, ...) { #' @method summary raster_cube #' @name summary.raster_cube #' @author Gilberto Camara, \email{gilberto.camara@@inpe.br} +#' @author Felipe Souza, \email{felipe.souza@@inpe.br} #' @description This is a generic function. Parameters depend on the specific #' type of input. #' @@ -190,6 +192,7 @@ summary.raster_cube <- function(object, ..., tile = NULL, date = NULL) { return(invisible(sum)) } #' @title Summary of a derived cube +#' @author Felipe Souza, \email{felipe.souza@@inpe.br} #' @noRd #' @param object data cube #' @param ... Further specifications for \link{summary}. diff --git a/R/sits_tuning.R b/R/sits_tuning.R index fbf085d3a..1cb1e1422 100644 --- a/R/sits_tuning.R +++ b/R/sits_tuning.R @@ -12,10 +12,8 @@ #' Instead of performing an exhaustive test of all parameter combinations, #' it selecting them randomly. Validation is done using an independent set #' of samples or by a validation split. The function returns the -#' best hyper-parameters in a list. -#' -#' hyper-parameters passed to \code{params} parameter should be passed -#' by calling \code{sits_tuning_hparams()} function. +#' best hyper-parameters in a list. 
Hyper-parameters passed to \code{params} +#' parameter should be passed by calling \code{sits_tuning_hparams()}. #' #' @references #' James Bergstra, Yoshua Bengio, @@ -170,8 +168,8 @@ sits_tuning <- function(samples, #' by \code{sits_tuning()} function search randomly the best parameter #' combination. #' -#' User should pass the possible values for hyper-parameters as -#' constant or by calling the following random functions: +#' Users should pass the possible values for hyper-parameters as +#' constants or by calling the following random functions: #' #' \itemize{ #' \item \code{uniform(min = 0, max = 1, n = 1)}: returns random numbers diff --git a/man/plot.Rd b/man/plot.Rd index 4cf6cf7e9..fecf1d92c 100644 --- a/man/plot.Rd +++ b/man/plot.Rd @@ -25,32 +25,23 @@ A series of plot objects produced by ggplot2 showing all \description{ This is a generic function. Parameters depend on the specific type of input. See each function description for the -required parameters: +required parameters. 
\itemize{ - \item{sits tibble: } {see \code{\link{plot.sits}}} - \item{patterns: } {see \code{\link{plot.patterns}}} - \item{SOM map: } {see \code{\link{plot.som_map}}} - \item{SOM evaluate cluster: } {see \code{\link{plot.som_evaluate_cluster}}} - \item{classified time series: } {see \code{\link{plot.predicted}}} - \item{raster cube: } {see \code{\link{plot.raster_cube}}} - \item{random forest model:} {see \code{\link{plot.rfor_model}}} - \item{xgboost model:} {see \code{\link{plot.xgb_model}}} - \item{torch ML model: } {see \code{\link{plot.torch_model}}} - \item{classification probabilities: }{see \code{\link{plot.probs_cube}}} - \item{model uncertainty: } {see \code{\link{plot.uncertainty_cube}}} - \item{classified image: } {see \code{\link{plot.class_cube}}} +\item sits tibble: see \code{\link{plot.sits}} +\item patterns: see \code{\link{plot.patterns}} +\item SOM map: see \code{\link{plot.som_map}} +\item SOM evaluate cluster: see \code{\link{plot.som_evaluate_cluster}} +\item classified time series: see \code{\link{plot.predicted}} +\item raster cube: see \code{\link{plot.raster_cube}} +\item vector cube: see \code{\link{plot.vector_cube}} +\item random forest model: see \code{\link{plot.rfor_model}} +\item xgboost model: see \code{\link{plot.xgb_model}} +\item torch ML model: see \code{\link{plot.torch_model}} +\item classification probabilities: see \code{\link{plot.probs_cube}} +\item model uncertainty: see \code{\link{plot.uncertainty_cube}} +\item classified cube: see \code{\link{plot.class_cube}} +\item classified vector cube: see \code{\link{plot.class_vector_cube}} } -In the case of time series, the plot function produces different plots -based on the input data: -\itemize{ - \item{"all years": }{Plot all samples from the same location together} - \item{"together": }{Plot all samples of the same band and label together} -} -The plot function makes an educated guess of what plot is required -based on the input data. 
If the input data has less than 30 samples or
-the \code{together} parameter is FALSE, it will plot only one randomly
-chosen sample. If the \code{together} parameter is set to TRUE or
-there are more than 30 samples, it will plot all samples.
}
\examples{
if (sits_run_examples()) {
diff --git a/man/plot.som_map.Rd b/man/plot.som_map.Rd
index e16cb6644..298c80377 100644
--- a/man/plot.som_map.Rd
+++ b/man/plot.som_map.Rd
@@ -22,12 +22,10 @@ Called for side effects.
}
\description{
-plots a SOM map generated by "sits_som_map"
-The plot function produces different plots based on the input data:
-\itemize{
- \item{"codes": }{Plot the vector weight for in each neuron.}
- \item{"mapping": }{Shows where samples are mapped.}
-}
+plots a SOM map generated by "sits_som_map".
+The plot function produces different plots based on the input data.
+If type is "codes", plots the vector weight of each neuron.
+If type is "mapping", shows where samples are mapped.
}
\note{
Please refer to the sits documentation available in
diff --git a/man/sits_accuracy.Rd b/man/sits_accuracy.Rd
index 712106f10..c45bdb6d6 100644
--- a/man/sits_accuracy.Rd
+++ b/man/sits_accuracy.Rd
@@ -42,7 +42,7 @@ A confusion matrix assessment produced by the caret package.
\description{
This function calculates the accuracy of the classification result.
For a set of time series, it creates a confusion matrix and then
-calculates the resulting statistics using the R package "caret". The time
+calculates the resulting statistics using package \code{caret}. The time
series needs to be classified using \code{\link[sits]{sits_classify}}.
Classified images are generated using \code{\link[sits]{sits_classify}} diff --git a/man/sits_classify.Rd b/man/sits_classify.Rd index 08307dcb4..4c5d82360 100644 --- a/man/sits_classify.Rd +++ b/man/sits_classify.Rd @@ -25,7 +25,7 @@ sits_classify( ..., filter_fn = NULL, multicores = 2L, - gpu_memory = NULL, + gpu_memory = 16, progress = TRUE ) @@ -39,7 +39,7 @@ sits_classify( end_date = NULL, memsize = 8L, multicores = 2L, - gpu_memory = NULL, + gpu_memory = 16, output_dir, version = "v1", verbose = FALSE, @@ -59,7 +59,7 @@ sits_classify( end_date = NULL, memsize = 8L, multicores = 2L, - gpu_memory = NULL, + gpu_memory = 16, output_dir, version = "v1", n_sam_pol = 40, @@ -85,7 +85,7 @@ sits_classify( \item{progress}{Logical: Show progress bar?} -\item{gpu_memory}{Memory available in GPU (default = NULL)} +\item{gpu_memory}{Memory available in GPU in GB (default = 16)} \item{roi}{Region of interest (either an sf object, shapefile, or a numeric vector with named XY values @@ -124,40 +124,40 @@ This function classifies a set of time series or data cube given a trained model prediction model created by \code{\link[sits]{sits_train}}. 
SITS supports the following models:
-\itemize{
- \item{support vector machines: } {see \code{\link[sits]{sits_svm}}}
- \item{random forests: } {see \code{\link[sits]{sits_rfor}}}
- \item{extreme gradient boosting: } {see \code{\link[sits]{sits_xgboost}}}
- \item{multi-layer perceptrons: } {see \code{\link[sits]{sits_mlp}}}
- \item{1D CNN: } {see \code{\link[sits]{sits_tempcnn}}}
- \item{deep residual networks:}{see \code{\link[sits]{sits_resnet}}}
- \item{self-attention encoders:}{see \code{\link[sits]{sits_lighttae}}}
- }
+(a) support vector machines: \code{\link[sits]{sits_svm}};
+(b) random forests: \code{\link[sits]{sits_rfor}};
+(c) extreme gradient boosting: \code{\link[sits]{sits_xgboost}};
+(d) multi-layer perceptrons: \code{\link[sits]{sits_mlp}};
+(e) 1D CNN: \code{\link[sits]{sits_tempcnn}};
+(f) deep residual networks: \code{\link[sits]{sits_resnet}};
+(g) self-attention encoders: \code{\link[sits]{sits_lighttae}}.
}
\note{
-The "roi" parameter defines a region of interest. It can be
+The \code{roi} parameter defines a region of interest. It can be
 an sf_object, a shapefile, or a bounding box vector with
- named XY values ("xmin", "xmax", "ymin", "ymax") or
- named lat/long values ("lon_min", "lat_min", "lon_max", "lat_max")
+ named XY values (\code{xmin}, \code{xmax}, \code{ymin}, \code{ymax}) or
+ named lat/long values (\code{lon_min}, \code{lon_max},
+ \code{lat_min}, \code{lat_max})
 
- The "filter_fn" parameter specifies a smoothing filter to be applied to
- time series for reducing noise. Currently, options include
- Savitzky-Golay (see \code{\link[sits]{sits_sgolay}}) and Whittaker
- (see \code{\link[sits]{sits_whittaker}}).
+ Parameter \code{filter_fn} specifies a smoothing filter
+ to be applied to each time series for reducing noise. Currently, options
+ are Savitzky-Golay (see \code{\link[sits]{sits_sgolay}}) and Whittaker
+ (see \code{\link[sits]{sits_whittaker}}) filters.
- The "memsize" and "multicores" parameters are used for multiprocessing.
- The "multicores" parameter defines the number of cores used for
- processing. The "memsize" parameter controls the amount of memory
- available for classification. We recommend using a 4:1 relation between
- "memsize" and "multicores".
+ Parameter \code{memsize} controls the amount of memory available
+ for classification, while \code{multicores} defines the number of cores
+ used for processing. We recommend using as much memory as possible.
+
+ When using a GPU for deep learning, \code{gpu_memory} indicates the
+ memory available in the graphics card.
 
 For classifying vector data cubes created by
- \code{\link[sits]{sits_segment}}, two parameters can be used:
- \code{n_sam_pol}, which is the number of time series to be classified
- per segment.
+ \code{\link[sits]{sits_segment}},
+ \code{n_sam_pol} controls the number of time series to be
+ classified per segment.
 
-Please refer to the sits documentation available in
- for detailed examples.
+ Please refer to the sits documentation available in
+ for detailed examples.
}
\examples{
if (sits_run_examples()) {
diff --git a/man/sits_cluster_clean.Rd b/man/sits_cluster_clean.Rd
index 22b117caa..6129743c8 100644
--- a/man/sits_cluster_clean.Rd
+++ b/man/sits_cluster_clean.Rd
@@ -9,14 +9,15 @@ sits_cluster_clean(samples)
\arguments{
\item{samples}{Tibble with set of time series with additional
cluster information produced
-by \code{sits::sits_cluster_dendro()} (class "sits")}
+by \code{\link[sits]{sits_cluster_dendro}()}}
}
\value{
Tibble with time series (class "sits")
}
\description{
Takes a tibble with time series
-that has an additional `cluster` produced by \code{sits_cluster_dendro()}
+that has an additional `cluster` produced by
+\code{\link[sits]{sits_cluster_dendro}()}
and removes labels that are minority in each cluster.
}
\examples{
diff --git a/man/sits_cluster_dendro.Rd b/man/sits_cluster_dendro.Rd
index e7fc9c5ed..e65fc14d8 100644
--- a/man/sits_cluster_dendro.Rd
+++ b/man/sits_cluster_dendro.Rd
@@ -62,18 +62,20 @@ These functions support hierarchical agglomerative clustering in
sits. They provide support from creating a dendrogram and using it for
cleaning samples.
-\code{sits_cluster_dendro()} takes a tibble containing time series and
+\code{\link[sits]{sits_cluster_dendro}()} takes a tibble with time series and
produces a sits tibble with an added "cluster" column. The function
first calculates a dendrogram and obtains a validity index for best
clustering using the adjusted Rand Index. After cutting the dendrogram
using the chosen validity index, it assigns a cluster to each sample.
-\code{sits_cluster_frequency()} computes the contingency table between labels
+\code{\link[sits]{sits_cluster_frequency}()} computes the contingency
+table between labels
and clusters and produces a matrix.
-It needs as input a tibble produced by \code{sits_cluster_dendro()}.
+Its input is a tibble produced by \code{\link[sits]{sits_cluster_dendro}()}.
 
-\code{sits_cluster_clean()} takes a tibble with time series
-that has an additional `cluster` produced by \code{sits_cluster_dendro()}
+\code{\link[sits]{sits_cluster_clean}()} takes a tibble with time series
+that has an additional `cluster` produced by
+\code{\link[sits]{sits_cluster_dendro}()}
and removes labels that are minority in each cluster.
}
\note{
diff --git a/man/sits_cluster_frequency.Rd b/man/sits_cluster_frequency.Rd
index 18294eea1..a1db3c4a8 100644
--- a/man/sits_cluster_frequency.Rd
+++ b/man/sits_cluster_frequency.Rd
@@ -9,7 +9,7 @@ sits_cluster_frequency(samples)
\arguments{
\item{samples}{Tibble with input set of time series with additional
cluster information produced
-by \code{sits::sits_cluster_dendro}.}
+by \code{\link[sits]{sits_cluster_dendro}}.}
}
\value{
A matrix containing frequencies
diff --git a/man/sits_combine_predictions.Rd b/man/sits_combine_predictions.Rd
index 6628612b2..36657d898 100644
--- a/man/sits_combine_predictions.Rd
+++ b/man/sits_combine_predictions.Rd
@@ -77,10 +77,6 @@ to derive a value which is based on weights assigned to each model.
The supported types of ensemble predictors are 'average' and
'uncertainty'.
}
-\note{
-Please refer to the sits documentation available in
- for detailed examples.
-}
\examples{
if (sits_run_examples()) {
# create a data cube from local files
diff --git a/man/sits_confidence_sampling.Rd b/man/sits_confidence_sampling.Rd
index 42bc93507..531f0f48f 100644
--- a/man/sits_confidence_sampling.Rd
+++ b/man/sits_confidence_sampling.Rd
@@ -12,7 +12,9 @@ sits_confidence_sampling(
)
}
\arguments{
-\item{probs_cube}{A probability cube. See \code{sits_classify}.}
+\item{probs_cube}{A smoothed probability cube.
+See \code{\link[sits]{sits_classify}} and
+\code{\link[sits]{sits_smooth}}.}
\item{n}{Number of suggested points per class.}
@@ -35,16 +37,13 @@ location where the machine learning model has high confidence in
choosing this label compared to all others. The algorithm also considers
a minimum distance between new labels, to minimize spatial autocorrelation
effects.
+This function is best used in the following context:
+ 1. Select an initial set of samples.
+ 2. Train a machine learning model.
+ 3. Build a data cube and classify it using the model.
+ 4. Run a Bayesian smoothing in the resulting probability cube.
+ 5. 
Perform confidence sampling.
-This function is best used in the following context
-\itemize{
- \item{1. }{Select an initial set of samples.}
- \item{2. }{Train a machine learning model.}
- \item{3. }{Build a data cube and classify it using the model.}
- \item{4. }{Run a Bayesian smoothing in the resulting probability cube.}
- \item{5. }{Create an uncertainty cube.}
- \item{6. }{Perform confidence sampling.}
-}
The Bayesian smoothing procedure will reduce the classification outliers
and thus increase the likelihood that the resulting pixels with provide
good quality samples for each class.
diff --git a/man/sits_config.Rd b/man/sits_config.Rd
index 1d8d58ccc..f8044e6a3 100644
--- a/man/sits_config.Rd
+++ b/man/sits_config.Rd
@@ -30,7 +30,7 @@ location of their file in the environmental variable
\code{SITS_CONFIG_USER_FILE} or as parameter to this function.
To see the key entries and contents of the current configuration values,
-use \code{sits_config_show()}.
+use \code{\link[sits]{sits_config_show}()}.
}
\examples{
yaml_user_file <- system.file("extdata/config_user_example.yml",
diff --git a/man/sits_cube.Rd b/man/sits_cube.Rd
index 10b63ee8c..55099630d 100644
--- a/man/sits_cube.Rd
+++ b/man/sits_cube.Rd
@@ -108,55 +108,47 @@ A \code{tibble} describing the contents of a data cube.
Creates a data cube based on spatial and temporal restrictions
in collections available in cloud services or local repositories.
The following cloud providers are supported, based on the STAC protocol: -\itemize{ - \item{\code{"AWS"}: }{Amazon Web Services (AWS), - see https://registry.opendata.aws/ } - \item{\code{"BDC"}: }{Brazil Data Cube (BDC), - see http://brazildatacube.org/} - \item{\code{"DEAFRICA"}: }{Digital Earth Africa, - see https://www.digitalearthafrica.org/} - \item{\code{"MPC"}: }{Microsoft Planetary Computer, - see https://planetarycomputer.microsoft.com/} - \item{\code{"USGS"}:}{USGS LANDSAT collection, - see https://registry.opendata.aws/usgs-landsat/} - } - -Data cubes can also be created using local files (see details). +Amazon Web Services (AWS), Brazil Data Cube (BDC), +Digital Earth Africa (DEAFRICA), Microsoft Planetary Computer (MPC), +Nasa Harmonized Landsat/Sentinel (HLS), USGS Landsat (USGS), and +Swiss Data Cube (SDC). Data cubes can also be created using local files. } -\details{ +\note{ +{ To create cubes from cloud providers, users need to inform: -\itemize{ -\item{\code{source}: }{One of \code{"AWS"}, \code{"BDC"}, \code{"DEAFRICA"}, -\code{"MPC"}, \code{"USGS"}, \code{"SDC"} and \code{"HLS"}}. -\item{\code{collection}: }{Use \code{sits_list_collections()} to see which - collections are supported.} -\item{\code{tiles}: }{A set of tiles defined according to the collection - tiling grid.} -\item{\code{roi}: }{Region of interest in WGS84 coordinates.} +\enumerate{ + \item \code{source}: One of "AWS", "BDC", "DEAFRICA", "HLS", "MPC", +"SDC" or "USGS"; + \item \code{collection}: Collection available in the cloud provider. + Use \code{sits_list_collections()} to see which + collections are supported; + \item \code{tiles}: A set of tiles defined according to the collection + tiling grid; + \item \code{roi}: Region of interest. Either + a named \code{vector} (\code{"lon_min"}, \code{"lat_min"}, + \code{"lon_max"}, \code{"lat_max"}) in WGS84, a \code{sfc} + or \code{sf} object from sf package in WGS84 projection. } Either \code{tiles} or \code{roi} must be informed. 
The parameters \code{bands}, \code{start_date}, and \code{end_date} are optional for cubes created from cloud providers. -#' The \code{roi} parameter allows a selection of an area of interest, -either using a named \code{vector} (\code{"lon_min"}, \code{"lat_min"}, -\code{"lon_max"}, \code{"lat_max"}) in WGS84, a \code{sfc} or \code{sf} -object from sf package in WGS84 projection. + GeoJSON geometries (RFC 7946) and shapefiles should be converted to \code{sf} objects before being used to define a region of interest. This parameter does not crop a region; it only selects images that intersect the \code{roi}. To create a cube from local files, users need to inform: -\itemize{ -\item{\code{source}:} {Provider from where the data has been - downloaded (e.g, "BDC", "MPC").} -\item{\code{collection}:}{Collection where the data has been extracted from. - (e.g., "SENTINEL-2-L2A" for the Sentinel-2 MPC collection level 2A).} -\item{\code{data_dir}: }{Local directory where images are stored.} -\item{\code{parse_info}: }{Parsing information for files (see below). - Default is \code{c("X1", "X2", "tile", "band", "date")}.} -\item{\code{delim}: }{Delimiter character for parsing files (see below). - Default is \code{"_"}.} +\enumerate{ + \item \code{source}: Provider from where the data has been downloaded + (e.g, "BDC"); + \item \code{collection}: Collection where the data has been extracted from. + (e.g., "SENTINEL-2-L2A" for the Sentinel-2 MPC collection level 2A); + \item \code{data_dir}: Local directory where images are stored. + \item \code{parse_info}: Parsing information for files. + Default is \code{c("X1", "X2", "tile", "band", "date")}. + \item \code{delim}: Delimiter character for parsing files. + Default is \code{"_"}. } To create a cube from local files, all images should have @@ -173,25 +165,25 @@ and the delimiter is "_", which are the default values. 
It is also possible to create result cubes for these are local files produced by
classification or post-classification algorithms. In
-this case, there are more parameters that are required (see below) and the
-parameter \code{parse_info} is specified differently.
+this case, more parameters are required (see below). The
+parameter \code{parse_info} is specified differently, as follows:
 
-\itemize{
-\item{\code{band}: }{The band name is associated to the type of result. Use
+\enumerate{
+\item \code{band}: Band name associated to the type of result. Use
  \code{"probs"}, for probability cubes produced by \code{sits_classify()};
  \code{"bayes"}, for smoothed cubes produced by \code{sits_smooth()};
- \code{"entropy"} when using \code{sits_uncertainty()}, or \code{"class"}
- for cubes produced by \code{sits_label_classification()}.}
-\item{\code{labels}: }{Labels associated to the classification results.}
-\item{\code{parse_info}: }{File name parsing information
+ \code{"segments"}, for vector cubes produced by \code{sits_segment()};
+ \code{"entropy"} when using \code{sits_uncertainty()}, and \code{"class"}
+ for cubes produced by \code{sits_label_classification()};
+\item \code{labels}: Labels associated to the classification results;
+\item \code{parse_info}: File name parsing information
  to deduce the values of "tile", "start_date", "end_date" from
  the file name. Default is
  c("X1", "X2", "tile", "start_date", "end_date", "band").
  Unlike non-classified image files, cubes with results have both
- "start_date" and "end_date".}
-}
+ "start_date" and "end_date".
}
-\note{
+
 In MPC, sits can access are two open data collections:
\code{"SENTINEL-2-L2A"} for Sentinel-2/2A images, and
\code{"LANDSAT-C2-L2"} for the Landsat-4/5/7/8/9 collection.
@@ -252,6 +244,7 @@ Sys.setenv(
BDC_ACCESS_KEY = 
)}
}
+}
\examples{
if (sits_run_examples()) {
# --- Access to the Brazil Data Cube
diff --git a/man/sits_filter.Rd b/man/sits_filter.Rd
index ed7124c87..8919004c8 100644
--- a/man/sits_filter.Rd
+++ b/man/sits_filter.Rd
@@ -16,7 +16,7 @@ Filtered time series
}
\description{
Applies a filter to all bands, using a filter function
- such as `sits_whittaker()` or `sits_sgolay()`.
+ such as sits_whittaker() or sits_sgolay().
}
\examples{
if (sits_run_examples()) {
diff --git a/man/sits_get_data.Rd b/man/sits_get_data.Rd
index bf6a732f9..15a4cecde 100644
--- a/man/sits_get_data.Rd
+++ b/man/sits_get_data.Rd
@@ -149,22 +149,17 @@ They contain both the satellite image time series and their metadata.
}
\note{
There are four ways of specifying data to be retrieved using the
-"samples" parameter:
-\itemize{
-\item{CSV file: }{Provide a CSV file with columns
-"longitude", "latitude", "start_date", "end_date" and "label" for
-each sample}
-\item{SHP file: }{Provide a shapefile in POINT or POLYGON geometry
+\code{samples} parameter:
+(a) CSV file: a CSV file with columns
+\code{longitude}, \code{latitude},
+\code{start_date}, \code{end_date} and \code{label} for each sample;
+(b) SHP file: a shapefile in POINT or POLYGON geometry
containing the location of the samples and an attribute to be
-used as label. Also, provide start and end date for the time series.}
-\item{sits object: }{A sits tibble.}
-\item{sf object: }{An "sf" object with POINT or POLYGON geometry.}
-\item{data.frame: }{A data.frame with with mandatory columns
-"longitude", "latitude".}
-}
-
-Please refer to the sits documentation available in
- for detailed examples.
+used as label. Also, provide start and end date for the time series;
+(c) sits object: A sits tibble;
+(d) sf object: An \code{\link[sf]{sf}} object with POINT or POLYGON geometry;
+(e) data.frame: A data.frame with mandatory columns
+\code{longitude} and \code{latitude}.
} \examples{ if (sits_run_examples()) { diff --git a/man/sits_label_classification.Rd b/man/sits_label_classification.Rd index 2fe0b0765..68e7f9df1 100644 --- a/man/sits_label_classification.Rd +++ b/man/sits_label_classification.Rd @@ -117,4 +117,6 @@ if (sits_run_examples()) { } \author{ Rolf Simoes, \email{rolf.simoes@inpe.br} + +Felipe Souza, \email{felipe.souza@inpe.br} } diff --git a/man/sits_lighttae.Rd b/man/sits_lighttae.Rd index cb27ad0ad..a9baac97e 100644 --- a/man/sits_lighttae.Rd +++ b/man/sits_lighttae.Rd @@ -40,10 +40,10 @@ to be used as validation data.} \item{optimizer}{Optimizer function to be used.} \item{opt_hparams}{Hyperparameters for optimizer: -lr : Learning rate of the optimizer -eps: Term added to the denominator +\code{lr} : Learning rate of the optimizer +\code{eps}: Term added to the denominator to improve numerical stability. -weight_decay: L2 regularization} +\code{weight_decay}: L2 regularization rate.} \item{lr_decay_epochs}{Number of epochs to reduce learning rate.} @@ -73,10 +73,6 @@ We also used the code made available by Maja Schneider in her work with Marco Körner referenced below and available at https://github.com/maja601/RC2020-psetae. } -\note{ -Please refer to the sits documentation available in - for detailed examples. -} \examples{ if (sits_run_examples()) { # create a lightTAE model @@ -129,9 +125,9 @@ ReScience C 7 (2), 2021. DOI: 10.5281/zenodo.4835356 } \author{ -Charlotte Pelletier, \email{charlotte.pelletier@univ-ubs.fr} - Gilberto Camara, \email{gilberto.camara@inpe.br} Rolf Simoes, \email{rolf.simoes@inpe.br} + +Charlotte Pelletier, \email{charlotte.pelletier@univ-ubs.fr} } diff --git a/man/sits_model_export.Rd b/man/sits_model_export.Rd index 791d0af47..49fb428d9 100644 --- a/man/sits_model_export.Rd +++ b/man/sits_model_export.Rd @@ -19,7 +19,7 @@ machine learning or deep learning package. 
\description{ Given a trained machine learning or deep learning model, exports the model as an object for further exploration outside the -"sits" package +\code{sits} package. } \examples{ if (sits_run_examples()) { diff --git a/man/sits_mosaic.Rd b/man/sits_mosaic.Rd index daa6ef48a..cf96f29b3 100644 --- a/man/sits_mosaic.Rd +++ b/man/sits_mosaic.Rd @@ -49,8 +49,9 @@ It is possible to provide a \code{roi} to crop the mosaic. \note{ The "roi" parameter defines a region of interest. It can be an sf_object, a shapefile, or a bounding box vector with - named XY values ("xmin", "xmax", "ymin", "ymax") or - named lat/long values ("lon_min", "lat_min", "lon_max", "lat_max") + named XY values (\code{xmin}, \code{xmax}, \code{ymin}, \code{ymax}) or + named lat/long values (\code{lon_min}, \code{lon_max}, + \code{lat_min}, \code{lat_max}). The user should specify the crs of the mosaic since in many cases the input images will be in different coordinate systems. For example, diff --git a/man/sits_predictors.Rd b/man/sits_predictors.Rd index 1cd8c4c09..1d697faed 100644 --- a/man/sits_predictors.Rd +++ b/man/sits_predictors.Rd @@ -16,7 +16,7 @@ The predictors for the sample: a data.frame with one row per sample. Predictors are X-Y values required for machine learning algorithms, organized as a data table where each row corresponds to a training sample. The first two columns of the predictors table -are categorical ("label_id" and "label"). The other columns are +are categorical (\code{label_id} and \code{label}). The other columns are the values of each band and time, organized first by band and then by time. } \examples{ diff --git a/man/sits_rfor.Rd b/man/sits_rfor.Rd index ec7efdedb..e0dace335 100644 --- a/man/sits_rfor.Rd +++ b/man/sits_rfor.Rd @@ -29,13 +29,9 @@ Model fitted to input data } \description{ Use Random Forest algorithm to classify samples. -This function is a front-end to the "randomForest" package. 
+This function is a front-end to the \code{randomForest} package.
Please refer to the documentation in that package for more details.
}
-\note{
-Please refer to the sits documentation available in
- for detailed examples.
-}
\examples{
if (sits_run_examples()) {
# Example of training a model for time series classification
diff --git a/man/sits_segment.Rd b/man/sits_segment.Rd
index 5fdefd149..d3c1fb247 100644
--- a/man/sits_segment.Rd
+++ b/man/sits_segment.Rd
@@ -49,18 +49,19 @@ segmentation function. The function applies the segmentation algorithm
"seg_fn" to each tile.
Segmentation uses the following steps:
-\itemize{
- \item{create a regular data cube}
- \item{use \code{\link[sits]{sits_segment}} to obtain a vector data cube
- with polygons that define the boundary of the segments.}
- \item{use \code{\link[sits]{sits_classify}} to classify the
- time series associated to the segments, and obtain the probability
- for each class.}
- \item{use \code{\link[sits]{sits_label_classification}} to label the
- vector probability cube.}
- \item{use \code{\link[sits]{plot}} or \code{\link[sits]{sits_view}}
- to display the results.}
- }
+\enumerate{
+ \item Create a regular data cube with \code{\link[sits]{sits_cube}} and
+ \code{\link[sits]{sits_regularize}};
+ \item Run \code{\link[sits]{sits_segment}} to obtain a vector data cube
+ with polygons that define the boundary of the segments;
+ \item Classify the time series associated to the segments
+ with \code{\link[sits]{sits_classify}}, to obtain
+ a vector probability cube;
+ \item Use \code{\link[sits]{sits_label_classification}} to label the
+ vector probability cube;
+ \item Display the results with \code{\link[sits]{plot}} or
+ \code{\link[sits]{sits_view}}.
+}
}
\note{
The "roi" parameter defines a region of interest.
It can be diff --git a/man/sits_select.Rd b/man/sits_select.Rd index 885c2fe50..c9a532571 100644 --- a/man/sits_select.Rd +++ b/man/sits_select.Rd @@ -30,7 +30,7 @@ sits_select(data, bands = NULL, start_date = NULL, end_date = NULL, ...) \method{sits_select}{default}(data, ...) } \arguments{ -\item{data}{Tibble (class "sits" or class "raster_cube").} +\item{data}{Tibble with time series or data cube.} \item{bands}{Character vector with the names of the bands.} @@ -45,7 +45,7 @@ sits_select(data, bands = NULL, start_date = NULL, end_date = NULL, ...) \item{tiles}{Character vector with the names of the tiles.} } \value{ -Tibble of class "sits" or class "raster_cube". +Tibble with time series or data cube. } \description{ Filter only the selected bands and dates diff --git a/man/sits_slic.Rd b/man/sits_slic.Rd index e78ac23c8..806012ef1 100644 --- a/man/sits_slic.Rd +++ b/man/sits_slic.Rd @@ -24,11 +24,11 @@ supercells' centers.} \item{compactness}{A compactness value. Larger values cause clusters to be more compact/even (square).} -\item{dist_fun}{Distance function. Currently implemented: "euclidean", -"jsd", "dtw", and any distance function from the +\item{dist_fun}{Distance function. Currently implemented: +\code{euclidean, jsd, dtw}, +and any distance function from the \code{philentropy} package. -See \code{philentropy::getDistMethods()}. -Default: "dtw"} +See \code{philentropy::getDistMethods()}.} \item{avg_fun}{Averaging function to calculate the values of the supercells' centers. @@ -47,7 +47,7 @@ Default: "median"} Set of segments for a single tile } \description{ -Apply a segmentation on a data cube based on the "supercells" package. +Apply a segmentation on a data cube based on the \code{supercells} package. This is an adaptation and extension to remote sensing data of the SLIC superpixels algorithm proposed by Achanta et al. (2012). See references for more details. 
diff --git a/man/sits_tuning.Rd b/man/sits_tuning.Rd index 7ab90399e..42c0f4677 100644 --- a/man/sits_tuning.Rd +++ b/man/sits_tuning.Rd @@ -51,10 +51,8 @@ This function performs a random search on values of selected hyperparameters. Instead of performing an exhaustive test of all parameter combinations, it selecting them randomly. Validation is done using an independent set of samples or by a validation split. The function returns the -best hyper-parameters in a list. - -hyper-parameters passed to \code{params} parameter should be passed -by calling \code{sits_tuning_hparams()} function. +best hyper-parameters in a list. Hyper-parameters passed to \code{params} +parameter should be passed by calling \code{sits_tuning_hparams()}. } \examples{ if (sits_run_examples()) { diff --git a/man/sits_tuning_hparams.Rd b/man/sits_tuning_hparams.Rd index 658096cef..740bd3d3d 100644 --- a/man/sits_tuning_hparams.Rd +++ b/man/sits_tuning_hparams.Rd @@ -18,8 +18,8 @@ This function allow user building the hyper-parameters space used by \code{sits_tuning()} function search randomly the best parameter combination. -User should pass the possible values for hyper-parameters as -constant or by calling the following random functions: +Users should pass the possible values for hyper-parameters as +constants or by calling the following random functions: \itemize{ \item \code{uniform(min = 0, max = 1, n = 1)}: returns random numbers diff --git a/man/sits_uncertainty_sampling.Rd b/man/sits_uncertainty_sampling.Rd index 0abee9b98..fbfaca79c 100644 --- a/man/sits_uncertainty_sampling.Rd +++ b/man/sits_uncertainty_sampling.Rd @@ -12,7 +12,8 @@ sits_uncertainty_sampling( ) } \arguments{ -\item{uncert_cube}{An uncertainty cube. See \code{sits_uncertainty}.} +\item{uncert_cube}{An uncertainty cube. +See \code{\link[sits]{sits_uncertainty}}.} \item{n}{Number of suggested points.} @@ -32,15 +33,14 @@ The function selects data points that have confused an algorithm. 
These points don't have labels and need be manually labelled by experts and then used to increase the classification's training set. -This function is best used in the following context -\itemize{ - \item{1. }{Select an initial set of samples.} - \item{2. }{Train a machine learning model.} - \item{3. }{Build a data cube and classify it using the model.} - \item{4. }{Run a Bayesian smoothing in the resulting probability cube.} - \item{5. }{Create an uncertainty cube.} - \item{6. }{Perform uncertainty sampling.} -} +This function is best used in the following context: + 1. Select an initial set of samples. + 2. Train a machine learning model. + 3. Build a data cube and classify it using the model. + 4. Run a Bayesian smoothing in the resulting probability cube. + 5. Create an uncertainty cube. + 6. Perform uncertainty sampling. + The Bayesian smoothing procedure will reduce the classification outliers and thus increase the likelihood that the resulting pixels with high uncertainty have meaningful information. diff --git a/man/summary.raster_cube.Rd b/man/summary.raster_cube.Rd index d0b40fc14..f37d27489 100644 --- a/man/summary.raster_cube.Rd +++ b/man/summary.raster_cube.Rd @@ -37,4 +37,6 @@ if (sits_run_examples()) { } \author{ Gilberto Camara, \email{gilberto.camara@inpe.br} + +Felipe Souza, \email{felipe.souza@inpe.br} } diff --git a/man/summary.sits.Rd b/man/summary.sits.Rd index 801aab2b6..42ef25cd0 100644 --- a/man/summary.sits.Rd +++ b/man/summary.sits.Rd @@ -26,4 +26,6 @@ if (sits_run_examples()) { } \author{ Gilberto Camara, \email{gilberto.camara@inpe.br} + +Felipe Souza, \email{felipe.souza@inpe.br} }