Skip to content

Commit

Permalink
ci: [torrust#384] add container healthcheck for index service
Browse files Browse the repository at this point in the history
For both services:

- The Index API
- The Tracker Statistics Importer (console cronjob)
  • Loading branch information
josecelano committed Nov 17, 2023
1 parent 04b70ba commit 05d7ac9
Show file tree
Hide file tree
Showing 9 changed files with 197 additions and 34 deletions.
9 changes: 7 additions & 2 deletions Containerfile
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,9 @@ COPY --from=build \
RUN cargo nextest run --workspace-remap /test/src/ --extract-to /test/src/ --no-run --archive-file /test/torrust-index.tar.zst
RUN cargo nextest run --workspace-remap /test/src/ --target-dir-remap /test/src/target/ --cargo-metadata /test/src/target/nextest/cargo-metadata.json --binaries-metadata /test/src/target/nextest/binaries-metadata.json

RUN mkdir -p /app/bin/; cp -l /test/src/target/release/torrust-index /app/bin/torrust-index
RUN mkdir -p /app/bin/; \
cp -l /test/src/target/release/torrust-index /app/bin/torrust-index; \
cp -l /test/src/target/release/health_check /app/bin/health_check;
# RUN mkdir -p /app/lib/; cp -l $(realpath $(ldd /app/bin/torrust-index | grep "libz\.so\.1" | awk '{print $3}')) /app/lib/libz.so.1
RUN chown -R root:root /app; chmod -R u=rw,go=r,a+X /app; chmod -R a+x /app/bin

Expand All @@ -99,11 +101,13 @@ ARG TORRUST_INDEX_PATH_CONFIG="/etc/torrust/index/index.toml"
ARG TORRUST_INDEX_DATABASE_DRIVER="sqlite3"
ARG USER_ID=1000
ARG API_PORT=3001
ARG IMPORTER_API_PORT=3002

ENV TORRUST_INDEX_PATH_CONFIG=${TORRUST_INDEX_PATH_CONFIG}
ENV TORRUST_INDEX_DATABASE_DRIVER=${TORRUST_INDEX_DATABASE_DRIVER}
ENV USER_ID=${USER_ID}
ENV API_PORT=${API_PORT}
ENV IMPORTER_API_PORT=${IMPORTER_API_PORT}
ENV TZ=Etc/UTC

EXPOSE ${API_PORT}/tcp
Expand All @@ -130,5 +134,6 @@ CMD ["sh"]
FROM runtime as release
ENV RUNTIME="release"
COPY --from=test /app/ /usr/
# HEALTHCHECK CMD ["/usr/bin/wget", "--no-verbose", "--tries=1", "--spider", "localhost:${API_PORT}/version"]
HEALTHCHECK --interval=5s --timeout=5s --start-period=3s --retries=3 \
CMD /usr/bin/health_check http://localhost:${API_PORT}/health_check && /usr/bin/health_check http://localhost:${IMPORTER_API_PORT}/health_check || exit 1
CMD ["/usr/bin/torrust-index"]
8 changes: 4 additions & 4 deletions contrib/dev-tools/container/e2e/mysql/run-e2e-tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -25,10 +25,10 @@ echo "Running E2E tests using MySQL ..."
./contrib/dev-tools/container/e2e/mysql/e2e-env-up.sh || exit 1

# Wait for containers to be healthy
./contrib/dev-tools/container/functions/wait_for_container_to_be_healthy.sh torrust-mysql-1 10 3
# todo: implement healthchecks for tracker and index and wait until they are healthy
#wait_for_container torrust-tracker-1 10 3
#wait_for_container torrust-idx-back-1 10 3
./contrib/dev-tools/container/functions/wait_for_container_to_be_healthy.sh torrust-mysql-1 10 3 || exit 1
# todo: implement healthchecks for the tracker and wait until it's healthy
#./contrib/dev-tools/container/functions/wait_for_container_to_be_healthy.sh torrust-tracker-1 10 3
./contrib/dev-tools/container/functions/wait_for_container_to_be_healthy.sh torrust-index-1 10 3 || exit 1
sleep 20s

# Just to make sure that everything is up and running
Expand Down
8 changes: 4 additions & 4 deletions contrib/dev-tools/container/e2e/sqlite/run-e2e-tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -26,10 +26,10 @@ echo "Running E2E tests using SQLite ..."
./contrib/dev-tools/container/e2e/sqlite/e2e-env-up.sh || exit 1

# Wait for containers to be healthy
./contrib/dev-tools/container/functions/wait_for_container_to_be_healthy.sh torrust-mysql-1 10 3
# todo: implement healthchecks for tracker and index and wait until they are healthy
#wait_for_container torrust-tracker-1 10 3
#wait_for_container torrust-idx-back-1 10 3
./contrib/dev-tools/container/functions/wait_for_container_to_be_healthy.sh torrust-mysql-1 10 3 || exit 1
# todo: implement healthchecks for the tracker and wait until it's healthy
#./contrib/dev-tools/container/functions/wait_for_container_to_be_healthy.sh torrust-tracker-1 10 3
./contrib/dev-tools/container/functions/wait_for_container_to_be_healthy.sh torrust-index-1 10 3 || exit 1
sleep 20s

# Just to make sure that everything is up and running
Expand Down
2 changes: 1 addition & 1 deletion share/default/config/index.development.sqlite3.toml
Original file line number Diff line number Diff line change
Expand Up @@ -43,4 +43,4 @@ default_torrent_page_size = 10
max_torrent_page_size = 30

[tracker_statistics_importer]
torrent_info_update_interval = 3600
torrent_info_update_interval = 5
28 changes: 7 additions & 21 deletions src/app.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,8 @@ use crate::services::user::{self, DbBannedUserList, DbUserProfileRepository, DbU
use crate::services::{proxy, settings, torrent};
use crate::tracker::statistics_importer::StatisticsImporter;
use crate::web::api::v1::auth::Authentication;
use crate::web::api::{start, Version};
use crate::{mailer, tracker};
use crate::web::api::Version;
use crate::{console, mailer, tracker, web};

pub struct Running {
pub api_socket_addr: SocketAddr,
Expand Down Expand Up @@ -153,29 +153,15 @@ pub async fn run(configuration: Configuration, api_version: &Version) -> Running
ban_service,
));

// Start repeating task to import tracker torrent data and updating
// Start cronjob to import tracker torrent data and updating
// seeders and leechers info.

let weak_tracker_statistics_importer = Arc::downgrade(&tracker_statistics_importer);

let tracker_statistics_importer_handle = tokio::spawn(async move {
let interval = std::time::Duration::from_secs(torrent_info_update_interval);
let mut interval = tokio::time::interval(interval);
interval.tick().await; // first tick is immediate...
loop {
interval.tick().await;
if let Some(tracker) = weak_tracker_statistics_importer.upgrade() {
drop(tracker.import_all_torrents_statistics().await);
} else {
break;
}
}
});
let tracker_statistics_importer_handle =
console::tracker_statistics_importer::start(torrent_info_update_interval, &tracker_statistics_importer);

// Start API server
let running_api = web::api::start(app_data, &net_ip, net_port, api_version).await;

let running_api = start(app_data, &net_ip, net_port, api_version).await;

// Full running application
Running {
api_socket_addr: running_api.socket_addr,
api_server: running_api.api_server,
Expand Down
37 changes: 37 additions & 0 deletions src/bin/health_check.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
//! Minimal `curl` or `wget` to be used for container health checks.
//!
//! It's convenient to avoid using third-party libraries because:
//!
//! - They are harder to maintain.
//! - They introduce new attack vectors.
use std::{env, process};

#[tokio::main]
async fn main() {
    // Expect exactly one argument: the URL to probe.
    let args: Vec<String> = env::args().collect();
    if args.len() != 2 {
        eprintln!("Usage: cargo run --bin health_check <HEALTH_URL>");
        eprintln!("Example: cargo run --bin health_check http://localhost:3002/health_check");
        process::exit(1);
    }

    println!("Health check ...");

    // Borrow the argument directly; the previous `&args[1].clone()` cloned
    // the String only to immediately borrow the temporary.
    let url = &args[1];

    // The exit code drives the container healthcheck: 0 = healthy, 1 = unhealthy.
    match reqwest::get(url).await {
        Ok(response) => {
            if response.status().is_success() {
                println!("STATUS: {}", response.status());
                process::exit(0);
            } else {
                println!("Non-success status received.");
                process::exit(1);
            }
        }
        Err(err) => {
            println!("ERROR: {err}");
            process::exit(1);
        }
    }
}
1 change: 1 addition & 0 deletions src/console/mod.rs
Original file line number Diff line number Diff line change
@@ -1 +1,2 @@
pub mod commands;
pub(crate) mod tracker_statistics_importer;
127 changes: 127 additions & 0 deletions src/console/tracker_statistics_importer.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,127 @@
//! Cronjob to import tracker torrent data and update seeders and leechers
//! info.
//!
//! It has two services:
//!
//! - The importer which is the cronjob executed at regular intervals.
//! - The importer API.
//!
//! The cronjob sends a heartbeat signal to the API each time it is executed.
//! The last heartbeat signal time is used to determine whether the cronjob was
//! executed successfully or not. The API has a `health_check` endpoint which is
//! used when the application is running in containers.
use std::sync::{Arc, Mutex};

use axum::extract::State;
use axum::routing::{get, post};
use axum::{Json, Router};
use chrono::{DateTime, Utc};
use log::{error, info};
use serde_json::{json, Value};
use tokio::task::JoinHandle;

use crate::tracker::statistics_importer::StatisticsImporter;

const IMPORTER_API_IP: &str = "127.0.0.1";
const IMPORTER_API_PORT: u16 = 3002; // todo: use configuration option

/// Shared state handed to the importer API handlers through axum's `State`
/// extractor.
#[derive(Clone)]
struct ImporterState {
    /// Shared variable to store the timestamp of the last heartbeat sent
    /// by the cronjob.
    pub last_heartbeat: Arc<Mutex<DateTime<Utc>>>,
    /// Interval (in seconds) between importation executions.
    pub torrent_info_update_interval: u64,
}

/// Spawns the importer launcher task, which runs both the importer API
/// server and the import cronjob, and returns its `JoinHandle`.
pub fn start(torrent_info_update_interval: u64, tracker_statistics_importer: &Arc<StatisticsImporter>) -> JoinHandle<()> {
    // Hold only a weak reference so this background task does not keep the
    // importer alive after the rest of the application has dropped it.
    let weak_tracker_statistics_importer = Arc::downgrade(tracker_statistics_importer);

    tokio::spawn(async move {
        info!("Tracker statistics importer launcher started");

        // Start the Importer API

        let _importer_api_handle = tokio::spawn(async move {
            let import_state = Arc::new(ImporterState {
                last_heartbeat: Arc::new(Mutex::new(Utc::now())),
                torrent_info_update_interval,
            });

            let app = Router::new()
                .route("/", get(|| async { Json(json!({})) }))
                .route("/health_check", get(health_check_handler))
                .with_state(import_state.clone())
                .route("/heartbeat", post(heartbeat_handler))
                .with_state(import_state);

            let addr = format!("{IMPORTER_API_IP}:{IMPORTER_API_PORT}");

            info!("Tracker statistics importer API server listening on http://{}", addr);

            axum::Server::bind(&addr.parse().unwrap())
                .serve(app.into_make_service())
                .await
                .unwrap();
        });

        // Start the Importer cronjob

        info!("Tracker statistics importer cronjob starting ...");

        let interval = std::time::Duration::from_secs(torrent_info_update_interval);
        let mut interval = tokio::time::interval(interval);

        interval.tick().await; // first tick is immediate...

        loop {
            interval.tick().await;

            info!("Running tracker statistics importer ...");

            // Tell the API we are alive before importing; the health check
            // endpoint compares "now" against this last heartbeat time.
            if let Err(e) = send_heartbeat().await {
                error!("Failed to send heartbeat from importer cronjob: {}", e);
            }

            // Stop the cronjob once the importer has been dropped elsewhere
            // (the weak reference can no longer be upgraded).
            if let Some(tracker) = weak_tracker_statistics_importer.upgrade() {
                drop(tracker.import_all_torrents_statistics().await);
            } else {
                break;
            }
        }
    })
}

/// Endpoint for container health check.
///
/// The cronjob is considered healthy when the last heartbeat was received
/// within one execution interval plus a small margin; otherwise the check
/// reports an error status.
async fn health_check_handler(State(state): State<Arc<ImporterState>>) -> Json<Value> {
    // Extra seconds allowed on top of the execution interval before the
    // cronjob is considered stalled.
    let margin_in_seconds: u64 = 10;
    let now = Utc::now();
    let last_heartbeat = state.last_heartbeat.lock().unwrap();

    let elapsed_in_seconds = now.signed_duration_since(*last_heartbeat).num_seconds();

    // Saturate instead of panicking: the previous `try_into().unwrap()` (and
    // the unchecked `u64` addition) would panic if the configured interval
    // were large enough to overflow.
    let max_allowed_in_seconds =
        i64::try_from(state.torrent_info_update_interval.saturating_add(margin_in_seconds)).unwrap_or(i64::MAX);

    if elapsed_in_seconds <= max_allowed_in_seconds {
        Json(json!({ "status": "Ok" }))
    } else {
        Json(json!({ "status": "Error" }))
    }
}

/// The tracker statistics importer cronjob sends a heartbeat on each execution
/// to inform that it's alive. This endpoint handles receiving that signal.
async fn heartbeat_handler(State(state): State<Arc<ImporterState>>) -> Json<Value> {
    // Record the current time as the latest sign of life from the cronjob.
    *state.last_heartbeat.lock().unwrap() = Utc::now();
    Json(json!({ "status": "Heartbeat received" }))
}

/// Send a heartbeat from the importer cronjob to the importer API.
async fn send_heartbeat() -> Result<(), reqwest::Error> {
    let endpoint = format!("http://{IMPORTER_API_IP}:{IMPORTER_API_PORT}/heartbeat");

    // A one-shot client is enough here; the cronjob runs at long intervals.
    reqwest::Client::new().post(endpoint).send().await?;

    Ok(())
}
11 changes: 9 additions & 2 deletions src/web/api/v1/routes.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,8 @@ use std::sync::Arc;

use axum::extract::DefaultBodyLimit;
use axum::routing::get;
use axum::Router;
use axum::{Json, Router};
use serde_json::{json, Value};
use tower_http::compression::CompressionLayer;
use tower_http::cors::CorsLayer;

Expand Down Expand Up @@ -34,7 +35,8 @@ pub fn router(app_data: Arc<AppData>) -> Router {
.nest("/proxy", proxy::routes::router(app_data.clone()));

let router = Router::new()
.route("/", get(about_page_handler).with_state(app_data))
.route("/", get(about_page_handler).with_state(app_data.clone()))
.route("/health_check", get(health_check_handler).with_state(app_data))
.nest(&format!("/{API_VERSION_URL_PREFIX}"), v1_api_routes);

let router = if env::var(ENV_VAR_CORS_PERMISSIVE).is_ok() {
Expand All @@ -45,3 +47,8 @@ pub fn router(app_data: Arc<AppData>) -> Router {

router.layer(DefaultBodyLimit::max(10_485_760)).layer(CompressionLayer::new())
}

/// Endpoint for container health check.
async fn health_check_handler() -> Json<Value> {
    // A fixed "Ok" body: reaching this handler at all proves the API is up.
    let body = json!({ "status": "Ok" });
    Json(body)
}

0 comments on commit 05d7ac9

Please sign in to comment.