From 71b5a4a6da2b715dacb71f00fe884a472b1328a8 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Fri, 11 Aug 2023 17:31:51 +0200 Subject: [PATCH 01/26] Remote configuration & live debugging Signed-off-by: Bob Weinand --- Cargo.toml | 3 + ddcommon-ffi/Cargo.toml | 1 + ddcommon-ffi/src/option.rs | 9 + ddcommon-ffi/src/slice.rs | 33 +- live-debugger-ffi/Cargo.toml | 14 + live-debugger-ffi/cbindgen.toml | 35 ++ live-debugger-ffi/src/data.rs | 280 +++++++++++++ live-debugger-ffi/src/evaluator.rs | 172 ++++++++ live-debugger-ffi/src/lib.rs | 7 + live-debugger-ffi/src/query.rs | 39 ++ live-debugger-ffi/src/sender.rs | 117 ++++++ live-debugger/Cargo.toml | 15 + live-debugger/src/debugger_defs.rs | 78 ++++ live-debugger/src/expr_defs.rs | 82 ++++ live-debugger/src/expr_eval.rs | 355 ++++++++++++++++ live-debugger/src/lib.rs | 16 + live-debugger/src/parse_json.rs | 216 ++++++++++ live-debugger/src/parse_json_expr.rs | 211 ++++++++++ live-debugger/src/parse_util.rs | 13 + live-debugger/src/probe_defs.rs | 134 ++++++ live-debugger/src/sender.rs | 94 +++++ remote-config/Cargo.toml | 23 ++ .../src/dynamic_configuration/data.rs | 50 +++ .../src/dynamic_configuration/mod.rs | 1 + remote-config/src/fetch/fetcher.rs | 316 ++++++++++++++ remote-config/src/fetch/mod.rs | 9 + remote-config/src/fetch/multitarget.rs | 379 +++++++++++++++++ remote-config/src/fetch/shared.rs | 266 ++++++++++++ remote-config/src/fetch/single.rs | 29 ++ remote-config/src/lib.rs | 17 + remote-config/src/parse.rs | 114 +++++ remote-config/src/targets.rs | 55 +++ sidecar-ffi/Cargo.toml | 2 + sidecar-ffi/cbindgen.toml | 2 +- sidecar-ffi/src/lib.rs | 97 ++++- sidecar/Cargo.toml | 6 +- sidecar/src/entry.rs | 1 + sidecar/src/lib.rs | 1 + sidecar/src/one_way_shared_memory.rs | 12 + sidecar/src/service/blocking.rs | 98 ++++- sidecar/src/service/mod.rs | 3 + sidecar/src/service/remote_configs.rs | 65 +++ sidecar/src/service/runtime_info.rs | 12 + sidecar/src/service/session_info.rs | 43 +- 
sidecar/src/service/sidecar_interface.rs | 33 +- sidecar/src/service/sidecar_server.rs | 88 +++- sidecar/src/shm_remote_config.rs | 391 ++++++++++++++++++ sidecar/src/tracer.rs | 3 + trace-protobuf/build.rs | 48 ++- trace-protobuf/src/lib.rs | 2 + trace-protobuf/src/pb/remoteconfig.proto | 178 ++++++++ trace-protobuf/src/remoteconfig.rs | 306 ++++++++++++++ trace-protobuf/src/serde.rs | 34 ++ 53 files changed, 4588 insertions(+), 20 deletions(-) create mode 100644 live-debugger-ffi/Cargo.toml create mode 100644 live-debugger-ffi/cbindgen.toml create mode 100644 live-debugger-ffi/src/data.rs create mode 100644 live-debugger-ffi/src/evaluator.rs create mode 100644 live-debugger-ffi/src/lib.rs create mode 100644 live-debugger-ffi/src/query.rs create mode 100644 live-debugger-ffi/src/sender.rs create mode 100644 live-debugger/Cargo.toml create mode 100644 live-debugger/src/debugger_defs.rs create mode 100644 live-debugger/src/expr_defs.rs create mode 100644 live-debugger/src/expr_eval.rs create mode 100644 live-debugger/src/lib.rs create mode 100644 live-debugger/src/parse_json.rs create mode 100644 live-debugger/src/parse_json_expr.rs create mode 100644 live-debugger/src/parse_util.rs create mode 100644 live-debugger/src/probe_defs.rs create mode 100644 live-debugger/src/sender.rs create mode 100644 remote-config/Cargo.toml create mode 100644 remote-config/src/dynamic_configuration/data.rs create mode 100644 remote-config/src/dynamic_configuration/mod.rs create mode 100644 remote-config/src/fetch/fetcher.rs create mode 100644 remote-config/src/fetch/mod.rs create mode 100644 remote-config/src/fetch/multitarget.rs create mode 100644 remote-config/src/fetch/shared.rs create mode 100644 remote-config/src/fetch/single.rs create mode 100644 remote-config/src/lib.rs create mode 100644 remote-config/src/parse.rs create mode 100644 remote-config/src/targets.rs create mode 100644 sidecar/src/service/remote_configs.rs create mode 100644 sidecar/src/shm_remote_config.rs create 
mode 100644 trace-protobuf/src/pb/remoteconfig.proto create mode 100644 trace-protobuf/src/remoteconfig.rs create mode 100644 trace-protobuf/src/serde.rs diff --git a/Cargo.toml b/Cargo.toml index 6833f61a0..8e6c01c68 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,6 +15,9 @@ members = [ "tools", "ipc", "ipc/macros", + "live-debugger", + "live-debugger-ffi", + "remote-config", "sidecar", "sidecar/macros", "sidecar-ffi", diff --git a/ddcommon-ffi/Cargo.toml b/ddcommon-ffi/Cargo.toml index b657e4d69..d5d4735e3 100644 --- a/ddcommon-ffi/Cargo.toml +++ b/ddcommon-ffi/Cargo.toml @@ -19,3 +19,4 @@ build_common = { path = "../build-common" } ddcommon = { path = "../ddcommon" } anyhow = "1.0" hyper = {version = "0.14", default-features = false} +serde = "1.0" diff --git a/ddcommon-ffi/src/option.rs b/ddcommon-ffi/src/option.rs index 2f55d95c9..c9a18ad2d 100644 --- a/ddcommon-ffi/src/option.rs +++ b/ddcommon-ffi/src/option.rs @@ -23,6 +23,15 @@ impl From> for std::option::Option { } } +impl From> for Option { + fn from(o: std::option::Option) -> Self { + match o { + Some(s) => Option::Some(s), + None => Option::None, + } + } +} + impl From<&Option> for std::option::Option { fn from(o: &Option) -> Self { match o { diff --git a/ddcommon-ffi/src/slice.rs b/ddcommon-ffi/src/slice.rs index 7702af79b..e5fa1b714 100644 --- a/ddcommon-ffi/src/slice.rs +++ b/ddcommon-ffi/src/slice.rs @@ -3,10 +3,13 @@ use core::slice; use std::borrow::Cow; -use std::fmt::{Debug, Formatter}; +use std::fmt::{Debug, Display, Formatter}; +use std::hash::{Hash, Hasher}; use std::marker::PhantomData; use std::os::raw::c_char; use std::str::Utf8Error; +use serde::ser::Error; +use serde::Serializer; #[repr(C)] #[derive(Copy, Clone)] @@ -54,27 +57,27 @@ pub fn is_aligned_and_not_null(ptr: *const T) -> bool { } pub trait AsBytes<'a> { - fn as_bytes(&'a self) -> &'a [u8]; + fn as_bytes(&self) -> &'a [u8]; #[inline] - fn try_to_utf8(&'a self) -> Result<&'a str, Utf8Error> { + fn try_to_utf8(&self) -> Result<&'a 
str, Utf8Error> { std::str::from_utf8(self.as_bytes()) } #[inline] - fn to_utf8_lossy(&'a self) -> Cow<'a, str> { + fn to_utf8_lossy(&self) -> Cow<'a, str> { String::from_utf8_lossy(self.as_bytes()) } } impl<'a> AsBytes<'a> for Slice<'a, u8> { - fn as_bytes(&'a self) -> &'a [u8] { + fn as_bytes(&self) -> &'a [u8] { self.as_slice() } } impl<'a> AsBytes<'a> for Slice<'a, i8> { - fn as_bytes(&'a self) -> &'a [u8] { + fn as_bytes(&self) -> &'a [u8] { unsafe { slice::from_raw_parts(self.ptr.cast(), self.len) } } } @@ -123,6 +126,24 @@ impl<'a, T> Default for Slice<'a, T> { } } +impl<'a, T> Hash for Slice<'a, T> where Slice<'a, T>: AsBytes<'a> { + fn hash(&self, state: &mut H) { + state.write(self.as_bytes()) + } +} + +impl<'a, T> serde::Serialize for Slice<'a, T> where Slice<'a, T>: AsBytes<'a> { + fn serialize(&self, serializer: S) -> Result where S: Serializer { + serializer.serialize_str(self.try_to_utf8().map_err(Error::custom)?) + } +} + +impl<'a, T> Display for Slice<'a, T> where Slice<'a, T>: AsBytes<'a> { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.try_to_utf8().map_err(|_| std::fmt::Error)?) + } +} + impl<'a, T: 'a> From<&'a [T]> for Slice<'a, T> { fn from(s: &'a [T]) -> Self { Slice::new(s) diff --git a/live-debugger-ffi/Cargo.toml b/live-debugger-ffi/Cargo.toml new file mode 100644 index 000000000..24ca50886 --- /dev/null +++ b/live-debugger-ffi/Cargo.toml @@ -0,0 +1,14 @@ +# Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. +# This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. 
+ +[package] +name = "datadog-live-debugger-ffi" +version = "0.0.1" +edition = "2021" + +[lib] +crate-type = ["lib", "staticlib", "cdylib"] + +[dependencies] +datadog-live-debugger = { path = "../live-debugger" } +ddcommon-ffi = { path = "../ddcommon-ffi" } diff --git a/live-debugger-ffi/cbindgen.toml b/live-debugger-ffi/cbindgen.toml new file mode 100644 index 000000000..2d29f5793 --- /dev/null +++ b/live-debugger-ffi/cbindgen.toml @@ -0,0 +1,35 @@ +# Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. +# This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. + +language = "C" +tab_width = 2 +header = """// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. + +typedef struct ddog_DebuggerCapture ddog_DebuggerCapture; +typedef struct ddog_DebuggerValue ddog_DebuggerValue; +""" +include_guard = "DDOG_LIVE_DEBUGGER_H" +style = "both" + +no_includes = true +sys_includes = ["stdbool.h", "stddef.h", "stdint.h", "stdio.h"] +includes = ["common.h"] + +[export] +prefix = "ddog_" +renaming_overrides_prefixing = true + +[export.mangle] +rename_types = "PascalCase" + +[enum] +prefix_with_name = true +rename_variants = "ScreamingSnakeCase" + +[fn] +must_use = "DDOG_CHECK_RETURN" + +[parse] +parse_deps = true +include = ["datadog-live-debugger", "ddcommon-ffi"] diff --git a/live-debugger-ffi/src/data.rs b/live-debugger-ffi/src/data.rs new file mode 100644 index 000000000..d3a031db1 --- /dev/null +++ b/live-debugger-ffi/src/data.rs @@ -0,0 +1,280 @@ +// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). 
Copyright 2021-Present Datadog, Inc. + +use datadog_live_debugger::{ + Capture, DslString, EvaluateAt, InBodyLocation, MetricKind, ProbeCondition, ProbeValue, + SpanProbeTarget, +}; +use ddcommon_ffi::{CharSlice, Option}; + +#[repr(C)] +pub struct CharSliceVec<'a> { + pub strings: *const CharSlice<'a>, + pub string_count: usize, +} + +impl<'a> Drop for CharSliceVec<'a> { + fn drop(&mut self) { + unsafe { + Vec::from_raw_parts( + self.strings as *mut CharSlice, + self.string_count, + self.string_count, + ) + }; + } +} + +impl<'a> From<&'a Vec> for CharSliceVec<'a> { + fn from(from: &'a Vec) -> Self { + let char_slices: Vec> = from.iter().map(|s| s.as_str().into()).collect(); + let new = CharSliceVec { + strings: char_slices.as_ptr(), + string_count: char_slices.len(), + }; + std::mem::forget(char_slices); + new + } +} + +#[repr(C)] +pub struct MetricProbe<'a> { + pub kind: MetricKind, + pub name: CharSlice<'a>, + pub value: &'a ProbeValue, +} + +impl<'a> From<&'a datadog_live_debugger::MetricProbe> for MetricProbe<'a> { + fn from(from: &'a datadog_live_debugger::MetricProbe) -> Self { + MetricProbe { + kind: from.kind, + name: from.name.as_str().into(), + value: &from.value, + } + } +} + +#[repr(C)] +pub struct LogProbe<'a> { + pub segments: &'a DslString, + pub when: &'a ProbeCondition, + pub capture: &'a Capture, + pub sampling_snapshots_per_second: u32, +} + +impl<'a> From<&'a datadog_live_debugger::LogProbe> for LogProbe<'a> { + fn from(from: &'a datadog_live_debugger::LogProbe) -> Self { + LogProbe { + segments: &from.segments, + when: &from.when, + capture: &from.capture, + sampling_snapshots_per_second: from.sampling_snapshots_per_second, + } + } +} + +#[repr(C)] +pub struct Tag<'a> { + pub name: CharSlice<'a>, + pub value: &'a DslString, +} + +#[repr(C)] +pub struct SpanProbeDecoration<'a> { + pub condition: &'a ProbeCondition, + pub tags: *const Tag<'a>, + pub tags_count: usize, +} + +impl<'a> From<&'a datadog_live_debugger::SpanProbeDecoration> for 
SpanProbeDecoration<'a> { + fn from(from: &'a datadog_live_debugger::SpanProbeDecoration) -> Self { + let tags: Vec<_> = from + .tags + .iter() + .map(|(name, value)| Tag { + name: name.as_str().into(), + value, + }) + .collect(); + + let new = SpanProbeDecoration { + condition: &from.condition, + tags: tags.as_ptr(), + tags_count: tags.len(), + }; + std::mem::forget(tags); + new + } +} + +impl<'a> Drop for SpanProbeDecoration<'a> { + fn drop(&mut self) { + unsafe { + Vec::from_raw_parts( + self.tags as *mut CharSlice, + self.tags_count, + self.tags_count, + ) + }; + } +} + +#[repr(C)] +pub struct SpanDecorationProbe<'a> { + pub target: SpanProbeTarget, + pub decorations: *const SpanProbeDecoration<'a>, + pub decorations_count: usize, +} + +impl<'a> From<&'a datadog_live_debugger::SpanDecorationProbe> for SpanDecorationProbe<'a> { + fn from(from: &'a datadog_live_debugger::SpanDecorationProbe) -> Self { + let tags: Vec<_> = from.decorations.iter().map(Into::into).collect(); + let new = SpanDecorationProbe { + target: from.target, + decorations: tags.as_ptr(), + decorations_count: tags.len(), + }; + std::mem::forget(tags); + new + } +} + +impl<'a> Drop for SpanDecorationProbe<'a> { + fn drop(&mut self) { + unsafe { + Vec::from_raw_parts( + self.decorations as *mut SpanProbeDecoration, + self.decorations_count, + self.decorations_count, + ) + }; + } +} + +#[repr(C)] +pub enum ProbeType<'a> { + Metric(MetricProbe<'a>), + Log(LogProbe<'a>), + Span, + SpanDecoration(SpanDecorationProbe<'a>), +} + +impl<'a> From<&'a datadog_live_debugger::ProbeType> for ProbeType<'a> { + fn from(from: &'a datadog_live_debugger::ProbeType) -> Self { + match from { + datadog_live_debugger::ProbeType::Metric(metric) => ProbeType::Metric(metric.into()), + datadog_live_debugger::ProbeType::Log(log) => ProbeType::Log(log.into()), + datadog_live_debugger::ProbeType::Span(_) => ProbeType::Span, + datadog_live_debugger::ProbeType::SpanDecoration(span_decoration) => { + 
ProbeType::SpanDecoration(span_decoration.into()) + } + } + } +} + +#[repr(C)] +pub struct ProbeTarget<'a> { + pub type_name: Option>, + pub method_name: Option>, + pub source_file: Option>, + pub signature: Option>, + pub lines: CharSliceVec<'a>, + pub in_body_location: InBodyLocation, +} + +impl<'a> From<&'a datadog_live_debugger::ProbeTarget> for ProbeTarget<'a> { + fn from(from: &'a datadog_live_debugger::ProbeTarget) -> Self { + ProbeTarget { + type_name: from.type_name.as_ref().map(|s| s.as_str().into()).into(), + method_name: from.method_name.as_ref().map(|s| s.as_str().into()).into(), + source_file: from.source_file.as_ref().map(|s| s.as_str().into()).into(), + signature: from.signature.as_ref().map(|s| s.as_str().into()).into(), + lines: (&from.lines).into(), + in_body_location: from.in_body_location, + } + } +} + +#[repr(C)] +pub struct Probe<'a> { + pub id: CharSlice<'a>, + pub version: u64, + pub language: Option>, + pub tags: CharSliceVec<'a>, + pub target: ProbeTarget<'a>, // "where" is rust keyword + pub evaluate_at: EvaluateAt, + pub probe: ProbeType<'a>, +} + +impl<'a> From<&'a datadog_live_debugger::Probe> for Probe<'a> { + fn from(from: &'a datadog_live_debugger::Probe) -> Self { + Probe { + id: from.id.as_str().into(), + version: from.version, + language: from.language.as_ref().map(|s| s.as_str().into()).into(), + tags: (&from.tags).into(), + target: (&from.target).into(), + evaluate_at: from.evaluate_at, + probe: (&from.probe).into(), + } + } +} + +#[repr(C)] +pub struct FilterList<'a> { + pub package_prefixes: CharSliceVec<'a>, + pub classes: CharSliceVec<'a>, +} + +impl<'a> From<&'a datadog_live_debugger::FilterList> for FilterList<'a> { + fn from(from: &'a datadog_live_debugger::FilterList) -> Self { + FilterList { + package_prefixes: (&from.package_prefixes).into(), + classes: (&from.classes).into(), + } + } +} + +#[repr(C)] +pub struct ServiceConfiguration<'a> { + pub id: CharSlice<'a>, + pub allow: FilterList<'a>, + pub deny: 
FilterList<'a>, + pub sampling_snapshots_per_second: u32, +} + +impl<'a> From<&'a datadog_live_debugger::ServiceConfiguration> for ServiceConfiguration<'a> { + fn from(from: &'a datadog_live_debugger::ServiceConfiguration) -> Self { + ServiceConfiguration { + id: from.id.as_str().into(), + allow: (&from.allow).into(), + deny: (&from.deny).into(), + sampling_snapshots_per_second: from.sampling_snapshots_per_second, + } + } +} + +#[repr(C)] +pub enum LiveDebuggingData<'a> { + None, + Probe(Probe<'a>), + ServiceConfiguration(ServiceConfiguration<'a>), +} + +impl<'a> From<&'a datadog_live_debugger::LiveDebuggingData> for LiveDebuggingData<'a> { + fn from(from: &'a datadog_live_debugger::LiveDebuggingData) -> Self { + match from { + datadog_live_debugger::LiveDebuggingData::Probe(probe) => { + LiveDebuggingData::Probe(probe.into()) + } + datadog_live_debugger::LiveDebuggingData::ServiceConfiguration(config) => { + LiveDebuggingData::ServiceConfiguration(config.into()) + } + } + } +} + +#[no_mangle] +pub extern "C" fn ddog_capture_defaults() -> Capture { + Capture::default() +} diff --git a/live-debugger-ffi/src/evaluator.rs b/live-debugger-ffi/src/evaluator.rs new file mode 100644 index 000000000..e89f301c8 --- /dev/null +++ b/live-debugger-ffi/src/evaluator.rs @@ -0,0 +1,172 @@ +// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. 
+ +use datadog_live_debugger::{DslString, ProbeCondition}; +use ddcommon_ffi::CharSlice; +use std::ffi::c_void; + +#[repr(C)] +pub enum IntermediateValue<'a> { + String(CharSlice<'a>), + Number(f64), + Bool(bool), + Null, + Referenced(&'a c_void), +} + +impl<'a> From<&'a datadog_live_debugger::IntermediateValue<&c_void>> for IntermediateValue<'a> { + fn from(value: &'a datadog_live_debugger::IntermediateValue<&c_void>) -> Self { + match value { + datadog_live_debugger::IntermediateValue::String(s) => { + IntermediateValue::String(s.as_str().into()) + } + datadog_live_debugger::IntermediateValue::Number(n) => IntermediateValue::Number(*n), + datadog_live_debugger::IntermediateValue::Bool(b) => IntermediateValue::Bool(*b), + datadog_live_debugger::IntermediateValue::Null => IntermediateValue::Null, + datadog_live_debugger::IntermediateValue::Referenced(value) => { + IntermediateValue::Referenced(value) + } + } + } +} + +#[repr(C)] +pub struct VoidCollection { + pub count: isize, // set to < 0 on error + pub elements: *const c_void, + pub free: extern "C" fn(VoidCollection), +} + +#[repr(C)] +#[derive(Clone)] +pub struct Evaluator { + pub equals: + for<'a> extern "C" fn(&'a c_void, IntermediateValue<'a>, IntermediateValue<'a>) -> bool, + pub greater_than: + for<'a> extern "C" fn(&'a c_void, IntermediateValue<'a>, IntermediateValue<'a>) -> bool, + pub greater_or_equals: + for<'a> extern "C" fn(&'a c_void, IntermediateValue<'a>, IntermediateValue<'a>) -> bool, + pub fetch_identifier: + for<'a, 'b> extern "C" fn(&'a c_void, &CharSlice<'b>) -> Option<&'a c_void>, // special values: @duration, @return, @exception + pub fetch_index: for<'a, 'b> extern "C" fn( + &'a c_void, + &'a c_void, + IntermediateValue<'b>, + ) -> Option<&'a c_void>, + pub fetch_nested: for<'a, 'b> extern "C" fn( + &'a c_void, + &'a c_void, + IntermediateValue<'b>, + ) -> Option<&'a c_void>, + pub length: for<'a> extern "C" fn(&'a c_void, &'a c_void) -> u64, + pub try_enumerate: for<'a> extern "C" 
fn(&'a c_void, &'a c_void) -> VoidCollection, + pub stringify: for<'a> extern "C" fn(&'a c_void, &'a c_void) -> VoidCollection, + pub convert_index: for<'a> extern "C" fn(&'a c_void, &'a c_void) -> isize, // return < 0 on error +} + +static mut FFI_EVALUATOR: Option = None; +static EVALUATOR: datadog_live_debugger::Evaluator = + datadog_live_debugger::Evaluator { + equals: |context, a, b| unsafe { + (FFI_EVALUATOR.as_ref().unwrap().equals)(context, (&a).into(), (&b).into()) + }, + greater_than: |context, a, b| unsafe { + (FFI_EVALUATOR.as_ref().unwrap().greater_than)(context, (&a).into(), (&b).into()) + }, + greater_or_equals: |context, a, b| unsafe { + (FFI_EVALUATOR.as_ref().unwrap().greater_or_equals)(context, (&a).into(), (&b).into()) + }, + fetch_identifier: |context, name| unsafe { + (FFI_EVALUATOR.as_ref().unwrap().fetch_identifier)(context, &CharSlice::from(name)) + }, + fetch_index: |context, base, index| unsafe { + (FFI_EVALUATOR.as_ref().unwrap().fetch_index)(context, base, (&index).into()) + }, + fetch_nested: |context, base, member| unsafe { + (FFI_EVALUATOR.as_ref().unwrap().fetch_nested)(context, base, (&member).into()) + }, + length: |context, value| unsafe { + (FFI_EVALUATOR.as_ref().unwrap().length)(context, value) + }, + try_enumerate: |context, value| unsafe { + let collection = (FFI_EVALUATOR.as_ref().unwrap().try_enumerate)(context, value); + if collection.count < 0 { + None + } else { + // We need to copy, Vec::from_raw_parts with only free in the allocator would be unstable... + let mut vec = Vec::with_capacity(collection.count as usize); + vec.extend_from_slice(std::slice::from_raw_parts( + collection.elements as *const &c_void, + collection.count as usize, + )); + (collection.free)(collection); + Some(vec) + } + }, + stringify: |context, value| unsafe { + let collection = (FFI_EVALUATOR.as_ref().unwrap().try_enumerate)(context, value); + if collection.count < 0 { + unreachable!() + } + + // We need to copy... 
+ let string = String::from_raw_parts( + collection.elements as *mut u8, + collection.count as usize, + collection.count as usize, + ); + let copy = string.clone(); + std::mem::forget(string); + (collection.free)(collection); + copy + }, + convert_index: |context, value| unsafe { + let index = (FFI_EVALUATOR.as_ref().unwrap().convert_index)(context, value); + if index < 0 { + None + } else { + Some(index as usize) + } + }, + }; + +#[no_mangle] +#[allow(clippy::missing_safety_doc)] +pub unsafe extern "C" fn register_expr_evaluator(eval: &Evaluator) { + FFI_EVALUATOR = Some(eval.clone()); +} + +#[no_mangle] +pub extern "C" fn evaluate_condition(condition: &ProbeCondition, context: &c_void) -> bool { + datadog_live_debugger::eval_condition(&EVALUATOR, condition, context) +} + +pub fn evaluate_string(condition: &DslString, context: &c_void) -> String { + datadog_live_debugger::eval_string(&EVALUATOR, condition, context) +} + +// This is unsafe, but we want to use it as function pointer... +extern "C" fn drop_void_collection_string(void: VoidCollection) { + unsafe { + String::from_raw_parts( + void.elements as *mut u8, + void.count as usize, + void.count as usize, + ); + } +} + +#[no_mangle] +pub extern "C" fn evaluate_unmanaged_string( + condition: &DslString, + context: &c_void, +) -> VoidCollection { + let string = evaluate_string(condition, context); + let new = VoidCollection { + count: string.len() as isize, + elements: string.as_ptr() as *const c_void, + free: drop_void_collection_string as extern "C" fn(VoidCollection), + }; + std::mem::forget(string); + new +} diff --git a/live-debugger-ffi/src/lib.rs b/live-debugger-ffi/src/lib.rs new file mode 100644 index 000000000..c91a95339 --- /dev/null +++ b/live-debugger-ffi/src/lib.rs @@ -0,0 +1,7 @@ +// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). 
Copyright 2021-Present Datadog, Inc. + +pub mod data; +pub mod evaluator; +pub mod query; +pub mod sender; diff --git a/live-debugger-ffi/src/query.rs b/live-debugger-ffi/src/query.rs new file mode 100644 index 000000000..05fe858ff --- /dev/null +++ b/live-debugger-ffi/src/query.rs @@ -0,0 +1,39 @@ +use crate::data::LiveDebuggingData; +use ddcommon_ffi::slice::AsBytes; +use ddcommon_ffi::CharSlice; + +#[repr(C)] +pub struct LiveDebuggingParseResult { + pub data: LiveDebuggingData<'static>, + opaque_data: Option>, +} + +#[no_mangle] +pub extern "C" fn parse_json(json: CharSlice) -> LiveDebuggingParseResult { + if let Ok(parsed) = + datadog_live_debugger::parse_json(unsafe { std::str::from_utf8_unchecked(json.as_bytes()) }) + { + let parsed = Box::new(parsed); + LiveDebuggingParseResult { + // we have the box. Rust doesn't allow us to specify a self-referential struct, so pretend it's 'static + data: unsafe { + std::mem::transmute::<&_, &'static datadog_live_debugger::LiveDebuggingData>( + &*parsed, + ) + } + .into(), + opaque_data: Some(parsed), + } + } else { + LiveDebuggingParseResult { + data: LiveDebuggingData::None, + opaque_data: None, + } + } +} + +#[no_mangle] +pub extern "C" fn drop_probe(_: LiveDebuggingData) {} + +#[no_mangle] +pub extern "C" fn drop_parse_result(_: LiveDebuggingParseResult) {} diff --git a/live-debugger-ffi/src/sender.rs b/live-debugger-ffi/src/sender.rs new file mode 100644 index 000000000..13b741339 --- /dev/null +++ b/live-debugger-ffi/src/sender.rs @@ -0,0 +1,117 @@ +use ddcommon_ffi::CharSlice; +// Alias to prevent cbindgen panic +use datadog_live_debugger::debugger_defs::{Value as DebuggerValueAlias, Capture as DebuggerCaptureAlias, Captures, DebuggerData, Entry, Fields, DebuggerPayload, Snapshot}; +use datadog_live_debugger::sender::generate_new_id; + +#[repr(C)] +pub enum FieldType { + STATIC, + ARG, + LOCAL, +} + +#[repr(C)] +pub struct CaptureValue<'a> { + pub r#type: CharSlice<'a>, + pub value: CharSlice<'a>, + pub fields: 
Option>>>, + pub elements: Vec>, + pub entries: Vec>>, + pub is_null: bool, + pub truncated: bool, + pub not_captured_reason: CharSlice<'a>, + pub size: CharSlice<'a>, +} + +impl<'a> From> for DebuggerValueAlias> { + fn from(val: CaptureValue<'a>) -> Self { + DebuggerValueAlias { + r#type: val.r#type, + value: if val.value.len() == 0 { None } else { Some(val.value) }, + fields: if let Some(boxed) = val.fields { *boxed } else { Fields::default() }, + elements: unsafe { std::mem::transmute(val.elements) }, // SAFETY: is transparent + entries: val.entries, + is_null: val.is_null, + truncated: val.truncated, + not_captured_reason: if val.not_captured_reason.len() == 0 { None } else { Some(val.not_captured_reason) }, + size: if val.size.len() == 0 { None } else { Some(val.size) }, + } + } +} + +/// cbindgen:no-export +#[repr(transparent)] +pub struct DebuggerValue<'a>(DebuggerValueAlias>); +/// cbindgen:no-export +#[repr(transparent)] +pub struct DebuggerCapture<'a>(DebuggerCaptureAlias>); + +#[repr(C)] +pub struct ExceptionSnapshot<'a> { + pub data: *mut DebuggerPayload>, + pub capture: *mut DebuggerCapture<'a>, +} + +#[no_mangle] +pub extern "C" fn ddog_create_exception_snapshot<'a>(buffer: &mut Vec>>, service: CharSlice<'a>, language: CharSlice<'a>, id: CharSlice<'a>, exception_id: CharSlice<'a>, timestamp: u64) -> *mut DebuggerCapture<'a> { + let snapshot = DebuggerPayload { + service, + source: "dd_debugger", + timestamp, + debugger: DebuggerData { + snapshot: Snapshot { + captures: Captures { + r#return: Some(DebuggerCaptureAlias::default()), + ..Default::default() + }, + language, + id, + exception_id, + timestamp, + } + } + }; + buffer.push(snapshot); + unsafe { std::mem::transmute(buffer.last_mut().unwrap().debugger.snapshot.captures.r#return.as_mut().unwrap()) } +} + +#[no_mangle] +#[allow(improper_ctypes_definitions)] // Vec has a fixed size, and we care only about that here +pub extern "C" fn ddog_snapshot_add_field<'a, 'b: 'a, 'c: 'a>(capture: &mut 
DebuggerCapture<'a>, r#type: FieldType, name: CharSlice<'b>, value: CaptureValue<'c>) { + let fields = match r#type { + FieldType::STATIC => &mut capture.0.static_fields, + FieldType::ARG => &mut capture.0.arguments, + FieldType::LOCAL => &mut capture.0.locals, + }; + fields.insert(name, value.into()); +} + +#[no_mangle] +#[allow(improper_ctypes_definitions)] // Vec has a fixed size, and we care only about that here +pub extern "C" fn ddog_capture_value_add_element<'a, 'b: 'a>(value: &mut CaptureValue<'a>, element: CaptureValue<'b>) { + value.elements.push(DebuggerValue(element.into())); +} + +#[no_mangle] +#[allow(improper_ctypes_definitions)] // Vec has a fixed size, and we care only about that here +pub extern "C" fn ddog_capture_value_add_entry<'a, 'b: 'a, 'c: 'a>(value: &mut CaptureValue<'a>, key: CaptureValue<'b>, element: CaptureValue<'c>) { + value.entries.push(Entry(key.into(), element.into())); +} + +#[no_mangle] +#[allow(improper_ctypes_definitions)] // Vec has a fixed size, and we care only about that here +pub extern "C" fn ddog_capture_value_add_field<'a, 'b: 'a, 'c: 'a>(value: &mut CaptureValue<'a>, key: CharSlice<'b>, element: CaptureValue<'c>) { + let fields = match value.fields { + None => { + value.fields = Some(Box::default()); + value.fields.as_mut().unwrap() + }, + Some(ref mut f) => f, + }; + fields.insert(key, element.into()); +} + +#[no_mangle] +pub extern "C" fn ddog_snapshot_format_new_uuid(buf: &mut [u8; 36]) { + generate_new_id().as_hyphenated().encode_lower(buf); +} diff --git a/live-debugger/Cargo.toml b/live-debugger/Cargo.toml new file mode 100644 index 000000000..011eb2cc3 --- /dev/null +++ b/live-debugger/Cargo.toml @@ -0,0 +1,15 @@ +[package] +edition = "2021" +license = "Apache 2.0" +name = "datadog-live-debugger" +version = "0.0.1" + +[dependencies] +anyhow = "1.0" +ddcommon = { path = "../ddcommon" } +hyper = { version = "0.14", features = ["client"] } +regex = "1.9.3" +json = "0.12.4" +serde = { version = "1.0", features = 
["derive"] } +serde_json = "1.0" +uuid = { version = "1.0", features = ["v4"] } diff --git a/live-debugger/src/debugger_defs.rs b/live-debugger/src/debugger_defs.rs new file mode 100644 index 000000000..82cb39c93 --- /dev/null +++ b/live-debugger/src/debugger_defs.rs @@ -0,0 +1,78 @@ +use std::collections::HashMap; +use std::hash::Hash; +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize)] +pub struct DebuggerPayload { + pub service: S, + pub source: &'static str, + pub timestamp: u64, + pub debugger: DebuggerData, +} + +#[derive(Serialize, Deserialize)] +pub struct DebuggerData { + pub snapshot: Snapshot, +} + +#[derive(Serialize, Deserialize)] +pub struct Snapshot { + pub captures: Captures, + pub language: S, + pub id: S, + #[serde(rename = "exception-id")] + pub exception_id: S, + pub timestamp: u64, +} + +#[derive(Default, Serialize, Deserialize)] +pub struct Captures { + #[serde(skip_serializing_if = "HashMap::is_empty")] + pub lines: HashMap>, + #[serde(skip_serializing_if = "Option::is_none")] + pub entry: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + pub r#return: Option>, +} + +pub type Fields = HashMap>; +#[derive(Default, Serialize, Deserialize)] +pub struct Capture { + #[serde(skip_serializing_if = "HashMap::is_empty")] + #[serde(rename = "staticFields")] + pub static_fields: Fields, + #[serde(skip_serializing_if = "HashMap::is_empty")] + pub arguments: Fields, + #[serde(skip_serializing_if = "HashMap::is_empty")] + pub locals: Fields, + #[serde(skip_serializing_if = "Option::is_none")] + pub throwable: Option>, +} + +#[derive(Serialize, Deserialize)] +pub struct Entry(pub Value, pub Value); + +#[derive(Default, Serialize, Deserialize)] +pub struct Value { + pub r#type: S, + #[serde(skip_serializing_if = "Option::is_none")] + pub value: Option, + #[serde(skip_serializing_if = "HashMap::is_empty")] + pub fields: Fields, + #[serde(skip_serializing_if = "Vec::is_empty")] + pub elements: Vec>, + 
#[serde(skip_serializing_if = "Vec::is_empty")] + pub entries: Vec>, + #[serde(skip_serializing_if = "<&bool as std::ops::Not>::not")] + #[serde(rename = "isNull")] + pub is_null: bool, + #[serde(skip_serializing_if = "<&bool as std::ops::Not>::not")] + pub truncated: bool, + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(rename = "notCapturedReason")] + pub not_captured_reason: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub size: Option, +} + + diff --git a/live-debugger/src/expr_defs.rs b/live-debugger/src/expr_defs.rs new file mode 100644 index 000000000..15e16e3d2 --- /dev/null +++ b/live-debugger/src/expr_defs.rs @@ -0,0 +1,82 @@ +// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. + +#[derive(Debug)] +pub enum CollectionSource { + Reference(Reference), + FilterOperator(Box<(CollectionSource, Condition)>), +} + +#[derive(Debug)] +pub enum Reference { + Base(String), + Index(Box<(CollectionSource, Value)>), // i.e. foo[bar] + Nested(Box<(Reference, Value)>), // i.e. 
foo.bar +} + +#[derive(Debug)] +pub enum BinaryComparison { + Equals, + NotEquals, + GreaterThan, + GreaterOrEquals, + LessThan, + LessOrEquals, +} + +#[derive(Debug)] +pub enum StringComparison { + StartsWith, + EndsWith, + Contains, + Matches, +} + +#[derive(Debug)] +pub enum CollectionMatch { + All, + Any, +} + +#[derive(Debug)] +pub enum Condition { + Always, + Never, + Disjunction(Box<(Condition, Condition)>), + Conjunction(Box<(Condition, Condition)>), + Negation(Box), + StringComparison(StringComparison, StringSource, String), + BinaryComparison(Value, BinaryComparison, Value), + CollectionMatch(CollectionMatch, Reference, Box), + IsUndefinedReference(Reference), + IsEmptyReference(Reference), +} + +#[derive(Debug)] +pub enum NumberSource { + Number(f64), + CollectionSize(CollectionSource), + StringLength(Reference), + Reference(Reference), +} + +#[derive(Debug)] +pub enum StringSource { + String(String), + Substring(Box<(StringSource, NumberSource, NumberSource)>), + Null, + Reference(Reference), +} + +#[derive(Debug)] +pub enum Value { + Bool(Box), + String(StringSource), + Number(NumberSource), +} + +#[derive(Debug)] +pub enum DslPart { + Ref(CollectionSource), + String(String), +} diff --git a/live-debugger/src/expr_eval.rs b/live-debugger/src/expr_eval.rs new file mode 100644 index 000000000..c91830ee9 --- /dev/null +++ b/live-debugger/src/expr_eval.rs @@ -0,0 +1,355 @@ +// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. 
+ +use crate::expr_defs::{ + BinaryComparison, CollectionMatch, CollectionSource, Condition, DslPart, NumberSource, + Reference, StringComparison, StringSource, Value, +}; +use regex::Regex; +use std::cmp::min; +use std::str::FromStr; +use std::usize; + +#[derive(Debug)] +pub struct DslString(pub(crate) Vec); +#[derive(Debug)] +pub struct ProbeValue(pub(crate) Value); +#[derive(Debug)] +pub struct ProbeCondition(pub(crate) Condition); + +pub enum IntermediateValue { + String(String), + Number(f64), + Bool(bool), + Null, + Referenced(I), +} + +pub struct Evaluator { + pub equals: for<'a> fn(&'a C, IntermediateValue<&'a I>, IntermediateValue<&'a I>) -> bool, + pub greater_than: for<'a> fn(&'a C, IntermediateValue<&'a I>, IntermediateValue<&'a I>) -> bool, + pub greater_or_equals: + for<'a> fn(&'a C, IntermediateValue<&'a I>, IntermediateValue<&'a I>) -> bool, + pub fetch_identifier: for<'a> fn(&'a C, &str) -> Option<&'a I>, // special values: @duration, @return, @exception + pub fetch_index: for<'a> fn(&'a C, &'a I, IntermediateValue<&'a I>) -> Option<&'a I>, + pub fetch_nested: for<'a> fn(&'a C, &'a I, IntermediateValue<&'a I>) -> Option<&'a I>, + pub length: for<'a> fn(&'a C, &'a I) -> u64, + pub try_enumerate: for<'a> fn(&'a C, &'a I) -> Option>, + pub stringify: for<'a> fn(&'a C, &'a I) -> String, + pub convert_index: for<'a> fn(&'a C, &'a I) -> Option, +} + +type EvalResult = Result; + +struct Eval<'a, I, C> { + eval: &'a Evaluator, + context: &'a C, + it: Option<&'a I>, +} + +impl<'a, I, C> Eval<'a, I, C> { + fn value(&mut self, value: &'a Value) -> EvalResult> { + Ok(match value { + Value::Bool(condition) => IntermediateValue::Bool(self.condition(condition)?), + Value::String(s) => self.string_source(s)?, + Value::Number(n) => self.number_source(n)?, + }) + } + + fn number_source(&mut self, value: &'a NumberSource) -> EvalResult> { + Ok(match value { + NumberSource::Number(n) => IntermediateValue::Number(*n), + NumberSource::CollectionSize(collection) => { + 
IntermediateValue::Number(match collection { + CollectionSource::Reference(reference) => { + (self.eval.length)(self.context, self.reference(reference)?.ok_or(())?) + as f64 + } + CollectionSource::FilterOperator(_) => { + self.collection_source(collection)?.ok_or(())?.len() as f64 + } + }) + } + NumberSource::StringLength(reference) => IntermediateValue::Number((self.eval.length)( + self.context, + self.reference(reference)?.ok_or(())?, + ) + as f64), + NumberSource::Reference(reference) => { + IntermediateValue::Referenced(self.reference(reference)?.ok_or(())?) + } + }) + } + + fn convert_index(&mut self, value: IntermediateValue<&'a I>) -> EvalResult { + Ok(match value { + IntermediateValue::String(s) => return usize::from_str(s.as_str()).map_err(|_| ()), + IntermediateValue::Number(n) => n as usize, + IntermediateValue::Bool(_) => return Err(()), + IntermediateValue::Null => 0, + IntermediateValue::Referenced(referenced) => { + (self.eval.convert_index)(self.context, referenced).ok_or(())? + } + }) + } + + fn number_to_index(&mut self, value: &'a NumberSource) -> EvalResult { + let value = self.number_source(value)?; + self.convert_index(value) + } + + fn string_source(&mut self, value: &'a StringSource) -> EvalResult> { + Ok(match value { + StringSource::String(s) => IntermediateValue::String(s.to_string()), + StringSource::Substring(boxed) => { + let (string, start, end) = &**boxed; + let str = self.stringify(string)?; + let start = self.number_to_index(start)?; + let mut end = self.number_to_index(end)?; + if start > end || start >= str.len() { + return Err(()); + } + end = min(end, str.len()); + IntermediateValue::String(str[start..end].to_string()) + } + StringSource::Null => IntermediateValue::Null, + StringSource::Reference(reference) => { + IntermediateValue::Referenced(self.reference(reference)?.ok_or(())?) + } + }) + } + + fn reference_collection(&mut self, reference: &'a Reference) -> EvalResult>> { + Ok(self + .reference(reference)? 
+ .and_then(|reference| (self.eval.try_enumerate)(self.context, reference))) + } + + fn reference(&mut self, reference: &'a Reference) -> EvalResult> { + Ok(match reference { + Reference::Base(ref identifier) => { + if identifier == "@it" { + self.it + } else { + (self.eval.fetch_identifier)(self.context, identifier.as_str()) + } + } + Reference::Index(ref boxed) => { + let (source, dimension) = &**boxed; + let dimension = self.value(dimension)?; + match source { + CollectionSource::FilterOperator(_) => { + let index = self.convert_index(dimension)?; + self.collection_source(source)?.and_then(|vec| { + if index < vec.len() { + Some(vec[index]) + } else { + None + } + }) + } + CollectionSource::Reference(ref reference) => self + .reference(reference)? + .and_then(|base| (self.eval.fetch_index)(self.context, base, dimension)), + } + } + Reference::Nested(ref boxed) => { + let (source, member) = &**boxed; + let member = self.value(member)?; + self.reference(source)? + .and_then(|base| (self.eval.fetch_nested)(self.context, base, member)) + } + }) + } + + fn collection_source( + &mut self, + collection: &'a CollectionSource, + ) -> EvalResult>> { + Ok(match collection { + CollectionSource::Reference(ref reference) => self.reference_collection(reference)?, + CollectionSource::FilterOperator(ref boxed) => { + let (source, condition) = &**boxed; + let mut values = vec![]; + let it = self.it; + if let Some(source_values) = self.collection_source(source)? { + for item in source_values { + self.it = Some(item); + if self.condition(condition)? 
{ + values.push(item); + } + } + self.it = it; + Some(values) + } else { + None + } + } + }) + } + + fn stringify_intermediate(&mut self, value: IntermediateValue<&'a I>) -> String { + match value { + IntermediateValue::String(s) => s.to_string(), + IntermediateValue::Number(n) => n.to_string(), + IntermediateValue::Bool(b) => b.to_string(), + IntermediateValue::Null => "".to_string(), + IntermediateValue::Referenced(referenced) => { + (self.eval.stringify)(self.context, referenced) + } + } + } + + fn stringify(&mut self, value: &'a StringSource) -> EvalResult { + let value = self.string_source(value)?; + Ok(self.stringify_intermediate(value)) + } + + fn condition(&mut self, condition: &'a Condition) -> EvalResult { + Ok(match condition { + Condition::Always => true, + Condition::Never => false, + Condition::StringComparison(comparer, haystack, needle) => { + let haystack = self.stringify(haystack)?; + match comparer { + StringComparison::StartsWith => haystack.starts_with(needle), + StringComparison::EndsWith => haystack.ends_with(needle), + StringComparison::Contains => haystack.contains(needle), + StringComparison::Matches => { + return Regex::new(needle.as_str()) + .map_err(|_| ()) + .map(|r| r.is_match(haystack.as_str())) + } + } + } + Condition::BinaryComparison(a, comparer, b) => { + let (a, b) = (self.value(a)?, self.value(b)?); + match comparer { + BinaryComparison::Equals => (self.eval.equals)(self.context, a, b), + BinaryComparison::NotEquals => !(self.eval.equals)(self.context, a, b), + BinaryComparison::GreaterThan => (self.eval.greater_than)(self.context, a, b), + BinaryComparison::GreaterOrEquals => { + (self.eval.greater_or_equals)(self.context, a, b) + } + BinaryComparison::LessThan => { + !(self.eval.greater_or_equals)(self.context, a, b) + } + BinaryComparison::LessOrEquals => !(self.eval.greater_than)(self.context, a, b), + } + } + Condition::CollectionMatch(match_type, reference, condition) => { + let vec = 
self.reference_collection(reference)?.ok_or(())?; + let it = self.it; + let mut result; + match match_type { + CollectionMatch::All => { + result = true; + for v in vec { + self.it = Some(v); + if !self.condition(condition)? { + result = false; + break; + } + } + } + CollectionMatch::Any => { + result = false; + for v in vec { + self.it = Some(v); + if self.condition(condition)? { + result = true; + break; + } + } + } + } + self.it = it; + result + } + Condition::IsUndefinedReference(reference) => self.reference(reference).ok().is_none(), + Condition::IsEmptyReference(reference) => { + if let Some(value) = self.reference(reference)? { + (self.eval.length)(self.context, value) == 0 + } else { + return Err(()); + } + } + Condition::Disjunction(boxed) => { + let (a, b) = &**boxed; + self.condition(a)? || self.condition(b)? + } + Condition::Conjunction(boxed) => { + let (a, b) = &**boxed; + self.condition(a)? && self.condition(b)? + } + Condition::Negation(boxed) => !self.condition(boxed)?, + }) + } +} + +pub fn eval_condition<'a, 'e, 'v, I, C>( + eval: &'e Evaluator, + condition: &'v ProbeCondition, + context: &'a C, +) -> bool +where + 'e: 'a, + 'v: 'a, +{ + Eval { + eval, + context, + it: None, + } + .condition(&condition.0) + .unwrap_or(false) +} + +pub fn eval_string<'a, 'e, 'v, I, C>( + eval: &'e Evaluator, + dsl: &'v DslString, + context: &'a C, +) -> String +where + 'e: 'a, + 'v: 'a, +{ + dsl.0 + .iter() + .map(|p| match p { + DslPart::String(ref str) => str.to_string(), + DslPart::Ref(ref reference) => { + let mut eval = Eval { + eval, + context, + it: None, + }; + match reference { + CollectionSource::Reference(reference) => eval + .reference(reference) + .unwrap_or_default() + .map(|referenced| { + eval.stringify_intermediate(IntermediateValue::Referenced(referenced)) + }), + CollectionSource::FilterOperator(_) => eval + .collection_source(reference) + .ok() + .unwrap_or_default() + .map(|vec| { + format!( + "[{}]", + vec.iter() + .map(|referenced| 
eval.stringify_intermediate( + IntermediateValue::Referenced(referenced) + )) + .collect::>() + .join(", ") + ) + }), + } + .unwrap_or("UNDEFINED".to_string()) + } + }) + .collect::>() + .join("") +} diff --git a/live-debugger/src/lib.rs b/live-debugger/src/lib.rs new file mode 100644 index 000000000..79eea4e1b --- /dev/null +++ b/live-debugger/src/lib.rs @@ -0,0 +1,16 @@ +// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. + +mod expr_defs; +mod expr_eval; +mod parse_json; +mod parse_json_expr; +mod parse_util; +mod probe_defs; + +pub mod debugger_defs; +pub mod sender; + +pub use expr_eval::*; +pub use parse_json::parse as parse_json; +pub use probe_defs::*; diff --git a/live-debugger/src/parse_json.rs b/live-debugger/src/parse_json.rs new file mode 100644 index 000000000..e6a199fbb --- /dev/null +++ b/live-debugger/src/parse_json.rs @@ -0,0 +1,216 @@ +use anyhow::Context; +use crate::expr_defs::{Condition, StringSource, Value}; +use crate::parse_json_expr::{parse_condition, parse_segments, parse_value}; +use crate::parse_util::{get, try_get}; +use crate::{ + Capture, EvaluateAt, FilterList, InBodyLocation, LiveDebuggingData, LogProbe, MetricKind, + MetricProbe, Probe, ProbeCondition, ProbeTarget, ProbeType, ProbeValue, ServiceConfiguration, + SpanDecorationProbe, SpanProbe, SpanProbeDecoration, SpanProbeTarget, +}; +use json::JsonValue; + +fn parse_string_vec(array: &JsonValue) -> anyhow::Result> { + let mut vec = vec![]; + if !array.is_array() { + anyhow::bail!("Tried to get Vec from non-array"); + } + for value in array.members() { + vec.push(value.as_str().ok_or_else(|| anyhow::format_err!("Failed to get string from array value"))?.to_string()); + } + Ok(vec) +} + +fn parse_probe(parsed: &JsonValue) -> anyhow::Result { + let mut tags = vec![]; + if let Some(json_tags) = 
try_get(parsed, "tags") { + tags = parse_string_vec(json_tags).context("parsing tags")?; + } + + let target = get(parsed, "where")?; + let lines = if let Some(lines) = try_get(target, "lines") { + parse_string_vec(get(lines, "where").context("parsing lines")?).context("parsing lines")? + } else { + vec![] + }; + + let target_get = |name: &str| -> anyhow::Result> { + try_get(target, name) + .and_then(|v| { + if v.is_null() { + None + } else { + Some(v.as_str().map(ToString::to_string).ok_or_else(|| anyhow::format_err!("Failed getting string for {name}"))) + } + }) + .transpose() + }; + let probe = match get(parsed, "type")?.as_str().ok_or_else(|| anyhow::format_err!("Failed getting string from type"))? { + "METRIC_PROBE" => ProbeType::Metric(MetricProbe { + kind: match get(parsed, "kind")?.as_str().ok_or_else(|| anyhow::format_err!("Failed getting string from METRIC_PROBE.kind"))? { + "COUNT" => MetricKind::Count, + "GAUGE" => MetricKind::Gauge, + "HISTOGRAM" => MetricKind::Histogram, + "DISTRIBUTION" => MetricKind::Distribution, + kind => anyhow::bail!("{kind} is not a valid METRIC_PROBE.kind"), + }, + name: get(parsed, "metricName")?.as_str().ok_or_else(|| anyhow::format_err!("Failed getting string from metricName"))?.to_string(), + value: ProbeValue( + try_get(parsed, "value") + .map(|v| { + if v.is_null() { + Ok(Value::String(StringSource::Null)) + } else { + parse_value(v) + } + }) + .transpose()? 
+ .unwrap_or(Value::String(StringSource::Null)), + ), + }), + "LOG_PROBE" => ProbeType::Log(LogProbe { + segments: parse_segments(get(parsed, "segments")?).context("while parsing LOG_PROBE.segments")?, + when: ProbeCondition( + try_get(parsed, "when") + .map(|v| parse_condition(v).context("while parsing LOG_PROBE.when")) + .unwrap_or(Ok(Condition::Always))?, + ), + capture: { + let mut capture = Capture::default(); + if let Some(v) = try_get(parsed, "capture") { + if !v.is_null() { + if let Some(max_reference_depth) = try_get(v, "maxReference_depth") { + capture.max_reference_depth = max_reference_depth.as_u32().ok_or_else(|| anyhow::format_err!("Failed getting u32 from LOG_PROBE.capture.maxReferenceDepth"))?; + } + if let Some(max_collection_size) = try_get(v, "maxCollectionSize") { + capture.max_collection_size = max_collection_size.as_u32().ok_or_else(|| anyhow::format_err!("Failed getting u32 from LOG_PROBE.capture.maxCollectionSize"))?; + } + if let Some(max_length) = try_get(v, "maxLength") { + capture.max_length = max_length.as_u32().ok_or_else(|| anyhow::format_err!("Failed getting u32 from LOG_PROBE.capture.maxLength"))?; + } + if let Some(max_field_depth) = try_get(v, "maxFieldDepth") { + capture.max_field_depth = max_field_depth.as_u32().ok_or_else(|| anyhow::format_err!("Failed getting u32 from LOG_PROBE.capture.maxFieldDepth"))?; + } + } + } + capture + }, + sampling_snapshots_per_second: try_get(parsed, "sampling") + .and_then(|v| { + if v.is_null() { + None + } else { + Some(v.as_u32().ok_or_else(|| anyhow::format_err!("Failed getting u32 from LOG_PROBE.sampling"))) + } + }) + .transpose()? + .unwrap_or(5000), + }), + "SPAN_PROBE" => ProbeType::Span(SpanProbe {}), + "SPAN_DECORATION_PROBE" => ProbeType::SpanDecoration(SpanDecorationProbe { + target: match try_get(parsed, "targetSpan").map_or(Ok("ACTIVE"), |v| v.as_str().ok_or_else(|| anyhow::format_err!("Failed getting string from SPAN_DECORATION_PROBE.targetSpan")))? 
{ + "ACTIVE" => SpanProbeTarget::Active, + "ROOT" => SpanProbeTarget::Root, + target => anyhow::bail!("{target} is not a valid SPAN_DECORATION_PROBE.targetSpan"), + }, + decorations: { + let mut vec = vec![]; + let decorations = get(parsed, "decorations").context("on SPAN_DECORATIONS_PROBE")?; + if !decorations.is_array() { + anyhow::bail!("SPAN_DECORATIONS_PROBE.decorations is not an array"); + } + for decoration in decorations.members() { + let tags = get(decoration, "tags").context("on SPAN_DECORATIONS_PROBE.decorations")?; + if !tags.is_array() { + anyhow::bail!("SPAN_DECORATIONS_PROBE.decorations.tags is not an array"); + } + let mut tagvec = vec![]; + for tag in tags.members() { + let name = get(tag, "name").context("on SPAN_DECORATIONS_PROBE.decorations[].tags[]")?.as_str().ok_or_else(|| anyhow::format_err!("SPAN_DECORATIONS_PROBE.decorations.tags[].name is not a string"))?.to_string(); + let value = parse_segments(get(tag, "value")?).context("while parsing SPAN_DECORATIONS_PROBE.decorations[].tags[].value")?; + tagvec.push((name, value)); + } + let condition = try_get(decoration, "when") + .map(|v| { + if v.is_null() { + Ok(Condition::Always) + } else { + parse_condition(v).context("parsing the condition of SPAN_DECORATION_PROBE.decorations[].when") + } + }) + .transpose()? 
+ .unwrap_or(Condition::Always); + vec.push(SpanProbeDecoration { + condition: ProbeCondition(condition), + tags: tagvec, + }); + } + vec + }, + }), + r#type => anyhow::bail!("Unknown probe type {type}"), + }; + + Ok(Probe { + id: get(parsed, "id")?.as_str().ok_or_else(|| anyhow::format_err!("Failed getting string from id"))?.into(), + version: get(parsed, "version")?.as_u64().unwrap_or(0), + language: get(parsed, "language")?.as_str().map(ToString::to_string), + tags, + target: ProbeTarget { + type_name: target_get("typeName")?, + method_name: target_get("methodName")?, + source_file: target_get("sourceFile")?, + signature: target_get("signature")?, + lines, + in_body_location: match target_get("inBodyLocation")? { + None => InBodyLocation::None, + Some(string) => match string.as_str() { + "START" => InBodyLocation::Start, + "END" => InBodyLocation::End, + location => anyhow::bail!("{location} is not a valid inBodyLocation"), + }, + }, + }, + evaluate_at: match get(parsed, "evaluateAt")?.as_str().ok_or_else(|| anyhow::format_err!("Failed getting string from evaluateAt"))? 
{ + "ENTRY" => EvaluateAt::Entry, + "EXIT" => EvaluateAt::Exit, + eval_at => anyhow::bail!("{eval_at} is not a valid evaluateAt"), + }, + probe, + }) +} + +fn parse_service_configuration(parsed: &JsonValue) -> anyhow::Result { + fn parse_filter_list(parsed: &JsonValue, key: &str) -> anyhow::Result { + let f = get(parsed, key)?; + Ok(FilterList { + package_prefixes: try_get(f, "packagePrefixes") + .map_or(Ok(vec![]), parse_string_vec).map_err(|e| e.context(format!("while parsing {key}.packagePrefixes")))?, + classes: try_get(f, "classes").map_or(Ok(vec![]), parse_string_vec) + .map_err(|e| e.context(format!("while parsing {key}.classes")))?, + }) + } + + Ok(ServiceConfiguration { + id: get(parsed, "id")?.as_str().ok_or_else(|| anyhow::format_err!("Failed getting string from id"))?.into(), + allow: parse_filter_list(parsed, "allowList")?, + deny: parse_filter_list(parsed, "denyList")?, + sampling_snapshots_per_second: try_get(parsed, "sampling") + .and_then(|v| { + if v.is_null() { + None + } else { + Some(v.as_u32().ok_or_else(|| anyhow::format_err!("Failed getting u32 from sampling"))) + } + }) + .transpose()? + .unwrap_or(5000), + }) +} + +pub fn parse(json: &str) -> anyhow::Result { + let parsed = json::parse(json)?; + Ok(match get(&parsed, "type")?.as_str().ok_or_else(|| anyhow::format_err!("Failed getting string from type"))? 
{ + "SERVICE_CONFIGURATION" => LiveDebuggingData::Probe(parse_probe(&parsed).context("while parsing probe")?), + _ => LiveDebuggingData::ServiceConfiguration(parse_service_configuration(&parsed).context("While parsing service configuration")?), + }) +} diff --git a/live-debugger/src/parse_json_expr.rs b/live-debugger/src/parse_json_expr.rs new file mode 100644 index 000000000..5ae19f772 --- /dev/null +++ b/live-debugger/src/parse_json_expr.rs @@ -0,0 +1,211 @@ +use anyhow::Context; +use crate::expr_defs::{ + BinaryComparison, CollectionMatch, CollectionSource, Condition, DslPart, NumberSource, + Reference, StringComparison, StringSource, Value, +}; +use crate::parse_util::try_get; +use crate::DslString; +use json::JsonValue; + +fn try_parse_string_value(json: &JsonValue) -> anyhow::Result> { + if let Some(substring) = try_get(json, "substring") { + if substring.is_array() && substring.len() == 3 { + return Ok(Some(StringSource::Substring(Box::new(( + parse_string_value(&substring[0]).context("while parsing source string for substring")?, + parse_number_value(&substring[1]).context("while parsing number for substring")?, + parse_number_value(&substring[2]).context("while parsing number for substring")?, + ))))); + } + } + if json.is_string() { + return Ok(Some(StringSource::String(json.as_str().unwrap().into()))); + } + if json.is_null() { + return Ok(Some(StringSource::Null)); + } + Ok(try_parse_reference(json).context("while parsing string reference")?.map(StringSource::Reference)) +} + +fn parse_string_value(json: &JsonValue) -> anyhow::Result { + try_parse_string_value(json)?.ok_or_else(|| anyhow::format_err!("Could not find an appropriate operation for a string value")) +} + +fn try_parse_number_value(json: &JsonValue) -> anyhow::Result> { + if let Some(reference) = try_get(json, "len") { + return Ok(Some(NumberSource::StringLength(parse_reference(reference).context("while parsing reference for len operation")?))); + } + if let Some(reference) = try_get(json, 
"count") { + return Ok(Some(NumberSource::CollectionSize(parse_collection_source( + reference, + ).context("while parsing collection for size operation")?))); + } + if json.is_number() { + return Ok(Some(NumberSource::Number(json.as_number().unwrap().into()))); + } + Ok(try_parse_reference(json).context("while parsing number reference")?.map(NumberSource::Reference)) +} + +fn parse_number_value(json: &JsonValue) -> anyhow::Result { + try_parse_number_value(json)?.ok_or_else(|| anyhow::format_err!("Could not find an appropriate operation for a number")) +} + +fn try_parse_reference(json: &JsonValue) -> anyhow::Result> { + if let Some(identifier) = try_get(json, "ref") { + return Ok(Some(Reference::Base(identifier.as_str().ok_or_else(|| anyhow::format_err!("Failed getting string from ref"))?.into()))); + } + if let Some(index) = try_get(json, "index") { + if index.is_array() && index.len() == 2 { + return Ok(Some(Reference::Index(Box::new(( + parse_collection_source(&index[0]).context("while parsing collection for index operation")?, + parse_value(&index[1]).context("while parsing index for index operation")?, + ))))); + } + } + if let Some(index) = try_get(json, "nested") { + if index.is_array() && index.len() == 2 { + return Ok(Some(Reference::Nested(Box::new(( + parse_reference(&index[0]).context("while parsing reference for nested operation")?, + parse_value(&index[1]).context("while parsing key for nested operation")?, + ))))); + } + } + Ok(None) +} + +fn parse_reference(json: &JsonValue) -> anyhow::Result { + try_parse_reference(json)?.ok_or_else(|| anyhow::format_err!("Could not find an appropriate operation for a reference")) +} + +pub fn parse_value(json: &JsonValue) -> anyhow::Result { + Ok(if let Some(str) = try_parse_string_value(json)? { + Value::String(str) + } else if let Some(num) = try_parse_number_value(json)? 
{ + Value::Number(num) + } else { + Value::Bool(Box::new(parse_condition(json).context("while parsing arbitrary value")?)) + }) +} + +pub fn parse_condition(json: &JsonValue) -> anyhow::Result { + for (key, comparer) in [ + ("eq", BinaryComparison::Equals), + ("ne", BinaryComparison::NotEquals), + ("gt", BinaryComparison::GreaterThan), + ("ge", BinaryComparison::GreaterOrEquals), + ("lt", BinaryComparison::LessThan), + ("le", BinaryComparison::LessOrEquals), + ] { + if let Some(args) = try_get(json, key) { + if args.is_array() && args.len() == 2 { + return Ok(Condition::BinaryComparison( + parse_value(&args[0]).context("while parsing lhs of binary comparison")?, + comparer, + parse_value(&args[1]).context("while parsing rhs of binary comparison")?, + )); + } + } + } + + if let Some(args) = try_get(json, "and") { + if args.is_array() && args.len() == 2 { + return Ok(Condition::Disjunction(Box::new(( + parse_condition(&args[0]).context("while parsing lhs of binary and")?, + parse_condition(&args[1]).context("while parsing rhs of binary and")?, + )))); + } + } + + if let Some(args) = try_get(json, "or") { + if args.is_array() && args.len() == 2 { + return Ok(Condition::Conjunction(Box::new(( + parse_condition(&args[0]).context("while parsing lhs of binary or")?, + parse_condition(&args[1]).context("while parsing rhs of binary or")?, + )))); + } + } + + if let Some(arg) = try_get(json, "not") { + return Ok(Condition::Negation(Box::new(parse_condition(arg).context("while parsing negation")?))); + } + + if let Some(arg) = try_get(json, "isEmpty") { + return Ok(Condition::IsEmptyReference(parse_reference(arg).context("while parsing reference for isEmpty operation")?)); + } + + if let Some(arg) = try_get(json, "isUndefined") { + return Ok(Condition::IsUndefinedReference(parse_reference(arg).context("while parsing reference for isUndefined operation")?)); + } + + for (key, comparer) in [("any", CollectionMatch::Any), ("all", CollectionMatch::All)] { + if let Some(args) = 
try_get(json, key) { + if args.is_array() && args.len() == 2 { + return Ok(Condition::CollectionMatch( + comparer, + parse_reference(&args[0]).context("while parsing collection reference for collection operation")?, + Box::new(parse_condition(&args[1]).context("while parsing condition for collection operation")?), + )); + } + } + } + + for (key, comparer) in [ + ("startsWith", StringComparison::StartsWith), + ("endsWith", StringComparison::EndsWith), + ("contains", StringComparison::Contains), + ("matches", StringComparison::Matches), + ] { + if let Some(args) = try_get(json, key) { + if args.is_array() && args.len() == 2 && args[1].is_string() { + return Ok(Condition::StringComparison( + comparer, + parse_string_value(&args[0]).context("While parsing string operand for string comparison")?, + args[1].as_str().unwrap().into(), + )); + } + } + } + + if let Some(bool) = json.as_bool() { + return Ok(if bool { + Condition::Always + } else { + Condition::Never + }); + } + + anyhow::bail!("Could not find an appropriate operation for a condition / boolean") +} + +pub fn try_parse_collection_source(json: &JsonValue) -> anyhow::Result> { + if let Some(index) = try_get(json, "filter") { + if index.is_array() && index.len() == 2 { + return Ok(Some(CollectionSource::FilterOperator(Box::new(( + parse_collection_source(&index[0]).context("while parsing collection source for filter operation")?, + parse_condition(&index[1]).context("while parsing condition for collection filter operation")?, + ))))); + } + } + + Ok(try_parse_reference(json)?.map(CollectionSource::Reference)) +} + +fn parse_collection_source(json: &JsonValue) -> anyhow::Result { + try_parse_collection_source(json)?.ok_or_else(|| anyhow::format_err!("Could not find an appropriate operation for a collection source")) +} + +pub fn parse_segments(json: &JsonValue) -> anyhow::Result { + if json.is_array() { + let mut vec = vec![]; + for member in json.members() { + if let Some(str) = try_get(member, "str") { + 
vec.push(DslPart::String(str.as_str().ok_or_else(|| anyhow::format_err!("Failed getting string from str in segment parsing"))?.to_string())); + } else if let Some(part) = try_parse_collection_source(member).context("while parsing collection source for segments")? { + vec.push(DslPart::Ref(part)); + } else { + anyhow::bail!("Could not find an appropriate key for segment parsing"); + } + } + return Ok(DslString(vec)); + } + anyhow::bail!("segments is not an array") +} diff --git a/live-debugger/src/parse_util.rs b/live-debugger/src/parse_util.rs new file mode 100644 index 000000000..c4f855e7c --- /dev/null +++ b/live-debugger/src/parse_util.rs @@ -0,0 +1,13 @@ +use json::JsonValue; + +pub fn get<'a>(json: &'a JsonValue, name: &str) -> anyhow::Result<&'a JsonValue> { + try_get(json, name).ok_or_else(|| anyhow::format_err!("Missing key {name}")) +} + +pub fn try_get<'a>(json: &'a JsonValue, name: &str) -> Option<&'a JsonValue> { + if json.has_key(name) { + Some(&json[name]) + } else { + None + } +} diff --git a/live-debugger/src/probe_defs.rs b/live-debugger/src/probe_defs.rs new file mode 100644 index 000000000..726f39d47 --- /dev/null +++ b/live-debugger/src/probe_defs.rs @@ -0,0 +1,134 @@ +// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. 
+ +use crate::{DslString, ProbeCondition, ProbeValue}; + +#[derive(Debug)] +#[repr(C)] +pub struct Capture { + pub max_reference_depth: u32, + pub max_collection_size: u32, + pub max_length: u32, + pub max_field_depth: u32, +} + +impl Default for Capture { + fn default() -> Self { + Capture { + max_reference_depth: 3, + max_collection_size: 100, + max_length: 255, + max_field_depth: 20, + } + } +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub enum MetricKind { + Count, + Gauge, + Histogram, + Distribution, +} + +#[derive(Debug)] +pub struct MetricProbe { + pub kind: MetricKind, + pub name: String, + pub value: ProbeValue, // May be Value::Null +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub enum SpanProbeTarget { + Active, + Root, +} + +#[derive(Debug)] +pub struct SpanProbeDecoration { + pub condition: ProbeCondition, + pub tags: Vec<(String, DslString)>, +} + +#[derive(Debug)] +pub struct LogProbe { + pub segments: DslString, + pub when: ProbeCondition, + pub capture: Capture, + pub sampling_snapshots_per_second: u32, +} + +#[derive(Debug)] +pub struct SpanProbe {} + +#[derive(Debug)] +pub struct SpanDecorationProbe { + pub target: SpanProbeTarget, + pub decorations: Vec, +} + +#[derive(Debug)] +pub enum ProbeType { + Metric(MetricProbe), + Log(LogProbe), + Span(SpanProbe), + SpanDecoration(SpanDecorationProbe), +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub enum InBodyLocation { + None, + Start, + End, +} + +#[derive(Debug)] +pub struct ProbeTarget { + pub type_name: Option, + pub method_name: Option, + pub source_file: Option, + pub signature: Option, + pub lines: Vec, + pub in_body_location: InBodyLocation, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub enum EvaluateAt { + Entry, + Exit, +} + +#[derive(Debug)] +pub struct Probe { + pub id: String, + pub version: u64, + pub language: Option, + pub tags: Vec, + pub target: ProbeTarget, // "where" is rust keyword + pub evaluate_at: EvaluateAt, + pub probe: ProbeType, +} + +#[derive(Debug)] 
+pub struct FilterList { + pub package_prefixes: Vec, + pub classes: Vec, +} + +#[derive(Debug)] +pub struct ServiceConfiguration { + pub id: String, + pub allow: FilterList, + pub deny: FilterList, + pub sampling_snapshots_per_second: u32, +} + +#[derive(Debug)] +pub enum LiveDebuggingData { + Probe(Probe), + ServiceConfiguration(ServiceConfiguration), +} diff --git a/live-debugger/src/sender.rs b/live-debugger/src/sender.rs new file mode 100644 index 000000000..04c11f574 --- /dev/null +++ b/live-debugger/src/sender.rs @@ -0,0 +1,94 @@ +use std::hash::Hash; +use std::str::FromStr; +use hyper::{Body, Client, Method, Uri}; +use hyper::http::uri::PathAndQuery; +use serde::Serialize; +use uuid::Uuid; +use ddcommon::connector::Connector; +use ddcommon::Endpoint; +use crate::debugger_defs::DebuggerPayload; + +pub const PROD_INTAKE_SUBDOMAIN: &str = "http-intake.logs"; + +const DIRECT_TELEMETRY_URL_PATH: &str = "/v1/input"; +const AGENT_TELEMETRY_URL_PATH: &str = "/debugger/v1/input"; + +#[derive(Default)] +pub struct Config { + pub endpoint: Option, +} + +impl Config { + pub fn set_endpoint(&mut self, mut endpoint: Endpoint) -> anyhow::Result<()> { + let mut uri_parts = endpoint.url.into_parts(); + if uri_parts.scheme.is_some() && uri_parts.scheme.as_ref().unwrap().as_str() != "file" { + uri_parts.path_and_query = Some(PathAndQuery::from_static( + if endpoint.api_key.is_some() { + DIRECT_TELEMETRY_URL_PATH + } else { + AGENT_TELEMETRY_URL_PATH + }, + )); + } + + endpoint.url = Uri::from_parts(uri_parts)?; + self.endpoint = Some(endpoint); + Ok(()) + } +} + +pub fn encode(data: Vec>) -> Vec { + serde_json::to_vec(&data).unwrap() +} + +pub async fn send(payload: &[u8], endpoint: &Endpoint) -> anyhow::Result<()> { + let mut req = hyper::Request::builder() + .header( + hyper::header::USER_AGENT, + concat!("Tracer/", env!("CARGO_PKG_VERSION")), + ) + .header("Content-type", "application/json") + .method(Method::POST); + + let mut url = endpoint.url.clone(); + if 
endpoint.api_key.is_some() { + // TODO DD-REQUEST-ID header necessary? + req = req.header("DD-EVP-ORIGIN", "agent-debugger"); + let mut parts = url.into_parts(); + let mut query = String::from(parts.path_and_query.unwrap().as_str()); + query.push_str("?ddtags=host:"); + query.push_str(""); // TODO hostname + // TODO container tags and such + parts.path_and_query = Some(PathAndQuery::from_str(&query)?); + url = Uri::from_parts(parts)?; + } + // "env:" + config.getEnv(), + // "version:" + config.getVersion(), + // "debugger_version:" + DDTraceCoreInfo.VERSION, + // "agent_version:" + DebuggerAgent.getAgentVersion(), + // "host_name:" + config.getHostName()); + + // SAFETY: we ensure the reference exists across the request + let req = req.uri(url).body(Body::from(unsafe { std::mem::transmute::<&[u8], &[u8]>(payload) }))?; + + match Client::builder() + .build(Connector::default()) + .request(req) + .await + { + Ok(response) => { + if response.status().as_u16() >= 400 { + let body_bytes = hyper::body::to_bytes(response.into_body()).await?; + let response_body = + String::from_utf8(body_bytes.to_vec()).unwrap_or_default(); + anyhow::bail!("Server did not accept traces: {response_body}"); + } + Ok(()) + } + Err(e) => anyhow::bail!("Failed to send traces: {e}"), + } +} + +pub fn generate_new_id() -> Uuid { + Uuid::new_v4() +} diff --git a/remote-config/Cargo.toml b/remote-config/Cargo.toml new file mode 100644 index 000000000..70f3062a3 --- /dev/null +++ b/remote-config/Cargo.toml @@ -0,0 +1,23 @@ +[package] +edition = "2021" +license = "Apache-2.0" +name = "datadog-remote-config" +version = "0.0.1" + +[dependencies] +anyhow = { version = "1.0" } +ddcommon = { path = "../ddcommon" } +datadog-trace-protobuf = { path = "../trace-protobuf" } +datadog-live-debugger = { path = "../live-debugger" } +hyper = { version = "0.14", features = ["client"], default-features = false } +base64 = "0.21.0" +sha2 = "0.10" +uuid = "1.7.0" +futures-util = "0.3" +tokio = { version = "1.36.0" } 
+tokio-util = "0.7.10" +manual_future = "0.1.1" +time = { version = "0.3", features = ["parsing", "serde"] } +tracing = { version = "0.1", default-features = false } +serde = "1.0" +serde_json = { version = "1.0", features = ["raw_value"] } diff --git a/remote-config/src/dynamic_configuration/data.rs b/remote-config/src/dynamic_configuration/data.rs new file mode 100644 index 000000000..cee7a536d --- /dev/null +++ b/remote-config/src/dynamic_configuration/data.rs @@ -0,0 +1,50 @@ +use std::collections::HashMap; +use serde::Deserialize; + +#[derive(Debug, Deserialize)] +pub struct DynamicConfigTarget { + pub service: String, + pub env: String, +} + +#[derive(Debug, Deserialize)] +pub struct DynamicConfigFile { + pub action: String, + pub service_target: DynamicConfigTarget, + pub lib_config: DynamicConfig, +} + +#[derive(Debug, Deserialize)] +struct TracingHeaderTag { + header: String, + tag_name: String, +} + +#[derive(Debug, Deserialize)] +pub struct DynamicConfig { + tracing_header_tags: Option>, + tracing_sample_rate: Option, + log_injection_enabled: Option, +} + +impl From for Vec { + fn from(value: DynamicConfig) -> Self { + let mut vec = vec![]; + if let Some(tags) = value.tracing_header_tags { + vec.push(Configs::TracingHeaderTags(tags.into_iter().map(|t| (t.header, t.tag_name)).collect())) + } + if let Some(sample_rate) = value.tracing_sample_rate { + vec.push(Configs::TracingSampleRate(sample_rate)); + } + if let Some(log_injection) = value.log_injection_enabled { + vec.push(Configs::LogInjectionEnabled(log_injection)); + } + vec + } +} + +pub enum Configs { + TracingHeaderTags(HashMap), + TracingSampleRate(f64), + LogInjectionEnabled(bool), +} diff --git a/remote-config/src/dynamic_configuration/mod.rs b/remote-config/src/dynamic_configuration/mod.rs new file mode 100644 index 000000000..12e35bbf6 --- /dev/null +++ b/remote-config/src/dynamic_configuration/mod.rs @@ -0,0 +1 @@ +pub mod data; \ No newline at end of file diff --git 
a/remote-config/src/fetch/fetcher.rs b/remote-config/src/fetch/fetcher.rs new file mode 100644 index 000000000..5c293b337 --- /dev/null +++ b/remote-config/src/fetch/fetcher.rs @@ -0,0 +1,316 @@ +use std::collections::HashMap; +use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; +use std::sync::{Arc, Mutex, MutexGuard}; +use base64::Engine; +use hyper::http::uri::{PathAndQuery, Scheme}; +use hyper::{Body, Client, StatusCode}; +use sha2::{Digest, Sha256, Sha512}; +use tracing::{debug, trace, warn}; +use datadog_trace_protobuf::remoteconfig::{ClientGetConfigsRequest, ClientGetConfigsResponse, ClientState, ClientTracer, ConfigState, TargetFileHash, TargetFileMeta}; +use ddcommon::{connector, Endpoint}; +use crate::{RemoteConfigPath, Target}; +use crate::targets::TargetsList; + +const PROD_INTAKE_SUBDOMAIN: &str = "config"; + +/// Manages files. +/// Presents store() and update() operations. +/// It is recommended to minimize the overhead of these operations as they +pub trait FileStorage { + type StoredFile; + + /// A new, currently unknown file was received. + fn store(&self, version: u64, path: RemoteConfigPath, contents: Vec) -> anyhow::Result>; + + /// A file at a given path was updated (new contents). + fn update(&self, file: &Arc, version: u64, contents: Vec) -> anyhow::Result<()>; +} + +/// Fundamental configuration of the RC client, which always must be set. 
+#[derive(Clone, Hash, Eq, PartialEq)] +pub struct ConfigInvariants { + pub language: String, + pub tracer_version: String, + pub endpoint: Endpoint, +} + +struct StoredTargetFile { + hash: String, + handle: Arc, + state: ConfigState, + meta: TargetFileMeta, +} + +pub struct ConfigFetcherState { + target_files_by_path: Mutex>>, + pub invariants: ConfigInvariants, + endpoint: Endpoint, + pub expire_unused_files: bool, +} + +pub struct ConfigFetcherFilesLock<'a, S> { + inner: MutexGuard<'a, HashMap>>, +} + +impl<'a, S> ConfigFetcherFilesLock<'a, S> { + pub fn expire_file(&mut self, path: &RemoteConfigPath) { + self.inner.remove(&path.to_string()); + } +} + +impl ConfigFetcherState { + pub fn new(invariants: ConfigInvariants) -> Self { + ConfigFetcherState { + target_files_by_path: Default::default(), + endpoint: get_product_endpoint(PROD_INTAKE_SUBDOMAIN, &invariants.endpoint), + invariants, + expire_unused_files: true, + } + } + + /// To remove unused remote files manually. Must not be called when auto expiration is active. + /// Note: careful attention must be paid when using this API in order to not deadlock: + /// - This files_lock() must always be called prior to locking any data structure locked within + /// FileStorage::store(). + /// - Also, files_lock() must not be called from within FileStorage::store(). + pub fn files_lock(&self) -> ConfigFetcherFilesLock { + assert!(!self.expire_unused_files); + ConfigFetcherFilesLock { + inner: self.target_files_by_path.lock().unwrap() + } + } +} + +pub struct ConfigFetcher { + pub file_storage: S, + state: Arc>, + timeout: AtomicU32, + /// Collected interval. May be zero if not provided by the remote config server or fetched yet. 
+ pub interval: AtomicU64, +} + +#[derive(Default)] +pub struct OpaqueState { + client_state: Vec, +} + +impl ConfigFetcher { + pub fn new(file_storage: S, state: Arc>) -> Self { + ConfigFetcher { + file_storage, + state, + timeout: AtomicU32::new(5000), + interval: AtomicU64::new(0), + } + } + + /// Quite generic fetching implementation: + /// - runs a request against the Remote Config Server, + /// - validates the data, + /// - removes unused files + /// - checks if the files are already known, + /// - stores new files, + /// - returns all currently active files. + /// It also makes sure that old files are dropped before new files are inserted. + pub async fn fetch_once( + &mut self, + runtime_id: &str, + target: Arc, + config_id: &str, + last_error: Option, + opaque_state: &mut OpaqueState, + ) -> anyhow::Result>> { + let Target { service, env, app_version } = (*target).clone(); + + let mut cached_target_files = vec![]; + let mut config_states = vec![]; + + for StoredTargetFile { state, meta, .. } in self.state.target_files_by_path.lock().unwrap().values() { + config_states.push(state.clone()); + cached_target_files.push(meta.clone()); + } + + let config_req = ClientGetConfigsRequest { + client: Some(datadog_trace_protobuf::remoteconfig::Client { + state: Some(ClientState { + root_version: 1, + targets_version: 0, + config_states, + has_error: last_error.is_some(), + error: last_error.unwrap_or_default(), + backend_client_state: std::mem::take(&mut opaque_state.client_state), + }), + id: config_id.into(), + // TODO maybe not hardcode requested products? 
+ products: vec!["APM_TRACING".to_string(), "LIVE_DEBUGGING".to_string()], + is_tracer: true, + client_tracer: Some(ClientTracer { + runtime_id: runtime_id.to_string(), + language: self.state.invariants.language.to_string(), + tracer_version: self.state.invariants.tracer_version.clone(), + service, + extra_services: vec![], + env, + app_version, + tags: vec![], + }), + is_agent: false, + client_agent: None, + last_seen: 0, + capabilities: vec![], + }), + cached_target_files, + }; + let json = serde_json::to_string(&config_req)?; + + // TODO: directly talking to datadog endpoint (once signatures are validated) + let req = self.state.endpoint + .into_request_builder(concat!("Sidecar/", env!("CARGO_PKG_VERSION")))?; + let response = Client::builder() + .build(connector::Connector::default()) + .request(req.body(Body::from(json))?) + .await + .map_err(|e| anyhow::Error::msg(e).context(format!("Url: {:?}", self.state.endpoint)))?; + let status = response.status(); + let body_bytes = hyper::body::to_bytes(response.into_body()).await?; + if status != StatusCode::OK { + let response_body = + String::from_utf8(body_bytes.to_vec()).unwrap_or_default(); + anyhow::bail!("Server did not accept traces: {response_body}"); + } + + // Agent remote config not active or broken or similar + if body_bytes.len() <= 3 { + trace!("Requested remote config, but not active; received: {}", String::from_utf8_lossy(body_bytes.as_ref())); + return Ok(vec![]); + } + + let response: ClientGetConfigsResponse = + serde_json::from_str(&String::from_utf8_lossy(body_bytes.as_ref()))?; + + let decoded_targets = base64::engine::general_purpose::STANDARD.decode(response.targets.as_slice())?; + let targets_list = TargetsList::try_parse(decoded_targets.as_slice()).map_err(|e| anyhow::Error::msg(e).context(format!("Decoded targets reply: {}", String::from_utf8_lossy(decoded_targets.as_slice()))))?; + // TODO: eventually also verify the targets_list.signatures for FIPS compliance. 
+ + opaque_state.client_state = targets_list.signed.custom.opaque_backend_state.as_bytes().to_vec(); + if let Some(interval) = targets_list.signed.custom.agent_refresh_interval { + self.interval.store(interval, Ordering::Relaxed); + } + + trace!("Received remote config of length {}, containing {:?} paths for target {:?}", body_bytes.len(), targets_list.signed.targets.keys().collect::>(), target); + + let incoming_files: HashMap<_, _> = response.target_files.iter().map(|f| (f.path.as_str(), f.raw.as_slice())).collect(); + + // This lock must be held continuously at least between the existence check + // (target_files.get()) and the insertion later on. Makes more sense to just hold it continuously + let mut target_files = self.state.target_files_by_path.lock().unwrap(); + + if self.state.expire_unused_files { + target_files.retain(|k, _| { + targets_list.signed.targets.contains_key(k.as_str()) + }); + } + + for (path, target_file) in targets_list.signed.targets { + fn hash_sha256(v: &[u8]) -> String { format!("{:x}", Sha256::digest(v)) } + fn hash_sha512(v: &[u8]) -> String { format!("{:x}", Sha512::digest(v)) } + let (hasher, hash) = if let Some(sha256) = target_file.hashes.get("sha256") { + (hash_sha256 as fn(&[u8]) -> String, *sha256) + } else if let Some(sha512) = target_file.hashes.get("sha512") { + (hash_sha512 as fn(&[u8]) -> String, *sha512) + } else { + warn!("Found a target file without hashes at path {path}"); + continue; + }; + let handle = if let Some(StoredTargetFile { hash: old_hash, handle, .. 
}) = target_files.get(path) { + if old_hash == hash { + continue; + } + Some(handle.clone()) + } else { + None + }; + if let Some(raw_file) = incoming_files.get(path) { + if let Ok(decoded) = base64::engine::general_purpose::STANDARD + .decode(raw_file) + { + let computed_hash = hasher(decoded.as_slice()); + if hash != computed_hash { + warn!("Computed hash of file {computed_hash} did not match remote config targets file hash {hash} for path {path}: file: {}", String::from_utf8_lossy(decoded.as_slice())); + continue; + } + + match RemoteConfigPath::try_parse(path) { + Ok(parsed_path) => if let Some(version) = target_file.try_parse_version() { + debug!("Fetched new remote config file at path {path} targeting {target:?}"); + + target_files.insert(path.to_string(), StoredTargetFile { + hash: computed_hash, + state: ConfigState { + id: parsed_path.config_id.to_string(), + version, + product: parsed_path.product.to_string(), + apply_state: 0, + apply_error: "".to_string(), + }, + meta: TargetFileMeta { + path: path.to_string(), + length: decoded.len() as i64, + hashes: target_file.hashes.iter().map(|(algorithm, hash)| TargetFileHash { + algorithm: algorithm.to_string(), + hash: hash.to_string(), + }).collect(), + }, + handle: if let Some(handle) = handle { + self.file_storage.update(&handle, version, decoded)?; + handle + } else { + self.file_storage.store(version, parsed_path, decoded)? 
+ }, + }); + } else { + warn!("Failed parsing version from remote config path {path}"); + }, + Err(e) => { + warn!("Failed parsing remote config path: {path} - {e:?}"); + } + } + } else { + warn!("Failed base64 decoding config for path {path}: {}", String::from_utf8_lossy(raw_file)) + } + } else { + warn!("Found changed config data for path {path}, but no file; existing files: {:?}", incoming_files.keys().collect::>()) + } + } + + let mut configs = Vec::with_capacity(response.client_configs.len()); + for config in response.client_configs.iter() { + if let Some(StoredTargetFile { handle, .. }) = target_files.get(config) { + configs.push(handle.clone()); + } + } + + Ok(configs) + } +} + +fn get_product_endpoint(subdomain: &str, endpoint: &Endpoint) -> Endpoint { + let mut parts = endpoint.url.clone().into_parts(); + if endpoint.api_key.is_some() { + if parts.scheme.is_none() { + parts.scheme = Some(Scheme::HTTPS); + parts.authority = Some( + format!("{}.{}", subdomain, parts.authority.unwrap()) + .parse() + .unwrap(), + ); + } + parts.path_and_query = Some(PathAndQuery::from_static("/api/v0.1/configurations")); + } else { + parts.path_and_query = Some(PathAndQuery::from_static("/v0.7/config")); + } + Endpoint { + url: hyper::Uri::from_parts(parts).unwrap(), + api_key: endpoint.api_key.clone(), + } +} diff --git a/remote-config/src/fetch/mod.rs b/remote-config/src/fetch/mod.rs new file mode 100644 index 000000000..a97f4415e --- /dev/null +++ b/remote-config/src/fetch/mod.rs @@ -0,0 +1,9 @@ +mod fetcher; +mod single; +mod shared; +mod multitarget; + +pub use fetcher::*; +pub use single::*; +pub use shared::*; +pub use multitarget::*; diff --git a/remote-config/src/fetch/multitarget.rs b/remote-config/src/fetch/multitarget.rs new file mode 100644 index 000000000..f98cdba9c --- /dev/null +++ b/remote-config/src/fetch/multitarget.rs @@ -0,0 +1,379 @@ +// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. + +use std::collections::hash_map::Entry; +use std::collections::{HashMap, HashSet}; +use std::default::Default; +use std::fmt::Debug; +use std::hash::Hash; +use std::sync::atomic::{AtomicU32, Ordering}; +use std::sync::{Arc, Mutex}; +use std::time::Duration; +use futures_util::future::Shared; +use futures_util::FutureExt; +use manual_future::ManualFuture; +use tokio::sync::Semaphore; +use tokio::time::Instant; +use tracing::{debug, error, trace}; +use crate::fetch::{ConfigFetcherState, ConfigInvariants, FileStorage, RefcountedFile, RefcountingStorage, SharedFetcher}; +use crate::Target; + +/// MultiTargetFetcher built on a set of SharedFetchers, managing multiple environments and services +/// at once. +/// It is able to keep track of all Target tuples as well as runtime_ids currently active. +/// The implementation chooses an arbitrary runtime id from the set of runtimes which have just a +/// single associated Target. If there is no such runtime id, it uses a synthetic runtime id. +/// This fetcher is designed for use cases with more than one Target tuple associated to a +/// specific runtime id and/or handling hundreds to thousands of different runtime ids with a low +/// amount of actual remote config clients. +pub struct MultiTargetFetcher where S::StoredFile: RefcountedFile + Sync + Send, S: MultiTargetHandlers { + /// All runtime ids belonging to a specific target + target_runtimes: Mutex, HashSet>>, + /// Keyed by runtime_id + runtimes: Mutex>>, + pub remote_config_interval: AtomicU32, + /// All services by target in use + services: Mutex, KnownTarget>>, + pending_async_insertions: AtomicU32, + storage: RefcountingStorage, + /// Limit on how many fetchers can be active at once. + /// This functionality is mostly targeted at CLI programs which generally have their file name + /// as the service name. E.g. 
a phpt testsuite will generate one service for every single file. + /// The remote config backend can only handle a certain amount of services at once. + fetcher_semaphore: Semaphore, +} + +enum KnownTargetStatus { + Pending, + Alive, + RemoveAt(Instant), + Removing(Shared>), +} + +struct KnownTarget { + refcount: u32, + status: Arc>, + synthetic_id: bool, + runtimes: HashSet, + fetcher: Arc, +} + +impl Drop for KnownTarget { + fn drop(&mut self) { + self.fetcher.cancel(); + } +} + +pub trait NotifyTarget: Sync + Send + Sized + Hash + Eq + Clone + Debug { + fn notify(&self); +} + +pub trait MultiTargetHandlers { + fn fetched(&self, target: &Arc, files: &[Arc]) -> (Option, bool); + + fn expired(&self, target: &Arc); + + fn dead(&self); +} + +struct RuntimeInfo { + notify_target: N, + targets: HashMap, u32>, +} + +impl MultiTargetFetcher where S::StoredFile: RefcountedFile + Sync + Send, S: MultiTargetHandlers { + pub const DEFAULT_CLIENTS_LIMIT: u32 = 100; + + pub fn new(storage: S, invariants: ConfigInvariants) -> Arc { + Arc::new(MultiTargetFetcher { + storage: RefcountingStorage::new(storage, ConfigFetcherState::new(invariants)), + target_runtimes: Mutex::new(Default::default()), + runtimes: Mutex::new(Default::default()), + remote_config_interval: AtomicU32::new(5000), + services: Mutex::new(Default::default()), + pending_async_insertions: AtomicU32::new(0), + fetcher_semaphore: Semaphore::new(Self::DEFAULT_CLIENTS_LIMIT as usize), + }) + } + + pub fn is_dead(&self) -> bool { + self.services.lock().unwrap().is_empty() && self.pending_async_insertions.load(Ordering::Relaxed) == 0 + } + + /// Allow for more than DEFAULT_CLIENTS_LIMIT fetchers running simultaneously + pub fn increase_clients_limit(&self, increase: u32) { + self.fetcher_semaphore.add_permits(increase as usize); + } + + fn generate_synthetic_id() -> String { + uuid::Uuid::new_v4().to_string() + } + + fn remove_target(self: &Arc, runtime_id: &str, target: &Arc) { + let mut services = 
self.services.lock().unwrap(); + // "goto" like handling to drop the known_service borrow and be able to change services + 'service_handling: { + 'drop_service: { + let known_service = services.get_mut(target).unwrap(); + known_service.refcount = if known_service.refcount == 1 { + known_service.runtimes.remove(runtime_id); + let mut status = known_service.status.lock().unwrap(); + *status = match *status { + KnownTargetStatus::Pending => break 'drop_service, + KnownTargetStatus::Alive => { + KnownTargetStatus::RemoveAt(Instant::now() + Duration::from_secs(3666)) + }, + KnownTargetStatus::RemoveAt(_) | KnownTargetStatus::Removing(_) => unreachable!(), + }; + 0 + } else { + if *known_service.fetcher.runtime_id.lock().unwrap() == runtime_id { + 'changed_rt_id: { + for (id, runtime) in self.runtimes.lock().unwrap().iter() { + if runtime.targets.len() == 1 && runtime.targets.contains_key(target) { + *known_service.fetcher.runtime_id.lock().unwrap() = id.to_string(); + break 'changed_rt_id; + } + } + known_service.synthetic_id = true; + *known_service.fetcher.runtime_id.lock().unwrap() = Self::generate_synthetic_id(); + } + } + known_service.refcount - 1 + }; + break 'service_handling; + } + services.remove(target); + } + + let mut target_runtimes = self.target_runtimes.lock().unwrap(); + if if let Some(target_runtime) = target_runtimes.get_mut(target) { + target_runtime.remove(runtime_id); + target_runtime.is_empty() + } else { + false + } { + target_runtimes.remove(target); + } + } + + fn add_target(self: &Arc, synthetic_id: bool, runtime_id: &str, target: Arc) { + let mut target_runtimes = self.target_runtimes.lock().unwrap(); + match target_runtimes.entry(target.clone()) { + Entry::Occupied(e) => e.into_mut(), + Entry::Vacant(e) => e.insert(HashSet::new()), + }.insert(runtime_id.to_string()); + drop(target_runtimes); // unlock + + let mut services = self.services.lock().unwrap(); + match services.entry(target.clone()) { + Entry::Occupied(mut e) => { + let 
known_target = &mut e.get_mut(); + if known_target.refcount == 0 { + let mut status = known_target.status.lock().unwrap(); + match *status { + KnownTargetStatus::RemoveAt(_) => { + *status = KnownTargetStatus::Alive; + known_target.refcount = 1; + if synthetic_id && !known_target.synthetic_id { + known_target.synthetic_id = true; + *known_target.fetcher.runtime_id.lock().unwrap() = Self::generate_synthetic_id(); + } + known_target.runtimes.insert(runtime_id.to_string()); + }, + KnownTargetStatus::Removing(ref future) => { + let future = future.clone(); + // Avoid deadlocking between known_target.status and self.services + self.pending_async_insertions.fetch_add(1, Ordering::AcqRel); + let runtime_id = runtime_id.to_string(); + let this = self.clone(); + tokio::spawn(async move { + future.await; + this.add_target(synthetic_id, runtime_id.as_str(), target); + this.pending_async_insertions.fetch_sub(1, Ordering::AcqRel); + }); + return; + }, + KnownTargetStatus::Alive | KnownTargetStatus::Pending => unreachable!(), + } + } else { + known_target.refcount += 1; + } + if !synthetic_id && known_target.synthetic_id { + known_target.synthetic_id = false; + *known_target.fetcher.runtime_id.lock().unwrap() = runtime_id.into(); + } + }, + Entry::Vacant(e) => { + let runtime_id = if synthetic_id { Self::generate_synthetic_id() } else { runtime_id.into() }; + self.start_fetcher(e.insert(KnownTarget { + refcount: 1, + status: Arc::new(Mutex::new(KnownTargetStatus::Pending)), + synthetic_id, + runtimes: { + let mut set = HashSet::default(); + set.insert(runtime_id.to_string()); + set + }, + fetcher: Arc::new(SharedFetcher::new(target, runtime_id)), + })); + } + } + } + + fn requires_synthetic_id(info: &RuntimeInfo) -> bool { + info.targets.len() > 1 + } + + pub fn add_runtime( + self: &Arc, + runtime_id: String, + notify_target: N, + target: &Arc, + ) { + trace!("Adding remote config runtime: {target:?} with runtime id {runtime_id}"); + match 
self.runtimes.lock().unwrap().entry(runtime_id) { + Entry::Occupied(mut runtime_entry) => { + let info = runtime_entry.get_mut(); + match info.targets.entry(target.clone()) { + Entry::Occupied(mut e) => *e.get_mut() += 1, + Entry::Vacant(e) => { + e.insert(1); + self.add_target(Self::requires_synthetic_id(info), runtime_entry.key(), target.clone()); + }, + } + } + Entry::Vacant(e) => { + if self.storage.invariants().endpoint.url.scheme().map(|s| s.as_str() != "file") == Some(true) { + let info = RuntimeInfo { + notify_target, + targets: HashMap::from([(target.clone(), 1)]), + }; + self.add_target(Self::requires_synthetic_id(&info), e.key(), target.clone()); + e.insert(info); + } + } + } + } + + pub fn delete_runtime( + self: &Arc, + runtime_id: &str, + target: &Arc, + ) { + trace!("Removing remote config runtime: {target:?} with runtime id {runtime_id}"); + { + let mut runtimes = self.runtimes.lock().unwrap(); + let last_removed = { + let info = match runtimes.get_mut(runtime_id) { + None => return, + Some(i) => i, + }; + match info.targets.entry(target.clone()) { + Entry::Occupied(mut e) => { + if *e.get() == 1 { + e.remove(); + } else { + *e.get_mut() -= 1; + return; + } + } + Entry::Vacant(_) => unreachable!("Missing target runtime"), + } + info.targets.is_empty() + }; + if last_removed { + runtimes.remove(runtime_id); + } + } + Self::remove_target(self, runtime_id, target); + } + + fn start_fetcher(self: &Arc, known_target: &mut KnownTarget) { + let this = self.clone(); + let fetcher = known_target.fetcher.clone(); + let status = known_target.status.clone(); + tokio::spawn(async move { + // Relatively primitive, no prioritization or anything. It is not expected that this + // semaphore is ever awaiting under standard usage. Can be improved if needed, e.g. + // sorted by amount of targets on the outstanding services or similar. 
+ let _semaphore = this.fetcher_semaphore.acquire().await.unwrap(); + { + let mut status = status.lock().unwrap(); + if !matches!(*status, KnownTargetStatus::Pending) { + return; + } + *status = KnownTargetStatus::Alive; + } + + let (remove_future, remove_completer) = ManualFuture::new(); + let shared_future = remove_future.shared(); + + let inner_fetcher = fetcher.clone(); + let inner_this = this.clone(); + fetcher.run(this.storage.clone(), Box::new(move |files| { + { + let mut status = status.lock().unwrap(); + if let KnownTargetStatus::RemoveAt(instant) = *status { + if instant < Instant::now() { + // We need to signal that we're in progress of removing to avoid race conditions + *status = KnownTargetStatus::Removing(shared_future.clone()); + // break here to drop mutex guard and avoid having status and services locked simultaneously + inner_fetcher.cancel(); + return None; + } + } + } // unlock status + + let (error, notify) = inner_this.storage.storage.fetched(&inner_fetcher.target, files); + + if notify { + // notify_targets is Hash + Eq + Clone, allowing us to deduplicate. 
Also avoid the lock during notifying + let mut notify_targets = HashSet::new(); + if let Some(runtimes) = inner_this.target_runtimes.lock().unwrap().get(&inner_fetcher.target) { + for runtime_id in runtimes { + if let Some(runtime) = inner_this.runtimes.lock().unwrap().get(runtime_id) { + notify_targets.insert(runtime.notify_target.clone()); + } + } + } + + debug!("Notify {:?} about remote config changes", notify_targets); + for notify_target in notify_targets { + notify_target.notify(); + } + } + + error + })).await; + + this.storage.storage.expired(&fetcher.target); + + { // scope lock before await + let mut services = this.services.lock().unwrap(); + services.remove(&fetcher.target); + if services.is_empty() && this.pending_async_insertions.load(Ordering::Relaxed) == 0 { + this.storage.storage.dead(); + } + } + remove_completer.complete(()).await; + }); + } + + pub fn shutdown(&self) { + let services = self.services.lock().unwrap(); + for (target, service) in services.iter() { + let mut status = service.status.lock().unwrap(); + match *status { + KnownTargetStatus::Pending | KnownTargetStatus::Alive => { + error!("Trying to shutdown {:?} while still alive", target); + }, + KnownTargetStatus::RemoveAt(_) => { + *status = KnownTargetStatus::RemoveAt(Instant::now()); + service.fetcher.cancel(); + }, + KnownTargetStatus::Removing(_) => {}, + } + } + } +} diff --git a/remote-config/src/fetch/shared.rs b/remote-config/src/fetch/shared.rs new file mode 100644 index 000000000..9c8a09d40 --- /dev/null +++ b/remote-config/src/fetch/shared.rs @@ -0,0 +1,266 @@ +use std::collections::HashMap; +use std::sync::{Arc, Mutex}; +use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; +use std::time::Duration; +use tokio_util::sync::CancellationToken; +use tracing::error; +use crate::fetch::{ConfigFetcher, ConfigFetcherState, ConfigInvariants, FileStorage, OpaqueState}; +use crate::{RemoteConfigPath, Target}; +use tokio::time::sleep; +use tokio::select; + +/// Fetcher which does 
a run-loop and carefully manages state around files, with the following +/// guarantees: +/// - A file at a given RemoteConfigPath will not be recreated as long as it exists +/// I.e. it will always be drop()'ed before recreation. +/// - It does not leak files which are no longer in use, i.e. it refcounts across all remote +/// config clients sharing the same RefcountingStorage. +/// - The state is always valid, i.e. there will be no intermittently expired files. +/// +pub struct SharedFetcher { + /// (env, service, version) tuple representing the basic remote config target + pub target: Arc, // could be theoretically also Mutex<>ed if needed + /// A unique runtime id. It must not be used by any other remote config client at the same time. + /// Is allowed to be changed at any time. + pub runtime_id: Arc>, + /// Each fetcher must have an unique id. Defaults to a random UUID. + pub client_id: String, + cancellation: CancellationToken, + /// Interval used if the remote server does not specify a refetch interval + pub default_interval: AtomicU64, +} + +pub struct FileRefcountData { + /// Primary refcounter: + /// - When active (dropped_run_id == 0), the amount of runners holding it since the last + /// remote config fetch. + /// - When inactive (dropped_run_id > 0), the remaining amount of runners actively fetching + /// remote config at the point in time dropped_run_id represents. + rc: AtomicU32, + /// 0, or point in time (see RunnersGeneration) where the file was moved to inactive. 
+ dropped_run_id: AtomicU64, + pub path: RemoteConfigPath, + pub version: u64, +} + +impl FileRefcountData { + pub fn new(version: u64, path: RemoteConfigPath) -> Self { + FileRefcountData { + rc: AtomicU32::new(0), + dropped_run_id: AtomicU64::new(0), + path, + version, + } + } +} + +pub trait RefcountedFile { + fn refcount(&self) -> &FileRefcountData; + + fn incref(&self) -> u32 { + self.refcount().rc.fetch_add(1, Ordering::AcqRel) + } + + fn delref(&self) -> u32 { + self.refcount().rc.fetch_sub(1, Ordering::AcqRel) + } + + fn setref(&self, val: u32) { + self.refcount().rc.store(val, Ordering::SeqCst) + } + + fn set_dropped_run_id(&self, val: u64) { + self.refcount().dropped_run_id.store(val, Ordering::SeqCst) + } + + fn get_dropped_run_id(&self) -> u64 { + self.refcount().dropped_run_id.load(Ordering::Relaxed) + } +} + +#[derive(Default)] +struct RunnersGeneration { + val: AtomicU64, +} + +/// Atomic structure to represent the exact amount of remote config fetching runners at a specific +/// point in time represented by the generation, an integer which is only ever incremented. +/// This data structure helps contain which inactive files are pending deletion. +impl RunnersGeneration { + const RUN_ID_SHIFT: i32 = 20; + + /// Increments run_id and increments active runners. Returns first run_id to watch for. + fn inc_runners(&self) -> u64 { + (self.val.fetch_add((1 << Self::RUN_ID_SHIFT) + 1, Ordering::SeqCst) >> Self::RUN_ID_SHIFT) + 1 + } + + /// Increments run_id and decrements active runners. Returns last run_id to watch for. + fn dec_runners(&self) -> u64 { + self.val.fetch_add((1 << Self::RUN_ID_SHIFT) - 1, Ordering::SeqCst) >> Self::RUN_ID_SHIFT + } + + /// Returns amount of active runners and current run_id. 
+ fn runners_and_run_id(&self) -> (u32, u64) { + let val = self.val.load(Ordering::Acquire); + ((val & ((1 << Self::RUN_ID_SHIFT) - 1)) as u32, val >> Self::RUN_ID_SHIFT) + } +} + +pub struct RefcountingStorage where S::StoredFile: RefcountedFile { + pub storage: S, + state: Arc>, + /// Stores recently expired files. When a file refcount drops to zero, they're no longer sent + /// via the remote config client. However, there may still be in-flight requests, with telling + /// the remote config server that we know about these files. Thus, as long as these requests are + /// being processed, we must retain the files, as these would not be resent, leaving us with a + /// potentially incomplete configuration. + inactive: Arc>>>, + /// times ConfigFetcher::::fetch_once() is currently being run + run_id: Arc, +} + +impl Clone for RefcountingStorage where S::StoredFile: RefcountedFile { + fn clone(&self) -> Self { + RefcountingStorage { + storage: self.storage.clone(), + state: self.state.clone(), + inactive: self.inactive.clone(), + run_id: self.run_id.clone(), + } + } +} + +impl RefcountingStorage where S::StoredFile: RefcountedFile { + pub fn new(storage: S, mut state: ConfigFetcherState) -> Self { + state.expire_unused_files = true; + RefcountingStorage { + storage, + state: Arc::new(state), + inactive: Default::default(), + run_id: Default::default(), + } + } + + fn expire_file(&mut self, file: Arc) { + let mut expire_lock = self.state.files_lock(); + let mut inactive = self.inactive.lock().unwrap(); + if file.refcount().rc.load(Ordering::Relaxed) != 0 { + return; // Don't do anything if refcount was increased while acquiring the lock + } + expire_lock.expire_file(&file.refcount().path); + drop(expire_lock); // early release + let (runners, run_id) = self.run_id.runners_and_run_id(); + if runners > 0 { + file.setref(runners); + file.set_dropped_run_id(run_id); + inactive.insert(file.refcount().path.clone(), file); + } + } + + pub fn invariants(&self) -> 
&ConfigInvariants { + &self.state.invariants + } +} + +impl FileStorage for RefcountingStorage where S::StoredFile: RefcountedFile { + type StoredFile = S::StoredFile; + + fn store(&self, version: u64, path: RemoteConfigPath, contents: Vec) -> anyhow::Result> { + let mut inactive = self.inactive.lock().unwrap(); + if let Some(existing) = inactive.remove(&path) { + if version <= existing.refcount().version { + existing.set_dropped_run_id(0); + existing.setref(0); + } else { + self.storage.update(&existing, version, contents)?; + } + Ok(existing) + } else { + self.storage.store(version, path, contents) + } + } + + fn update(&self, file: &Arc, version: u64, contents: Vec) -> anyhow::Result<()> { + self.storage.update(file, version, contents) + } +} + +impl SharedFetcher { + pub fn new(target: Arc, runtime_id: String) -> Self { + SharedFetcher { + target, + runtime_id: Arc::new(Mutex::new(runtime_id)), + client_id: uuid::Uuid::new_v4().to_string(), + cancellation: CancellationToken::new(), + default_interval: AtomicU64::new(5_000_000_000), + } + } + + /// Runs. + /// On successful fetches on_fetch() is called with the new configuration. + /// Should not be called more than once. 
+ #[allow(clippy::type_complexity)] + pub async fn run(&self, storage: RefcountingStorage, on_fetch: Box>) -> Option>) where S::StoredFile: RefcountedFile { + let state = storage.state.clone(); + let mut fetcher = ConfigFetcher::new(storage, state); + + let mut opaque_state = OpaqueState::default(); + + let mut last_files: Vec> = vec![]; + let mut last_error = None; + + loop { + let first_run_id = fetcher.file_storage.run_id.inc_runners(); + + let runtime_id = self.runtime_id.lock().unwrap().clone(); + let fetched = fetcher.fetch_once(runtime_id.as_str(), self.target.clone(), self.client_id.as_str(), last_error.take(), &mut opaque_state).await; + + let last_run_id = fetcher.file_storage.run_id.dec_runners(); + fetcher.file_storage.inactive.lock().unwrap().retain(|_, v| { + (first_run_id..last_run_id).contains(&v.get_dropped_run_id()) && v.delref() == 0 + }); + + match fetched { + Ok(files) => { + for file in files.iter() { + file.incref(); + } + + for file in last_files { + if file.delref() == 0 { + fetcher.file_storage.expire_file(file); + } + } + + last_files = files; + + last_error = on_fetch(&last_files); + } + Err(e) => error!("{:?}", e), + } + + select! { + _ = self.cancellation.cancelled() => { break; } + _ = { + let mut ns = fetcher.interval.load(Ordering::Relaxed); + if ns == 0 { + ns = self.default_interval.load(Ordering::Relaxed); + } + sleep(Duration::from_nanos(ns)) + } => {} + } + } + + for file in last_files { + if file.delref() == 0 { + fetcher.file_storage.expire_file(file); + } + } + } + + /// Note that due to the async logic, a cancellation does not guarantee a strict ordering: + /// A final on_fetch call from within the run() method may happen after the cancellation. 
+ pub fn cancel(&self) { + self.cancellation.cancel(); + } +} diff --git a/remote-config/src/fetch/single.rs b/remote-config/src/fetch/single.rs new file mode 100644 index 000000000..7cd43d12a --- /dev/null +++ b/remote-config/src/fetch/single.rs @@ -0,0 +1,29 @@ +use std::sync::Arc; +use crate::fetch::{ConfigFetcher, ConfigFetcherState, ConfigInvariants, FileStorage, OpaqueState}; +use crate::Target; + +pub struct SingleFetcher { + fetcher: ConfigFetcher, + target: Arc, + runtime_id: String, + pub config_id: String, + pub last_error: Option, + opaque_state: OpaqueState, +} + +impl SingleFetcher { + pub fn new(sink: S, target: Target, runtime_id: String, invariants: ConfigInvariants) -> Self { + SingleFetcher { + fetcher: ConfigFetcher::new(sink, Arc::new(ConfigFetcherState::new(invariants))), + target: Arc::new(target), + runtime_id, + config_id: uuid::Uuid::new_v4().to_string(), + last_error: None, + opaque_state: OpaqueState::default(), + } + } + + pub async fn fetch_once(&mut self) -> anyhow::Result>> { + self.fetcher.fetch_once(self.runtime_id.as_str(), self.target.clone(), self.config_id.as_str(), self.last_error.take(), &mut self.opaque_state).await + } +} \ No newline at end of file diff --git a/remote-config/src/lib.rs b/remote-config/src/lib.rs new file mode 100644 index 000000000..4a967efe9 --- /dev/null +++ b/remote-config/src/lib.rs @@ -0,0 +1,17 @@ +// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. 
+ +mod parse; +mod targets; +pub mod fetch; +pub mod dynamic_configuration; + +use serde::{Deserialize, Serialize}; +pub use parse::*; + +#[derive(Debug, Deserialize, Serialize, Clone, Hash, Ord, PartialOrd, Eq, PartialEq)] +pub struct Target { + pub service: String, + pub env: String, + pub app_version: String, +} diff --git a/remote-config/src/parse.rs b/remote-config/src/parse.rs new file mode 100644 index 000000000..82fd15a94 --- /dev/null +++ b/remote-config/src/parse.rs @@ -0,0 +1,114 @@ +// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. + +use datadog_live_debugger::LiveDebuggingData; +use crate::dynamic_configuration::data::DynamicConfigFile; + +#[derive(Debug, Clone, Eq, Hash, PartialEq)] +pub enum RemoteConfigSource { + Datadog(u64 /* org_id */), + Employee, +} + +#[derive(Copy, Clone, Eq, Hash, PartialEq)] +pub enum RemoteConfigProduct { + ApmTracing, + LiveDebugger, +} + +impl ToString for RemoteConfigProduct { + fn to_string(&self) -> String { + match self { + RemoteConfigProduct::ApmTracing => "APM_TRACING", + RemoteConfigProduct::LiveDebugger => "LIVE_DEBUGGING", + }.to_string() + } +} + +#[derive(Clone, Eq, Hash, PartialEq)] +pub struct RemoteConfigPath { + pub source: RemoteConfigSource, + pub product: RemoteConfigProduct, + pub config_id: String, + pub name: String, +} + +impl RemoteConfigPath { + pub fn try_parse(path: &str) -> anyhow::Result { + let parts: Vec<_> = path.split('/').collect(); + Ok(RemoteConfigPath { + source: match parts[0] { + "datadog" => { + if parts.len() != 5 { + anyhow::bail!("{} is datadog and does not have exactly 5 parts", path); + } + RemoteConfigSource::Datadog(parts[1].parse()?) 
+ } + "employee" => { + if parts.len() != 4 { + anyhow::bail!("{} is employee and does not have exactly 5 parts", path); + } + RemoteConfigSource::Employee + } + source => anyhow::bail!("Unknown source {}", source), + }, + product: match parts[parts.len() - 3] { + "APM_TRACING" => RemoteConfigProduct::ApmTracing, + "LIVE_DEBUGGING" => RemoteConfigProduct::LiveDebugger, + product => anyhow::bail!("Unknown product {}", product), + }, + config_id: parts[parts.len() - 2].to_string(), + name: parts[parts.len() - 1].to_string(), + }) + } +} + +impl ToString for RemoteConfigPath { + fn to_string(&self) -> String { + match self.source { + RemoteConfigSource::Datadog(id) => format!("datadog/{}/{}/{}/{}", id, self.product.to_string(), self.config_id, self.name), + RemoteConfigSource::Employee => format!("employee/{}/{}/{}", self.product.to_string(), self.config_id, self.name), + } + } +} + +#[derive(Debug)] +pub enum RemoteConfigData { + DynamicConfig(DynamicConfigFile), + LiveDebugger(LiveDebuggingData), +} + +impl RemoteConfigData { + pub fn try_parse(product: RemoteConfigProduct, data: &[u8]) -> anyhow::Result { + Ok(match product { + RemoteConfigProduct::ApmTracing => { + RemoteConfigData::DynamicConfig(serde_json::from_slice(data)?) 
+ }, + RemoteConfigProduct::LiveDebugger => { + let parsed = datadog_live_debugger::parse_json(&String::from_utf8_lossy(data))?; + RemoteConfigData::LiveDebugger(parsed) + } + }) + } +} + +#[derive(Debug)] +pub struct RemoteConfigValue { + pub source: RemoteConfigSource, + pub data: RemoteConfigData, + pub config_id: String, + pub name: String, +} + +impl RemoteConfigValue { + pub fn try_parse(path: &str, data: &[u8]) -> anyhow::Result { + let path = RemoteConfigPath::try_parse(path)?; + let data = RemoteConfigData::try_parse(path.product, data)?; + Ok(RemoteConfigValue { + source: path.source, + data, + config_id: path.config_id, + name: path.name, + }) + } +} diff --git a/remote-config/src/targets.rs b/remote-config/src/targets.rs new file mode 100644 index 000000000..e9abc8bbc --- /dev/null +++ b/remote-config/src/targets.rs @@ -0,0 +1,55 @@ +use std::collections::HashMap; +use std::str::FromStr; +use serde::Deserialize; +use serde_json::value::RawValue; +use time::OffsetDateTime; + +#[derive(Deserialize)] +pub struct TargetsList<'a> { + #[serde(borrow)] + pub signatures: Vec>, + pub signed: TargetsData<'a>, +} + +#[derive(Deserialize)] +pub struct TargetsSignature<'a> { + pub keyid: &'a str, + pub sig: &'a str, +} + +#[derive(Deserialize)] +pub struct TargetsData<'a> { + pub _type: &'a str, + pub custom: TargetsCustom<'a>, + #[serde(with = "time::serde::iso8601")] + pub expires: OffsetDateTime, + pub spec_version : &'a str, + pub targets: HashMap<&'a str, TargetData<'a>>, + pub version: i64, +} + +#[derive(Deserialize)] +pub struct TargetsCustom<'a> { + pub agent_refresh_interval: Option, + pub opaque_backend_state: &'a str, +} + +#[derive(Deserialize)] +pub struct TargetData<'a> { + #[serde(borrow)] + pub custom: HashMap<&'a str, &'a RawValue>, + pub hashes: HashMap<&'a str, &'a str>, + pub length: u32, +} + +impl<'a> TargetsList<'a> { + pub fn try_parse(data: &'a [u8]) -> serde_json::error::Result { + serde_json::from_slice(data) + } +} + +impl<'a> 
TargetData<'a> { + pub fn try_parse_version(&self) -> Option { + self.custom.get("v").and_then(|v| u64::from_str(v.get()).ok()) + } +} \ No newline at end of file diff --git a/sidecar-ffi/Cargo.toml b/sidecar-ffi/Cargo.toml index a014f466e..120c6c5b0 100644 --- a/sidecar-ffi/Cargo.toml +++ b/sidecar-ffi/Cargo.toml @@ -17,6 +17,8 @@ datadog-ipc = { path = "../ipc" } ddcommon = { path = "../ddcommon" } ddcommon-ffi = { path = "../ddcommon-ffi", default-features = false } ddtelemetry-ffi = { path = "../ddtelemetry-ffi", default-features = false } +datadog-remote-config = { path = "../remote-config" } +datadog-live-debugger = { path = "../live-debugger" } paste = "1" libc = "0.2" diff --git a/sidecar-ffi/cbindgen.toml b/sidecar-ffi/cbindgen.toml index 9413fc82f..10d450339 100644 --- a/sidecar-ffi/cbindgen.toml +++ b/sidecar-ffi/cbindgen.toml @@ -34,4 +34,4 @@ must_use = "DDOG_CHECK_RETURN" [parse] parse_deps = true -include = ["ddcommon", "ddtelemetry", "datadog-sidecar", "ddtelemetry-ffi", "ddcommon-ffi", "datadog-ipc"] +include = ["ddcommon", "ddtelemetry", "datadog-sidecar", "ddtelemetry-ffi", "ddcommon-ffi", "datadog-ipc", "datadog-live-debugger"] diff --git a/sidecar-ffi/src/lib.rs b/sidecar-ffi/src/lib.rs index 1ccab1281..f202060b6 100644 --- a/sidecar-ffi/src/lib.rs +++ b/sidecar-ffi/src/lib.rs @@ -33,7 +33,12 @@ use std::os::unix::prelude::FromRawFd; #[cfg(windows)] use std::os::windows::io::{FromRawHandle, RawHandle}; use std::slice; +use std::sync::Arc; use std::time::Duration; +use datadog_live_debugger::debugger_defs::DebuggerPayload; +use datadog_remote_config::fetch::ConfigInvariants; +use datadog_remote_config::Target; +use datadog_sidecar::shm_remote_config::RemoteConfigReader; #[repr(C)] pub struct NativeFile { @@ -189,6 +194,42 @@ pub extern "C" fn ddog_agent_remote_config_reader_drop(_: Box>) { } +#[no_mangle] +#[allow(clippy::missing_safety_doc)] +pub unsafe extern "C" fn ddog_remote_config_reader_for_endpoint<'a>( + language: &ffi::CharSlice<'a>, + 
tracer_version: &ffi::CharSlice<'a>, + endpoint: &Endpoint, + service_name: ffi::CharSlice, + env_name: ffi::CharSlice, + app_version: ffi::CharSlice, +) -> Box { + Box::new(RemoteConfigReader::new(&ConfigInvariants { + language: language.to_utf8_lossy().into(), + tracer_version: tracer_version.to_utf8_lossy().into(), + endpoint: endpoint.clone(), + }, &Arc::new(Target { + service: service_name.to_utf8_lossy().into(), + env: env_name.to_utf8_lossy().into(), + app_version: app_version.to_utf8_lossy().into(), + }))) +} + +#[no_mangle] +pub extern "C" fn ddog_remote_config_read<'a>( + reader: &'a mut RemoteConfigReader, + data: &mut ffi::CharSlice<'a>, +) -> bool { + let (new, contents) = reader.read(); + // c_char may be u8 or i8 depending on target... convert it. + let contents: &[c_char] = unsafe { std::mem::transmute::<&[u8], &[c_char]>(contents) }; + *data = contents.into(); + new +} + +#[no_mangle] +pub extern "C" fn ddog_remote_config_reader_drop(_: Box) {} + #[no_mangle] pub extern "C" fn ddog_sidecar_transport_drop(_: Box) {} @@ -375,7 +416,7 @@ pub unsafe extern "C" fn ddog_sidecar_telemetry_flushServiceData( /// Enqueues a list of actions to be performed. 
#[no_mangle] #[allow(clippy::missing_safety_doc)] -pub unsafe extern "C" fn ddog_sidecar_telemetry_end( +pub unsafe extern "C" fn ddog_sidecar_lifecycle_end( transport: &mut Box, instance_id: &InstanceId, queue_id: &QueueId, @@ -429,7 +470,9 @@ pub unsafe extern "C" fn ddog_sidecar_session_set_config( session_id: ffi::CharSlice, agent_endpoint: &Endpoint, dogstatsd_endpoint: &Endpoint, - flush_interval_milliseconds: u64, + language: ffi::CharSlice, + tracer_version: ffi::CharSlice, + flush_interval_milliseconds: u32, force_flush_size: usize, force_drop_size: usize, log_level: ffi::CharSlice, @@ -437,11 +480,14 @@ pub unsafe extern "C" fn ddog_sidecar_session_set_config( ) -> MaybeError { try_c!(blocking::set_session_config( transport, + libc::getpid(), session_id.to_utf8_lossy().into(), &SessionConfig { endpoint: agent_endpoint.clone(), dogstatsd_endpoint: dogstatsd_endpoint.clone(), - flush_interval: Duration::from_millis(flush_interval_milliseconds), + language: language.to_utf8_lossy().into(), + tracer_version: tracer_version.to_utf8_lossy().into(), + flush_interval: Duration::from_millis(flush_interval_milliseconds as u64), force_flush_size, force_drop_size, log_level: log_level.to_utf8_lossy().into(), @@ -449,7 +495,7 @@ pub unsafe extern "C" fn ddog_sidecar_session_set_config( config::FromEnv::log_method() } else { LogMethod::File(String::from(log_path.to_utf8_lossy()).into()) - } + }, }, )); @@ -536,6 +582,49 @@ pub unsafe extern "C" fn ddog_sidecar_send_trace_v04_bytes( MaybeError::None } +#[no_mangle] +#[allow(clippy::missing_safety_doc)] +#[allow(improper_ctypes_definitions)] // DebuggerPayload is just a pointer, we hide its internals +pub unsafe extern "C" fn ddog_sidecar_send_debugger_data( + transport: &mut Box, + instance_id: &InstanceId, + payloads: Vec> +) -> MaybeError { + if payloads.is_empty() { + return MaybeError::None; + } + + try_c!(blocking::send_debugger_data_shm_vec( + transport, + instance_id, + payloads, + )); + + MaybeError::None +} + 
+#[no_mangle] +#[allow(clippy::missing_safety_doc)] +pub unsafe extern "C" fn ddog_sidecar_set_remote_config_data( + transport: &mut Box, + instance_id: &InstanceId, + queue_id: &QueueId, + service_name: ffi::CharSlice, + env_name: ffi::CharSlice, + app_version: ffi::CharSlice, +) -> MaybeError { + try_c!(blocking::set_remote_config_data( + transport, + instance_id, + queue_id, + service_name.to_utf8_lossy().into(), + env_name.to_utf8_lossy().into(), + app_version.to_utf8_lossy().into(), + )); + + MaybeError::None +} + /// Dumps the current state of the sidecar. #[no_mangle] #[allow(clippy::missing_safety_doc)] diff --git a/sidecar/Cargo.toml b/sidecar/Cargo.toml index bd445f7c6..41d20fb75 100644 --- a/sidecar/Cargo.toml +++ b/sidecar/Cargo.toml @@ -21,6 +21,8 @@ ddtelemetry = { path = "../ddtelemetry", features = ["tracing"] } datadog-trace-protobuf = { path = "../trace-protobuf" } datadog-trace-utils = { path = "../trace-utils" } datadog-trace-normalization = { path = "../trace-normalization" } +datadog-remote-config = { path = "../remote-config" } +datadog-live-debugger = { path = "../live-debugger" } futures = { version = "0.3", default-features = false } manual_future = "0.1.1" @@ -34,10 +36,12 @@ datadog-ipc-macros = { path = "../ipc/macros" } rand = "0.8.3" regex = { version = "1" } -serde = { version = "1.0", features = ["derive"] } +serde = { version = "1.0", features = ["derive", "rc"] } serde_with = "3.6.0" bincode = { version = "1.3.3" } +serde_json = "1.0" rmp-serde = "1.1.1" +base64 = "0.21.0" spawn_worker = { path = "../spawn_worker" } zwohash = "0.1.2" sys-info = { version = "0.9.0" } diff --git a/sidecar/src/entry.rs b/sidecar/src/entry.rs index 29d621435..08b985bc3 100644 --- a/sidecar/src/entry.rs +++ b/sidecar/src/entry.rs @@ -98,6 +98,7 @@ where // Await everything else to completion _ = telemetry_handle.await; + server.shutdown(); _ = server.trace_flusher.join().await; Ok(()) diff --git a/sidecar/src/lib.rs b/sidecar/src/lib.rs index 
f4aee9414..777bd871a 100644 --- a/sidecar/src/lib.rs +++ b/sidecar/src/lib.rs @@ -9,6 +9,7 @@ pub mod entry; pub mod log; pub mod one_way_shared_memory; mod self_telemetry; +pub mod shm_remote_config; pub mod setup; mod tracer; mod watchdog; diff --git a/sidecar/src/one_way_shared_memory.rs b/sidecar/src/one_way_shared_memory.rs index 7382d2bc5..5949e5db0 100644 --- a/sidecar/src/one_way_shared_memory.rs +++ b/sidecar/src/one_way_shared_memory.rs @@ -207,6 +207,18 @@ impl>> OneWayShmWriter { handle.replace(mapped); } + pub fn as_slice(&self) -> &[u8] { + let handle = self.handle.lock().unwrap(); + let mapped = handle.as_ref().unwrap(); + let data = unsafe { &*(mapped.as_slice() as *const [u8] as *const RawData) }; + if data.meta.size > 0 { + let slice = data.as_slice(); + &slice[..slice.len() - 1] // ignore the trailing zero + } else { + b"" + } + } + pub fn size(&self) -> usize { self.handle .lock() diff --git a/sidecar/src/service/blocking.rs b/sidecar/src/service/blocking.rs index adef3699e..9ece9036d 100644 --- a/sidecar/src/service/blocking.rs +++ b/sidecar/src/service/blocking.rs @@ -6,7 +6,7 @@ use super::{ SidecarInterfaceRequest, SidecarInterfaceResponse, }; use crate::dogstatsd::DogStatsDAction; -use datadog_ipc::platform::{Channel, ShmHandle}; +use datadog_ipc::platform::{Channel, FileBackedHandle, ShmHandle}; use datadog_ipc::transport::blocking::BlockingTransport; use std::sync::Mutex; use std::{ @@ -14,6 +14,8 @@ use std::{ io, time::{Duration, Instant}, }; +use std::hash::Hash; +use serde::Serialize; use tracing::info; /// `SidecarTransport` is a wrapper around a BlockingTransport struct from the `datadog_ipc` crate @@ -199,11 +201,13 @@ pub fn register_service_and_flush_queued_actions( /// An `io::Result<()>` indicating the result of the operation. 
pub fn set_session_config( transport: &mut SidecarTransport, + pid: libc::pid_t, session_id: String, config: &SessionConfig, ) -> io::Result<()> { transport.send(SidecarInterfaceRequest::SetSessionConfig { session_id, + pid, config: config.clone(), }) } @@ -261,6 +265,98 @@ pub fn send_trace_v04_shm( }) } +/// Sends raw data from shared memory to the debugger endpoint. +/// +/// # Arguments +/// +/// * `transport` - The transport used for communication. +/// * `instance_id` - The ID of the instance. +/// * `handle` - The handle to the shared memory. +/// +/// # Returns +/// +/// An `io::Result<()>` indicating the result of the operation. +pub fn send_debugger_data_shm( + transport: &mut SidecarTransport, + instance_id: &InstanceId, + handle: ShmHandle, +) -> io::Result<()> { + transport.send(SidecarInterfaceRequest::SendDebuggerDataShm { + instance_id: instance_id.clone(), + handle, + }) +} + +/// Sends a collection of debugger palyloads to the debugger endpoint. +/// +/// # Arguments +/// +/// * `transport` - The transport used for communication. +/// * `instance_id` - The ID of the instance. +/// * `payloads` - The payloads to be sent +/// +/// # Returns +/// +/// An `anyhow::Result<()>` indicating the result of the operation. 
+pub fn send_debugger_data_shm_vec( + transport: &mut SidecarTransport, + instance_id: &InstanceId, + payloads: Vec>, +) -> anyhow::Result<()> { + struct SizeCount(usize); + + impl io::Write for SizeCount { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.0 += buf.len(); + Ok(buf.len()) + } + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } + } + let mut size_serializer = serde_json::Serializer::new(SizeCount(0)); + payloads.serialize(&mut size_serializer).unwrap(); + + let mut mapped = ShmHandle::new(size_serializer.into_inner().0)?.map()?; + let mut serializer = serde_json::Serializer::new(mapped.as_slice_mut()); + payloads.serialize(&mut serializer).unwrap(); + + Ok(send_debugger_data_shm(transport, instance_id, mapped.into())?) +} + +/// Sets the state of the current remote config operation. +/// The queue id is shared with telemetry and the associated data will be freed upon a +/// `Lifecycle::Stop` event. +/// +/// # Arguments +/// +/// * `transport` - The transport used for communication. +/// * `instance_id` - The ID of the instance. +/// * `queue_id` - The unique identifier for the action in the queue. +/// * `service_name` - The name of the service. +/// * `env_name` - The name of the environment. +/// * `app_version` - The metadata of the runtime. +/// +/// # Returns +/// +/// An `io::Result<()>` indicating the result of the operation. +pub fn set_remote_config_data( + transport: &mut SidecarTransport, + instance_id: &InstanceId, + queue_id: &QueueId, + service_name: String, + env_name: String, + app_version: String, +) -> io::Result<()> { + transport.send(SidecarInterfaceRequest::SetRemoteConfigData { + instance_id: instance_id.clone(), + queue_id: *queue_id, + service_name, + env_name, + app_version, + }) +} + /// Sends DogStatsD actions. 
/// /// # Arguments diff --git a/sidecar/src/service/mod.rs b/sidecar/src/service/mod.rs index 6541d84ee..5ee1771f7 100644 --- a/sidecar/src/service/mod.rs +++ b/sidecar/src/service/mod.rs @@ -29,6 +29,7 @@ use sidecar_interface::{SidecarInterface, SidecarInterfaceRequest, SidecarInterf pub mod blocking; mod instance_id; mod queue_id; +mod remote_configs; mod request_identification; mod runtime_info; mod runtime_metadata; @@ -43,6 +44,8 @@ pub(crate) mod tracing; pub struct SessionConfig { pub endpoint: Endpoint, pub dogstatsd_endpoint: Endpoint, + pub language: String, + pub tracer_version: String, pub flush_interval: Duration, pub force_flush_size: usize, pub force_drop_size: usize, diff --git a/sidecar/src/service/remote_configs.rs b/sidecar/src/service/remote_configs.rs new file mode 100644 index 000000000..df5f73e0f --- /dev/null +++ b/sidecar/src/service/remote_configs.rs @@ -0,0 +1,65 @@ +use std::collections::hash_map::Entry; +use std::fmt::{Debug, Formatter}; +use std::sync::{Arc, Mutex}; +use zwohash::HashMap; +use datadog_remote_config::fetch::{ConfigInvariants, NotifyTarget}; +use crate::shm_remote_config::{ShmRemoteConfigs, ShmRemoteConfigsGuard}; + +#[derive(Default, Clone, Hash, Eq, PartialEq)] +pub struct RemoteConfigNotifyTarget { + pub pid: libc::pid_t, + #[cfg(windows)] + // contains address in that process address space of the notification function + pub notify_function: libc::c_void, +} + +impl Debug for RemoteConfigNotifyTarget { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + self.pid.fmt(f) + } +} + +impl NotifyTarget for RemoteConfigNotifyTarget { + #[cfg(not(windows))] + fn notify(&self) { + unsafe { libc::kill(self.pid, libc::SIGVTALRM) }; + } + + #[cfg(windows)] + fn notify(&self) { + // TODO: CreateRemoteThread -> ddtrace_set_all_thread_vm_interrupt + } +} + +#[derive(Default, Clone)] +pub struct RemoteConfigs(Arc>>>); +pub type RemoteConfigsGuard = ShmRemoteConfigsGuard; + +impl RemoteConfigs { + pub fn add_runtime( + 
&self, + invariants: ConfigInvariants, + runtime_id: String, + notify_target: RemoteConfigNotifyTarget, + env: String, + service: String, + app_version: String, + ) -> RemoteConfigsGuard { + match self.0.lock().unwrap().entry(invariants) { + Entry::Occupied(e) => e.into_mut(), + Entry::Vacant(e) => { + let this = self.0.clone(); + let invariants = e.key().clone(); + e.insert(ShmRemoteConfigs::new(invariants.clone(), Box::new(move || { + this.lock().unwrap().remove(&invariants); + }))) + } + }.add_runtime(runtime_id, notify_target, env, service, app_version) + } + + pub fn shutdown(&self) { + for (_, rc) in self.0.lock().unwrap().drain() { + rc.shutdown(); + } + } +} diff --git a/sidecar/src/service/runtime_info.rs b/sidecar/src/service/runtime_info.rs index f0ccd2fc5..73d46a0ab 100644 --- a/sidecar/src/service/runtime_info.rs +++ b/sidecar/src/service/runtime_info.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::service::{ + remote_configs::RemoteConfigsGuard, telemetry::{AppInstance, AppOrQueue}, InstanceId, QueueId, }; @@ -34,6 +35,7 @@ pub(crate) struct RuntimeInfo { pub(crate) apps: Arc>, app_or_actions: Arc>>, #[cfg(feature = "tracing")] + remote_config_guards: Arc>>, pub(crate) instance_id: InstanceId, } @@ -122,6 +124,16 @@ impl RuntimeInfo { pub(crate) fn lock_app_or_actions(&self) -> MutexGuard> { self.app_or_actions.lock().unwrap() } + + /// Locks the remote config guards map and returns a mutable reference to it. + /// + /// # Returns + /// + /// * `MutexGuard>` - A mutable reference to the remote + /// config guards map. 
+ pub(crate) fn lock_remote_config_guards(&self) -> MutexGuard> { + self.remote_config_guards.lock().unwrap() + } } // TODO: APM-1079 - Add unit tests for RuntimeInfo diff --git a/sidecar/src/service/session_info.rs b/sidecar/src/service/session_info.rs index 5900affe2..22591c730 100644 --- a/sidecar/src/service/session_info.rs +++ b/sidecar/src/service/session_info.rs @@ -5,10 +5,12 @@ use std::{ collections::HashMap, sync::{Arc, Mutex, MutexGuard}, }; +use std::sync::atomic::AtomicI32; use futures::future; use tracing::{enabled, info, Level}; +use datadog_remote_config::fetch::ConfigInvariants; use crate::log::{MultiEnvFilterGuard, MultiWriterGuard}; use crate::{dogstatsd, tracer}; @@ -18,16 +20,35 @@ use crate::service::{InstanceId, RuntimeInfo}; /// /// It contains a list of runtimes, session configuration, tracer configuration, and log guards. /// It also has methods to manage the runtimes and configurations. -#[derive(Default, Clone)] +#[derive(Default)] pub(crate) struct SessionInfo { runtimes: Arc>>, pub(crate) session_config: Arc>>, + debugger_config: Arc>, tracer_config: Arc>, dogstatsd: Arc>, + remote_config_invariants: Arc>>, pub(crate) log_guard: Arc, MultiWriterGuard<'static>)>>>, #[cfg(feature = "tracing")] pub(crate) session_id: String, + pub(crate) pid: Arc, +} + +impl Clone for SessionInfo { + fn clone(&self) -> Self { + SessionInfo { + runtimes: self.runtimes.clone(), + session_config: self.session_config.clone(), + debugger_config: self.debugger_config.clone(), + tracer_config: self.tracer_config.clone(), + dogstatsd: self.dogstatsd.clone(), + remote_config_invariants: self.remote_config_invariants.clone(), + log_guard: self.log_guard.clone(), + session_id: self.session_id.clone(), + pid: self.pid.clone(), + } + } } impl SessionInfo { @@ -155,7 +176,27 @@ impl SessionInfo { { f(&mut self.get_dogstatsd()); } + + pub fn get_debugger_config(&self) -> MutexGuard { + self.debugger_config.lock().unwrap() + } + + pub fn modify_debugger_config(&self, 
mut f: F) + where + F: FnMut(&mut datadog_live_debugger::sender::Config), + { + f(&mut self.get_debugger_config()); + } + + pub fn set_remote_config_invariants(&self, invariants: ConfigInvariants) { + *self.remote_config_invariants.lock().unwrap() = Some(invariants); + } + + pub fn get_remote_config_invariants(&self) -> MutexGuard> { + self.remote_config_invariants.lock().unwrap() + } } + #[cfg(test)] mod tests { use super::*; diff --git a/sidecar/src/service/sidecar_interface.rs b/sidecar/src/service/sidecar_interface.rs index 8b269949c..857b47268 100644 --- a/sidecar/src/service/sidecar_interface.rs +++ b/sidecar/src/service/sidecar_interface.rs @@ -53,8 +53,13 @@ pub trait SidecarInterface { /// # Arguments /// /// * `session_id` - The ID of the session. + /// * `pid` - The pid of the sidecar client. /// * `config` - The configuration to be set. - async fn set_session_config(session_id: String, config: SessionConfig); + async fn set_session_config( + session_id: String, + pid: libc::pid_t, + config: SessionConfig + ); /// Shuts down a runtime. /// @@ -98,6 +103,32 @@ pub trait SidecarInterface { headers: SerializedTracerHeaderTags, ); + /// Transfers raw data to a live-debugger endpoint. + /// + /// # Arguments + /// * `instance_id` - The ID of the instance. + /// * `handle` - The data to send. + async fn send_debugger_data_shm( + instance_id: InstanceId, + #[SerializedHandle] handle: ShmHandle, + ); + + /// Sets contextual data for the remote config client. + /// + /// # Arguments + /// * `instance_id` - The ID of the instance. + /// * `queue_id` - The unique identifier for the trace context. + /// * `service_name` - The name of the service. + /// * `env_name` - The name of the environment. + /// * `app_version` - The application version. + async fn set_remote_config_data( + instance_id: InstanceId, + queue_id: QueueId, + service_name: String, + env_name: String, + app_version: String, + ); + /// Sends DogStatsD actions. 
/// /// # Arguments diff --git a/sidecar/src/service/sidecar_server.rs b/sidecar/src/service/sidecar_server.rs index d9ac91865..3b8556bed 100644 --- a/sidecar/src/service/sidecar_server.rs +++ b/sidecar/src/service/sidecar_server.rs @@ -44,6 +44,8 @@ use crate::service::telemetry::enqueued_telemetry_stats::EnqueuedTelemetryStats; use crate::service::tracing::trace_flusher::TraceFlusherStats; use datadog_ipc::platform::FileBackedHandle; use datadog_ipc::tarpc::server::{Channel, InFlightRequest}; +use datadog_remote_config::fetch::ConfigInvariants; +use crate::service::remote_configs::{RemoteConfigNotifyTarget, RemoteConfigs}; type NoResponse = Ready<()>; @@ -86,6 +88,8 @@ pub struct SidecarServer { Arc>>>, /// Keeps track of the number of submitted payloads. pub submitted_payloads: Arc, + /// All remote config handling + remote_configs: RemoteConfigs, } impl SidecarServer { @@ -323,6 +327,13 @@ impl SidecarServer { self.trace_flusher.enqueue(data); } + async fn send_debugger_data(&self, data: &[u8], target: &Endpoint) { + if let Err(e) = datadog_live_debugger::sender::send(data, target).await { + error!("Error sending data to live debugger endpoint: {e:?}"); + debug!("Attempted to send the following payload: {}", String::from_utf8_lossy(data)); + } + } + async fn compute_stats(&self) -> SidecarStats { let mut telemetry_stats_errors = 0; let telemetry_stats = join_all({ @@ -435,6 +446,10 @@ impl SidecarServer { log_writer: MULTI_LOG_WRITER.stats(), } } + + pub fn shutdown(&self) { + self.remote_configs.shutdown(); + } } impl SidecarInterface for SidecarServer { @@ -466,6 +481,7 @@ impl SidecarInterface for SidecarServer { ) }) { entry.remove(); + rt_info.lock_remote_config_guards().remove(&queue_id); } let apps = rt_info.apps.clone(); tokio::spawn(async move { @@ -488,7 +504,16 @@ impl SidecarInterface for SidecarServer { } }, Entry::Vacant(entry) => { - entry.insert(AppOrQueue::Queue(EnqueuedTelemetryData::processed(actions))); + if actions.len() == 1 && matches!( + 
actions[0], + SidecarAction::Telemetry(TelemetryActions::Lifecycle( + LifecycleAction::Stop + )) + ) { + rt_info.lock_remote_config_guards().remove(&queue_id); + } else { + entry.insert(AppOrQueue::Queue(EnqueuedTelemetryData::processed(actions))); + } } } @@ -552,6 +577,9 @@ impl SidecarInterface for SidecarServer { self.get_runtime(&instance_id) .lock_app_or_actions() .remove(&queue_id); + self.get_runtime(&instance_id) + .lock_remote_config_guards() + .remove(&queue_id); } app.telemetry.send_msgs(actions).await.ok(); @@ -570,9 +598,11 @@ impl SidecarInterface for SidecarServer { self, _: Context, session_id: String, + pid: libc::pid_t, config: SessionConfig, ) -> Self::SetSessionConfigFut { let session = self.get_session(&session_id); + session.pid.store(pid, Ordering::Relaxed); session.modify_telemetry_config(|cfg| { let endpoint = get_product_endpoint(ddtelemetry::config::PROD_INTAKE_SUBDOMAIN, &config.endpoint); @@ -588,6 +618,16 @@ impl SidecarInterface for SidecarServer { session.configure_dogstatsd(|dogstatsd| { dogstatsd.set_endpoint(config.dogstatsd_endpoint.clone()); }); + session.modify_debugger_config(|cfg| { + let endpoint = + get_product_endpoint(datadog_live_debugger::sender::PROD_INTAKE_SUBDOMAIN, &config.endpoint); + cfg.set_endpoint(endpoint).ok(); + }); + session.set_remote_config_invariants(ConfigInvariants { + language: config.language, + tracer_version: config.tracer_version, + endpoint: config.endpoint, + }); self.trace_flusher .interval_ms .store(config.flush_interval.as_millis() as u64, Ordering::Relaxed); @@ -699,6 +739,52 @@ impl SidecarInterface for SidecarServer { no_response() } + type SendDebuggerDataShmFut = NoResponse; + + fn send_debugger_data_shm( + self, + _: Context, + instance_id: InstanceId, + handle: ShmHandle, + ) -> Self::SendDebuggerDataShmFut { + if let Some(endpoint) = self + .get_session(&instance_id.session_id) + .get_debugger_config() + .endpoint + .clone() + { + tokio::spawn(async move { + match handle.map() { + 
Ok(mapped) => { + self.send_debugger_data(mapped.as_slice(), &endpoint).await; + } + Err(e) => error!("Failed mapping shared trace data memory: {}", e), + } + }); + } + + no_response() + } + + type SetRemoteConfigDataFut = NoResponse; + + fn set_remote_config_data( + self, + _: Context, + instance_id: InstanceId, + queue_id: QueueId, + service_name: String, + env_name: String, + app_version: String, + ) -> Self::SetRemoteConfigDataFut { + let session = self.get_session(&instance_id.session_id); + let notify_target = RemoteConfigNotifyTarget { pid: session.pid.load(Ordering::Relaxed) }; + session.get_runtime(&instance_id.runtime_id).lock_remote_config_guards().insert(queue_id, self.remote_configs + .add_runtime(session.get_remote_config_invariants().as_ref().expect("Expecting remote config invariants to be set early").clone(), instance_id.runtime_id, notify_target, env_name, service_name, app_version)); + + no_response() + } + type SendDogstatsdActionsFut = NoResponse; fn send_dogstatsd_actions( diff --git a/sidecar/src/shm_remote_config.rs b/sidecar/src/shm_remote_config.rs new file mode 100644 index 000000000..aae096f72 --- /dev/null +++ b/sidecar/src/shm_remote_config.rs @@ -0,0 +1,391 @@ +// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. 
+ +use crate::one_way_shared_memory::{open_named_shm, OneWayShmReader, OneWayShmWriter, ReaderOpener}; +use base64::prelude::BASE64_URL_SAFE_NO_PAD; +use base64::Engine; +use datadog_ipc::platform::{FileBackedHandle, MappedMem, NamedShmHandle}; +use datadog_remote_config::{RemoteConfigPath, RemoteConfigValue, Target}; +use datadog_remote_config::fetch::{ConfigInvariants, FileRefcountData, FileStorage, MultiTargetFetcher, MultiTargetHandlers, NotifyTarget, RefcountedFile}; +use std::cmp::Reverse; +use std::collections::hash_map::Entry; +use std::collections::HashMap; +use std::default::Default; +use std::ffi::CString; +use std::hash::{Hash, Hasher}; +use std::io; +use std::sync::{Arc, Mutex}; +use std::time::Duration; +use priority_queue::PriorityQueue; +use tokio::time::Instant; +use tracing::{debug, error, trace, warn}; +use zwohash::{HashSet, ZwoHasher}; +use crate::primary_sidecar_identifier; + +pub struct RemoteConfigWriter(OneWayShmWriter); +pub struct RemoteConfigReader(OneWayShmReader); + +fn path_for_remote_config(id: &ConfigInvariants, target: &Arc) -> CString { + // We need a stable hash so that the outcome is independent of the process + let mut hasher = ZwoHasher::default(); + id.hash(&mut hasher); + target.hash(&mut hasher); + CString::new(format!("/libdatadog-remote-config-{}-{}", primary_sidecar_identifier(), hasher.finish())).unwrap() +} + +impl RemoteConfigReader { + pub fn new(id: &ConfigInvariants, target: &Arc) -> RemoteConfigReader { + let path = path_for_remote_config(id, target); + RemoteConfigReader(OneWayShmReader::new( + open_named_shm(&path).ok(), + path, + )) + } + + pub fn read(&mut self) -> (bool, &[u8]) { + self.0.read() + } +} + +impl RemoteConfigWriter { + pub fn new(id: &ConfigInvariants, target: &Arc) -> io::Result { + Ok(RemoteConfigWriter(OneWayShmWriter::::new( + path_for_remote_config(id, target), + )?)) + } + + pub fn write(&self, contents: &[u8]) { + self.0.write(contents) + } +} + +impl ReaderOpener + for OneWayShmReader +{ 
+ fn open(&self) -> Option> { + open_named_shm(&self.extra).ok() + } +} + +#[derive(Clone)] +struct ConfigFileStorage { + invariants: ConfigInvariants, + /// All writers + writers: Arc, RemoteConfigWriter>>>, + on_dead: Arc>, +} + +struct StoredShmFile { + handle: Mutex, + refcount: FileRefcountData, +} + +impl RefcountedFile for StoredShmFile { + fn refcount(&self) -> &FileRefcountData { + &self.refcount + } +} + +impl FileStorage for ConfigFileStorage { + type StoredFile = StoredShmFile; + + fn store(&self, version: u64, path: RemoteConfigPath, file: Vec) -> anyhow::Result> { + Ok(Arc::new(StoredShmFile { + handle: Mutex::new(store_shm(version, &path, file)?), + refcount: FileRefcountData::new(version, path), + })) + } + + fn update(&self, file: &Arc, version: u64, contents: Vec) -> anyhow::Result<()> { + *file.handle.lock().unwrap() = store_shm(version, &file.refcount.path, contents)?; + Ok(()) + } +} + +fn store_shm(version: u64, path: &RemoteConfigPath, file: Vec) -> io::Result { + let name = format!( + "/libdatadog-remote-config-file-{}-{}-{}", + primary_sidecar_identifier(), + version, + BASE64_URL_SAFE_NO_PAD.encode(path.to_string()) + ); + let mut handle = + NamedShmHandle::create(CString::new(name)?, file.len())? 
+ .map()?; + + handle.as_slice_mut().copy_from_slice(file.as_slice()); + + Ok(handle.into()) +} + +impl MultiTargetHandlers for ConfigFileStorage { + fn fetched(&self, target: &Arc, files: &[Arc]) -> (Option, bool) { + let mut writers = self.writers.lock().unwrap(); + let writer = match writers.entry(target.clone()) { + Entry::Occupied(e) => e.into_mut(), + Entry::Vacant(e) => e.insert(match RemoteConfigWriter::new(&self.invariants, target) { + Ok(w) => w, + Err(e) => { + let msg = format!("Failed acquiring a remote config shm writer: {:?}", e); + error!(msg); + return (Some(msg), false); + }, + }), + }; + + let len = files.iter().map(|f| f.handle.lock().unwrap().get_path().len() + 2).sum(); + let mut serialized = Vec::with_capacity(len); + for file in files.iter() { + serialized.extend_from_slice(file.handle.lock().unwrap().get_path()); + serialized.push(b'\n'); + } + + if writer.0.as_slice() != serialized { + writer.write(&serialized); + + debug!("Active configuration files are: {}", String::from_utf8_lossy(&serialized)); + + (None, true) + } else { + (None, false) + } + } + + fn expired(&self, target: &Arc) { + if let Some(writer) = self.writers.lock().unwrap().remove(target) { + // clear to signal it's no longer being fetched + writer.write(&[]); + } + } + + fn dead(&self) { + (self.on_dead)(); + } +} + +pub struct ShmRemoteConfigsGuard { + target: Arc, + runtime_id: String, + remote_configs: ShmRemoteConfigs, +} + +impl Drop for ShmRemoteConfigsGuard { + fn drop(&mut self) { + self.remote_configs.0.delete_runtime(&self.runtime_id, &self.target); + } +} + +#[derive(Clone)] +pub struct ShmRemoteConfigs(Arc>); + +// we collect services per env, so that we always query, for each runtime + env, all the services +// adding runtimes increases amount of services, removing services after a while + +// one request per (runtime_id, RemoteConfigIdentifier) tuple: extra_services are all services pertaining to that env +// refcounting RemoteConfigIdentifier tuples by their 
unique runtime_id + +impl ShmRemoteConfigs { + pub fn new(invariants: ConfigInvariants, on_dead: Box) -> Self { + let storage = ConfigFileStorage { + invariants: invariants.clone(), + writers: Default::default(), + on_dead: Arc::new(on_dead), + }; + ShmRemoteConfigs(MultiTargetFetcher::new(storage, invariants)) + } + + pub fn is_dead(&self) -> bool { + self.0.is_dead() + } + + pub fn add_runtime( + &self, + runtime_id: String, + notify_target: N, + env: String, + service: String, + app_version: String, + ) -> ShmRemoteConfigsGuard { + let target = Arc::new(Target { + service, + env, + app_version, + }); + self.0.add_runtime(runtime_id.clone(), notify_target, &target); + ShmRemoteConfigsGuard { + target, + runtime_id, + remote_configs: self.clone(), + } + } + + pub fn shutdown(&self) { + self.0.shutdown(); + } +} + +fn read_config(path: &str) -> anyhow::Result { + let mapped = NamedShmHandle::open(&CString::new(path)?)?.map()?; + + if let Some(rc_path) = path.split('-').nth(6) { + let rc_path = String::from_utf8(BASE64_URL_SAFE_NO_PAD.decode(rc_path)?)?; + RemoteConfigValue::try_parse(&rc_path, mapped.as_slice()) + } else { + anyhow::bail!("could not read config; {} has less than six dashes", path); + } +} + +/// Manages configs. +/// Returns changes to configurations. +/// Switching targets is supported; Remove and Add operations will be yielded upon the next +/// fetch_update() call according to the difference. +/// It is guaranteed that no two configurations sharing the same RemoteConfigPath are applied at +/// once. They will always be Remove()d first, then Add()ed again upon update. 
+pub struct RemoteConfigManager { + invariants: ConfigInvariants, + active_target: Option>, + active_reader: Option, + encountered_targets: HashMap, (RemoteConfigReader, Vec)>, + unexpired_targets: PriorityQueue, Reverse>, + active_configs: HashSet, + last_read_configs: Vec, + check_configs: Vec, +} + +pub enum RemoteConfigUpdate { + None, + Add(RemoteConfigValue), + Remove(RemoteConfigPath), +} + +impl RemoteConfigManager { + pub fn new(invariants: ConfigInvariants) -> RemoteConfigManager { + RemoteConfigManager { + invariants, + active_target: None, + active_reader: None, + encountered_targets: Default::default(), + unexpired_targets: Default::default(), + active_configs: Default::default(), + last_read_configs: Default::default(), + check_configs: vec![], + } + } + + /// Polls one configuration change. + /// Has to be polled repeatedly until None is returned. + pub fn fetch_update(&mut self) -> RemoteConfigUpdate { + if let Some(ref target) = self.active_target { + let reader = self.active_reader.get_or_insert_with(|| RemoteConfigReader::new(&self.invariants, target)); + + let (changed, data) = reader.read(); + if changed { + 'fetch_new: { + let mut configs = vec![]; + if !data.is_empty() { + let mut i = 0; + let mut start = 0; + while i < data.len() { + if data[i] == b'\n' { + match std::str::from_utf8(&data[start..i]) { + Ok(s) => configs.push(s.to_string()), + Err(e) => { + warn!("Failed reading received configurations {e:?}"); + break 'fetch_new; + } + } + start = i + 1; + } + i += 1; + } + } + self.last_read_configs = configs; + self.check_configs = self.active_configs.iter().cloned().collect(); + } + + while let Some((_, Reverse(instant))) = self.unexpired_targets.peek() { + if *instant < Instant::now() - Duration::from_secs(3666) { + let (target, _) = self.unexpired_targets.pop().unwrap(); + self.encountered_targets.remove(&target); + } + } + } + } + + while let Some(config) = self.check_configs.pop() { + if !self.last_read_configs.contains(&config) { + 
trace!("Removing remote config file {config}"); + self.active_configs.remove(&config); + return RemoteConfigUpdate::Remove(RemoteConfigPath::try_parse(&config).unwrap()); + } + } + + while let Some(config) = self.last_read_configs.pop() { + if !self.active_configs.contains(&config) { + match read_config(&config) { + Ok(parsed) => { + trace!("Adding remote config file {config}: {parsed:?}"); + self.active_configs.insert(config); + return RemoteConfigUpdate::Add(parsed); + } + Err(e) => warn!("Failed reading remote config file {config}; skipping: {e:?}"), + } + } + } + + RemoteConfigUpdate::None + } + + fn set_target(&mut self, target: Option>) { + let mut current_configs = std::mem::take(&mut self.last_read_configs); + if let Some(old_target) = std::mem::replace(&mut self.active_target, target) { + if let Some(reader) = self.active_reader.take() { + // Reconstruct currently active configurations + if self.check_configs.is_empty() { + if current_configs.is_empty() { + current_configs = self.active_configs.iter().cloned().collect(); + } else { + let mut pending = self.active_configs.clone(); + for config in current_configs { + pending.insert(config); + } + current_configs = pending.into_iter().collect(); + } + } + self.encountered_targets.insert(old_target.clone(), (reader, current_configs)); + self.unexpired_targets.push(old_target, Reverse(Instant::now())); + } + } + if let Some(ref target) = self.active_target { + if let Some((reader, last_fetch)) = self.encountered_targets.remove(target) { + self.active_reader = Some(reader); + self.last_read_configs = last_fetch; + self.unexpired_targets.remove(target); + } + } + } + + /// Sets the currently active target. + pub fn track_target(&mut self, target: &Arc) { + self.set_target(Some(target.clone())); + self.check_configs = self.active_configs.iter().cloned().collect(); + } + + /// Resets the currently active target. 
The next configuration change polls will emit Remove() + /// for all current tracked active configurations. + pub fn reset_target(&mut self) { + self.set_target(None); + self.check_configs = self.active_configs.iter().cloned().collect(); + } + + pub fn get_target(&self) -> Option<&Arc> { + self.active_target.as_ref() + } + + /// Resets everything, giving up the target and all tracked state of active configurations. + pub fn reset(&mut self) { + self.set_target(None); + self.check_configs.clear(); + self.active_configs.clear(); + } +} diff --git a/sidecar/src/tracer.rs b/sidecar/src/tracer.rs index 9f6034813..00b137ae1 100644 --- a/sidecar/src/tracer.rs +++ b/sidecar/src/tracer.rs @@ -8,7 +8,10 @@ use std::str::FromStr; #[derive(Default)] pub struct Config { + pub raw_endpoint: Option, pub endpoint: Option, + pub language: String, + pub tracer_version: String, } impl Config { diff --git a/trace-protobuf/build.rs b/trace-protobuf/build.rs index 869e452ab..6eb9df712 100644 --- a/trace-protobuf/build.rs +++ b/trace-protobuf/build.rs @@ -119,6 +119,37 @@ fn generate_protobuf() { "#[serde(rename = \"DBType\")]", ); + config.type_attribute("ClientGetConfigsResponse", "#[derive(Deserialize)]"); + config.type_attribute("File", "#[derive(Deserialize)]"); + config.type_attribute( + "ClientGetConfigsRequest", + "#[derive(Deserialize, Serialize)]", + ); + config.type_attribute("Client", "#[derive(Deserialize, Serialize)]"); + config.type_attribute("ClientState", "#[derive(Deserialize, Serialize)]"); + config.type_attribute("ClientTracer", "#[derive(Deserialize, Serialize)]"); + config.type_attribute("ClientAgent", "#[derive(Deserialize, Serialize)]"); + config.type_attribute("ConfigState", "#[derive(Deserialize, Serialize)]"); + config.type_attribute("TargetFileMeta", "#[derive(Deserialize, Serialize)]"); + config.type_attribute("TargetFileHash", "#[derive(Deserialize, Serialize)]"); + + config.field_attribute("File.raw", "#[serde(with = \"serde_bytes\")]"); + 
config.field_attribute( + "ClientGetConfigsResponse.roots", + "#[serde(with = \"crate::serde\")]", + ); + config.field_attribute( + "ClientGetConfigsResponse.targets", + "#[serde(with = \"serde_bytes\")]", + ); + config.field_attribute("ClientGetConfigsResponse.targets", "#[serde(default)]"); + config.field_attribute("ClientGetConfigsResponse.roots", "#[serde(default)]"); + config.field_attribute("ClientGetConfigsResponse.target_files", "#[serde(default)]"); + config.field_attribute( + "ClientGetConfigsResponse.client_configs", + "#[serde(default)]", + ); + config .compile_protos( &[ @@ -126,6 +157,7 @@ fn generate_protobuf() { "src/pb/tracer_payload.proto", "src/pb/span.proto", "src/pb/stats.proto", + "src/pb/remoteconfig.proto", ], &["src/pb/"], ) @@ -133,10 +165,12 @@ fn generate_protobuf() { // add license, serde imports, custom deserializer code to the top of the protobuf rust structs // file - let add_to_top = "// Copyright 2023-Present Datadog, Inc. https://www.datadoghq.com/ + let license = "// Copyright 2023-Present Datadog, Inc. 
https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 -use serde::{Deserialize, Deserializer, Serialize}; +".as_bytes(); + + let null_deser = &[license, "use serde::{Deserialize, Deserializer, Serialize}; fn deserialize_null_into_default<'de, D, T>(deserializer: D) -> Result where @@ -152,9 +186,15 @@ pub fn is_default(t: &T) -> bool { } " - .as_bytes(); + .as_bytes()].concat(); + + let serde_uses = &[license, "use serde::{Deserialize, Serialize}; + +" + .as_bytes()].concat(); - prepend_to_file(add_to_top, &output_path.join("pb.rs")); + prepend_to_file(null_deser, &output_path.join("pb.rs")); + prepend_to_file(serde_uses, &output_path.join("remoteconfig.rs")); } #[cfg(feature = "generate-protobuf")] diff --git a/trace-protobuf/src/lib.rs b/trace-protobuf/src/lib.rs index 8bf9c14fd..c393294d3 100644 --- a/trace-protobuf/src/lib.rs +++ b/trace-protobuf/src/lib.rs @@ -2,7 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 #[rustfmt::skip] +mod serde; pub mod pb; +pub mod remoteconfig; #[cfg(test)] mod pb_test; diff --git a/trace-protobuf/src/pb/remoteconfig.proto b/trace-protobuf/src/pb/remoteconfig.proto new file mode 100644 index 000000000..a78a4e8cb --- /dev/null +++ b/trace-protobuf/src/pb/remoteconfig.proto @@ -0,0 +1,178 @@ +syntax = "proto3"; + +package remoteconfig; + +option go_package = "pkg/proto/pbgo/core"; // golang + +// Backend definitions + +message ConfigMetas { + repeated TopMeta roots = 1; + TopMeta timestamp = 2; + TopMeta snapshot = 3; + TopMeta topTargets = 4; + repeated DelegatedMeta delegatedTargets = 5; +} + +message DirectorMetas { + repeated TopMeta roots = 1; + TopMeta timestamp = 2; + TopMeta snapshot = 3; + TopMeta targets = 4; +} + +message DelegatedMeta { + uint64 version = 1; + string role = 2; + bytes raw = 3; +} + +message TopMeta { + uint64 version = 1; + bytes raw = 2; +} + +message File { + string path = 1; + bytes raw = 2; +} + +// Backend queries + +message LatestConfigsRequest { + string hostname = 1; + string 
agentVersion = 2; + // timestamp and snapshot versions move in tandem so they are the same. + uint64 current_config_snapshot_version = 3; + uint64 current_config_root_version = 9; + uint64 current_director_root_version = 8; + repeated string products = 4; + repeated string new_products = 5; + repeated Client active_clients = 6; + bytes backend_client_state = 10; + bool has_error = 11; + string error = 12; + string trace_agent_env = 13; + string org_uuid = 14; +} + +message LatestConfigsResponse { + ConfigMetas config_metas = 1; + DirectorMetas director_metas = 2; + repeated File target_files = 3; +} + +message OrgDataResponse { + string uuid = 1; +} + +message OrgStatusResponse { + bool enabled = 1; + bool authorized = 2; +} + +// Client definitions + +message Client { + ClientState state = 1; + string id = 2; + repeated string products = 3; + reserved 4, 5; + bool is_tracer = 6; + ClientTracer client_tracer = 7; + bool is_agent = 8; + ClientAgent client_agent = 9; + uint64 last_seen = 10; + bytes capabilities = 11; +} + +message ClientTracer { + string runtime_id = 1; + string language = 2; + string tracer_version = 3; + string service = 4; + repeated string extra_services = 8; + string env = 5; + string app_version = 6; + repeated string tags = 7; +} + +message ClientAgent { + string name = 1; + string version = 2; + string cluster_name = 3; + string cluster_id = 4; + repeated string cws_workloads = 5; +} + +message ConfigState { + string id = 1; + uint64 version = 2; + string product = 3; + uint64 apply_state = 4; + string apply_error = 5; +} + +message ClientState { + uint64 root_version = 1; + uint64 targets_version = 2; + repeated ConfigState config_states = 3; + bool has_error = 4; + string error = 5; + bytes backend_client_state = 6; +} + +// Client queries + +message TargetFileHash { + string algorithm = 1; + reserved 2; // old hash format + string hash = 3; +} + +message TargetFileMeta { + string path = 1; + int64 length = 2; + repeated TargetFileHash 
hashes = 3; +} + +message ClientGetConfigsRequest { + Client client = 1; + repeated TargetFileMeta cached_target_files = 2; +} + +message ClientGetConfigsResponse { + repeated bytes roots = 1; + bytes targets = 2; + repeated File target_files = 3; + repeated string client_configs = 4; +} + +// Full state + +message FileMetaState { + uint64 version = 1; + string hash = 2; +} + +message GetStateConfigResponse { + map config_state = 1; + map director_state = 2; + map target_filenames = 3; + repeated Client active_clients = 4; +} + + +message TracerPredicateV1 { + string clientID = 1; + string service = 2; + string environment = 3; + string appVersion = 4; + string tracerVersion = 5; + string language = 6; + string runtimeID = 7; +} + +message TracerPredicates { + repeated TracerPredicateV1 tracer_predicates_v1 = 1; +} diff --git a/trace-protobuf/src/remoteconfig.rs b/trace-protobuf/src/remoteconfig.rs new file mode 100644 index 000000000..6df55ca93 --- /dev/null +++ b/trace-protobuf/src/remoteconfig.rs @@ -0,0 +1,306 @@ +// Copyright 2023-Present Datadog, Inc. 
https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + +use serde::{Deserialize, Serialize}; + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ConfigMetas { + #[prost(message, repeated, tag = "1")] + pub roots: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "2")] + pub timestamp: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub snapshot: ::core::option::Option, + #[prost(message, optional, tag = "4")] + pub top_targets: ::core::option::Option, + #[prost(message, repeated, tag = "5")] + pub delegated_targets: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DirectorMetas { + #[prost(message, repeated, tag = "1")] + pub roots: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "2")] + pub timestamp: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub snapshot: ::core::option::Option, + #[prost(message, optional, tag = "4")] + pub targets: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DelegatedMeta { + #[prost(uint64, tag = "1")] + pub version: u64, + #[prost(string, tag = "2")] + pub role: ::prost::alloc::string::String, + #[prost(bytes = "vec", tag = "3")] + pub raw: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TopMeta { + #[prost(uint64, tag = "1")] + pub version: u64, + #[prost(bytes = "vec", tag = "2")] + pub raw: ::prost::alloc::vec::Vec, +} +#[derive(Deserialize)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct File { + #[prost(string, tag = "1")] + pub path: ::prost::alloc::string::String, + #[prost(bytes = "vec", tag = "2")] + #[serde(with = "serde_bytes")] + pub raw: ::prost::alloc::vec::Vec, +} 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct LatestConfigsRequest { + #[prost(string, tag = "1")] + pub hostname: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub agent_version: ::prost::alloc::string::String, + /// timestamp and snapshot versions move in tandem so they are the same. + #[prost(uint64, tag = "3")] + pub current_config_snapshot_version: u64, + #[prost(uint64, tag = "9")] + pub current_config_root_version: u64, + #[prost(uint64, tag = "8")] + pub current_director_root_version: u64, + #[prost(string, repeated, tag = "4")] + pub products: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(string, repeated, tag = "5")] + pub new_products: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(message, repeated, tag = "6")] + pub active_clients: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "10")] + pub backend_client_state: ::prost::alloc::vec::Vec, + #[prost(bool, tag = "11")] + pub has_error: bool, + #[prost(string, tag = "12")] + pub error: ::prost::alloc::string::String, + #[prost(string, tag = "13")] + pub trace_agent_env: ::prost::alloc::string::String, + #[prost(string, tag = "14")] + pub org_uuid: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct LatestConfigsResponse { + #[prost(message, optional, tag = "1")] + pub config_metas: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub director_metas: ::core::option::Option, + #[prost(message, repeated, tag = "3")] + pub target_files: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct OrgDataResponse { + #[prost(string, tag = "1")] + pub uuid: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct 
OrgStatusResponse { + #[prost(bool, tag = "1")] + pub enabled: bool, + #[prost(bool, tag = "2")] + pub authorized: bool, +} +#[derive(Deserialize, Serialize)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Client { + #[prost(message, optional, tag = "1")] + pub state: ::core::option::Option, + #[prost(string, tag = "2")] + pub id: ::prost::alloc::string::String, + #[prost(string, repeated, tag = "3")] + pub products: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(bool, tag = "6")] + pub is_tracer: bool, + #[prost(message, optional, tag = "7")] + pub client_tracer: ::core::option::Option, + #[prost(bool, tag = "8")] + pub is_agent: bool, + #[prost(message, optional, tag = "9")] + pub client_agent: ::core::option::Option, + #[prost(uint64, tag = "10")] + pub last_seen: u64, + #[prost(bytes = "vec", tag = "11")] + pub capabilities: ::prost::alloc::vec::Vec, +} +#[derive(Deserialize, Serialize)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ClientTracer { + #[prost(string, tag = "1")] + pub runtime_id: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub language: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub tracer_version: ::prost::alloc::string::String, + #[prost(string, tag = "4")] + pub service: ::prost::alloc::string::String, + #[prost(string, repeated, tag = "8")] + pub extra_services: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(string, tag = "5")] + pub env: ::prost::alloc::string::String, + #[prost(string, tag = "6")] + pub app_version: ::prost::alloc::string::String, + #[prost(string, repeated, tag = "7")] + pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +#[derive(Deserialize, Serialize)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ClientAgent { + #[prost(string, tag = "1")] 
+ pub name: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub version: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub cluster_name: ::prost::alloc::string::String, + #[prost(string, tag = "4")] + pub cluster_id: ::prost::alloc::string::String, + #[prost(string, repeated, tag = "5")] + pub cws_workloads: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +#[derive(Deserialize, Serialize)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ConfigState { + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + #[prost(uint64, tag = "2")] + pub version: u64, + #[prost(string, tag = "3")] + pub product: ::prost::alloc::string::String, + #[prost(uint64, tag = "4")] + pub apply_state: u64, + #[prost(string, tag = "5")] + pub apply_error: ::prost::alloc::string::String, +} +#[derive(Deserialize, Serialize)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ClientState { + #[prost(uint64, tag = "1")] + pub root_version: u64, + #[prost(uint64, tag = "2")] + pub targets_version: u64, + #[prost(message, repeated, tag = "3")] + pub config_states: ::prost::alloc::vec::Vec, + #[prost(bool, tag = "4")] + pub has_error: bool, + #[prost(string, tag = "5")] + pub error: ::prost::alloc::string::String, + #[prost(bytes = "vec", tag = "6")] + pub backend_client_state: ::prost::alloc::vec::Vec, +} +#[derive(Deserialize, Serialize)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TargetFileHash { + #[prost(string, tag = "1")] + pub algorithm: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub hash: ::prost::alloc::string::String, +} +#[derive(Deserialize, Serialize)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TargetFileMeta { + #[prost(string, tag = "1")] + pub path: 
::prost::alloc::string::String, + #[prost(int64, tag = "2")] + pub length: i64, + #[prost(message, repeated, tag = "3")] + pub hashes: ::prost::alloc::vec::Vec, +} +#[derive(Deserialize, Serialize)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ClientGetConfigsRequest { + #[prost(message, optional, tag = "1")] + pub client: ::core::option::Option, + #[prost(message, repeated, tag = "2")] + pub cached_target_files: ::prost::alloc::vec::Vec, +} +#[derive(Deserialize)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ClientGetConfigsResponse { + #[prost(bytes = "vec", repeated, tag = "1")] + #[serde(with = "crate::serde")] + #[serde(default)] + pub roots: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + #[prost(bytes = "vec", tag = "2")] + #[serde(with = "serde_bytes")] + #[serde(default)] + pub targets: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "3")] + #[serde(default)] + pub target_files: ::prost::alloc::vec::Vec, + #[prost(string, repeated, tag = "4")] + #[serde(default)] + pub client_configs: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FileMetaState { + #[prost(uint64, tag = "1")] + pub version: u64, + #[prost(string, tag = "2")] + pub hash: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetStateConfigResponse { + #[prost(map = "string, message", tag = "1")] + pub config_state: ::std::collections::HashMap< + ::prost::alloc::string::String, + FileMetaState, + >, + #[prost(map = "string, message", tag = "2")] + pub director_state: ::std::collections::HashMap< + ::prost::alloc::string::String, + FileMetaState, + >, + #[prost(map = "string, string", tag = "3")] + pub target_filenames: ::std::collections::HashMap< 
+ ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, + #[prost(message, repeated, tag = "4")] + pub active_clients: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TracerPredicateV1 { + #[prost(string, tag = "1")] + pub client_id: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub service: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub environment: ::prost::alloc::string::String, + #[prost(string, tag = "4")] + pub app_version: ::prost::alloc::string::String, + #[prost(string, tag = "5")] + pub tracer_version: ::prost::alloc::string::String, + #[prost(string, tag = "6")] + pub language: ::prost::alloc::string::String, + #[prost(string, tag = "7")] + pub runtime_id: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TracerPredicates { + #[prost(message, repeated, tag = "1")] + pub tracer_predicates_v1: ::prost::alloc::vec::Vec, +} diff --git a/trace-protobuf/src/serde.rs b/trace-protobuf/src/serde.rs new file mode 100644 index 000000000..d38dfa8d8 --- /dev/null +++ b/trace-protobuf/src/serde.rs @@ -0,0 +1,34 @@ +use serde::Deserializer; +use serde_bytes::ByteBuf; + +pub trait Deserialize<'de>: Sized { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>; +} + +impl<'de> Deserialize<'de> for Vec> { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Deserialize::deserialize(deserializer).map(|v: Vec| v.into_iter().map(ByteBuf::into_vec).collect()) + } +} + +impl<'de> Deserialize<'de> for Vec { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + serde::Deserialize::deserialize(deserializer) + } +} + +pub fn deserialize<'de, T, D>(deserializer: D) -> Result + where + T: Deserialize<'de>, + D: Deserializer<'de>, +{ + Deserialize::deserialize(deserializer) 
+} From 0e0e5cc499f26a935b3170d9983d0557d0f227bf Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Fri, 17 May 2024 19:27:13 +0200 Subject: [PATCH 02/26] More dynamic configs, fixes and address several review comments Signed-off-by: Bob Weinand --- .../src/dynamic_configuration/data.rs | 41 ++++- remote-config/src/fetch/fetcher.rs | 41 +++-- remote-config/src/fetch/multitarget.rs | 4 +- remote-config/src/fetch/shared.rs | 3 +- remote-config/src/fetch/single.rs | 2 +- remote-config/src/lib.rs | 36 +++++ remote-config/src/parse.rs | 13 +- sidecar-ffi/cbindgen.toml | 2 +- sidecar-ffi/src/lib.rs | 14 +- sidecar/src/entry.rs | 3 - sidecar/src/service/mod.rs | 3 + sidecar/src/service/sidecar_server.rs | 2 + sidecar/src/shm_remote_config.rs | 34 ++-- trace-protobuf/src/pb/remoteconfig.proto | 89 ---------- trace-protobuf/src/remoteconfig.rs | 152 ------------------ 15 files changed, 156 insertions(+), 283 deletions(-) diff --git a/remote-config/src/dynamic_configuration/data.rs b/remote-config/src/dynamic_configuration/data.rs index cee7a536d..2d5ec1bed 100644 --- a/remote-config/src/dynamic_configuration/data.rs +++ b/remote-config/src/dynamic_configuration/data.rs @@ -1,5 +1,5 @@ use std::collections::HashMap; -use serde::Deserialize; +use serde::{Deserialize, Serialize}; #[derive(Debug, Deserialize)] pub struct DynamicConfigTarget { @@ -20,11 +20,38 @@ struct TracingHeaderTag { tag_name: String, } +#[derive(Debug, Copy, Clone, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum TracingSamplingRuleProvenance { + Customer, + Dynamic, +} + +#[derive(Debug, Deserialize)] +pub struct TracingSamplingRuleTag { + pub key: String, + pub value_glob: String, +} + +#[derive(Debug, Deserialize)] +pub struct TracingSamplingRule { + pub service: String, + pub name: Option, + pub provenance: TracingSamplingRuleProvenance, + pub resource: String, + #[serde(default)] + pub tags: Vec, + pub sample_rate: f64, +} + #[derive(Debug, Deserialize)] pub struct DynamicConfig { 
tracing_header_tags: Option>, tracing_sample_rate: Option, log_injection_enabled: Option, + tracing_tags: Option>, + tracing_enabled: Option, + tracing_sampling_rules: Option>, } impl From for Vec { @@ -39,6 +66,15 @@ impl From for Vec { if let Some(log_injection) = value.log_injection_enabled { vec.push(Configs::LogInjectionEnabled(log_injection)); } + if let Some(tags) = value.tracing_tags { + vec.push(Configs::TracingTags(tags)); + } + if let Some(enabled) = value.tracing_enabled { + vec.push(Configs::TracingEnabled(enabled)); + } + if let Some(sampling_rules) = value.tracing_sampling_rules { + vec.push(Configs::TracingSamplingRules(sampling_rules)); + } vec } } @@ -47,4 +83,7 @@ pub enum Configs { TracingHeaderTags(HashMap), TracingSampleRate(f64), LogInjectionEnabled(bool), + TracingTags(Vec), // "key:val" format + TracingEnabled(bool), + TracingSamplingRules(Vec), } diff --git a/remote-config/src/fetch/fetcher.rs b/remote-config/src/fetch/fetcher.rs index 5c293b337..a4ee10b5b 100644 --- a/remote-config/src/fetch/fetcher.rs +++ b/remote-config/src/fetch/fetcher.rs @@ -8,14 +8,15 @@ use sha2::{Digest, Sha256, Sha512}; use tracing::{debug, trace, warn}; use datadog_trace_protobuf::remoteconfig::{ClientGetConfigsRequest, ClientGetConfigsResponse, ClientState, ClientTracer, ConfigState, TargetFileHash, TargetFileMeta}; use ddcommon::{connector, Endpoint}; -use crate::{RemoteConfigPath, Target}; +use crate::{RemoteConfigCapabilities, RemoteConfigPath, RemoteConfigProduct, Target}; use crate::targets::TargetsList; const PROD_INTAKE_SUBDOMAIN: &str = "config"; -/// Manages files. +/// Manages config files. /// Presents store() and update() operations. -/// It is recommended to minimize the overhead of these operations as they +/// It is recommended to minimize the overhead of these operations as they will be invoked while +/// a lock across all ConfigFetchers referencing the same ConfigFetcherState is held. 
pub trait FileStorage { type StoredFile; @@ -32,6 +33,8 @@ pub struct ConfigInvariants { pub language: String, pub tracer_version: String, pub endpoint: Endpoint, + pub products: Vec, + pub capabilities: Vec, } struct StoredTargetFile { @@ -112,6 +115,8 @@ impl ConfigFetcher { /// - stores new files, /// - returns all currently active files. /// It also makes sure that old files are dropped before new files are inserted. + /// + /// Returns None if nothing changed. Otherwise Some(active configs). pub async fn fetch_once( &mut self, runtime_id: &str, @@ -119,7 +124,12 @@ impl ConfigFetcher { config_id: &str, last_error: Option, opaque_state: &mut OpaqueState, - ) -> anyhow::Result>> { + ) -> anyhow::Result>>> { + if self.state.endpoint.api_key.is_some() { + // Using remote config talking to the backend directly is not supported. + return Ok(Some(vec![])); + } + let Target { service, env, app_version } = (*target).clone(); let mut cached_target_files = vec![]; @@ -141,8 +151,7 @@ impl ConfigFetcher { backend_client_state: std::mem::take(&mut opaque_state.client_state), }), id: config_id.into(), - // TODO maybe not hardcode requested products? 
- products: vec!["APM_TRACING".to_string(), "LIVE_DEBUGGING".to_string()], + products: self.state.invariants.products.iter().map(|p| p.to_string()).collect(), is_tracer: true, client_tracer: Some(ClientTracer { runtime_id: runtime_id.to_string(), @@ -157,13 +166,12 @@ impl ConfigFetcher { is_agent: false, client_agent: None, last_seen: 0, - capabilities: vec![], + capabilities: self.state.invariants.capabilities.iter().map(|c| *c as u8).collect(), }), cached_target_files, }; let json = serde_json::to_string(&config_req)?; - // TODO: directly talking to datadog endpoint (once signatures are validated) let req = self.state.endpoint .into_request_builder(concat!("Sidecar/", env!("CARGO_PKG_VERSION")))?; let response = Client::builder() @@ -174,15 +182,21 @@ let status = response.status(); let body_bytes = hyper::body::to_bytes(response.into_body()).await?; if status != StatusCode::OK { + // Not active + if status == StatusCode::NOT_FOUND { + trace!("Requested remote config but remote config not active"); + return Ok(Some(vec![])); + } + let response_body = String::from_utf8(body_bytes.to_vec()).unwrap_or_default(); - anyhow::bail!("Server did not accept traces: {response_body}"); + anyhow::bail!("Server did not accept remote config request: {response_body}"); } - // Agent remote config not active or broken or similar + // Nothing changed if body_bytes.len() <= 3 { - trace!("Requested remote config, but not active; received: {}", String::from_utf8_lossy(body_bytes.as_ref())); - return Ok(vec![]); + trace!("Requested remote config and got an empty reply"); + return Ok(None); } let response: ClientGetConfigsResponse = @@ -190,7 +204,6 @@ let decoded_targets = base64::engine::general_purpose::STANDARD.decode(response.targets.as_slice())?; let targets_list = TargetsList::try_parse(decoded_targets.as_slice()).map_err(|e| anyhow::Error::msg(e).context(format!("Decoded targets reply: {}",
String::from_utf8_lossy(decoded_targets.as_slice()))))?; - // TODO: eventually also verify the targets_list.signatures for FIPS compliance. opaque_state.client_state = targets_list.signed.custom.opaque_backend_state.as_bytes().to_vec(); if let Some(interval) = targets_list.signed.custom.agent_refresh_interval { @@ -290,7 +303,7 @@ impl ConfigFetcher { } } - Ok(configs) + Ok(Some(configs)) } } diff --git a/remote-config/src/fetch/multitarget.rs b/remote-config/src/fetch/multitarget.rs index f98cdba9c..dad64dcbd 100644 --- a/remote-config/src/fetch/multitarget.rs +++ b/remote-config/src/fetch/multitarget.rs @@ -315,7 +315,9 @@ impl { let mut status = status.lock().unwrap(); if let KnownTargetStatus::RemoveAt(instant) = *status { - if instant < Instant::now() { + // Voluntarily give up the semaphore for services in RemoveAt status if + // there are only few available permits + if inner_this.fetcher_semaphore.available_permits() < 10 || instant < Instant::now() { // We need to signal that we're in progress of removing to avoid race conditions *status = KnownTargetStatus::Removing(shared_future.clone()); // break here to drop mutex guard and avoid having status and services locked simultaneously diff --git a/remote-config/src/fetch/shared.rs b/remote-config/src/fetch/shared.rs index 9c8a09d40..8af00b0dc 100644 --- a/remote-config/src/fetch/shared.rs +++ b/remote-config/src/fetch/shared.rs @@ -221,7 +221,8 @@ impl SharedFetcher { }); match fetched { - Ok(files) => { + Ok(None) => { /* unchanged */ }, + Ok(Some(files)) => { for file in files.iter() { file.incref(); } diff --git a/remote-config/src/fetch/single.rs b/remote-config/src/fetch/single.rs index 7cd43d12a..3bbc2c27b 100644 --- a/remote-config/src/fetch/single.rs +++ b/remote-config/src/fetch/single.rs @@ -23,7 +23,7 @@ impl SingleFetcher { } } - pub async fn fetch_once(&mut self) -> anyhow::Result>> { + pub async fn fetch_once(&mut self) -> anyhow::Result>>> { self.fetcher.fetch_once(self.runtime_id.as_str(), 
self.target.clone(), self.config_id.as_str(), self.last_error.take(), &mut self.opaque_state).await } } \ No newline at end of file diff --git a/remote-config/src/lib.rs b/remote-config/src/lib.rs index 4a967efe9..f7f094d1d 100644 --- a/remote-config/src/lib.rs +++ b/remote-config/src/lib.rs @@ -15,3 +15,39 @@ pub struct Target { pub env: String, pub app_version: String, } + +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] +pub enum RemoteConfigCapabilities { + AsmActivation = 1, + AsmIpBlocking = 2, + AsmDdRules = 3, + AsmExclusions = 4, + AsmRequestBlocking = 5, + AsmResponseBlocking = 6, + AsmUserBlocking = 7, + AsmCustomRules = 8, + AsmCustomBlockingResponse = 9, + AsmTrustedIps = 10, + AsmApiSecuritySampleRate = 11, + ApmTracingSampleRate = 12, + ApmTracingLogsInjection = 13, + ApmTracingHttpHeaderTags = 14, + ApmTracingCustomTags = 15, + AsmProcessorOverrides = 16, + AsmCustomDataScanners = 17, + AsmExclusionData = 18, + ApmTracingEnabled = 19, + ApmTracingDataStreamsEnabled = 20, + AsmRaspSqli = 21, + AsmRaspLfi = 22, + AsmRaspSsrf = 23, + AsmRaspShi = 24, + AsmRaspXxe = 25, + AsmRaspRce = 26, + AsmRaspNosqli = 27, + AsmRaspXss = 28, + ApmTracingSampleRules = 29, + CsmActivation = 30, +} \ No newline at end of file diff --git a/remote-config/src/parse.rs b/remote-config/src/parse.rs index 82fd15a94..11d41e125 100644 --- a/remote-config/src/parse.rs +++ b/remote-config/src/parse.rs @@ -1,6 +1,7 @@ // Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. // This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. 
+use serde::{Deserialize, Serialize}; use datadog_live_debugger::LiveDebuggingData; use crate::dynamic_configuration::data::DynamicConfigFile; @@ -10,7 +11,8 @@ pub enum RemoteConfigSource { Employee, } -#[derive(Copy, Clone, Eq, Hash, PartialEq)] +#[repr(C)] +#[derive(Debug, Copy, Clone, Eq, Hash, PartialEq, Serialize, Deserialize)] pub enum RemoteConfigProduct { ApmTracing, LiveDebugger, @@ -92,6 +94,15 @@ impl RemoteConfigData { } } +impl From<&RemoteConfigData> for RemoteConfigProduct { + fn from(value: &RemoteConfigData) -> Self { + match value { + RemoteConfigData::DynamicConfig(_) => RemoteConfigProduct::ApmTracing, + RemoteConfigData::LiveDebugger(_) => RemoteConfigProduct::LiveDebugger, + } + } +} + #[derive(Debug)] pub struct RemoteConfigValue { pub source: RemoteConfigSource, diff --git a/sidecar-ffi/cbindgen.toml b/sidecar-ffi/cbindgen.toml index 10d450339..75568489f 100644 --- a/sidecar-ffi/cbindgen.toml +++ b/sidecar-ffi/cbindgen.toml @@ -34,4 +34,4 @@ must_use = "DDOG_CHECK_RETURN" [parse] parse_deps = true -include = ["ddcommon", "ddtelemetry", "datadog-sidecar", "ddtelemetry-ffi", "ddcommon-ffi", "datadog-ipc", "datadog-live-debugger"] +include = ["ddcommon", "ddtelemetry", "datadog-sidecar", "ddtelemetry-ffi", "ddcommon-ffi", "datadog-ipc", "datadog-live-debugger", "datadog-remote-config"] diff --git a/sidecar-ffi/src/lib.rs b/sidecar-ffi/src/lib.rs index f202060b6..5728ea3b9 100644 --- a/sidecar-ffi/src/lib.rs +++ b/sidecar-ffi/src/lib.rs @@ -37,7 +37,7 @@ use std::sync::Arc; use std::time::Duration; use datadog_live_debugger::debugger_defs::DebuggerPayload; use datadog_remote_config::fetch::ConfigInvariants; -use datadog_remote_config::Target; +use datadog_remote_config::{RemoteConfigCapabilities, RemoteConfigProduct, Target}; use datadog_sidecar::shm_remote_config::RemoteConfigReader; #[repr(C)] @@ -203,11 +203,17 @@ pub unsafe extern "C" fn ddog_remote_config_reader_for_endpoint<'a>( service_name: ffi::CharSlice, env_name: ffi::CharSlice, 
app_version: ffi::CharSlice, + remote_config_products: *const RemoteConfigProduct, + remote_config_products_count: usize, + remote_config_capabilities: *const RemoteConfigCapabilities, + remote_config_capabilities_count: usize, ) -> Box { Box::new(RemoteConfigReader::new(&ConfigInvariants { language: language.to_utf8_lossy().into(), tracer_version: tracer_version.to_utf8_lossy().into(), endpoint: endpoint.clone(), + products: slice::from_raw_parts(remote_config_products, remote_config_products_count).to_vec(), + capabilities: slice::from_raw_parts(remote_config_capabilities, remote_config_capabilities_count).to_vec(), }, &Arc::new(Target { service: service_name.to_utf8_lossy().into(), env: env_name.to_utf8_lossy().into(), @@ -477,6 +483,10 @@ pub unsafe extern "C" fn ddog_sidecar_session_set_config( force_drop_size: usize, log_level: ffi::CharSlice, log_path: ffi::CharSlice, + remote_config_products: *const RemoteConfigProduct, + remote_config_products_count: usize, + remote_config_capabilities: *const RemoteConfigCapabilities, + remote_config_capabilities_count: usize, ) -> MaybeError { try_c!(blocking::set_session_config( transport, @@ -496,6 +506,8 @@ pub unsafe extern "C" fn ddog_sidecar_session_set_config( } else { LogMethod::File(String::from(log_path.to_utf8_lossy()).into()) }, + remote_config_products: slice::from_raw_parts(remote_config_products, remote_config_products_count).to_vec(), + remote_config_capabilities: slice::from_raw_parts(remote_config_capabilities, remote_config_capabilities_count).to_vec(), }, )); diff --git a/sidecar/src/entry.rs b/sidecar/src/entry.rs index 08b985bc3..1f4000d0b 100644 --- a/sidecar/src/entry.rs +++ b/sidecar/src/entry.rs @@ -114,9 +114,6 @@ where #[cfg(feature = "tokio-console")] console_subscriber::init(); - #[cfg(unix)] - let mut builder = tokio::runtime::Builder::new_current_thread(); - #[cfg(windows)] let mut builder = tokio::runtime::Builder::new_multi_thread(); let runtime = builder.enable_all().build()?; let _g = 
runtime.enter(); diff --git a/sidecar/src/service/mod.rs b/sidecar/src/service/mod.rs index 5ee1771f7..d97efb736 100644 --- a/sidecar/src/service/mod.rs +++ b/sidecar/src/service/mod.rs @@ -11,6 +11,7 @@ use ddtelemetry::worker::TelemetryActions; use serde::{Deserialize, Serialize}; use std::path::PathBuf; use std::time::Duration; +use datadog_remote_config::{RemoteConfigCapabilities, RemoteConfigProduct}; // public types we want to bring up to top level of service:: scope pub use instance_id::InstanceId; @@ -51,6 +52,8 @@ pub struct SessionConfig { pub force_drop_size: usize, pub log_level: String, pub log_file: config::LogMethod, + pub remote_config_products: Vec, + pub remote_config_capabilities: Vec, } #[derive(Debug, Deserialize, Serialize)] diff --git a/sidecar/src/service/sidecar_server.rs b/sidecar/src/service/sidecar_server.rs index 3b8556bed..9e30daa3e 100644 --- a/sidecar/src/service/sidecar_server.rs +++ b/sidecar/src/service/sidecar_server.rs @@ -627,6 +627,8 @@ impl SidecarInterface for SidecarServer { language: config.language, tracer_version: config.tracer_version, endpoint: config.endpoint, + products: config.remote_config_products, + capabilities: config.remote_config_capabilities, }); self.trace_flusher .interval_ms diff --git a/sidecar/src/shm_remote_config.rs b/sidecar/src/shm_remote_config.rs index aae096f72..60267bfe0 100644 --- a/sidecar/src/shm_remote_config.rs +++ b/sidecar/src/shm_remote_config.rs @@ -19,7 +19,7 @@ use std::time::Duration; use priority_queue::PriorityQueue; use tokio::time::Instant; use tracing::{debug, error, trace, warn}; -use zwohash::{HashSet, ZwoHasher}; +use zwohash::ZwoHasher; use crate::primary_sidecar_identifier; pub struct RemoteConfigWriter(OneWayShmWriter); @@ -247,7 +247,7 @@ pub struct RemoteConfigManager { active_reader: Option, encountered_targets: HashMap, (RemoteConfigReader, Vec)>, unexpired_targets: PriorityQueue, Reverse>, - active_configs: HashSet, + active_configs: HashMap, last_read_configs: Vec, 
check_configs: Vec, } @@ -300,7 +300,7 @@ impl RemoteConfigManager { } } self.last_read_configs = configs; - self.check_configs = self.active_configs.iter().cloned().collect(); + self.check_configs = self.active_configs.keys().cloned().collect(); } while let Some((_, Reverse(instant))) = self.unexpired_targets.peek() { @@ -315,17 +315,23 @@ impl RemoteConfigManager { while let Some(config) = self.check_configs.pop() { if !self.last_read_configs.contains(&config) { trace!("Removing remote config file {config}"); - self.active_configs.remove(&config); - return RemoteConfigUpdate::Remove(RemoteConfigPath::try_parse(&config).unwrap()); + if let Some(path) = self.active_configs.remove(&config) { + return RemoteConfigUpdate::Remove(path); + } } } while let Some(config) = self.last_read_configs.pop() { - if !self.active_configs.contains(&config) { + if !self.active_configs.contains_key(&config) { match read_config(&config) { Ok(parsed) => { trace!("Adding remote config file {config}: {parsed:?}"); - self.active_configs.insert(config); + self.active_configs.insert(config, RemoteConfigPath { + source: parsed.source.clone(), + product: (&parsed.data).into(), + config_id: parsed.config_id.clone(), + name: parsed.name.clone(), + }); return RemoteConfigUpdate::Add(parsed); } Err(e) => warn!("Failed reading remote config file {config}; skipping: {e:?}"), @@ -342,15 +348,7 @@ impl RemoteConfigManager { if let Some(reader) = self.active_reader.take() { // Reconstruct currently active configurations if self.check_configs.is_empty() { - if current_configs.is_empty() { - current_configs = self.active_configs.iter().cloned().collect(); - } else { - let mut pending = self.active_configs.clone(); - for config in current_configs { - pending.insert(config); - } - current_configs = pending.into_iter().collect(); - } + current_configs.extend(self.active_configs.keys().cloned()); } self.encountered_targets.insert(old_target.clone(), (reader, current_configs)); 
self.unexpired_targets.push(old_target, Reverse(Instant::now())); @@ -368,14 +366,14 @@ impl RemoteConfigManager { /// Sets the currently active target. pub fn track_target(&mut self, target: &Arc) { self.set_target(Some(target.clone())); - self.check_configs = self.active_configs.iter().cloned().collect(); + self.check_configs = self.active_configs.keys().cloned().collect(); } /// Resets the currently active target. The next configuration change polls will emit Remove() /// for all current tracked active configurations. pub fn reset_target(&mut self) { self.set_target(None); - self.check_configs = self.active_configs.iter().cloned().collect(); + self.check_configs = self.active_configs.keys().cloned().collect(); } pub fn get_target(&self) -> Option<&Arc> { diff --git a/trace-protobuf/src/pb/remoteconfig.proto b/trace-protobuf/src/pb/remoteconfig.proto index a78a4e8cb..7b1aa7f89 100644 --- a/trace-protobuf/src/pb/remoteconfig.proto +++ b/trace-protobuf/src/pb/remoteconfig.proto @@ -6,71 +6,11 @@ option go_package = "pkg/proto/pbgo/core"; // golang // Backend definitions -message ConfigMetas { - repeated TopMeta roots = 1; - TopMeta timestamp = 2; - TopMeta snapshot = 3; - TopMeta topTargets = 4; - repeated DelegatedMeta delegatedTargets = 5; -} - -message DirectorMetas { - repeated TopMeta roots = 1; - TopMeta timestamp = 2; - TopMeta snapshot = 3; - TopMeta targets = 4; -} - -message DelegatedMeta { - uint64 version = 1; - string role = 2; - bytes raw = 3; -} - -message TopMeta { - uint64 version = 1; - bytes raw = 2; -} - message File { string path = 1; bytes raw = 2; } -// Backend queries - -message LatestConfigsRequest { - string hostname = 1; - string agentVersion = 2; - // timestamp and snapshot versions move in tandem so they are the same. 
- uint64 current_config_snapshot_version = 3; - uint64 current_config_root_version = 9; - uint64 current_director_root_version = 8; - repeated string products = 4; - repeated string new_products = 5; - repeated Client active_clients = 6; - bytes backend_client_state = 10; - bool has_error = 11; - string error = 12; - string trace_agent_env = 13; - string org_uuid = 14; -} - -message LatestConfigsResponse { - ConfigMetas config_metas = 1; - DirectorMetas director_metas = 2; - repeated File target_files = 3; -} - -message OrgDataResponse { - string uuid = 1; -} - -message OrgStatusResponse { - bool enabled = 1; - bool authorized = 2; -} - // Client definitions message Client { @@ -147,32 +87,3 @@ message ClientGetConfigsResponse { repeated File target_files = 3; repeated string client_configs = 4; } - -// Full state - -message FileMetaState { - uint64 version = 1; - string hash = 2; -} - -message GetStateConfigResponse { - map config_state = 1; - map director_state = 2; - map target_filenames = 3; - repeated Client active_clients = 4; -} - - -message TracerPredicateV1 { - string clientID = 1; - string service = 2; - string environment = 3; - string appVersion = 4; - string tracerVersion = 5; - string language = 6; - string runtimeID = 7; -} - -message TracerPredicates { - repeated TracerPredicateV1 tracer_predicates_v1 = 1; -} diff --git a/trace-protobuf/src/remoteconfig.rs b/trace-protobuf/src/remoteconfig.rs index 6df55ca93..ba9035010 100644 --- a/trace-protobuf/src/remoteconfig.rs +++ b/trace-protobuf/src/remoteconfig.rs @@ -3,50 +3,6 @@ use serde::{Deserialize, Serialize}; -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ConfigMetas { - #[prost(message, repeated, tag = "1")] - pub roots: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag = "2")] - pub timestamp: ::core::option::Option, - #[prost(message, optional, tag = "3")] - pub snapshot: ::core::option::Option, - #[prost(message, optional, tag 
= "4")] - pub top_targets: ::core::option::Option, - #[prost(message, repeated, tag = "5")] - pub delegated_targets: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DirectorMetas { - #[prost(message, repeated, tag = "1")] - pub roots: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag = "2")] - pub timestamp: ::core::option::Option, - #[prost(message, optional, tag = "3")] - pub snapshot: ::core::option::Option, - #[prost(message, optional, tag = "4")] - pub targets: ::core::option::Option, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DelegatedMeta { - #[prost(uint64, tag = "1")] - pub version: u64, - #[prost(string, tag = "2")] - pub role: ::prost::alloc::string::String, - #[prost(bytes = "vec", tag = "3")] - pub raw: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TopMeta { - #[prost(uint64, tag = "1")] - pub version: u64, - #[prost(bytes = "vec", tag = "2")] - pub raw: ::prost::alloc::vec::Vec, -} #[derive(Deserialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -57,61 +13,6 @@ pub struct File { #[serde(with = "serde_bytes")] pub raw: ::prost::alloc::vec::Vec, } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LatestConfigsRequest { - #[prost(string, tag = "1")] - pub hostname: ::prost::alloc::string::String, - #[prost(string, tag = "2")] - pub agent_version: ::prost::alloc::string::String, - /// timestamp and snapshot versions move in tandem so they are the same. 
- #[prost(uint64, tag = "3")] - pub current_config_snapshot_version: u64, - #[prost(uint64, tag = "9")] - pub current_config_root_version: u64, - #[prost(uint64, tag = "8")] - pub current_director_root_version: u64, - #[prost(string, repeated, tag = "4")] - pub products: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - #[prost(string, repeated, tag = "5")] - pub new_products: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - #[prost(message, repeated, tag = "6")] - pub active_clients: ::prost::alloc::vec::Vec, - #[prost(bytes = "vec", tag = "10")] - pub backend_client_state: ::prost::alloc::vec::Vec, - #[prost(bool, tag = "11")] - pub has_error: bool, - #[prost(string, tag = "12")] - pub error: ::prost::alloc::string::String, - #[prost(string, tag = "13")] - pub trace_agent_env: ::prost::alloc::string::String, - #[prost(string, tag = "14")] - pub org_uuid: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LatestConfigsResponse { - #[prost(message, optional, tag = "1")] - pub config_metas: ::core::option::Option, - #[prost(message, optional, tag = "2")] - pub director_metas: ::core::option::Option, - #[prost(message, repeated, tag = "3")] - pub target_files: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct OrgDataResponse { - #[prost(string, tag = "1")] - pub uuid: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct OrgStatusResponse { - #[prost(bool, tag = "1")] - pub enabled: bool, - #[prost(bool, tag = "2")] - pub authorized: bool, -} #[derive(Deserialize, Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -251,56 +152,3 @@ pub struct ClientGetConfigsResponse { #[serde(default)] pub client_configs: 
::prost::alloc::vec::Vec<::prost::alloc::string::String>, } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct FileMetaState { - #[prost(uint64, tag = "1")] - pub version: u64, - #[prost(string, tag = "2")] - pub hash: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetStateConfigResponse { - #[prost(map = "string, message", tag = "1")] - pub config_state: ::std::collections::HashMap< - ::prost::alloc::string::String, - FileMetaState, - >, - #[prost(map = "string, message", tag = "2")] - pub director_state: ::std::collections::HashMap< - ::prost::alloc::string::String, - FileMetaState, - >, - #[prost(map = "string, string", tag = "3")] - pub target_filenames: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, - #[prost(message, repeated, tag = "4")] - pub active_clients: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TracerPredicateV1 { - #[prost(string, tag = "1")] - pub client_id: ::prost::alloc::string::String, - #[prost(string, tag = "2")] - pub service: ::prost::alloc::string::String, - #[prost(string, tag = "3")] - pub environment: ::prost::alloc::string::String, - #[prost(string, tag = "4")] - pub app_version: ::prost::alloc::string::String, - #[prost(string, tag = "5")] - pub tracer_version: ::prost::alloc::string::String, - #[prost(string, tag = "6")] - pub language: ::prost::alloc::string::String, - #[prost(string, tag = "7")] - pub runtime_id: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TracerPredicates { - #[prost(message, repeated, tag = "1")] - pub tracer_predicates_v1: ::prost::alloc::vec::Vec, -} From 42ce03980ad17da9eaf57d78ca19c8eb2bcf334d Mon Sep 17 00:00:00 2001 
From: Bob Weinand Date: Fri, 24 May 2024 17:59:33 +0200 Subject: [PATCH 03/26] Add tests Signed-off-by: Bob Weinand --- ipc/src/lib.rs | 2 + remote-config/Cargo.toml | 11 +- .../src/dynamic_configuration/data.rs | 25 ++ remote-config/src/fetch/fetcher.rs | 232 +++++++++++++++- remote-config/src/fetch/mod.rs | 3 + remote-config/src/fetch/multitarget.rs | 259 ++++++++++++++++-- remote-config/src/fetch/shared.rs | 242 +++++++++++++++- remote-config/src/fetch/test_server.rs | 144 ++++++++++ remote-config/src/parse.rs | 2 +- remote-config/src/targets.rs | 5 + sidecar/Cargo.toml | 3 +- sidecar/src/shm_remote_config.rs | 196 ++++++++++++- trace-protobuf/build.rs | 4 +- trace-protobuf/src/remoteconfig.rs | 4 +- trace-protobuf/src/serde.rs | 22 +- 15 files changed, 1089 insertions(+), 65 deletions(-) create mode 100644 remote-config/src/fetch/test_server.rs diff --git a/ipc/src/lib.rs b/ipc/src/lib.rs index b3dc197cf..68783f126 100644 --- a/ipc/src/lib.rs +++ b/ipc/src/lib.rs @@ -1,6 +1,8 @@ // Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 +extern crate core; + pub mod example_interface; pub mod handles; pub mod transport; diff --git a/remote-config/Cargo.toml b/remote-config/Cargo.toml index 70f3062a3..4f9490397 100644 --- a/remote-config/Cargo.toml +++ b/remote-config/Cargo.toml @@ -4,12 +4,16 @@ license = "Apache 2.0" name = "datadog-remote-config" version = "0.0.1" +[features] +test = [] + [dependencies] anyhow = { version = "1.0" } ddcommon = { path = "../ddcommon" } datadog-trace-protobuf = { path = "../trace-protobuf" } datadog-live-debugger = { path = "../live-debugger" } hyper = { version = "0.14", features = ["client"], default-features = false } +http = "0.2" base64 = "0.21.0" sha2 = "0.10" uuid = "1.7.0" @@ -17,7 +21,12 @@ futures-util = "0.3" tokio = { version = "1.36.0" } tokio-util = "0.7.10" manual_future = "0.1.1" -time = { version = "0.3", features = ["parsing", "serde"] } +time = { version = "0.3", features = ["parsing", "serde", "formatting"] } tracing = { version = "0.1", default-features = false } serde = "1.0" serde_json = { version = "1.0", features = ["raw_value"] } + +[dev-dependencies] +hyper = { version = "0.14", features = ["client", "server"], default-features = false } +lazy_static = "1.4.0" +futures = "0.3" diff --git a/remote-config/src/dynamic_configuration/data.rs b/remote-config/src/dynamic_configuration/data.rs index 2d5ec1bed..8be6e49ae 100644 --- a/remote-config/src/dynamic_configuration/data.rs +++ b/remote-config/src/dynamic_configuration/data.rs @@ -2,12 +2,14 @@ use std::collections::HashMap; use serde::{Deserialize, Serialize}; #[derive(Debug, Deserialize)] +#[cfg_attr(feature = "test", derive(Serialize))] pub struct DynamicConfigTarget { pub service: String, pub env: String, } #[derive(Debug, Deserialize)] +#[cfg_attr(feature = "test", derive(Serialize))] pub struct DynamicConfigFile { pub action: String, pub service_target: DynamicConfigTarget, @@ -15,6 +17,7 @@ pub struct DynamicConfigFile 
{ } #[derive(Debug, Deserialize)] +#[cfg_attr(feature = "test", derive(Serialize))] struct TracingHeaderTag { header: String, tag_name: String, @@ -28,12 +31,14 @@ pub enum TracingSamplingRuleProvenance { } #[derive(Debug, Deserialize)] +#[cfg_attr(feature = "test", derive(Serialize))] pub struct TracingSamplingRuleTag { pub key: String, pub value_glob: String, } #[derive(Debug, Deserialize)] +#[cfg_attr(feature = "test", derive(Serialize))] pub struct TracingSamplingRule { pub service: String, pub name: Option, @@ -45,6 +50,7 @@ pub struct TracingSamplingRule { } #[derive(Debug, Deserialize)] +#[cfg_attr(feature = "test", derive(Default, Serialize))] pub struct DynamicConfig { tracing_header_tags: Option>, tracing_sample_rate: Option, @@ -87,3 +93,22 @@ pub enum Configs { TracingEnabled(bool), TracingSamplingRules(Vec), } + +#[cfg(feature = "test")] +pub mod tests { + use super::*; + + pub fn dummy_dynamic_config(enabled: bool) -> DynamicConfigFile { + DynamicConfigFile { + action: "".to_string(), + service_target : DynamicConfigTarget { + service: "".to_string(), + env: "".to_string(), + }, + lib_config: DynamicConfig { + tracing_enabled: Some(enabled), + ..DynamicConfig::default() + }, + } + } +} diff --git a/remote-config/src/fetch/fetcher.rs b/remote-config/src/fetch/fetcher.rs index a4ee10b5b..d7d785d01 100644 --- a/remote-config/src/fetch/fetcher.rs +++ b/remote-config/src/fetch/fetcher.rs @@ -1,9 +1,9 @@ -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; use std::sync::{Arc, Mutex, MutexGuard}; use base64::Engine; use hyper::http::uri::{PathAndQuery, Scheme}; -use hyper::{Body, Client, StatusCode}; +use hyper::{Client, StatusCode}; use sha2::{Digest, Sha256, Sha512}; use tracing::{debug, trace, warn}; use datadog_trace_protobuf::remoteconfig::{ClientGetConfigsRequest, ClientGetConfigsResponse, ClientState, ClientTracer, ConfigState, TargetFileHash, TargetFileMeta}; @@ -170,13 
+170,15 @@ impl ConfigFetcher { }), cached_target_files, }; - let json = serde_json::to_string(&config_req)?; let req = self.state.endpoint - .into_request_builder(concat!("Sidecar/", env!("CARGO_PKG_VERSION")))?; + .into_request_builder(concat!("Sidecar/", env!("CARGO_PKG_VERSION")))? + .method(http::Method::POST) + .header(http::header::CONTENT_TYPE, ddcommon::header::APPLICATION_JSON) + .body(serde_json::to_string(&config_req)?)?; let response = Client::builder() .build(connector::Connector::default()) - .request(req.body(Body::from(json))?) + .request(req) .await .map_err(|e| anyhow::Error::msg(e).context(format!("Url: {:?}", self.state.endpoint)))?; let status = response.status(); @@ -219,9 +221,8 @@ impl ConfigFetcher { let mut target_files = self.state.target_files_by_path.lock().unwrap(); if self.state.expire_unused_files { - target_files.retain(|k, _| { - targets_list.signed.targets.contains_key(k.as_str()) - }); + let retain: HashSet<_> = response.client_configs.iter().collect(); + target_files.retain(|k, _| { retain.contains(k) }); } for (path, target_file) in targets_list.signed.targets { @@ -327,3 +328,218 @@ fn get_product_endpoint(subdomain: &str, endpoint: &Endpoint) -> Endpoint { api_key: endpoint.api_key.clone(), } } + +#[cfg(test)] +pub mod tests { + use http::Response; + use hyper::Body; + use lazy_static::lazy_static; + use crate::fetch::test_server::RemoteConfigServer; + use crate::RemoteConfigSource; + use super::*; + + lazy_static! 
{ + pub static ref PATH_FIRST: RemoteConfigPath = RemoteConfigPath { + source: RemoteConfigSource::Employee, + product: RemoteConfigProduct::ApmTracing, + config_id: "1234".to_string(), + name: "config".to_string(), + }; + + pub static ref PATH_SECOND: RemoteConfigPath = RemoteConfigPath { + source: RemoteConfigSource::Employee, + product: RemoteConfigProduct::ApmTracing, + config_id: "9876".to_string(), + name: "config".to_string(), + }; + + pub static ref DUMMY_TARGET: Arc = Arc::new(Target { + service: "service".to_string(), + env: "env".to_string(), + app_version: "1.3.5".to_string(), + }); + } + + static DUMMY_RUNTIME_ID: &'static str = "3b43524b-a70c-45dc-921d-34504e50c5eb"; + + #[derive(Default)] + pub struct Storage { + pub files: Mutex>>>, + } + + pub struct PathStore { + path: RemoteConfigPath, + storage: Arc, + pub data: Arc> + } + + #[derive(Debug, Eq, PartialEq)] + pub struct DataStore { + pub version: u64, + pub contents: String, + } + + impl Drop for PathStore { + fn drop(&mut self) { + self.storage.files.lock().unwrap().remove(&self.path); + } + } + + impl FileStorage for Arc { + type StoredFile = PathStore; + + fn store(&self, version: u64, path: RemoteConfigPath, contents: Vec) -> anyhow::Result> { + let data = Arc::new(Mutex::new(DataStore { + version, + contents: String::from_utf8(contents).unwrap(), + })); + assert!(self.files.lock().unwrap().insert(path.clone(), data.clone()).is_none()); + Ok(Arc::new(PathStore { + path: path.clone(), + storage: self.clone(), + data, + })) + } + + fn update(&self, file: &Arc, version: u64, contents: Vec) -> anyhow::Result<()> { + *file.data.lock().unwrap() = DataStore { + version, + contents: String::from_utf8(contents).unwrap(), + }; + Ok(()) + } + } + + #[tokio::test] + async fn test_inactive() { + let server = RemoteConfigServer::spawn(); + let storage = Arc::new(Storage::default()); + let mut fetcher = ConfigFetcher::new(storage.clone(), Arc::new(ConfigFetcherState::new(server.dummy_invariants()))); + let 
mut opaque_state = OpaqueState::default(); + + let mut response = Response::new(Body::from("")); + *response.status_mut() = StatusCode::NOT_FOUND; + *server.next_response.lock().unwrap() = Some(response); + + let fetched = fetcher.fetch_once(DUMMY_RUNTIME_ID, DUMMY_TARGET.clone(), "foo", Some("test".to_string()), &mut opaque_state).await.unwrap().unwrap(); + + assert!(fetched.is_empty()); + } + + #[tokio::test] + async fn test_fetch_cache() { + let server = RemoteConfigServer::spawn(); + server.files.lock().unwrap().insert(PATH_FIRST.clone(), (vec![DUMMY_TARGET.clone()], 1, "v1".to_string())); + + let storage = Arc::new(Storage::default()); + + let invariants = ConfigInvariants { + language: "php".to_string(), + tracer_version: "1.2.3".to_string(), + endpoint: server.endpoint.clone(), + products: vec![RemoteConfigProduct::ApmTracing, RemoteConfigProduct::LiveDebugger], + capabilities: vec![RemoteConfigCapabilities::ApmTracingCustomTags], + }; + + + let mut fetcher = ConfigFetcher::new(storage.clone(), Arc::new(ConfigFetcherState::new(invariants))); + let mut opaque_state = OpaqueState::default(); + + { + let fetched = fetcher.fetch_once(DUMMY_RUNTIME_ID, DUMMY_TARGET.clone(), "foo", Some("test".to_string()), &mut opaque_state).await.unwrap().unwrap(); + + let req = server.last_request.lock().unwrap(); + let req = req.as_ref().unwrap(); + assert!(req.cached_target_files.is_empty()); + + let client = req.client.as_ref().unwrap(); + assert_eq!(client.capabilities, &[RemoteConfigCapabilities::ApmTracingCustomTags as u8]); + assert_eq!(client.products, &["APM_TRACING", "LIVE_DEBUGGING"]); + assert_eq!(client.is_tracer, true); + assert_eq!(client.is_agent, false); + assert_eq!(client.id, "foo"); + + let state = client.state.as_ref().unwrap(); + assert_eq!(state.error, "test"); + assert_eq!(state.has_error, true); + assert!(state.config_states.is_empty()); + assert!(state.backend_client_state.is_empty()); + + let tracer = client.client_tracer.as_ref().unwrap(); + 
assert_eq!(tracer.service, DUMMY_TARGET.service); + assert_eq!(tracer.env, DUMMY_TARGET.env); + assert_eq!(tracer.app_version, DUMMY_TARGET.app_version); + assert_eq!(tracer.runtime_id, DUMMY_RUNTIME_ID); + assert_eq!(tracer.language, "php"); + assert_eq!(tracer.tracer_version, "1.2.3"); + + + assert_eq!(String::from_utf8_lossy(&opaque_state.client_state), "some state"); + assert_eq!(fetched.len(), 1); + assert_eq!(storage.files.lock().unwrap().len(), 1); + + assert!(Arc::ptr_eq(&fetched[0].data, storage.files.lock().unwrap().get(&PATH_FIRST).unwrap())); + assert_eq!(fetched[0].data.lock().unwrap().contents, "v1"); + assert_eq!(fetched[0].data.lock().unwrap().version, 1); + } + + { + let fetched = fetcher.fetch_once(DUMMY_RUNTIME_ID, DUMMY_TARGET.clone(), "foo", None, &mut opaque_state).await.unwrap(); + assert!(fetched.is_none()); // no change + + let req = server.last_request.lock().unwrap(); + let req = req.as_ref().unwrap(); + assert_eq!(req.cached_target_files.len(), 1); + + let client = req.client.as_ref().unwrap(); + assert_eq!(client.capabilities, &[RemoteConfigCapabilities::ApmTracingCustomTags as u8]); + assert_eq!(client.products, &["APM_TRACING", "LIVE_DEBUGGING"]); + assert_eq!(client.is_tracer, true); + assert_eq!(client.is_agent, false); + assert_eq!(client.id, "foo"); + + let state = client.state.as_ref().unwrap(); + assert_eq!(state.error, "test"); + assert_eq!(state.has_error, true); + assert!(state.config_states.is_empty()); + assert!(state.backend_client_state.is_empty()); + + let cached = &req.cached_target_files[0]; + assert_eq!(cached.path, PATH_FIRST.to_string()); + assert_eq!(cached.length, 2); + assert_eq!(cached.hashes.len(), 1); + } + + server.files.lock().unwrap().insert(PATH_FIRST.clone(), (vec![DUMMY_TARGET.clone()], 2, "v2".to_string())); + server.files.lock().unwrap().insert(PATH_SECOND.clone(), (vec![DUMMY_TARGET.clone()], 1, "X".to_string())); + + { + let fetched = fetcher.fetch_once(DUMMY_RUNTIME_ID, DUMMY_TARGET.clone(), "foo", 
None, &mut opaque_state).await.unwrap().unwrap(); + assert_eq!(fetched.len(), 2); + assert_eq!(storage.files.lock().unwrap().len(), 2); + + let (first, second) = if fetched[0].data.lock().unwrap().version == 2 { (0, 1) } else { (1, 0) }; + + assert!(Arc::ptr_eq(&fetched[first].data, storage.files.lock().unwrap().get(&PATH_FIRST).unwrap())); + assert_eq!(fetched[first].data.lock().unwrap().contents, "v2"); + assert_eq!(fetched[first].data.lock().unwrap().version, 2); + + assert!(Arc::ptr_eq(&fetched[second].data, storage.files.lock().unwrap().get(&PATH_SECOND).unwrap())); + assert_eq!(fetched[second].data.lock().unwrap().contents, "X"); + assert_eq!(fetched[second].data.lock().unwrap().version, 1); + } + + { + let fetched = fetcher.fetch_once(DUMMY_RUNTIME_ID, DUMMY_TARGET.clone(), "foo", None, &mut opaque_state).await.unwrap(); + assert!(fetched.is_none()); // no change + } + + server.files.lock().unwrap().remove(&PATH_FIRST); + + { + let fetched = fetcher.fetch_once(DUMMY_RUNTIME_ID, DUMMY_TARGET.clone(), "foo", None, &mut opaque_state).await.unwrap().unwrap(); + assert_eq!(fetched.len(), 1); + assert_eq!(storage.files.lock().unwrap().len(), 1); + } + } +} diff --git a/remote-config/src/fetch/mod.rs b/remote-config/src/fetch/mod.rs index a97f4415e..96f8b8948 100644 --- a/remote-config/src/fetch/mod.rs +++ b/remote-config/src/fetch/mod.rs @@ -1,8 +1,11 @@ +#[cfg(any(test, feature = "test"))] +pub mod test_server; mod fetcher; mod single; mod shared; mod multitarget; +#[cfg_attr(test, allow(ambiguous_glob_reexports))] // ignore mod tests re-export pub use fetcher::*; pub use single::*; pub use shared::*; diff --git a/remote-config/src/fetch/multitarget.rs b/remote-config/src/fetch/multitarget.rs index dad64dcbd..4874644b6 100644 --- a/remote-config/src/fetch/multitarget.rs +++ b/remote-config/src/fetch/multitarget.rs @@ -6,12 +6,13 @@ use std::collections::{HashMap, HashSet}; use std::default::Default; use std::fmt::Debug; use std::hash::Hash; -use 
std::sync::atomic::{AtomicU32, Ordering}; +use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; use std::time::Duration; use futures_util::future::Shared; use futures_util::FutureExt; use manual_future::ManualFuture; +use tokio::select; use tokio::sync::Semaphore; use tokio::time::Instant; use tracing::{debug, error, trace}; @@ -31,7 +32,7 @@ pub struct MultiTargetFetcher, HashSet>>, /// Keyed by runtime_id runtimes: Mutex>>, - pub remote_config_interval: AtomicU32, + pub remote_config_interval: AtomicU64, /// All services by target in use services: Mutex, KnownTarget>>, pending_async_insertions: AtomicU32, @@ -89,7 +90,7 @@ impl storage: RefcountingStorage::new(storage, ConfigFetcherState::new(invariants)), target_runtimes: Mutex::new(Default::default()), runtimes: Mutex::new(Default::default()), - remote_config_interval: AtomicU32::new(5000), + remote_config_interval: AtomicU64::new(5_000_000_000), services: Mutex::new(Default::default()), pending_async_insertions: AtomicU32::new(0), fetcher_semaphore: Semaphore::new(Self::DEFAULT_CLIENTS_LIMIT as usize), @@ -221,10 +222,6 @@ impl } } - fn requires_synthetic_id(info: &RuntimeInfo) -> bool { - info.targets.len() > 1 - } - pub fn add_runtime( self: &Arc, runtime_id: String, @@ -235,11 +232,25 @@ impl match self.runtimes.lock().unwrap().entry(runtime_id) { Entry::Occupied(mut runtime_entry) => { let info = runtime_entry.get_mut(); + let primary_target = if info.targets.len() == 1 { + info.targets.keys().next().cloned() + } else { + None + }; match info.targets.entry(target.clone()) { Entry::Occupied(mut e) => *e.get_mut() += 1, Entry::Vacant(e) => { + // it's the second usage here + if let Some(primary_target) = primary_target { + let mut services = self.services.lock().unwrap(); + let known_target = services.get_mut(&primary_target).unwrap(); + if !known_target.synthetic_id { + known_target.synthetic_id = true; + *known_target.fetcher.runtime_id.lock().unwrap() = 
Self::generate_synthetic_id(); + } + } e.insert(1); - self.add_target(Self::requires_synthetic_id(info), runtime_entry.key(), target.clone()); + self.add_target(true, runtime_entry.key(), target.clone()); }, } } @@ -249,7 +260,7 @@ impl notify_target, targets: HashMap::from([(target.clone(), 1)]), }; - self.add_target(Self::requires_synthetic_id(&info), e.key(), target.clone()); + self.add_target(info.targets.len() > 1, e.key(), target.clone()); e.insert(info); } } @@ -293,6 +304,7 @@ impl let this = self.clone(); let fetcher = known_target.fetcher.clone(); let status = known_target.status.clone(); + fetcher.default_interval.store(self.remote_config_interval.load(Ordering::Relaxed), Ordering::Relaxed); tokio::spawn(async move { // Relatively primitive, no prioritization or anything. It is not expected that this // semaphore is ever awaiting under standard usage. Can be improved if needed, e.g. @@ -311,22 +323,7 @@ impl let inner_fetcher = fetcher.clone(); let inner_this = this.clone(); - fetcher.run(this.storage.clone(), Box::new(move |files| { - { - let mut status = status.lock().unwrap(); - if let KnownTargetStatus::RemoveAt(instant) = *status { - // Voluntarily give up the semaphore for services in RemoveAt status if - // there are only few available permits - if inner_this.fetcher_semaphore.available_permits() < 10 || instant < Instant::now() { - // We need to signal that we're in progress of removing to avoid race conditions - *status = KnownTargetStatus::Removing(shared_future.clone()); - // break here to drop mutex guard and avoid having status and services locked simultaneously - inner_fetcher.cancel(); - return None; - } - } - } // unlock status - + let fetcher_fut = fetcher.run(this.storage.clone(), Box::new(move |files| { let (error, notify) = inner_this.storage.storage.fetched(&inner_fetcher.target, files); if notify { @@ -347,7 +344,31 @@ impl } error - })).await; + })).shared(); + + loop { + { + let mut status = status.lock().unwrap(); + if let 
KnownTargetStatus::RemoveAt(instant) = *status { + // Voluntarily give up the semaphore for services in RemoveAt status if + // there are only few available permits + if this.fetcher_semaphore.available_permits() < 10 || instant < Instant::now() { + // We need to signal that we're in progress of removing to avoid race conditions + *status = KnownTargetStatus::Removing(shared_future.clone()); + // break here to drop mutex guard and avoid having status and services locked simultaneously + fetcher.cancel(); + break; + } + } + } // unlock mutex + + select! { + _ = tokio::time::sleep(Duration::from_nanos(fetcher.default_interval.load(Ordering::Relaxed))) => {}, + _ = fetcher_fut.clone() => { + break; + } + } + } this.storage.storage.expired(&fetcher.target); @@ -379,3 +400,189 @@ impl } } } + +#[cfg(test)] +mod tests { + use std::hash::Hasher; + use std::sync::atomic::AtomicU8; + use manual_future::ManualFutureCompleter; + use crate::fetch::fetcher::tests::*; + use crate::fetch::shared::tests::*; + use crate::fetch::test_server::RemoteConfigServer; + use crate::{RemoteConfigPath, Target}; + use super::*; + + #[derive(Clone)] + struct MultiFileStorage { + rc: RcFileStorage, + on_dead_completer: Arc>>>, + recent_fetches: Arc, Vec>>>>, + awaiting_fetches: Arc, + awaited_fetched_done: Arc>>>, + expected_expirations: Arc, ManualFutureCompleter<()>>>>, + } + + impl MultiFileStorage { + pub fn await_fetches(&self, num: u8) -> ManualFuture<()> { + let (future, completer) = ManualFuture::new(); + + self.recent_fetches.lock().unwrap().clear(); + self.awaiting_fetches.store(num, Ordering::SeqCst); + *self.awaited_fetched_done.lock().unwrap() = Some(completer); + + future + } + + pub fn expect_expiration(&self, target: &Arc) -> ManualFuture<()> { + let (future, completer) = ManualFuture::new(); + self.expected_expirations.lock().unwrap().insert(target.clone(), completer); + future + } + } + + impl MultiTargetHandlers for MultiFileStorage { + fn fetched(&self, target: &Arc, files: 
&[Arc]) -> (Option, bool) { + match self.recent_fetches.lock().unwrap().entry(target.clone()) { + Entry::Occupied(_) => panic!("Double fetch without recent_fetches clear"), + Entry::Vacant(e) => { e.insert(files.to_vec()); }, + } + + match self.awaiting_fetches.fetch_sub(1, Ordering::SeqCst) { + 2.. => {}, + 1 => { tokio::spawn(self.awaited_fetched_done.lock().unwrap().take().unwrap().complete(())); }, + ..=0 => panic!("Got unexpected fetch"), + } + + (None, true) + } + + fn expired(&self, target: &Arc) { + tokio::spawn(self.expected_expirations.lock().unwrap().remove(target).unwrap().complete(())); + } + + fn dead(&self) { + tokio::spawn(self.on_dead_completer.lock().unwrap().take().unwrap().complete(())); + } + } + + impl FileStorage for MultiFileStorage { + type StoredFile = ::StoredFile; + + fn store(&self, version: u64, path: RemoteConfigPath, contents: Vec) -> anyhow::Result> { + self.rc.store(version, path, contents) + } + + fn update(&self, file: &Arc, version: u64, contents: Vec) -> anyhow::Result<()> { + self.rc.update(file, version, contents) + } + } + + #[derive(Default, Debug)] + struct NotifyState { + notifications: Mutex>, + } + + impl NotifyState { + fn assert_notified(&self, ids: &[u8]) { + let mut notified = std::mem::take(&mut *self.notifications.lock().unwrap()).into_iter().collect::>(); + notified.sort(); + assert_eq!(notified, ids); + } + } + + #[derive(Clone, Debug)] + struct Notifier { + id: u8, + state: Arc, + } + + impl Hash for Notifier { + fn hash(&self, state: &mut H) { + state.write_u8(self.id) + } + } + + impl PartialEq for Notifier { + fn eq(&self, other: &Self) -> bool { + self.id == other.id + } + } + + impl Eq for Notifier {} + + impl NotifyTarget for Notifier { + fn notify(&self) { + self.state.notifications.lock().unwrap().insert(self.id); + } + } + + static RT_ID_1: &'static str = "3b43524b-a70c-45dc-921d-34504e50c5eb"; + static RT_ID_2: &'static str = "ae588386-8464-43ba-bd3a-3e2d36b2c22c"; + static RT_ID_3: &'static str = 
"0125dff8-d9a7-4fd3-a0c2-0ca3b12816a1"; + + + #[tokio::test] + async fn test_multi_fetcher() { + let server = RemoteConfigServer::spawn(); + let (on_dead, on_dead_completer) = ManualFuture::new(); + let storage = MultiFileStorage { + rc: RcFileStorage::default(), + on_dead_completer: Arc::new(Mutex::new(Some(on_dead_completer))), + recent_fetches: Default::default(), + awaiting_fetches: Default::default(), + awaited_fetched_done: Default::default(), + expected_expirations: Default::default(), + }; + let state = Arc::new(NotifyState::default()); + + server.files.lock().unwrap().insert(PATH_FIRST.clone(), (vec![DUMMY_TARGET.clone()], 1, "v1".to_string())); + + let fut = storage.await_fetches(1); + + let fetcher = MultiTargetFetcher::::new(storage.clone(), server.dummy_invariants()); + fetcher.remote_config_interval.store(1000, Ordering::SeqCst); + + fetcher.add_runtime(RT_ID_1.to_string(), Notifier { id: 1, state: state.clone() }, &OTHER_TARGET); + assert_eq!(*fetcher.services.lock().unwrap().get(&*OTHER_TARGET).unwrap().fetcher.runtime_id.lock().unwrap(), RT_ID_1); + + fetcher.add_runtime(RT_ID_1.to_string(), Notifier { id: 1, state: state.clone() }, &DUMMY_TARGET); + fetcher.add_runtime(RT_ID_2.to_string(), Notifier { id: 2, state: state.clone() }, &DUMMY_TARGET); + + assert_eq!(*fetcher.services.lock().unwrap().get(&*DUMMY_TARGET).unwrap().fetcher.runtime_id.lock().unwrap(), RT_ID_2); + assert_ne!(*fetcher.services.lock().unwrap().get(&*OTHER_TARGET).unwrap().fetcher.runtime_id.lock().unwrap(), RT_ID_1); + + assert_eq!(fetcher.runtimes.lock().unwrap().len(), 2); // two runtimes + assert_eq!(fetcher.target_runtimes.lock().unwrap().len(), 2); // two fetchers + + fetcher.add_runtime(RT_ID_3.to_string(), Notifier { id: 3, state: state.clone() }, &OTHER_TARGET); + + fut.await; + state.assert_notified(&[1, 2]); + + let last_fetched: Vec<_> = storage.recent_fetches.lock().unwrap().get(&*DUMMY_TARGET).unwrap().iter().map(|p| p.store.data.clone()).collect(); + 
assert_eq!(last_fetched.len(), 1); + + let fut = storage.await_fetches(2); + server.files.lock().unwrap().insert(PATH_FIRST.clone(), (vec![OTHER_TARGET.clone()], 1, "v1".to_string())); + + fut.await; + state.assert_notified(&[1, 2, 3]); + + let new_fetched: Vec<_> = storage.recent_fetches.lock().unwrap().get(&*OTHER_TARGET).unwrap().iter().map(|p| p.store.data.clone()).collect(); + assert_eq!(storage.recent_fetches.lock().unwrap().get(&*OTHER_TARGET).unwrap().len(), 1); + if !Arc::ptr_eq(&new_fetched[0], &last_fetched[0]) { + assert_eq!(*new_fetched[0].lock().unwrap(), *last_fetched[0].lock().unwrap()); + } + + fetcher.delete_runtime(RT_ID_1, &OTHER_TARGET); + fetcher.delete_runtime(RT_ID_1, &DUMMY_TARGET); + fetcher.delete_runtime(RT_ID_2, &DUMMY_TARGET); + fetcher.delete_runtime(RT_ID_3, &OTHER_TARGET); + + fetcher.shutdown(); + storage.expect_expiration(&DUMMY_TARGET); + storage.expect_expiration(&OTHER_TARGET); + + on_dead.await + } +} diff --git a/remote-config/src/fetch/shared.rs b/remote-config/src/fetch/shared.rs index 8af00b0dc..a4c5cdfed 100644 --- a/remote-config/src/fetch/shared.rs +++ b/remote-config/src/fetch/shared.rs @@ -132,7 +132,7 @@ impl Clone for RefcountingStorage where S::StoredFile impl RefcountingStorage where S::StoredFile: RefcountedFile { pub fn new(storage: S, mut state: ConfigFetcherState) -> Self { - state.expire_unused_files = true; + state.expire_unused_files = false; RefcountingStorage { storage, state: Arc::new(state), @@ -217,25 +217,27 @@ impl SharedFetcher { let last_run_id = fetcher.file_storage.run_id.dec_runners(); fetcher.file_storage.inactive.lock().unwrap().retain(|_, v| { - (first_run_id..last_run_id).contains(&v.get_dropped_run_id()) && v.delref() == 0 + (first_run_id..last_run_id).contains(&v.get_dropped_run_id()) && v.delref() == 1 }); match fetched { Ok(None) => { /* unchanged */ }, Ok(Some(files)) => { - for file in files.iter() { - file.incref(); - } + if !files.is_empty() || !last_files.is_empty() { + for file in 
files.iter() { + file.incref(); + } - for file in last_files { - if file.delref() == 0 { - fetcher.file_storage.expire_file(file); + for file in last_files { + if file.delref() == 1 { + fetcher.file_storage.expire_file(file); + } } - } - last_files = files; + last_files = files; - last_error = on_fetch(&last_files); + last_error = on_fetch(&last_files); + } } Err(e) => error!("{:?}", e), } @@ -253,7 +255,7 @@ impl SharedFetcher { } for file in last_files { - if file.delref() == 0 { + if file.delref() == 1 { fetcher.file_storage.expire_file(file); } } @@ -261,7 +263,223 @@ impl SharedFetcher { /// Note that due to the async logic, a cancellation does not guarantee a strict ordering: /// A final on_fetch call from within the run() method may happen after the cancellation. + /// Cancelling from within on_fetch callback is always final. pub fn cancel(&self) { self.cancellation.cancel(); } } + +#[cfg(test)] +pub mod tests { + use futures::future::join_all; + use std::sync::Arc; + use lazy_static::lazy_static; + use crate::fetch::fetcher::tests::*; + use crate::fetch::test_server::RemoteConfigServer; + use crate::Target; + use super::*; + + lazy_static! 
{ + pub static ref OTHER_TARGET: Arc = Arc::new(Target { + service: "other".to_string(), + env: "env".to_string(), + app_version: "7.8.9".to_string(), + }); + } + + pub struct RcPathStore { + pub store: Arc, + refcounted: FileRefcountData, + } + + impl RefcountedFile for RcPathStore { + fn refcount(&self) -> &FileRefcountData { + &self.refcounted + } + } + + #[derive(Default, Clone)] + pub struct RcFileStorage(Arc); + + impl FileStorage for RcFileStorage { + type StoredFile = RcPathStore; + + fn store(&self, version: u64, path: RemoteConfigPath, contents: Vec) -> anyhow::Result> { + Ok(Arc::new(RcPathStore { + store: self.0.store(version, path.clone(), contents)?, + refcounted: FileRefcountData::new(version, path), + })) + } + + fn update(&self, file: &Arc, version: u64, contents: Vec) -> anyhow::Result<()> { + self.0.update(&file.store, version, contents) + } + } + + #[tokio::test] + async fn test_single_fetcher() { + let server = RemoteConfigServer::spawn(); + let storage = RcFileStorage::default(); + let rc_storage = RefcountingStorage::new(storage.clone(), ConfigFetcherState::new(server.dummy_invariants())); + + server.files.lock().unwrap().insert(PATH_FIRST.clone(), (vec![DUMMY_TARGET.clone()], 1, "v1".to_string())); + + let fetcher = SharedFetcher::new(DUMMY_TARGET.clone(), "3b43524b-a70c-45dc-921d-34504e50c5eb".to_string()); + let iteration = AtomicU32::new(0); + let inner_fetcher = unsafe { &*(&fetcher as *const SharedFetcher) }; + let inner_storage = storage.clone(); + fetcher.run(rc_storage, Box::new(move |fetched| { + match iteration.fetch_add(1, Ordering::SeqCst) { + 0 => { + assert_eq!(fetched.len(), 1); + assert_eq!(fetched[0].store.data.lock().unwrap().contents, "v1"); + + server.files.lock().unwrap().insert(PATH_SECOND.clone(), (vec![DUMMY_TARGET.clone()], 1, "X".to_string())); + + Some("error".to_string()) + }, + 1 => { + assert_eq!(fetched.len(), 2); + let req = server.last_request.lock().unwrap(); + let req = req.as_ref().unwrap(); + let client = 
req.client.as_ref().unwrap(); + let state = client.state.as_ref().unwrap(); + assert_eq!(state.error, "error"); + + server.files.lock().unwrap().remove(&PATH_SECOND); + + None + }, + 2 => { + assert_eq!(fetched.len(), 1); + assert_eq!(inner_storage.0.files.lock().unwrap().len(), 1); + let req = server.last_request.lock().unwrap(); + let req = req.as_ref().unwrap(); + let client = req.client.as_ref().unwrap(); + let state = client.state.as_ref().unwrap(); + assert_eq!(state.has_error, false); + + inner_fetcher.cancel(); + + None + } + _ => panic!("Unexpected"), + } + })).await; + + assert!(storage.0.files.lock().unwrap().is_empty()); + } + + #[tokio::test] + async fn test_parallel_fetchers() { + let server = RemoteConfigServer::spawn(); + let storage = RcFileStorage::default(); + let rc_storage = RefcountingStorage::new(storage.clone(), ConfigFetcherState::new(server.dummy_invariants())); + + server.files.lock().unwrap().insert(PATH_FIRST.clone(), (vec![DUMMY_TARGET.clone(), OTHER_TARGET.clone()], 1, "v1".to_string())); + server.files.lock().unwrap().insert(PATH_SECOND.clone(), (vec![DUMMY_TARGET.clone()], 1, "X".to_string())); + + let server_1 = server.clone(); + let server_1_storage = storage.clone(); + let server_first_1 = move || { + assert_eq!(server_1_storage.0.files.lock().unwrap().len(), 2); + server_1.files.lock().unwrap().insert(PATH_FIRST.clone(), (vec![OTHER_TARGET.clone()], 1, "v1".to_string())); + server_1.files.lock().unwrap().insert(PATH_SECOND.clone(), (vec![DUMMY_TARGET.clone(), OTHER_TARGET.clone()], 1, "X".to_string())); + }; + let server_first_2 = server_first_1.clone(); + + let server_2 = server.clone(); + let server_2_storage = storage.clone(); + let server_second_1 = move || { + assert_eq!(server_2_storage.0.files.lock().unwrap().len(), 2); + server_2.files.lock().unwrap().insert(PATH_FIRST.clone(), (vec![DUMMY_TARGET.clone()], 2, "v2".to_string())); + server_2.files.lock().unwrap().remove(&PATH_SECOND); + }; + let server_second_2 = 
server_second_1.clone(); + + let server_3 = server.clone(); + let server_3_storage = storage.clone(); + let server_third_1 = move || { + // one file should be expired by now + assert_eq!(server_3_storage.0.files.lock().unwrap().len(), 1); + server_3.files.lock().unwrap().clear(); + }; + let server_third_2 = server_third_1.clone(); + + let fetcher_1 = SharedFetcher::new(DUMMY_TARGET.clone(), "3b43524b-a70c-45dc-921d-34504e50c5eb".to_string()); + let fetcher_2 = SharedFetcher::new(OTHER_TARGET.clone(), "ae588386-8464-43ba-bd3a-3e2d36b2c22c".to_string()); + let iteration = Arc::new(AtomicU32::new(0)); + let iteration_1 = iteration.clone(); + let iteration_2 = iteration.clone(); + let inner_fetcher_1 = unsafe { &*(&fetcher_1 as *const SharedFetcher) }; + let inner_fetcher_2 = unsafe { &*(&fetcher_2 as *const SharedFetcher) }; + join_all(vec![fetcher_1.run(rc_storage.clone(), Box::new(move |fetched| { + match iteration_1.fetch_add(1, Ordering::SeqCst) { + i @ 0|i @ 1 => { + assert_eq!(fetched.len(), 2); + + if i == 1 { + server_first_1(); + } + }, + i @ 2|i @ 3 => { + assert_eq!(fetched.len(), 1); + assert_eq!(fetched[0].store.data.lock().unwrap().contents, "X"); + + if i == 3 { + server_second_1(); + } + }, + i @ 4|i @ 5 => { + assert_eq!(fetched.len(), 1); + assert_eq!(fetched[0].store.data.lock().unwrap().contents, "v2"); + + if i == 5 { + server_third_1(); + } + }, + 6 | 7 => { + assert_eq!(fetched.len(), 0); + + inner_fetcher_1.cancel(); + }, + _ => panic!("Unexpected"), + } + None + })), fetcher_2.run(rc_storage, Box::new(move |fetched| { + match iteration_2.fetch_add(1, Ordering::SeqCst) { + i @ 0|i @ 1 => { + assert_eq!(fetched.len(), 1); + assert_eq!(fetched[0].store.data.lock().unwrap().contents, "v1"); + + if i == 1 { + server_first_2(); + } + }, + i @ 2|i @ 3 => { + assert_eq!(fetched.len(), 2); + + if i == 3 { + server_second_2(); + } + }, + i @ 4|i @ 5 => { + assert_eq!(fetched.len(), 0); + + if i == 5 { + server_third_2(); + } + }, + 6 | 7 => { + 
assert_eq!(fetched.len(), 0); + + inner_fetcher_2.cancel(); + }, + _ => panic!("Unexpected"), + } + None + }))]).await; + + assert!(storage.0.files.lock().unwrap().is_empty()); + } +} diff --git a/remote-config/src/fetch/test_server.rs b/remote-config/src/fetch/test_server.rs new file mode 100644 index 000000000..95bf5e921 --- /dev/null +++ b/remote-config/src/fetch/test_server.rs @@ -0,0 +1,144 @@ +use std::collections::HashMap; +use std::convert::Infallible; +use std::net::TcpListener; +use std::sync::{Arc, Mutex}; +use base64::Engine; +use http::{Request, Response}; +use hyper::{Body, Server}; +use hyper::service::{make_service_fn, service_fn}; +use serde_json::value::to_raw_value; +use sha2::{Digest, Sha256}; +use time::OffsetDateTime; +use tokio::select; +use tokio::sync::mpsc::Sender; +use datadog_trace_protobuf::remoteconfig::{ClientGetConfigsRequest, ClientGetConfigsResponse, File}; +use ddcommon::Endpoint; +use crate::fetch::ConfigInvariants; +use crate::{RemoteConfigCapabilities, RemoteConfigPath, RemoteConfigProduct, Target}; +use crate::targets::{TargetData, TargetsCustom, TargetsData, TargetsList}; + +pub struct RemoteConfigServer { + pub last_request: Mutex>, + pub files: Mutex>, u64, String)>>, + pub next_response: Mutex>>, + pub endpoint: Endpoint, + #[allow(dead_code)] // stops receiver on drop + shutdown_complete_tx: Sender<()>, +} + +impl RemoteConfigServer { + pub fn spawn() -> Arc { + let listener = TcpListener::bind("127.0.0.1:0").unwrap(); + let port = listener.local_addr().unwrap().port(); + let (shutdown_complete_tx, mut shutdown_complete_rx) = tokio::sync::mpsc::channel::<()>(1); + let server = Arc::new(RemoteConfigServer { + last_request: Mutex::new(None), + files: Default::default(), + next_response: Mutex::new(None), + endpoint: Endpoint { + url: format!("http://127.0.0.1:{port}/").parse().unwrap(), + api_key: None, + }, + shutdown_complete_tx, + }); + let this = server.clone(); + tokio::spawn(async move { + let service = 
make_service_fn(|_conn| { + let this = this.clone(); + async move { + Ok::<_, Infallible>(service_fn(move |req: Request| { + let this = this.clone(); + async move { + let body_bytes = hyper::body::to_bytes(req.into_body()).await.unwrap(); + let request: ClientGetConfigsRequest = serde_json::from_str(&String::from_utf8(body_bytes.to_vec()).unwrap()).unwrap(); + let response = if let Some(response) = this.next_response.lock().unwrap().take() { + response + } else { + let known: HashMap<_, _> = request.cached_target_files.iter().map(|m| (m.path.clone(), m.hashes[0].hash.clone())).collect(); + let files = this.files.lock().unwrap(); + let applied_files: HashMap<_, _> = files.iter().filter(|(_, (targets, _, _))| { + let tracer = request.client.as_ref().unwrap().client_tracer.as_ref().unwrap(); + targets.iter().any(|t| t.service == tracer.service && t.env == tracer.env && t.app_version == tracer.app_version) + }).collect(); + let states = &request.client.as_ref().unwrap().state.as_ref().unwrap().config_states; + if applied_files.len() == states.len() && states.iter().all(|s| { + for (p, (_, v, _)) in applied_files.iter() { + if p.product.to_string() == s.product && p.config_id == s.id && *v == s.version { + return true; + } + } + false + }) { + Response::new( Body::from("{}")) + } else { + let target_info: Vec<_> = applied_files.iter().map(|(p, (_, v, file))| { + (p.to_string(), format!("{:x}", Sha256::digest(file)), to_raw_value(v).unwrap(), file.clone()) + }).filter(|(p, hash, _, _)| if let Some(existing) = known.get(p) { + existing != hash + } else { + true + }).collect(); + let targets = TargetsList { + signatures: vec![], + signed: TargetsData { + _type: "", + custom: TargetsCustom { + agent_refresh_interval: Some(1000), + opaque_backend_state: "some state", + }, + expires: OffsetDateTime::from_unix_timestamp(253402300799).unwrap(), + spec_version: "1.0.0", + targets: target_info.iter().map(|(p, hash, version, _)| { + (p.as_str(), TargetData { + custom: 
HashMap::from([("v", &**version)]), + hashes: HashMap::from([("sha256", hash.as_str())]), + length: 0, + }) + }).collect(), + version: 1, + }, + }; + let response = ClientGetConfigsResponse { + roots: vec![] /* not checked */, + targets: base64::engine::general_purpose::STANDARD.encode(serde_json::to_vec(&targets).unwrap()).into_bytes(), + target_files: target_info.iter().map(|(p, _, _, file)| { + File { + path: p.to_string(), + raw: base64::engine::general_purpose::STANDARD.encode(file).into_bytes(), + } + }).collect(), + client_configs: applied_files.keys().map(|k| k.to_string()).collect(), + }; + Response::new(Body::from(serde_json::to_vec(&response).unwrap())) + } + }; + *this.last_request.lock().unwrap() = Some(request); + Ok::<_, Infallible>(response) + } + })) + } + }); + let server = Server::from_tcp(listener).unwrap().serve(service); + + select! { + server_result = server => { + if let Err(e) = server_result { + eprintln!("server connection error: {}", e); + } + }, + _ = shutdown_complete_rx.recv() => {}, + } + }); + server + } + + pub fn dummy_invariants(&self) -> ConfigInvariants { + ConfigInvariants { + language: "php".to_string(), + tracer_version: "1.2.3".to_string(), + endpoint: self.endpoint.clone(), + products: vec![RemoteConfigProduct::ApmTracing, RemoteConfigProduct::LiveDebugger], + capabilities: vec![RemoteConfigCapabilities::ApmTracingCustomTags], + } + } +} diff --git a/remote-config/src/parse.rs b/remote-config/src/parse.rs index 11d41e125..d9128a62b 100644 --- a/remote-config/src/parse.rs +++ b/remote-config/src/parse.rs @@ -27,7 +27,7 @@ impl ToString for RemoteConfigProduct { } } -#[derive(Clone, Eq, Hash, PartialEq)] +#[derive(Debug, Clone, Eq, Hash, PartialEq)] pub struct RemoteConfigPath { pub source: RemoteConfigSource, pub product: RemoteConfigProduct, diff --git a/remote-config/src/targets.rs b/remote-config/src/targets.rs index e9abc8bbc..bb0861959 100644 --- a/remote-config/src/targets.rs +++ b/remote-config/src/targets.rs @@ -5,6 
+5,7 @@ use serde_json::value::RawValue; use time::OffsetDateTime; #[derive(Deserialize)] +#[cfg_attr(any(test, feature = "test"), derive(serde::Serialize))] pub struct TargetsList<'a> { #[serde(borrow)] pub signatures: Vec>, @@ -12,12 +13,14 @@ pub struct TargetsList<'a> { } #[derive(Deserialize)] +#[cfg_attr(any(test, feature = "test"), derive(serde::Serialize))] pub struct TargetsSignature<'a> { pub keyid: &'a str, pub sig: &'a str, } #[derive(Deserialize)] +#[cfg_attr(any(test, feature = "test"), derive(serde::Serialize))] pub struct TargetsData<'a> { pub _type: &'a str, pub custom: TargetsCustom<'a>, @@ -29,12 +32,14 @@ pub struct TargetsData<'a> { } #[derive(Deserialize)] +#[cfg_attr(any(test, feature = "test"), derive(serde::Serialize))] pub struct TargetsCustom<'a> { pub agent_refresh_interval: Option, pub opaque_backend_state: &'a str, } #[derive(Deserialize)] +#[cfg_attr(any(test, feature = "test"), derive(serde::Serialize))] pub struct TargetData<'a> { #[serde(borrow)] pub custom: HashMap<&'a str, &'a RawValue>, diff --git a/sidecar/Cargo.toml b/sidecar/Cargo.toml index 41d20fb75..bf7169701 100644 --- a/sidecar/Cargo.toml +++ b/sidecar/Cargo.toml @@ -44,6 +44,7 @@ rmp-serde = "1.1.1" base64 = "0.21.0" spawn_worker = { path = "../spawn_worker" } zwohash = "0.1.2" +sha2 = "0.10" sys-info = { version = "0.9.0" } tokio = { version = "1.35.1", features = ["fs", "sync", "io-util", "signal", "rt-multi-thread"] } tokio-util = { version = "0.7", features = ["codec"] } @@ -94,5 +95,5 @@ microseh = "0.1.1" libc = { version = "0.2" } tempfile = { version = "3.3" } httpmock = "0.7.0" +datadog-remote-config = { path = "../remote-config", features = ["test"] } datadog-trace-utils = { path = "../trace-utils", features = ["test-utils"] } - diff --git a/sidecar/src/shm_remote_config.rs b/sidecar/src/shm_remote_config.rs index 60267bfe0..fcb1b3230 100644 --- a/sidecar/src/shm_remote_config.rs +++ b/sidecar/src/shm_remote_config.rs @@ -17,6 +17,7 @@ use std::io; use 
std::sync::{Arc, Mutex}; use std::time::Duration; use priority_queue::PriorityQueue; +use sha2::{Digest, Sha224}; use tokio::time::Instant; use tracing::{debug, error, trace, warn}; use zwohash::ZwoHasher; @@ -30,7 +31,8 @@ fn path_for_remote_config(id: &ConfigInvariants, target: &Arc) -> CStrin let mut hasher = ZwoHasher::default(); id.hash(&mut hasher); target.hash(&mut hasher); - CString::new(format!("/libdatadog-remote-config-{}-{}", primary_sidecar_identifier(), hasher.finish())).unwrap() + // datadog remote config, on macos we're restricted to 31 chars + CString::new(format!("/ddrc{}-{}", primary_sidecar_identifier(), hasher.finish())).unwrap() } impl RemoteConfigReader { @@ -72,7 +74,7 @@ struct ConfigFileStorage { invariants: ConfigInvariants, /// All writers writers: Arc, RemoteConfigWriter>>>, - on_dead: Arc>, + on_dead: Arc>>>, } struct StoredShmFile { @@ -102,13 +104,19 @@ impl FileStorage for ConfigFileStorage { } } -fn store_shm(version: u64, path: &RemoteConfigPath, file: Vec) -> io::Result { +fn store_shm(version: u64, path: &RemoteConfigPath, file: Vec) -> anyhow::Result { let name = format!( - "/libdatadog-remote-config-file-{}-{}-{}", + "ddrc{}-{}", primary_sidecar_identifier(), version, - BASE64_URL_SAFE_NO_PAD.encode(path.to_string()) ); + // as much signal as possible to be collision free + let hashed_path = BASE64_URL_SAFE_NO_PAD.encode(Sha224::digest(&path.to_string())); + #[cfg(target_os = "macos")] + let sliced_path = &hashed_path[..30 - name.len()]; + #[cfg(not(target_os = "macos"))] + let sliced_path = &hashed_path; + let name = format!("/{}-{}", name, sliced_path); let mut handle = NamedShmHandle::create(CString::new(name)?, file.len())? 
.map()?; @@ -137,6 +145,8 @@ impl MultiTargetHandlers for ConfigFileStorage { let mut serialized = Vec::with_capacity(len); for file in files.iter() { serialized.extend_from_slice(file.handle.lock().unwrap().get_path()); + serialized.push(b':'); + serialized.extend_from_slice(BASE64_URL_SAFE_NO_PAD.encode(file.refcount.path.to_string()).as_bytes()); serialized.push(b'\n'); } @@ -159,7 +169,7 @@ impl MultiTargetHandlers for ConfigFileStorage { } fn dead(&self) { - (self.on_dead)(); + (self.on_dead.lock().unwrap().take().expect("The MultiTargetHandler must not be used anymore once on_dead is called"))(); } } @@ -185,11 +195,11 @@ pub struct ShmRemoteConfigs(Arc ShmRemoteConfigs { - pub fn new(invariants: ConfigInvariants, on_dead: Box) -> Self { + pub fn new(invariants: ConfigInvariants, on_dead: Box) -> Self { let storage = ConfigFileStorage { invariants: invariants.clone(), writers: Default::default(), - on_dead: Arc::new(on_dead), + on_dead: Arc::new(Mutex::new(Some(on_dead))), }; ShmRemoteConfigs(MultiTargetFetcher::new(storage, invariants)) } @@ -225,13 +235,13 @@ impl ShmRemoteConfigs { } fn read_config(path: &str) -> anyhow::Result { - let mapped = NamedShmHandle::open(&CString::new(path)?)?.map()?; - if let Some(rc_path) = path.split('-').nth(6) { + if let [shm_path, rc_path] = &path.split(':').collect::>()[..] 
{ + let mapped = NamedShmHandle::open(&CString::new(*shm_path)?)?.map()?; let rc_path = String::from_utf8(BASE64_URL_SAFE_NO_PAD.decode(rc_path)?)?; RemoteConfigValue::try_parse(&rc_path, mapped.as_slice()) } else { - anyhow::bail!("could not read config; {} has less than six dashes", path); + anyhow::bail!("could not read config; {} does not have exactly one colon", path); } } @@ -252,6 +262,7 @@ pub struct RemoteConfigManager { check_configs: Vec, } +#[derive(Debug)] pub enum RemoteConfigUpdate { None, Add(RemoteConfigValue), @@ -387,3 +398,166 @@ impl RemoteConfigManager { self.active_configs.clear(); } } + +#[cfg(test)] +mod tests { + use lazy_static::lazy_static; + use manual_future::ManualFuture; + use datadog_remote_config::dynamic_configuration::data::{Configs, tests::dummy_dynamic_config}; + use super::*; + use datadog_remote_config::fetch::test_server::RemoteConfigServer; + use datadog_remote_config::{RemoteConfigData, RemoteConfigProduct, RemoteConfigSource}; + + lazy_static! 
{ + static ref PATH_FIRST: RemoteConfigPath = RemoteConfigPath { + source: RemoteConfigSource::Employee, + product: RemoteConfigProduct::ApmTracing, + config_id: "1234".to_string(), + name: "config".to_string(), + }; + + static ref PATH_SECOND: RemoteConfigPath = RemoteConfigPath { + source: RemoteConfigSource::Employee, + product: RemoteConfigProduct::ApmTracing, + config_id: "9876".to_string(), + name: "config".to_string(), + }; + + static ref DUMMY_TARGET: Arc = Arc::new(Target { + service: "service".to_string(), + env: "env".to_string(), + app_version: "1.3.5".to_string(), + }); + } + + #[derive(Debug, Clone)] + struct NotifyDummy(Arc>); + + impl Hash for NotifyDummy { + fn hash(&self, _state: &mut H) {} + } + + impl Eq for NotifyDummy {} + + impl PartialEq for NotifyDummy { + fn eq(&self, _other: &Self) -> bool { true } + } + + impl NotifyTarget for NotifyDummy { + fn notify(&self) { + let channel = self.0.clone(); + tokio::spawn(async move { channel.send(()).await.unwrap(); }); + } + } + + #[tokio::test] + async fn test_shm_updates() { + let server = RemoteConfigServer::spawn(); + + let (on_dead, on_dead_completer) = ManualFuture::new(); + let shm = ShmRemoteConfigs::new(server.dummy_invariants(), Box::new(|| { tokio::spawn(on_dead_completer.complete(())); })); + + let mut manager = RemoteConfigManager::new(server.dummy_invariants()); + + server.files.lock().unwrap().insert(PATH_FIRST.clone(), (vec![DUMMY_TARGET.clone()], 1, serde_json::to_string(&dummy_dynamic_config(true)).unwrap())); + + // Nothing yet. 
(No target) + assert!(matches!(manager.fetch_update(), RemoteConfigUpdate::None)); + + manager.track_target(&DUMMY_TARGET); + // remote end has not fetched anything yet + assert!(matches!(manager.fetch_update(), RemoteConfigUpdate::None)); + + let (sender, mut receiver) = tokio::sync::mpsc::channel(1); + + let shm_guard = shm.add_runtime("3b43524b-a70c-45dc-921d-34504e50c5eb".to_string(), NotifyDummy(Arc::new(sender)), DUMMY_TARGET.env.to_string(), DUMMY_TARGET.service.to_string(), DUMMY_TARGET.app_version.to_string()); + + receiver.recv().await; + + if let RemoteConfigUpdate::Add(update) = manager.fetch_update() { + assert_eq!(update.config_id, PATH_FIRST.config_id); + assert_eq!(update.source, PATH_FIRST.source); + assert_eq!(update.name, PATH_FIRST.name); + if let RemoteConfigData::DynamicConfig(data) = update.data { + assert!(matches!(>::from(data.lib_config)[0], Configs::TracingEnabled(true))); + } else { + unreachable!(); + } + } else { + unreachable!(); + } + + // just one update + assert!(matches!(manager.fetch_update(), RemoteConfigUpdate::None)); + + { + let mut files = server.files.lock().unwrap(); + files.insert(PATH_FIRST.clone(), (vec![DUMMY_TARGET.clone()], 2, serde_json::to_string(&dummy_dynamic_config(false)).unwrap())); + files.insert(PATH_SECOND.clone(), (vec![DUMMY_TARGET.clone()], 1, serde_json::to_string(&dummy_dynamic_config(true)).unwrap())); + } + + receiver.recv().await; + + // files must be first removed; avoids (in practice) two concurring settings to overlap + let x = manager.fetch_update(); + if let RemoteConfigUpdate::Remove(update) = x { + assert_eq!(&update, &*PATH_FIRST); + } else { + unreachable!(); + } + + // then the adds + let was_second = if let RemoteConfigUpdate::Add(update) = manager.fetch_update() { + update.config_id == PATH_SECOND.config_id + } else { + unreachable!(); + }; + if let RemoteConfigUpdate::Add(update) = manager.fetch_update() { + assert_eq!(&update.config_id, if was_second { &PATH_FIRST.config_id } else { 
&PATH_SECOND.config_id }); + } else { + unreachable!(); + }; + + // And done + assert!(matches!(manager.fetch_update(), RemoteConfigUpdate::None)); + + // Reset will keep old targets for a while in memory + manager.reset_target(); + + // and start to remove + let was_second = if let RemoteConfigUpdate::Remove(update) = manager.fetch_update() { + &update == &*PATH_SECOND + } else { + unreachable!(); + }; + + manager.track_target(&DUMMY_TARGET); + // If we re-track it's added again immediately + if let RemoteConfigUpdate::Add(update) = manager.fetch_update() { + assert_eq!(&update.config_id, if was_second { &PATH_SECOND.config_id } else { &PATH_FIRST.config_id }); + } else { + unreachable!(); + }; + + assert!(matches!(manager.fetch_update(), RemoteConfigUpdate::None)); + + drop(shm_guard); + shm.shutdown(); + + on_dead.await; + + // After proper shutdown it must be like all configs were removed + let was_second = if let RemoteConfigUpdate::Remove(update) = manager.fetch_update() { + &update == &*PATH_SECOND + } else { + unreachable!(); + }; + if let RemoteConfigUpdate::Remove(update) = manager.fetch_update() { + assert_eq!(&update, if was_second { &*PATH_FIRST } else { &*PATH_SECOND }); + } else { + unreachable!(); + }; + + assert!(matches!(manager.fetch_update(), RemoteConfigUpdate::None)); + } +} diff --git a/trace-protobuf/build.rs b/trace-protobuf/build.rs index 6eb9df712..6a427ac16 100644 --- a/trace-protobuf/build.rs +++ b/trace-protobuf/build.rs @@ -119,8 +119,8 @@ fn generate_protobuf() { "#[serde(rename = \"DBType\")]", ); - config.type_attribute("ClientGetConfigsResponse", "#[derive(Deserialize)]"); - config.type_attribute("File", "#[derive(Deserialize)]"); + config.type_attribute("ClientGetConfigsResponse", "#[derive(Deserialize, Serialize)]"); + config.type_attribute("File", "#[derive(Deserialize, Serialize)]"); config.type_attribute( "ClientGetConfigsRequest", "#[derive(Deserialize, Serialize)]", diff --git a/trace-protobuf/src/remoteconfig.rs 
b/trace-protobuf/src/remoteconfig.rs index ba9035010..43ad94452 100644 --- a/trace-protobuf/src/remoteconfig.rs +++ b/trace-protobuf/src/remoteconfig.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; -#[derive(Deserialize)] +#[derive(Deserialize, Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct File { @@ -133,7 +133,7 @@ pub struct ClientGetConfigsRequest { #[prost(message, repeated, tag = "2")] pub cached_target_files: ::prost::alloc::vec::Vec, } -#[derive(Deserialize)] +#[derive(Deserialize, Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ClientGetConfigsResponse { diff --git a/trace-protobuf/src/serde.rs b/trace-protobuf/src/serde.rs index d38dfa8d8..2920eb75e 100644 --- a/trace-protobuf/src/serde.rs +++ b/trace-protobuf/src/serde.rs @@ -1,4 +1,4 @@ -use serde::Deserializer; +use serde::{Deserializer, Serializer}; use serde_bytes::ByteBuf; pub trait Deserialize<'de>: Sized { @@ -32,3 +32,23 @@ pub fn deserialize<'de, T, D>(deserializer: D) -> Result { Deserialize::deserialize(deserializer) } + +pub trait Serialize: Sized { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer; +} + +impl Serialize for &Vec> { + fn serialize(&self, serializer: S) -> Result where S: Serializer { + serializer.collect_seq(self.iter()) + } +} + +pub fn serialize(value: T, serializer: S) -> Result + where + T: Serialize, + S: Serializer, +{ + value.serialize(serializer) +} From 1c2671f009950bc1180f5793986278803d74879d Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Sat, 25 May 2024 02:01:56 +0200 Subject: [PATCH 04/26] Windows support Signed-off-by: Bob Weinand --- sidecar-ffi/src/lib.rs | 8 ++++- sidecar/src/service/blocking.rs | 11 +++++- sidecar/src/service/remote_configs.rs | 45 +++++++++++++++++++++--- sidecar/src/service/session_info.rs | 4 +++ sidecar/src/service/sidecar_interface.rs | 10 +++++- 
sidecar/src/service/sidecar_server.rs | 34 ++++++++++++++++-- sidecar/src/shm_remote_config.rs | 19 ++++++---- 7 files changed, 116 insertions(+), 15 deletions(-) diff --git a/sidecar-ffi/src/lib.rs b/sidecar-ffi/src/lib.rs index 5728ea3b9..eca288c50 100644 --- a/sidecar-ffi/src/lib.rs +++ b/sidecar-ffi/src/lib.rs @@ -483,14 +483,20 @@ pub unsafe extern "C" fn ddog_sidecar_session_set_config( force_drop_size: usize, log_level: ffi::CharSlice, log_path: ffi::CharSlice, + #[allow(unused)] // On FFI layer we cannot conditionally compile, so we need the arg + remote_config_notify_function: *mut c_void, remote_config_products: *const RemoteConfigProduct, remote_config_products_count: usize, remote_config_capabilities: *const RemoteConfigCapabilities, remote_config_capabilities_count: usize, ) -> MaybeError { + #[cfg(unix)] + let remote_config_notify_target = libc::getpid(); + #[cfg(windows)] + let remote_config_notify_target = remote_config_notify_function; try_c!(blocking::set_session_config( transport, - libc::getpid(), + remote_config_notify_target, session_id.to_utf8_lossy().into(), &SessionConfig { endpoint: agent_endpoint.clone(), diff --git a/sidecar/src/service/blocking.rs b/sidecar/src/service/blocking.rs index 9ece9036d..48f217038 100644 --- a/sidecar/src/service/blocking.rs +++ b/sidecar/src/service/blocking.rs @@ -193,6 +193,8 @@ pub fn register_service_and_flush_queued_actions( /// # Arguments /// /// * `transport` - The transport used for communication. +/// * `remote_config_notify_function` (windows): a function pointer to be invoked +/// * `pid` (unix): the pid of the remote process /// * `session_id` - The ID of the session. /// * `config` - The configuration to be set. /// @@ -201,13 +203,20 @@ pub fn register_service_and_flush_queued_actions( /// An `io::Result<()>` indicating the result of the operation. 
pub fn set_session_config( transport: &mut SidecarTransport, + #[cfg(unix)] pid: libc::pid_t, + #[cfg(windows)] + remote_config_notify_function: *mut libc::c_void, session_id: String, config: &SessionConfig, ) -> io::Result<()> { + #[cfg(unix)] + let remote_config_notify_target = pid; + #[cfg(windows)] + let remote_config_notify_target = crate::service::remote_configs::RemoteConfigNotifyFunction(remote_config_notify_function); transport.send(SidecarInterfaceRequest::SetSessionConfig { session_id, - pid, + remote_config_notify_target, config: config.clone(), }) } diff --git a/sidecar/src/service/remote_configs.rs b/sidecar/src/service/remote_configs.rs index df5f73e0f..00bfa8394 100644 --- a/sidecar/src/service/remote_configs.rs +++ b/sidecar/src/service/remote_configs.rs @@ -1,20 +1,53 @@ use std::collections::hash_map::Entry; -use std::fmt::{Debug, Formatter}; +use std::fmt::Debug; use std::sync::{Arc, Mutex}; use zwohash::HashMap; use datadog_remote_config::fetch::{ConfigInvariants, NotifyTarget}; use crate::shm_remote_config::{ShmRemoteConfigs, ShmRemoteConfigsGuard}; -#[derive(Default, Clone, Hash, Eq, PartialEq)] +#[cfg(windows)] +#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)] +pub struct RemoteConfigNotifyFunction(pub *mut libc::c_void); +#[cfg(windows)] +unsafe impl Send for RemoteConfigNotifyFunction {} +#[cfg(windows)] +unsafe impl Sync for RemoteConfigNotifyFunction {} +#[cfg(windows)] +impl Default for RemoteConfigNotifyFunction { + fn default() -> Self { + return RemoteConfigNotifyFunction(std::ptr::null_mut()) + } +} + +#[cfg(windows)] +impl serde::Serialize for RemoteConfigNotifyFunction { + fn serialize(&self, serializer: S) -> Result where S: serde::Serializer { + serializer.serialize_u64(self.0 as u64) + } +} + +#[cfg(windows)] +impl<'de> serde::Deserialize<'de> for RemoteConfigNotifyFunction { + fn deserialize(deserializer: D) -> Result where D: serde::Deserializer<'de> { + >::deserialize(deserializer).map(|p| RemoteConfigNotifyFunction(p as 
*mut libc::c_void)) + } +} + +#[derive(Clone, Hash, Eq, PartialEq)] +#[cfg_attr(windows, derive(Debug))] pub struct RemoteConfigNotifyTarget { + #[cfg(unix)] pub pid: libc::pid_t, #[cfg(windows)] + pub process_handle: crate::service::sidecar_server::ProcessHandle, + #[cfg(windows)] // contains address in that process address space of the notification function - pub notify_function: libc::c_void, + pub notify_function: RemoteConfigNotifyFunction, } +#[cfg(unix)] impl Debug for RemoteConfigNotifyTarget { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { self.pid.fmt(f) } } @@ -28,6 +61,10 @@ impl NotifyTarget for RemoteConfigNotifyTarget { #[cfg(windows)] fn notify(&self) { // TODO: CreateRemoteThread -> ddtrace_set_all_thread_vm_interrupt + unsafe { + let dummy = 0; + kernel32::CreateRemoteThread(self.process_handle.0, std::ptr::null_mut(), 0, Some(std::mem::transmute(self.notify_function.0)), &dummy as *const i32 as winapi::LPVOID, 0, std::ptr::null_mut()); + } } } diff --git a/sidecar/src/service/session_info.rs b/sidecar/src/service/session_info.rs index 22591c730..d654da116 100644 --- a/sidecar/src/service/session_info.rs +++ b/sidecar/src/service/session_info.rs @@ -28,6 +28,8 @@ pub(crate) struct SessionInfo { tracer_config: Arc>, dogstatsd: Arc>, remote_config_invariants: Arc>>, + #[cfg(windows)] + pub(crate) remote_config_notify_function: Arc>, pub(crate) log_guard: Arc, MultiWriterGuard<'static>)>>>, #[cfg(feature = "tracing")] @@ -44,6 +46,8 @@ impl Clone for SessionInfo { tracer_config: self.tracer_config.clone(), dogstatsd: self.dogstatsd.clone(), remote_config_invariants: self.remote_config_invariants.clone(), + #[cfg(windows)] + remote_config_notify_function: self.remote_config_notify_function.clone(), log_guard: self.log_guard.clone(), session_id: self.session_id.clone(), pid: self.pid.clone(), diff --git a/sidecar/src/service/sidecar_interface.rs 
b/sidecar/src/service/sidecar_interface.rs index 857b47268..22e63471b 100644 --- a/sidecar/src/service/sidecar_interface.rs +++ b/sidecar/src/service/sidecar_interface.rs @@ -10,6 +10,14 @@ use anyhow::Result; use datadog_ipc::platform::ShmHandle; use datadog_ipc::tarpc; +// This is a bit weird, but depending on the OS we're interested in different things... +// and the macro expansion is not going to be happy with #[cfg()] instructions inside them. +// So we'll just define a type, a pid on unix, a function pointer on windows. +#[cfg(unix)] +type RemoteConfigNotifyTarget = libc::pid_t; +#[cfg(windows)] +type RemoteConfigNotifyTarget = crate::service::remote_configs::RemoteConfigNotifyFunction; + /// The `SidecarInterface` trait defines the necessary methods for the sidecar service. /// /// These methods include operations such as enqueueing actions, registering services, setting @@ -57,7 +65,7 @@ pub trait SidecarInterface { /// * `config` - The configuration to be set. async fn set_session_config( session_id: String, - pid: libc::pid_t, + remote_config_notify_target: RemoteConfigNotifyTarget, config: SessionConfig ); diff --git a/sidecar/src/service/sidecar_server.rs b/sidecar/src/service/sidecar_server.rs index 9e30daa3e..8428a9a43 100644 --- a/sidecar/src/service/sidecar_server.rs +++ b/sidecar/src/service/sidecar_server.rs @@ -70,6 +70,15 @@ struct SidecarStats { log_filter: TemporarilyRetainedMapStats, } +#[cfg(windows)] +#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)] +pub struct ProcessHandle(pub winapi::HANDLE); + +#[cfg(windows)] +unsafe impl Send for ProcessHandle {} +#[cfg(windows)] +unsafe impl Sync for ProcessHandle {} + /// The `SidecarServer` struct represents a server that handles sidecar operations. /// /// It maintains a list of active sessions and a counter for each session. 
@@ -90,6 +99,9 @@ pub struct SidecarServer { pub submitted_payloads: Arc, /// All remote config handling remote_configs: RemoteConfigs, + /// The ProcessHandle tied to the connection + #[cfg(windows)] + process_handle: Option, } impl SidecarServer { @@ -102,7 +114,9 @@ impl SidecarServer { /// # Arguments /// /// * `async_channel`: An `AsyncChannel` that represents the connection to the client. - pub async fn accept_connection(self, async_channel: AsyncChannel) { + pub async fn accept_connection(mut self, async_channel: AsyncChannel) { + #[cfg(windows)] + { self.process_handle = async_channel.metadata.lock().unwrap().process_handle().map(|p| ProcessHandle(p as winapi::HANDLE)); } let server = tarpc::server::BaseChannel::new( tarpc::server::Config { pending_response_buffer: 10000, @@ -598,11 +612,17 @@ impl SidecarInterface for SidecarServer { self, _: Context, session_id: String, + #[cfg(unix)] pid: libc::pid_t, + #[cfg(windows)] + remote_config_notify_function: crate::service::remote_configs::RemoteConfigNotifyFunction, config: SessionConfig, ) -> Self::SetSessionConfigFut { let session = self.get_session(&session_id); - session.pid.store(pid, Ordering::Relaxed); + #[cfg(unix)] + { session.pid.store(pid, Ordering::Relaxed); } + #[cfg(windows)] + { *session.remote_config_notify_function.lock().unwrap() = remote_config_notify_function; } session.modify_telemetry_config(|cfg| { let endpoint = get_product_endpoint(ddtelemetry::config::PROD_INTAKE_SUBDOMAIN, &config.endpoint); @@ -780,6 +800,16 @@ impl SidecarInterface for SidecarServer { app_version: String, ) -> Self::SetRemoteConfigDataFut { let session = self.get_session(&instance_id.session_id); + #[cfg(windows)] + let notify_target = if let Some(handle) = self.process_handle { + RemoteConfigNotifyTarget { + process_handle: handle, + notify_function: *session.remote_config_notify_function.lock().unwrap(), + } + } else { + return no_response(); + }; + #[cfg(unix)] let notify_target = RemoteConfigNotifyTarget { pid: 
session.pid.load(Ordering::Relaxed) }; session.get_runtime(&instance_id.runtime_id).lock_remote_config_guards().insert(queue_id, self.remote_configs .add_runtime(session.get_remote_config_invariants().as_ref().expect("Expecting remote config invariants to be set early").clone(), instance_id.runtime_id, notify_target, env_name, service_name, app_version)); diff --git a/sidecar/src/shm_remote_config.rs b/sidecar/src/shm_remote_config.rs index fcb1b3230..fe1d541ee 100644 --- a/sidecar/src/shm_remote_config.rs +++ b/sidecar/src/shm_remote_config.rs @@ -14,6 +14,7 @@ use std::default::Default; use std::ffi::CString; use std::hash::{Hash, Hasher}; use std::io; +use std::io::Write; use std::sync::{Arc, Mutex}; use std::time::Duration; use priority_queue::PriorityQueue; @@ -117,11 +118,15 @@ fn store_shm(version: u64, path: &RemoteConfigPath, file: Vec) -> anyhow::Re #[cfg(not(target_os = "macos"))] let sliced_path = &hashed_path; let name = format!("/{}-{}", name, sliced_path); - let mut handle = - NamedShmHandle::create(CString::new(name)?, file.len())? - .map()?; + let len = file.len(); + #[cfg(windows)] + let len = len + 4; + let mut handle = NamedShmHandle::create(CString::new(name)?, len)?.map()?; - handle.as_slice_mut().copy_from_slice(file.as_slice()); + let mut target_slice = handle.as_slice_mut(); + #[cfg(windows)] + { target_slice.write(&(file.len() as u32).to_ne_bytes())?; } + target_slice.copy_from_slice(file.as_slice()); Ok(handle.into()) } @@ -235,11 +240,13 @@ impl ShmRemoteConfigs { } fn read_config(path: &str) -> anyhow::Result { - if let [shm_path, rc_path] = &path.split(':').collect::>()[..] { let mapped = NamedShmHandle::open(&CString::new(*shm_path)?)?.map()?; let rc_path = String::from_utf8(BASE64_URL_SAFE_NO_PAD.decode(rc_path)?)?; - RemoteConfigValue::try_parse(&rc_path, mapped.as_slice()) + let data = mapped.as_slice(); + #[cfg(windows)] + let data = &data[4..(4 + u32::from_ne_bytes((&data[0..4]).try_into()?) 
as usize)]; + RemoteConfigValue::try_parse(&rc_path, data) } else { anyhow::bail!("could not read config; {} does not have exactly one colon", path); } From 73aaa289793abf222b7316eea1afa7a8b1a39e30 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Wed, 12 Jun 2024 17:58:53 +0200 Subject: [PATCH 05/26] Minor live-debugger stuff Signed-off-by: Bob Weinand --- live-debugger-ffi/src/evaluator.rs | 58 +++++++++++++++------------- live-debugger/src/expr_eval.rs | 62 ++++++++++++++++-------------- 2 files changed, 64 insertions(+), 56 deletions(-) diff --git a/live-debugger-ffi/src/evaluator.rs b/live-debugger-ffi/src/evaluator.rs index e89f301c8..bd48b66ff 100644 --- a/live-debugger-ffi/src/evaluator.rs +++ b/live-debugger-ffi/src/evaluator.rs @@ -1,9 +1,11 @@ // Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. // This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. 
+use std::borrow::Cow; use datadog_live_debugger::{DslString, ProbeCondition}; use ddcommon_ffi::CharSlice; use std::ffi::c_void; +use std::mem::transmute; #[repr(C)] pub enum IntermediateValue<'a> { @@ -14,11 +16,11 @@ pub enum IntermediateValue<'a> { Referenced(&'a c_void), } -impl<'a> From<&'a datadog_live_debugger::IntermediateValue<&c_void>> for IntermediateValue<'a> { - fn from(value: &'a datadog_live_debugger::IntermediateValue<&c_void>) -> Self { +impl<'a> From<&'a datadog_live_debugger::IntermediateValue<'a, c_void>> for IntermediateValue<'a> { + fn from(value: &'a datadog_live_debugger::IntermediateValue<'a, c_void>) -> Self { match value { datadog_live_debugger::IntermediateValue::String(s) => { - IntermediateValue::String(s.as_str().into()) + IntermediateValue::String(s.as_ref().into()) } datadog_live_debugger::IntermediateValue::Number(n) => IntermediateValue::Number(*n), datadog_live_debugger::IntermediateValue::Bool(b) => IntermediateValue::Bool(*b), @@ -41,55 +43,56 @@ pub struct VoidCollection { #[derive(Clone)] pub struct Evaluator { pub equals: - for<'a> extern "C" fn(&'a c_void, IntermediateValue<'a>, IntermediateValue<'a>) -> bool, + for<'a> extern "C" fn(&'a mut c_void, IntermediateValue<'a>, IntermediateValue<'a>) -> bool, pub greater_than: - for<'a> extern "C" fn(&'a c_void, IntermediateValue<'a>, IntermediateValue<'a>) -> bool, + for<'a> extern "C" fn(&'a mut c_void, IntermediateValue<'a>, IntermediateValue<'a>) -> bool, pub greater_or_equals: - for<'a> extern "C" fn(&'a c_void, IntermediateValue<'a>, IntermediateValue<'a>) -> bool, + for<'a> extern "C" fn(&'a mut c_void, IntermediateValue<'a>, IntermediateValue<'a>) -> bool, pub fetch_identifier: - for<'a, 'b> extern "C" fn(&'a c_void, &CharSlice<'b>) -> Option<&'a c_void>, // special values: @duration, @return, @exception + for<'a, 'b> extern "C" fn(&'a mut c_void, &CharSlice<'b>) -> Option<&'a c_void>, // special values: @duration, @return, @exception pub fetch_index: for<'a, 'b> extern 
"C" fn( - &'a c_void, + &'a mut c_void, &'a c_void, IntermediateValue<'b>, ) -> Option<&'a c_void>, pub fetch_nested: for<'a, 'b> extern "C" fn( - &'a c_void, + &'a mut c_void, &'a c_void, IntermediateValue<'b>, ) -> Option<&'a c_void>, - pub length: for<'a> extern "C" fn(&'a c_void, &'a c_void) -> u64, - pub try_enumerate: for<'a> extern "C" fn(&'a c_void, &'a c_void) -> VoidCollection, - pub stringify: for<'a> extern "C" fn(&'a c_void, &'a c_void) -> VoidCollection, - pub convert_index: for<'a> extern "C" fn(&'a c_void, &'a c_void) -> isize, // return < 0 on error + pub length: for<'a> extern "C" fn(&'a mut c_void, &'a c_void) -> u64, + pub try_enumerate: for<'a> extern "C" fn(&'a mut c_void, &'a c_void) -> VoidCollection, + pub stringify: for<'a> extern "C" fn(&'a mut c_void, &'a c_void) -> VoidCollection, + pub convert_index: for<'a> extern "C" fn(&'a mut c_void, &'a c_void) -> isize, // return < 0 on error } static mut FFI_EVALUATOR: Option = None; +#[allow(mutable_transmutes)] // SAFETY: It's the &mut c_void context we receive from input functions static EVALUATOR: datadog_live_debugger::Evaluator = datadog_live_debugger::Evaluator { equals: |context, a, b| unsafe { - (FFI_EVALUATOR.as_ref().unwrap().equals)(context, (&a).into(), (&b).into()) + (FFI_EVALUATOR.as_ref().unwrap().equals)(transmute(context), (&a).into(), (&b).into()) }, greater_than: |context, a, b| unsafe { - (FFI_EVALUATOR.as_ref().unwrap().greater_than)(context, (&a).into(), (&b).into()) + (FFI_EVALUATOR.as_ref().unwrap().greater_than)(transmute(context), (&a).into(), (&b).into()) }, greater_or_equals: |context, a, b| unsafe { - (FFI_EVALUATOR.as_ref().unwrap().greater_or_equals)(context, (&a).into(), (&b).into()) + (FFI_EVALUATOR.as_ref().unwrap().greater_or_equals)(transmute(context), (&a).into(), (&b).into()) }, fetch_identifier: |context, name| unsafe { - (FFI_EVALUATOR.as_ref().unwrap().fetch_identifier)(context, &CharSlice::from(name)) + 
(FFI_EVALUATOR.as_ref().unwrap().fetch_identifier)(transmute(context), &CharSlice::from(name)) }, fetch_index: |context, base, index| unsafe { - (FFI_EVALUATOR.as_ref().unwrap().fetch_index)(context, base, (&index).into()) + (FFI_EVALUATOR.as_ref().unwrap().fetch_index)(transmute(context), base, (&index).into()) }, fetch_nested: |context, base, member| unsafe { - (FFI_EVALUATOR.as_ref().unwrap().fetch_nested)(context, base, (&member).into()) + (FFI_EVALUATOR.as_ref().unwrap().fetch_nested)(transmute(context), base, (&member).into()) }, length: |context, value| unsafe { - (FFI_EVALUATOR.as_ref().unwrap().length)(context, value) + (FFI_EVALUATOR.as_ref().unwrap().length)(transmute(context), value) }, try_enumerate: |context, value| unsafe { - let collection = (FFI_EVALUATOR.as_ref().unwrap().try_enumerate)(context, value); + let collection = (FFI_EVALUATOR.as_ref().unwrap().try_enumerate)(transmute(context), value); if collection.count < 0 { None } else { @@ -104,7 +107,7 @@ static EVALUATOR: datadog_live_debugger::Evaluator = } }, stringify: |context, value| unsafe { - let collection = (FFI_EVALUATOR.as_ref().unwrap().try_enumerate)(context, value); + let collection = (FFI_EVALUATOR.as_ref().unwrap().try_enumerate)(transmute(context), value); if collection.count < 0 { unreachable!() } @@ -118,10 +121,10 @@ static EVALUATOR: datadog_live_debugger::Evaluator = let copy = string.clone(); std::mem::forget(string); (collection.free)(collection); - copy + Cow::Owned(copy) }, convert_index: |context, value| unsafe { - let index = (FFI_EVALUATOR.as_ref().unwrap().convert_index)(context, value); + let index = (FFI_EVALUATOR.as_ref().unwrap().convert_index)(transmute(context), value); if index < 0 { None } else { @@ -137,15 +140,16 @@ pub unsafe extern "C" fn register_expr_evaluator(eval: &Evaluator) { } #[no_mangle] -pub extern "C" fn evaluate_condition(condition: &ProbeCondition, context: &c_void) -> bool { +pub extern "C" fn evaluate_condition(condition: &ProbeCondition, 
context: &mut c_void) -> bool { datadog_live_debugger::eval_condition(&EVALUATOR, condition, context) } -pub fn evaluate_string(condition: &DslString, context: &c_void) -> String { +pub fn evaluate_string(condition: &DslString, context: &mut c_void) -> String { datadog_live_debugger::eval_string(&EVALUATOR, condition, context) } // This is unsafe, but we want to use it as function pointer... +#[no_mangle] extern "C" fn drop_void_collection_string(void: VoidCollection) { unsafe { String::from_raw_parts( @@ -159,7 +163,7 @@ extern "C" fn drop_void_collection_string(void: VoidCollection) { #[no_mangle] pub extern "C" fn evaluate_unmanaged_string( condition: &DslString, - context: &c_void, + context: &mut c_void, ) -> VoidCollection { let string = evaluate_string(condition, context); let new = VoidCollection { diff --git a/live-debugger/src/expr_eval.rs b/live-debugger/src/expr_eval.rs index c91830ee9..8a92160e0 100644 --- a/live-debugger/src/expr_eval.rs +++ b/live-debugger/src/expr_eval.rs @@ -1,6 +1,7 @@ // Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. // This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. 
+use std::borrow::Cow; use crate::expr_defs::{ BinaryComparison, CollectionMatch, CollectionSource, Condition, DslPart, NumberSource, Reference, StringComparison, StringSource, Value, @@ -17,25 +18,25 @@ pub struct ProbeValue(pub(crate) Value); #[derive(Debug)] pub struct ProbeCondition(pub(crate) Condition); -pub enum IntermediateValue { - String(String), +pub enum IntermediateValue<'a, I> { + String(Cow<'a, str>), Number(f64), Bool(bool), Null, - Referenced(I), + Referenced(&'a I), } pub struct Evaluator { - pub equals: for<'a> fn(&'a C, IntermediateValue<&'a I>, IntermediateValue<&'a I>) -> bool, - pub greater_than: for<'a> fn(&'a C, IntermediateValue<&'a I>, IntermediateValue<&'a I>) -> bool, + pub equals: for<'a> fn(&'a C, IntermediateValue<'a, I>, IntermediateValue<'a, I>) -> bool, + pub greater_than: for<'a> fn(&'a C, IntermediateValue<'a, I>, IntermediateValue<'a, I>) -> bool, pub greater_or_equals: - for<'a> fn(&'a C, IntermediateValue<&'a I>, IntermediateValue<&'a I>) -> bool, + for<'a> fn(&'a C, IntermediateValue<'a, I>, IntermediateValue<'a, I>) -> bool, pub fetch_identifier: for<'a> fn(&'a C, &str) -> Option<&'a I>, // special values: @duration, @return, @exception - pub fetch_index: for<'a> fn(&'a C, &'a I, IntermediateValue<&'a I>) -> Option<&'a I>, - pub fetch_nested: for<'a> fn(&'a C, &'a I, IntermediateValue<&'a I>) -> Option<&'a I>, + pub fetch_index: for<'a> fn(&'a C, &'a I, IntermediateValue<'a, I>) -> Option<&'a I>, + pub fetch_nested: for<'a> fn(&'a C, &'a I, IntermediateValue<'a, I>) -> Option<&'a I>, pub length: for<'a> fn(&'a C, &'a I) -> u64, pub try_enumerate: for<'a> fn(&'a C, &'a I) -> Option>, - pub stringify: for<'a> fn(&'a C, &'a I) -> String, + pub stringify: for<'a> fn(&'a C, &'a I) -> Cow<'a, str>, pub convert_index: for<'a> fn(&'a C, &'a I) -> Option, } @@ -48,7 +49,7 @@ struct Eval<'a, I, C> { } impl<'a, I, C> Eval<'a, I, C> { - fn value(&mut self, value: &'a Value) -> EvalResult> { + fn value(&mut self, value: &'a Value) -> 
EvalResult> { Ok(match value { Value::Bool(condition) => IntermediateValue::Bool(self.condition(condition)?), Value::String(s) => self.string_source(s)?, @@ -56,7 +57,7 @@ impl<'a, I, C> Eval<'a, I, C> { }) } - fn number_source(&mut self, value: &'a NumberSource) -> EvalResult> { + fn number_source(&mut self, value: &'a NumberSource) -> EvalResult> { Ok(match value { NumberSource::Number(n) => IntermediateValue::Number(*n), NumberSource::CollectionSize(collection) => { @@ -81,9 +82,9 @@ impl<'a, I, C> Eval<'a, I, C> { }) } - fn convert_index(&mut self, value: IntermediateValue<&'a I>) -> EvalResult { + fn convert_index(&mut self, value: IntermediateValue<'a, I>) -> EvalResult { Ok(match value { - IntermediateValue::String(s) => return usize::from_str(s.as_str()).map_err(|_| ()), + IntermediateValue::String(s) => return usize::from_str(&s).map_err(|_| ()), IntermediateValue::Number(n) => n as usize, IntermediateValue::Bool(_) => return Err(()), IntermediateValue::Null => 0, @@ -98,9 +99,9 @@ impl<'a, I, C> Eval<'a, I, C> { self.convert_index(value) } - fn string_source(&mut self, value: &'a StringSource) -> EvalResult> { + fn string_source(&mut self, value: &'a StringSource) -> EvalResult> { Ok(match value { - StringSource::String(s) => IntermediateValue::String(s.to_string()), + StringSource::String(s) => IntermediateValue::String(Cow::Borrowed(s.as_str())), StringSource::Substring(boxed) => { let (string, start, end) = &**boxed; let str = self.stringify(string)?; @@ -110,7 +111,10 @@ impl<'a, I, C> Eval<'a, I, C> { return Err(()); } end = min(end, str.len()); - IntermediateValue::String(str[start..end].to_string()) + IntermediateValue::String(match str { + Cow::Owned(s) => Cow::Owned(s[start..end].to_string()), + Cow::Borrowed(s) => Cow::Borrowed(&s[start..end]) + }) } StringSource::Null => IntermediateValue::Null, StringSource::Reference(reference) => { @@ -188,19 +192,19 @@ impl<'a, I, C> Eval<'a, I, C> { }) } - fn stringify_intermediate(&mut self, value: 
IntermediateValue<&'a I>) -> String { + fn stringify_intermediate(&mut self, value: IntermediateValue<'a, I>) -> Cow<'a, str> { match value { - IntermediateValue::String(s) => s.to_string(), - IntermediateValue::Number(n) => n.to_string(), - IntermediateValue::Bool(b) => b.to_string(), - IntermediateValue::Null => "".to_string(), + IntermediateValue::String(s) => s, + IntermediateValue::Number(n) => Cow::Owned(n.to_string()), + IntermediateValue::Bool(b) => Cow::Owned(b.to_string()), + IntermediateValue::Null => Cow::Borrowed(""), IntermediateValue::Referenced(referenced) => { (self.eval.stringify)(self.context, referenced) } } } - fn stringify(&mut self, value: &'a StringSource) -> EvalResult { + fn stringify(&mut self, value: &'a StringSource) -> EvalResult> { let value = self.string_source(value)?; Ok(self.stringify_intermediate(value)) } @@ -218,7 +222,7 @@ impl<'a, I, C> Eval<'a, I, C> { StringComparison::Matches => { return Regex::new(needle.as_str()) .map_err(|_| ()) - .map(|r| r.is_match(haystack.as_str())) + .map(|r| r.is_match(&haystack)) } } } @@ -317,7 +321,7 @@ where dsl.0 .iter() .map(|p| match p { - DslPart::String(ref str) => str.to_string(), + DslPart::String(ref str) => Cow::Borrowed(str.as_str()), DslPart::Ref(ref reference) => { let mut eval = Eval { eval, @@ -336,20 +340,20 @@ where .ok() .unwrap_or_default() .map(|vec| { - format!( + Cow::Owned(format!( "[{}]", vec.iter() .map(|referenced| eval.stringify_intermediate( IntermediateValue::Referenced(referenced) )) - .collect::>() + .collect::>>() .join(", ") - ) + )) }), } - .unwrap_or("UNDEFINED".to_string()) + .unwrap_or(Cow::Borrowed("UNDEFINED")) } }) - .collect::>() + .collect::>>() .join("") } From 65d8e1d2db49016dc133997026017d18bed8ebef Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Wed, 12 Jun 2024 18:23:43 +0200 Subject: [PATCH 06/26] Remove live debugger stuff Signed-off-by: Bob Weinand --- Cargo.lock | 254 ++++++++-------- Cargo.toml | 2 - live-debugger-ffi/Cargo.toml | 14 - 
live-debugger-ffi/cbindgen.toml | 35 --- live-debugger-ffi/src/data.rs | 280 ------------------ live-debugger-ffi/src/evaluator.rs | 176 ----------- live-debugger-ffi/src/lib.rs | 7 - live-debugger-ffi/src/query.rs | 39 --- live-debugger-ffi/src/sender.rs | 117 -------- live-debugger/Cargo.toml | 15 - live-debugger/src/debugger_defs.rs | 78 ----- live-debugger/src/expr_defs.rs | 82 ------ live-debugger/src/expr_eval.rs | 359 ----------------------- live-debugger/src/lib.rs | 16 - live-debugger/src/parse_json.rs | 216 -------------- live-debugger/src/parse_json_expr.rs | 211 ------------- live-debugger/src/parse_util.rs | 13 - live-debugger/src/probe_defs.rs | 134 --------- live-debugger/src/sender.rs | 94 ------ remote-config/Cargo.toml | 1 - remote-config/src/parse.rs | 16 +- sidecar-ffi/Cargo.toml | 1 - sidecar-ffi/cbindgen.toml | 2 +- sidecar-ffi/src/lib.rs | 21 -- sidecar/Cargo.toml | 1 - sidecar/src/service/blocking.rs | 63 +--- sidecar/src/service/session_info.rs | 13 - sidecar/src/service/sidecar_interface.rs | 10 - sidecar/src/service/sidecar_server.rs | 40 +-- sidecar/src/shm_remote_config.rs | 2 + 30 files changed, 142 insertions(+), 2170 deletions(-) delete mode 100644 live-debugger-ffi/Cargo.toml delete mode 100644 live-debugger-ffi/cbindgen.toml delete mode 100644 live-debugger-ffi/src/data.rs delete mode 100644 live-debugger-ffi/src/evaluator.rs delete mode 100644 live-debugger-ffi/src/lib.rs delete mode 100644 live-debugger-ffi/src/query.rs delete mode 100644 live-debugger-ffi/src/sender.rs delete mode 100644 live-debugger/Cargo.toml delete mode 100644 live-debugger/src/debugger_defs.rs delete mode 100644 live-debugger/src/expr_defs.rs delete mode 100644 live-debugger/src/expr_eval.rs delete mode 100644 live-debugger/src/lib.rs delete mode 100644 live-debugger/src/parse_json.rs delete mode 100644 live-debugger/src/parse_json_expr.rs delete mode 100644 live-debugger/src/parse_util.rs delete mode 100644 live-debugger/src/probe_defs.rs delete mode 
100644 live-debugger/src/sender.rs diff --git a/Cargo.lock b/Cargo.lock index c676723c9..5ef7d2df1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8,7 +8,7 @@ version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ - "gimli 0.28.1", + "gimli", ] [[package]] @@ -401,9 +401,9 @@ checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" [[package]] name = "aws-lc-rs" -version = "1.8.0" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a47f2fb521b70c11ce7369a6c5fa4bd6af7e5d62ec06303875bafe7c6ba245" +checksum = "474d7cec9d0a1126fad1b224b767fcbf351c23b0309bb21ec210bcfd379926a5" dependencies = [ "aws-lc-sys", "mirai-annotations", @@ -413,9 +413,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.19.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2927c7af777b460b7ccd95f8b67acd7b4c04ec8896bf0c8e80ba30523cffc057" +checksum = "7505fc3cb7acbf42699a43a79dd9caa4ed9e99861dfbb837c5c0fb5a0a8d2980" dependencies = [ "bindgen", "cc", @@ -521,7 +521,6 @@ dependencies = [ "hyper 0.14.28", "once_cell", "serde_json", - "strum", "tempfile", ] @@ -543,7 +542,7 @@ dependencies = [ "bitflags 2.5.0", "cexpr", "clang-sys", - "itertools 0.10.5", + "itertools 0.12.1", "lazy_static", "lazycell", "log", @@ -592,25 +591,33 @@ checksum = "a1d084b0137aaa901caf9f1e8b21daa6aa24d41cd806e111335541eff9683bd6" [[package]] name = "blazesym" -version = "0.2.0-rc.0" -source = "git+https://github.com/libbpf/blazesym.git?rev=v0.2.0-rc.0#2f393f66a448f46ea71889e81a8866799762463d" +version = "0.2.0-alpha.11" +source = "git+https://github.com/libbpf/blazesym.git?rev=v0.2.0-alpha.11#16bfee4bca2fe73e19f9530d334a9523d9551cbd" dependencies = [ "cpp_demangle", - "gimli 0.30.0", + "gimli", "libc", - "miniz_oxide", "rustc-demangle", ] [[package]] name = "blazesym-c" 
-version = "0.1.0-alpha.1" -source = "git+https://github.com/libbpf/blazesym.git?rev=v0.2.0-rc.0#2f393f66a448f46ea71889e81a8866799762463d" +version = "0.0.0" +source = "git+https://github.com/libbpf/blazesym.git?rev=v0.2.0-alpha.11#16bfee4bca2fe73e19f9530d334a9523d9551cbd" dependencies = [ "blazesym", "memoffset 0.9.1", ] +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + [[package]] name = "blocking" version = "1.6.0" @@ -768,7 +775,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da6bc11b07529f16944307272d5bd9b22530bc7d05751717c9d416586cedab49" dependencies = [ "clap 3.2.25", - "heck 0.4.1", + "heck", "indexmap 1.9.3", "log", "proc-macro2", @@ -915,7 +922,7 @@ version = "4.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" dependencies = [ - "heck 0.4.1", + "heck", "proc-macro2", "quote", "syn 2.0.58", @@ -1037,6 +1044,15 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "cpufeatures" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +dependencies = [ + "libc", +] + [[package]] name = "crc32fast" version = "1.4.0" @@ -1057,7 +1073,6 @@ dependencies = [ "ciborium", "clap 4.4.18", "criterion-plot", - "csv", "is-terminal", "itertools 0.10.5", "num-traits", @@ -1124,24 +1139,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] -name = "csv" -version = "1.3.0" +name = "crypto-common" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "csv-core", - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "csv-core" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" -dependencies = [ - "memchr", + "generic-array", + "typenum", ] [[package]] @@ -1259,15 +1263,6 @@ dependencies = [ "uuid", ] -[[package]] -name = "datadog-ddsketch" -version = "10.0.0" -dependencies = [ - "prost 0.11.9", - "prost-build", - "protoc-bin-vendored", -] - [[package]] name = "datadog-ipc" version = "0.1.0" @@ -1378,6 +1373,30 @@ dependencies = [ "sysinfo", ] +[[package]] +name = "datadog-remote-config" +version = "0.0.1" +dependencies = [ + "anyhow", + "base64 0.21.7", + "datadog-trace-protobuf", + "ddcommon", + "futures", + "futures-util", + "http 0.2.12", + "hyper 0.14.28", + "lazy_static", + "manual_future", + "serde", + "serde_json", + "sha2", + "time", + "tokio", + "tokio-util 0.7.10", + "tracing", + "uuid", +] + [[package]] name = "datadog-serverless-trace-mini-agent" version = "0.1.0" @@ -1395,6 +1414,7 @@ version = "0.0.1" dependencies = [ "anyhow", "arrayref", + "base64 0.21.7", "bincode", "bytes", "cadence", @@ -1402,6 +1422,7 @@ dependencies = [ "console-subscriber", "datadog-ipc", "datadog-ipc-macros", + "datadog-remote-config", "datadog-sidecar-macros", "datadog-trace-normalization", "datadog-trace-protobuf", @@ -1429,7 +1450,9 @@ dependencies = [ "rmp-serde", "sendfd", "serde", + "serde_json", "serde_with", + "sha2", "simd-json", "spawn_worker", "sys-info", @@ -1450,6 +1473,7 @@ name = "datadog-sidecar-ffi" version = "0.0.1" dependencies = [ "datadog-ipc", + "datadog-remote-config", "datadog-sidecar", "datadog-trace-utils", "ddcommon", @@ -1472,7 +1496,7 @@ dependencies = [ [[package]] name = "datadog-trace-mini-agent" -version = "0.4.2" 
+version = "0.4.0" dependencies = [ "anyhow", "async-trait", @@ -1497,7 +1521,6 @@ name = "datadog-trace-normalization" version = "10.0.0" dependencies = [ "anyhow", - "criterion", "datadog-trace-protobuf", "duplicate", "rand", @@ -1530,8 +1553,6 @@ dependencies = [ "protoc-bin-vendored", "serde", "serde_bytes", - "serde_json", - "tokio", ] [[package]] @@ -1551,9 +1572,7 @@ dependencies = [ "log", "prost 0.11.9", "rand", - "rmp", "rmp-serde", - "rmpv", "serde", "serde_json", "tokio", @@ -1594,6 +1613,7 @@ dependencies = [ "build_common", "ddcommon", "hyper 0.14.28", + "serde", ] [[package]] @@ -1601,8 +1621,6 @@ name = "ddtelemetry" version = "10.0.0" dependencies = [ "anyhow", - "base64 0.22.1", - "datadog-ddsketch", "ddcommon", "futures", "hashbrown 0.12.3", @@ -1671,6 +1689,16 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", +] + [[package]] name = "dirs-next" version = "2.0.0" @@ -1704,7 +1732,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0a4be4cd710e92098de6ad258e6e7c24af11c29c5142f3c6f2a545652480ff8" dependencies = [ - "heck 0.4.1", + "heck", "proc-macro-error", ] @@ -2037,6 +2065,16 @@ dependencies = [ "slab", ] +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + [[package]] name = "getrandom" version = "0.2.14" @@ -2055,12 +2093,6 @@ name = "gimli" version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" - -[[package]] -name = "gimli" -version = "0.30.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e1d97fbe9722ba9bbd0c97051c2956e726562b61f86a25a4360398a40edfc9" dependencies = [ "fallible-iterator", "indexmap 2.2.6", @@ -2171,12 +2203,6 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" -[[package]] -name = "heck" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" - [[package]] name = "hermit-abi" version = "0.1.19" @@ -2322,9 +2348,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.4.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4fe55fb7a772d59a5ff1dfbff4fe0258d19b89fec4b233e75d35d5d2316badc" +checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" dependencies = [ "bytes", "futures-channel", @@ -2360,10 +2386,10 @@ checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.4.0", + "hyper 1.3.1", "hyper-util", "rustls", - "rustls-native-certs 0.7.1", + "rustls-native-certs 0.7.0", "rustls-pki-types", "tokio", "tokio-rustls", @@ -2384,16 +2410,16 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.6" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab92f4f49ee4fb4f997c784b7a2e0fa70050211e0b6a287f898c3c9785ca956" +checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" dependencies = [ "bytes", "futures-channel", "futures-util", "http 1.1.0", "http-body 1.0.0", - "hyper 1.4.0", + "hyper 1.3.1", "pin-project-lite", "socket2 0.5.6", "tokio", @@ -2691,12 +2717,12 @@ checksum = 
"9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libloading" -version = "0.8.4" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e310b3a6b5907f99202fcdb4960ff45b93735d7c7d96b760fcff8db2dc0e103d" +checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.4", ] [[package]] @@ -2880,7 +2906,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" dependencies = [ "adler", - "simd-adler32", ] [[package]] @@ -3480,7 +3505,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", - "heck 0.4.1", + "heck", "itertools 0.10.5", "lazy_static", "log", @@ -3749,9 +3774,9 @@ dependencies = [ [[package]] name = "rmp" -version = "0.8.14" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "228ed7c16fa39782c3b3468e974aec2795e9089153cd08ee2e9aefb3613334c4" +checksum = "7f9860a6cc38ed1da53456442089b4dfa35e7cedaa326df63017af88385e6b20" dependencies = [ "byteorder", "num-traits", @@ -3769,16 +3794,6 @@ dependencies = [ "serde", ] -[[package]] -name = "rmpv" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58450723cd9ee93273ce44a20b6ec4efe17f8ed2e3631474387bfdecf18bb2a9" -dependencies = [ - "num-traits", - "rmp", -] - [[package]] name = "rustc-demangle" version = "0.1.23" @@ -3820,13 +3835,13 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.10" +version = "0.23.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" +checksum = "a218f0f6d05669de4eabfb24f31ce802035c952429d037507b4a4a39f0e60c5b" 
dependencies = [ "aws-lc-rs", + "log", "once_cell", - "ring", "rustls-pki-types", "rustls-webpki", "subtle", @@ -3847,9 +3862,9 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.7.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a88d6d420651b496bdd98684116959239430022a115c1240e6c3993be0b15fba" +checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" dependencies = [ "openssl-probe", "rustls-pemfile 2.1.2", @@ -3885,9 +3900,9 @@ checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" [[package]] name = "rustls-webpki" -version = "0.102.5" +version = "0.102.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a6fccd794a42c2c105b513a2f62bc3fd8f3ba57a4593677ceb0bd035164d78" +checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" dependencies = [ "aws-lc-rs", "ring", @@ -4006,9 +4021,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.117" +version = "1.0.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" +checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd" dependencies = [ "itoa", "ryu", @@ -4089,6 +4104,17 @@ dependencies = [ "syn 2.0.58", ] +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + [[package]] name = "sharded-slab" version = "0.1.7" @@ -4120,12 +4146,6 @@ dependencies = [ "libc", ] -[[package]] -name = "simd-adler32" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" - [[package]] name = "simd-json" version = "0.13.9" @@ -4249,33 
+4269,11 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" -[[package]] -name = "strum" -version = "0.26.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" -dependencies = [ - "strum_macros", -] - -[[package]] -name = "strum_macros" -version = "0.26.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" -dependencies = [ - "heck 0.5.0", - "proc-macro2", - "quote", - "rustversion", - "syn 2.0.58", -] - [[package]] name = "subtle" -version = "2.6.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "symbolic-common" @@ -4908,6 +4906,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + [[package]] name = "unicase" version = "2.7.0" diff --git a/Cargo.toml b/Cargo.toml index 8e6c01c68..9f1d27b7f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,8 +15,6 @@ members = [ "tools", "ipc", "ipc/macros", - "live-debugger", - "live-debugger-ffi", "remote-config", "sidecar", "sidecar/macros", diff --git a/live-debugger-ffi/Cargo.toml b/live-debugger-ffi/Cargo.toml deleted file mode 100644 index 24ca50886..000000000 --- a/live-debugger-ffi/Cargo.toml +++ /dev/null @@ -1,14 +0,0 @@ -# Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. -# This product includes software developed at Datadog (https://www.datadoghq.com/). 
Copyright 2021-Present Datadog, Inc. - -[package] -name = "datadog-live-debugger-ffi" -version = "0.0.1" -edition = "2021" - -[lib] -crate-type = ["lib", "staticlib", "cdylib"] - -[dependencies] -datadog-live-debugger = { path = "../live-debugger" } -ddcommon-ffi = { path = "../ddcommon-ffi" } diff --git a/live-debugger-ffi/cbindgen.toml b/live-debugger-ffi/cbindgen.toml deleted file mode 100644 index 2d29f5793..000000000 --- a/live-debugger-ffi/cbindgen.toml +++ /dev/null @@ -1,35 +0,0 @@ -# Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. -# This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. - -language = "C" -tab_width = 2 -header = """// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. - -typedef struct ddog_DebuggerCapture ddog_DebuggerCapture; -typedef struct ddog_DebuggerValue ddog_DebuggerValue; -""" -include_guard = "DDOG_LIVE_DEBUGGER_H" -style = "both" - -no_includes = true -sys_includes = ["stdbool.h", "stddef.h", "stdint.h", "stdio.h"] -includes = ["common.h"] - -[export] -prefix = "ddog_" -renaming_overrides_prefixing = true - -[export.mangle] -rename_types = "PascalCase" - -[enum] -prefix_with_name = true -rename_variants = "ScreamingSnakeCase" - -[fn] -must_use = "DDOG_CHECK_RETURN" - -[parse] -parse_deps = true -include = ["datadog-live-debugger", "ddcommon-ffi"] diff --git a/live-debugger-ffi/src/data.rs b/live-debugger-ffi/src/data.rs deleted file mode 100644 index d3a031db1..000000000 --- a/live-debugger-ffi/src/data.rs +++ /dev/null @@ -1,280 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. 
-// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. - -use datadog_live_debugger::{ - Capture, DslString, EvaluateAt, InBodyLocation, MetricKind, ProbeCondition, ProbeValue, - SpanProbeTarget, -}; -use ddcommon_ffi::{CharSlice, Option}; - -#[repr(C)] -pub struct CharSliceVec<'a> { - pub strings: *const CharSlice<'a>, - pub string_count: usize, -} - -impl<'a> Drop for CharSliceVec<'a> { - fn drop(&mut self) { - unsafe { - Vec::from_raw_parts( - self.strings as *mut CharSlice, - self.string_count, - self.string_count, - ) - }; - } -} - -impl<'a> From<&'a Vec> for CharSliceVec<'a> { - fn from(from: &'a Vec) -> Self { - let char_slices: Vec> = from.iter().map(|s| s.as_str().into()).collect(); - let new = CharSliceVec { - strings: char_slices.as_ptr(), - string_count: char_slices.len(), - }; - std::mem::forget(char_slices); - new - } -} - -#[repr(C)] -pub struct MetricProbe<'a> { - pub kind: MetricKind, - pub name: CharSlice<'a>, - pub value: &'a ProbeValue, -} - -impl<'a> From<&'a datadog_live_debugger::MetricProbe> for MetricProbe<'a> { - fn from(from: &'a datadog_live_debugger::MetricProbe) -> Self { - MetricProbe { - kind: from.kind, - name: from.name.as_str().into(), - value: &from.value, - } - } -} - -#[repr(C)] -pub struct LogProbe<'a> { - pub segments: &'a DslString, - pub when: &'a ProbeCondition, - pub capture: &'a Capture, - pub sampling_snapshots_per_second: u32, -} - -impl<'a> From<&'a datadog_live_debugger::LogProbe> for LogProbe<'a> { - fn from(from: &'a datadog_live_debugger::LogProbe) -> Self { - LogProbe { - segments: &from.segments, - when: &from.when, - capture: &from.capture, - sampling_snapshots_per_second: from.sampling_snapshots_per_second, - } - } -} - -#[repr(C)] -pub struct Tag<'a> { - pub name: CharSlice<'a>, - pub value: &'a DslString, -} - -#[repr(C)] -pub struct SpanProbeDecoration<'a> { - pub condition: &'a ProbeCondition, - pub tags: *const Tag<'a>, - pub tags_count: 
usize, -} - -impl<'a> From<&'a datadog_live_debugger::SpanProbeDecoration> for SpanProbeDecoration<'a> { - fn from(from: &'a datadog_live_debugger::SpanProbeDecoration) -> Self { - let tags: Vec<_> = from - .tags - .iter() - .map(|(name, value)| Tag { - name: name.as_str().into(), - value, - }) - .collect(); - - let new = SpanProbeDecoration { - condition: &from.condition, - tags: tags.as_ptr(), - tags_count: tags.len(), - }; - std::mem::forget(tags); - new - } -} - -impl<'a> Drop for SpanProbeDecoration<'a> { - fn drop(&mut self) { - unsafe { - Vec::from_raw_parts( - self.tags as *mut CharSlice, - self.tags_count, - self.tags_count, - ) - }; - } -} - -#[repr(C)] -pub struct SpanDecorationProbe<'a> { - pub target: SpanProbeTarget, - pub decorations: *const SpanProbeDecoration<'a>, - pub decorations_count: usize, -} - -impl<'a> From<&'a datadog_live_debugger::SpanDecorationProbe> for SpanDecorationProbe<'a> { - fn from(from: &'a datadog_live_debugger::SpanDecorationProbe) -> Self { - let tags: Vec<_> = from.decorations.iter().map(Into::into).collect(); - let new = SpanDecorationProbe { - target: from.target, - decorations: tags.as_ptr(), - decorations_count: tags.len(), - }; - std::mem::forget(tags); - new - } -} - -impl<'a> Drop for SpanDecorationProbe<'a> { - fn drop(&mut self) { - unsafe { - Vec::from_raw_parts( - self.decorations as *mut SpanProbeDecoration, - self.decorations_count, - self.decorations_count, - ) - }; - } -} - -#[repr(C)] -pub enum ProbeType<'a> { - Metric(MetricProbe<'a>), - Log(LogProbe<'a>), - Span, - SpanDecoration(SpanDecorationProbe<'a>), -} - -impl<'a> From<&'a datadog_live_debugger::ProbeType> for ProbeType<'a> { - fn from(from: &'a datadog_live_debugger::ProbeType) -> Self { - match from { - datadog_live_debugger::ProbeType::Metric(metric) => ProbeType::Metric(metric.into()), - datadog_live_debugger::ProbeType::Log(log) => ProbeType::Log(log.into()), - datadog_live_debugger::ProbeType::Span(_) => ProbeType::Span, - 
datadog_live_debugger::ProbeType::SpanDecoration(span_decoration) => { - ProbeType::SpanDecoration(span_decoration.into()) - } - } - } -} - -#[repr(C)] -pub struct ProbeTarget<'a> { - pub type_name: Option>, - pub method_name: Option>, - pub source_file: Option>, - pub signature: Option>, - pub lines: CharSliceVec<'a>, - pub in_body_location: InBodyLocation, -} - -impl<'a> From<&'a datadog_live_debugger::ProbeTarget> for ProbeTarget<'a> { - fn from(from: &'a datadog_live_debugger::ProbeTarget) -> Self { - ProbeTarget { - type_name: from.type_name.as_ref().map(|s| s.as_str().into()).into(), - method_name: from.method_name.as_ref().map(|s| s.as_str().into()).into(), - source_file: from.source_file.as_ref().map(|s| s.as_str().into()).into(), - signature: from.signature.as_ref().map(|s| s.as_str().into()).into(), - lines: (&from.lines).into(), - in_body_location: from.in_body_location, - } - } -} - -#[repr(C)] -pub struct Probe<'a> { - pub id: CharSlice<'a>, - pub version: u64, - pub language: Option>, - pub tags: CharSliceVec<'a>, - pub target: ProbeTarget<'a>, // "where" is rust keyword - pub evaluate_at: EvaluateAt, - pub probe: ProbeType<'a>, -} - -impl<'a> From<&'a datadog_live_debugger::Probe> for Probe<'a> { - fn from(from: &'a datadog_live_debugger::Probe) -> Self { - Probe { - id: from.id.as_str().into(), - version: from.version, - language: from.language.as_ref().map(|s| s.as_str().into()).into(), - tags: (&from.tags).into(), - target: (&from.target).into(), - evaluate_at: from.evaluate_at, - probe: (&from.probe).into(), - } - } -} - -#[repr(C)] -pub struct FilterList<'a> { - pub package_prefixes: CharSliceVec<'a>, - pub classes: CharSliceVec<'a>, -} - -impl<'a> From<&'a datadog_live_debugger::FilterList> for FilterList<'a> { - fn from(from: &'a datadog_live_debugger::FilterList) -> Self { - FilterList { - package_prefixes: (&from.package_prefixes).into(), - classes: (&from.classes).into(), - } - } -} - -#[repr(C)] -pub struct ServiceConfiguration<'a> { - pub 
id: CharSlice<'a>, - pub allow: FilterList<'a>, - pub deny: FilterList<'a>, - pub sampling_snapshots_per_second: u32, -} - -impl<'a> From<&'a datadog_live_debugger::ServiceConfiguration> for ServiceConfiguration<'a> { - fn from(from: &'a datadog_live_debugger::ServiceConfiguration) -> Self { - ServiceConfiguration { - id: from.id.as_str().into(), - allow: (&from.allow).into(), - deny: (&from.deny).into(), - sampling_snapshots_per_second: from.sampling_snapshots_per_second, - } - } -} - -#[repr(C)] -pub enum LiveDebuggingData<'a> { - None, - Probe(Probe<'a>), - ServiceConfiguration(ServiceConfiguration<'a>), -} - -impl<'a> From<&'a datadog_live_debugger::LiveDebuggingData> for LiveDebuggingData<'a> { - fn from(from: &'a datadog_live_debugger::LiveDebuggingData) -> Self { - match from { - datadog_live_debugger::LiveDebuggingData::Probe(probe) => { - LiveDebuggingData::Probe(probe.into()) - } - datadog_live_debugger::LiveDebuggingData::ServiceConfiguration(config) => { - LiveDebuggingData::ServiceConfiguration(config.into()) - } - } - } -} - -#[no_mangle] -pub extern "C" fn ddog_capture_defaults() -> Capture { - Capture::default() -} diff --git a/live-debugger-ffi/src/evaluator.rs b/live-debugger-ffi/src/evaluator.rs deleted file mode 100644 index bd48b66ff..000000000 --- a/live-debugger-ffi/src/evaluator.rs +++ /dev/null @@ -1,176 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. 
- -use std::borrow::Cow; -use datadog_live_debugger::{DslString, ProbeCondition}; -use ddcommon_ffi::CharSlice; -use std::ffi::c_void; -use std::mem::transmute; - -#[repr(C)] -pub enum IntermediateValue<'a> { - String(CharSlice<'a>), - Number(f64), - Bool(bool), - Null, - Referenced(&'a c_void), -} - -impl<'a> From<&'a datadog_live_debugger::IntermediateValue<'a, c_void>> for IntermediateValue<'a> { - fn from(value: &'a datadog_live_debugger::IntermediateValue<'a, c_void>) -> Self { - match value { - datadog_live_debugger::IntermediateValue::String(s) => { - IntermediateValue::String(s.as_ref().into()) - } - datadog_live_debugger::IntermediateValue::Number(n) => IntermediateValue::Number(*n), - datadog_live_debugger::IntermediateValue::Bool(b) => IntermediateValue::Bool(*b), - datadog_live_debugger::IntermediateValue::Null => IntermediateValue::Null, - datadog_live_debugger::IntermediateValue::Referenced(value) => { - IntermediateValue::Referenced(value) - } - } - } -} - -#[repr(C)] -pub struct VoidCollection { - pub count: isize, // set to < 0 on error - pub elements: *const c_void, - pub free: extern "C" fn(VoidCollection), -} - -#[repr(C)] -#[derive(Clone)] -pub struct Evaluator { - pub equals: - for<'a> extern "C" fn(&'a mut c_void, IntermediateValue<'a>, IntermediateValue<'a>) -> bool, - pub greater_than: - for<'a> extern "C" fn(&'a mut c_void, IntermediateValue<'a>, IntermediateValue<'a>) -> bool, - pub greater_or_equals: - for<'a> extern "C" fn(&'a mut c_void, IntermediateValue<'a>, IntermediateValue<'a>) -> bool, - pub fetch_identifier: - for<'a, 'b> extern "C" fn(&'a mut c_void, &CharSlice<'b>) -> Option<&'a c_void>, // special values: @duration, @return, @exception - pub fetch_index: for<'a, 'b> extern "C" fn( - &'a mut c_void, - &'a c_void, - IntermediateValue<'b>, - ) -> Option<&'a c_void>, - pub fetch_nested: for<'a, 'b> extern "C" fn( - &'a mut c_void, - &'a c_void, - IntermediateValue<'b>, - ) -> Option<&'a c_void>, - pub length: for<'a> extern "C" 
fn(&'a mut c_void, &'a c_void) -> u64, - pub try_enumerate: for<'a> extern "C" fn(&'a mut c_void, &'a c_void) -> VoidCollection, - pub stringify: for<'a> extern "C" fn(&'a mut c_void, &'a c_void) -> VoidCollection, - pub convert_index: for<'a> extern "C" fn(&'a mut c_void, &'a c_void) -> isize, // return < 0 on error -} - -static mut FFI_EVALUATOR: Option = None; -#[allow(mutable_transmutes)] // SAFETY: It's the &mut c_void context we receive from input functions -static EVALUATOR: datadog_live_debugger::Evaluator = - datadog_live_debugger::Evaluator { - equals: |context, a, b| unsafe { - (FFI_EVALUATOR.as_ref().unwrap().equals)(transmute(context), (&a).into(), (&b).into()) - }, - greater_than: |context, a, b| unsafe { - (FFI_EVALUATOR.as_ref().unwrap().greater_than)(transmute(context), (&a).into(), (&b).into()) - }, - greater_or_equals: |context, a, b| unsafe { - (FFI_EVALUATOR.as_ref().unwrap().greater_or_equals)(transmute(context), (&a).into(), (&b).into()) - }, - fetch_identifier: |context, name| unsafe { - (FFI_EVALUATOR.as_ref().unwrap().fetch_identifier)(transmute(context), &CharSlice::from(name)) - }, - fetch_index: |context, base, index| unsafe { - (FFI_EVALUATOR.as_ref().unwrap().fetch_index)(transmute(context), base, (&index).into()) - }, - fetch_nested: |context, base, member| unsafe { - (FFI_EVALUATOR.as_ref().unwrap().fetch_nested)(transmute(context), base, (&member).into()) - }, - length: |context, value| unsafe { - (FFI_EVALUATOR.as_ref().unwrap().length)(transmute(context), value) - }, - try_enumerate: |context, value| unsafe { - let collection = (FFI_EVALUATOR.as_ref().unwrap().try_enumerate)(transmute(context), value); - if collection.count < 0 { - None - } else { - // We need to copy, Vec::from_raw_parts with only free in the allocator would be unstable... 
- let mut vec = Vec::with_capacity(collection.count as usize); - vec.extend_from_slice(std::slice::from_raw_parts( - collection.elements as *const &c_void, - collection.count as usize, - )); - (collection.free)(collection); - Some(vec) - } - }, - stringify: |context, value| unsafe { - let collection = (FFI_EVALUATOR.as_ref().unwrap().try_enumerate)(transmute(context), value); - if collection.count < 0 { - unreachable!() - } - - // We need to copy... - let string = String::from_raw_parts( - collection.elements as *mut u8, - collection.count as usize, - collection.count as usize, - ); - let copy = string.clone(); - std::mem::forget(string); - (collection.free)(collection); - Cow::Owned(copy) - }, - convert_index: |context, value| unsafe { - let index = (FFI_EVALUATOR.as_ref().unwrap().convert_index)(transmute(context), value); - if index < 0 { - None - } else { - Some(index as usize) - } - }, - }; - -#[no_mangle] -#[allow(clippy::missing_safety_doc)] -pub unsafe extern "C" fn register_expr_evaluator(eval: &Evaluator) { - FFI_EVALUATOR = Some(eval.clone()); -} - -#[no_mangle] -pub extern "C" fn evaluate_condition(condition: &ProbeCondition, context: &mut c_void) -> bool { - datadog_live_debugger::eval_condition(&EVALUATOR, condition, context) -} - -pub fn evaluate_string(condition: &DslString, context: &mut c_void) -> String { - datadog_live_debugger::eval_string(&EVALUATOR, condition, context) -} - -// This is unsafe, but we want to use it as function pointer... 
-#[no_mangle] -extern "C" fn drop_void_collection_string(void: VoidCollection) { - unsafe { - String::from_raw_parts( - void.elements as *mut u8, - void.count as usize, - void.count as usize, - ); - } -} - -#[no_mangle] -pub extern "C" fn evaluate_unmanaged_string( - condition: &DslString, - context: &mut c_void, -) -> VoidCollection { - let string = evaluate_string(condition, context); - let new = VoidCollection { - count: string.len() as isize, - elements: string.as_ptr() as *const c_void, - free: drop_void_collection_string as extern "C" fn(VoidCollection), - }; - std::mem::forget(string); - new -} diff --git a/live-debugger-ffi/src/lib.rs b/live-debugger-ffi/src/lib.rs deleted file mode 100644 index c91a95339..000000000 --- a/live-debugger-ffi/src/lib.rs +++ /dev/null @@ -1,7 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. - -pub mod data; -pub mod evaluator; -pub mod query; -pub mod sender; diff --git a/live-debugger-ffi/src/query.rs b/live-debugger-ffi/src/query.rs deleted file mode 100644 index 05fe858ff..000000000 --- a/live-debugger-ffi/src/query.rs +++ /dev/null @@ -1,39 +0,0 @@ -use crate::data::LiveDebuggingData; -use ddcommon_ffi::slice::AsBytes; -use ddcommon_ffi::CharSlice; - -#[repr(C)] -pub struct LiveDebuggingParseResult { - pub data: LiveDebuggingData<'static>, - opaque_data: Option>, -} - -#[no_mangle] -pub extern "C" fn parse_json(json: CharSlice) -> LiveDebuggingParseResult { - if let Ok(parsed) = - datadog_live_debugger::parse_json(unsafe { std::str::from_utf8_unchecked(json.as_bytes()) }) - { - let parsed = Box::new(parsed); - LiveDebuggingParseResult { - // we have the box. 
Rust doesn't allow us to specify a self-referential struct, so pretend it's 'static - data: unsafe { - std::mem::transmute::<&_, &'static datadog_live_debugger::LiveDebuggingData>( - &*parsed, - ) - } - .into(), - opaque_data: Some(parsed), - } - } else { - LiveDebuggingParseResult { - data: LiveDebuggingData::None, - opaque_data: None, - } - } -} - -#[no_mangle] -pub extern "C" fn drop_probe(_: LiveDebuggingData) {} - -#[no_mangle] -pub extern "C" fn drop_parse_result(_: LiveDebuggingParseResult) {} diff --git a/live-debugger-ffi/src/sender.rs b/live-debugger-ffi/src/sender.rs deleted file mode 100644 index 13b741339..000000000 --- a/live-debugger-ffi/src/sender.rs +++ /dev/null @@ -1,117 +0,0 @@ -use ddcommon_ffi::CharSlice; -// Alias to prevent cbindgen panic -use datadog_live_debugger::debugger_defs::{Value as DebuggerValueAlias, Capture as DebuggerCaptureAlias, Captures, DebuggerData, Entry, Fields, DebuggerPayload, Snapshot}; -use datadog_live_debugger::sender::generate_new_id; - -#[repr(C)] -pub enum FieldType { - STATIC, - ARG, - LOCAL, -} - -#[repr(C)] -pub struct CaptureValue<'a> { - pub r#type: CharSlice<'a>, - pub value: CharSlice<'a>, - pub fields: Option>>>, - pub elements: Vec>, - pub entries: Vec>>, - pub is_null: bool, - pub truncated: bool, - pub not_captured_reason: CharSlice<'a>, - pub size: CharSlice<'a>, -} - -impl<'a> From> for DebuggerValueAlias> { - fn from(val: CaptureValue<'a>) -> Self { - DebuggerValueAlias { - r#type: val.r#type, - value: if val.value.len() == 0 { None } else { Some(val.value) }, - fields: if let Some(boxed) = val.fields { *boxed } else { Fields::default() }, - elements: unsafe { std::mem::transmute(val.elements) }, // SAFETY: is transparent - entries: val.entries, - is_null: val.is_null, - truncated: val.truncated, - not_captured_reason: if val.not_captured_reason.len() == 0 { None } else { Some(val.not_captured_reason) }, - size: if val.size.len() == 0 { None } else { Some(val.size) }, - } - } -} - -/// 
cbindgen:no-export -#[repr(transparent)] -pub struct DebuggerValue<'a>(DebuggerValueAlias>); -/// cbindgen:no-export -#[repr(transparent)] -pub struct DebuggerCapture<'a>(DebuggerCaptureAlias>); - -#[repr(C)] -pub struct ExceptionSnapshot<'a> { - pub data: *mut DebuggerPayload>, - pub capture: *mut DebuggerCapture<'a>, -} - -#[no_mangle] -pub extern "C" fn ddog_create_exception_snapshot<'a>(buffer: &mut Vec>>, service: CharSlice<'a>, language: CharSlice<'a>, id: CharSlice<'a>, exception_id: CharSlice<'a>, timestamp: u64) -> *mut DebuggerCapture<'a> { - let snapshot = DebuggerPayload { - service, - source: "dd_debugger", - timestamp, - debugger: DebuggerData { - snapshot: Snapshot { - captures: Captures { - r#return: Some(DebuggerCaptureAlias::default()), - ..Default::default() - }, - language, - id, - exception_id, - timestamp, - } - } - }; - buffer.push(snapshot); - unsafe { std::mem::transmute(buffer.last_mut().unwrap().debugger.snapshot.captures.r#return.as_mut().unwrap()) } -} - -#[no_mangle] -#[allow(improper_ctypes_definitions)] // Vec has a fixed size, and we care only about that here -pub extern "C" fn ddog_snapshot_add_field<'a, 'b: 'a, 'c: 'a>(capture: &mut DebuggerCapture<'a>, r#type: FieldType, name: CharSlice<'b>, value: CaptureValue<'c>) { - let fields = match r#type { - FieldType::STATIC => &mut capture.0.static_fields, - FieldType::ARG => &mut capture.0.arguments, - FieldType::LOCAL => &mut capture.0.locals, - }; - fields.insert(name, value.into()); -} - -#[no_mangle] -#[allow(improper_ctypes_definitions)] // Vec has a fixed size, and we care only about that here -pub extern "C" fn ddog_capture_value_add_element<'a, 'b: 'a>(value: &mut CaptureValue<'a>, element: CaptureValue<'b>) { - value.elements.push(DebuggerValue(element.into())); -} - -#[no_mangle] -#[allow(improper_ctypes_definitions)] // Vec has a fixed size, and we care only about that here -pub extern "C" fn ddog_capture_value_add_entry<'a, 'b: 'a, 'c: 'a>(value: &mut CaptureValue<'a>, key: 
CaptureValue<'b>, element: CaptureValue<'c>) { - value.entries.push(Entry(key.into(), element.into())); -} - -#[no_mangle] -#[allow(improper_ctypes_definitions)] // Vec has a fixed size, and we care only about that here -pub extern "C" fn ddog_capture_value_add_field<'a, 'b: 'a, 'c: 'a>(value: &mut CaptureValue<'a>, key: CharSlice<'b>, element: CaptureValue<'c>) { - let fields = match value.fields { - None => { - value.fields = Some(Box::default()); - value.fields.as_mut().unwrap() - }, - Some(ref mut f) => f, - }; - fields.insert(key, element.into()); -} - -#[no_mangle] -pub extern "C" fn ddog_snapshot_format_new_uuid(buf: &mut [u8; 36]) { - generate_new_id().as_hyphenated().encode_lower(buf); -} diff --git a/live-debugger/Cargo.toml b/live-debugger/Cargo.toml deleted file mode 100644 index 011eb2cc3..000000000 --- a/live-debugger/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -edition = "2021" -license = "Apache 2.0" -name = "datadog-live-debugger" -version = "0.0.1" - -[dependencies] -anyhow = "1.0" -ddcommon = { path = "../ddcommon" } -hyper = { version = "0.14", features = ["client"] } -regex = "1.9.3" -json = "0.12.4" -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" -uuid = { version = "1.0", features = ["v4"] } diff --git a/live-debugger/src/debugger_defs.rs b/live-debugger/src/debugger_defs.rs deleted file mode 100644 index 82cb39c93..000000000 --- a/live-debugger/src/debugger_defs.rs +++ /dev/null @@ -1,78 +0,0 @@ -use std::collections::HashMap; -use std::hash::Hash; -use serde::{Deserialize, Serialize}; - -#[derive(Serialize, Deserialize)] -pub struct DebuggerPayload { - pub service: S, - pub source: &'static str, - pub timestamp: u64, - pub debugger: DebuggerData, -} - -#[derive(Serialize, Deserialize)] -pub struct DebuggerData { - pub snapshot: Snapshot, -} - -#[derive(Serialize, Deserialize)] -pub struct Snapshot { - pub captures: Captures, - pub language: S, - pub id: S, - #[serde(rename = "exception-id")] - pub 
exception_id: S, - pub timestamp: u64, -} - -#[derive(Default, Serialize, Deserialize)] -pub struct Captures { - #[serde(skip_serializing_if = "HashMap::is_empty")] - pub lines: HashMap>, - #[serde(skip_serializing_if = "Option::is_none")] - pub entry: Option>, - #[serde(skip_serializing_if = "Option::is_none")] - pub r#return: Option>, -} - -pub type Fields = HashMap>; -#[derive(Default, Serialize, Deserialize)] -pub struct Capture { - #[serde(skip_serializing_if = "HashMap::is_empty")] - #[serde(rename = "staticFields")] - pub static_fields: Fields, - #[serde(skip_serializing_if = "HashMap::is_empty")] - pub arguments: Fields, - #[serde(skip_serializing_if = "HashMap::is_empty")] - pub locals: Fields, - #[serde(skip_serializing_if = "Option::is_none")] - pub throwable: Option>, -} - -#[derive(Serialize, Deserialize)] -pub struct Entry(pub Value, pub Value); - -#[derive(Default, Serialize, Deserialize)] -pub struct Value { - pub r#type: S, - #[serde(skip_serializing_if = "Option::is_none")] - pub value: Option, - #[serde(skip_serializing_if = "HashMap::is_empty")] - pub fields: Fields, - #[serde(skip_serializing_if = "Vec::is_empty")] - pub elements: Vec>, - #[serde(skip_serializing_if = "Vec::is_empty")] - pub entries: Vec>, - #[serde(skip_serializing_if = "<&bool as std::ops::Not>::not")] - #[serde(rename = "isNull")] - pub is_null: bool, - #[serde(skip_serializing_if = "<&bool as std::ops::Not>::not")] - pub truncated: bool, - #[serde(skip_serializing_if = "Option::is_none")] - #[serde(rename = "notCapturedReason")] - pub not_captured_reason: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub size: Option, -} - - diff --git a/live-debugger/src/expr_defs.rs b/live-debugger/src/expr_defs.rs deleted file mode 100644 index 15e16e3d2..000000000 --- a/live-debugger/src/expr_defs.rs +++ /dev/null @@ -1,82 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. 
-// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. - -#[derive(Debug)] -pub enum CollectionSource { - Reference(Reference), - FilterOperator(Box<(CollectionSource, Condition)>), -} - -#[derive(Debug)] -pub enum Reference { - Base(String), - Index(Box<(CollectionSource, Value)>), // i.e. foo[bar] - Nested(Box<(Reference, Value)>), // i.e. foo.bar -} - -#[derive(Debug)] -pub enum BinaryComparison { - Equals, - NotEquals, - GreaterThan, - GreaterOrEquals, - LessThan, - LessOrEquals, -} - -#[derive(Debug)] -pub enum StringComparison { - StartsWith, - EndsWith, - Contains, - Matches, -} - -#[derive(Debug)] -pub enum CollectionMatch { - All, - Any, -} - -#[derive(Debug)] -pub enum Condition { - Always, - Never, - Disjunction(Box<(Condition, Condition)>), - Conjunction(Box<(Condition, Condition)>), - Negation(Box), - StringComparison(StringComparison, StringSource, String), - BinaryComparison(Value, BinaryComparison, Value), - CollectionMatch(CollectionMatch, Reference, Box), - IsUndefinedReference(Reference), - IsEmptyReference(Reference), -} - -#[derive(Debug)] -pub enum NumberSource { - Number(f64), - CollectionSize(CollectionSource), - StringLength(Reference), - Reference(Reference), -} - -#[derive(Debug)] -pub enum StringSource { - String(String), - Substring(Box<(StringSource, NumberSource, NumberSource)>), - Null, - Reference(Reference), -} - -#[derive(Debug)] -pub enum Value { - Bool(Box), - String(StringSource), - Number(NumberSource), -} - -#[derive(Debug)] -pub enum DslPart { - Ref(CollectionSource), - String(String), -} diff --git a/live-debugger/src/expr_eval.rs b/live-debugger/src/expr_eval.rs deleted file mode 100644 index 8a92160e0..000000000 --- a/live-debugger/src/expr_eval.rs +++ /dev/null @@ -1,359 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. 
-// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. - -use std::borrow::Cow; -use crate::expr_defs::{ - BinaryComparison, CollectionMatch, CollectionSource, Condition, DslPart, NumberSource, - Reference, StringComparison, StringSource, Value, -}; -use regex::Regex; -use std::cmp::min; -use std::str::FromStr; -use std::usize; - -#[derive(Debug)] -pub struct DslString(pub(crate) Vec); -#[derive(Debug)] -pub struct ProbeValue(pub(crate) Value); -#[derive(Debug)] -pub struct ProbeCondition(pub(crate) Condition); - -pub enum IntermediateValue<'a, I> { - String(Cow<'a, str>), - Number(f64), - Bool(bool), - Null, - Referenced(&'a I), -} - -pub struct Evaluator { - pub equals: for<'a> fn(&'a C, IntermediateValue<'a, I>, IntermediateValue<'a, I>) -> bool, - pub greater_than: for<'a> fn(&'a C, IntermediateValue<'a, I>, IntermediateValue<'a, I>) -> bool, - pub greater_or_equals: - for<'a> fn(&'a C, IntermediateValue<'a, I>, IntermediateValue<'a, I>) -> bool, - pub fetch_identifier: for<'a> fn(&'a C, &str) -> Option<&'a I>, // special values: @duration, @return, @exception - pub fetch_index: for<'a> fn(&'a C, &'a I, IntermediateValue<'a, I>) -> Option<&'a I>, - pub fetch_nested: for<'a> fn(&'a C, &'a I, IntermediateValue<'a, I>) -> Option<&'a I>, - pub length: for<'a> fn(&'a C, &'a I) -> u64, - pub try_enumerate: for<'a> fn(&'a C, &'a I) -> Option>, - pub stringify: for<'a> fn(&'a C, &'a I) -> Cow<'a, str>, - pub convert_index: for<'a> fn(&'a C, &'a I) -> Option, -} - -type EvalResult = Result; - -struct Eval<'a, I, C> { - eval: &'a Evaluator, - context: &'a C, - it: Option<&'a I>, -} - -impl<'a, I, C> Eval<'a, I, C> { - fn value(&mut self, value: &'a Value) -> EvalResult> { - Ok(match value { - Value::Bool(condition) => IntermediateValue::Bool(self.condition(condition)?), - Value::String(s) => self.string_source(s)?, - Value::Number(n) => self.number_source(n)?, - }) - } - - fn number_source(&mut self, 
value: &'a NumberSource) -> EvalResult> { - Ok(match value { - NumberSource::Number(n) => IntermediateValue::Number(*n), - NumberSource::CollectionSize(collection) => { - IntermediateValue::Number(match collection { - CollectionSource::Reference(reference) => { - (self.eval.length)(self.context, self.reference(reference)?.ok_or(())?) - as f64 - } - CollectionSource::FilterOperator(_) => { - self.collection_source(collection)?.ok_or(())?.len() as f64 - } - }) - } - NumberSource::StringLength(reference) => IntermediateValue::Number((self.eval.length)( - self.context, - self.reference(reference)?.ok_or(())?, - ) - as f64), - NumberSource::Reference(reference) => { - IntermediateValue::Referenced(self.reference(reference)?.ok_or(())?) - } - }) - } - - fn convert_index(&mut self, value: IntermediateValue<'a, I>) -> EvalResult { - Ok(match value { - IntermediateValue::String(s) => return usize::from_str(&s).map_err(|_| ()), - IntermediateValue::Number(n) => n as usize, - IntermediateValue::Bool(_) => return Err(()), - IntermediateValue::Null => 0, - IntermediateValue::Referenced(referenced) => { - (self.eval.convert_index)(self.context, referenced).ok_or(())? 
- } - }) - } - - fn number_to_index(&mut self, value: &'a NumberSource) -> EvalResult { - let value = self.number_source(value)?; - self.convert_index(value) - } - - fn string_source(&mut self, value: &'a StringSource) -> EvalResult> { - Ok(match value { - StringSource::String(s) => IntermediateValue::String(Cow::Borrowed(s.as_str())), - StringSource::Substring(boxed) => { - let (string, start, end) = &**boxed; - let str = self.stringify(string)?; - let start = self.number_to_index(start)?; - let mut end = self.number_to_index(end)?; - if start > end || start >= str.len() { - return Err(()); - } - end = min(end, str.len()); - IntermediateValue::String(match str { - Cow::Owned(s) => Cow::Owned(s[start..end].to_string()), - Cow::Borrowed(s) => Cow::Borrowed(&s[start..end]) - }) - } - StringSource::Null => IntermediateValue::Null, - StringSource::Reference(reference) => { - IntermediateValue::Referenced(self.reference(reference)?.ok_or(())?) - } - }) - } - - fn reference_collection(&mut self, reference: &'a Reference) -> EvalResult>> { - Ok(self - .reference(reference)? - .and_then(|reference| (self.eval.try_enumerate)(self.context, reference))) - } - - fn reference(&mut self, reference: &'a Reference) -> EvalResult> { - Ok(match reference { - Reference::Base(ref identifier) => { - if identifier == "@it" { - self.it - } else { - (self.eval.fetch_identifier)(self.context, identifier.as_str()) - } - } - Reference::Index(ref boxed) => { - let (source, dimension) = &**boxed; - let dimension = self.value(dimension)?; - match source { - CollectionSource::FilterOperator(_) => { - let index = self.convert_index(dimension)?; - self.collection_source(source)?.and_then(|vec| { - if index < vec.len() { - Some(vec[index]) - } else { - None - } - }) - } - CollectionSource::Reference(ref reference) => self - .reference(reference)? 
- .and_then(|base| (self.eval.fetch_index)(self.context, base, dimension)), - } - } - Reference::Nested(ref boxed) => { - let (source, member) = &**boxed; - let member = self.value(member)?; - self.reference(source)? - .and_then(|base| (self.eval.fetch_nested)(self.context, base, member)) - } - }) - } - - fn collection_source( - &mut self, - collection: &'a CollectionSource, - ) -> EvalResult>> { - Ok(match collection { - CollectionSource::Reference(ref reference) => self.reference_collection(reference)?, - CollectionSource::FilterOperator(ref boxed) => { - let (source, condition) = &**boxed; - let mut values = vec![]; - let it = self.it; - if let Some(source_values) = self.collection_source(source)? { - for item in source_values { - self.it = Some(item); - if self.condition(condition)? { - values.push(item); - } - } - self.it = it; - Some(values) - } else { - None - } - } - }) - } - - fn stringify_intermediate(&mut self, value: IntermediateValue<'a, I>) -> Cow<'a, str> { - match value { - IntermediateValue::String(s) => s, - IntermediateValue::Number(n) => Cow::Owned(n.to_string()), - IntermediateValue::Bool(b) => Cow::Owned(b.to_string()), - IntermediateValue::Null => Cow::Borrowed(""), - IntermediateValue::Referenced(referenced) => { - (self.eval.stringify)(self.context, referenced) - } - } - } - - fn stringify(&mut self, value: &'a StringSource) -> EvalResult> { - let value = self.string_source(value)?; - Ok(self.stringify_intermediate(value)) - } - - fn condition(&mut self, condition: &'a Condition) -> EvalResult { - Ok(match condition { - Condition::Always => true, - Condition::Never => false, - Condition::StringComparison(comparer, haystack, needle) => { - let haystack = self.stringify(haystack)?; - match comparer { - StringComparison::StartsWith => haystack.starts_with(needle), - StringComparison::EndsWith => haystack.ends_with(needle), - StringComparison::Contains => haystack.contains(needle), - StringComparison::Matches => { - return 
Regex::new(needle.as_str()) - .map_err(|_| ()) - .map(|r| r.is_match(&haystack)) - } - } - } - Condition::BinaryComparison(a, comparer, b) => { - let (a, b) = (self.value(a)?, self.value(b)?); - match comparer { - BinaryComparison::Equals => (self.eval.equals)(self.context, a, b), - BinaryComparison::NotEquals => !(self.eval.equals)(self.context, a, b), - BinaryComparison::GreaterThan => (self.eval.greater_than)(self.context, a, b), - BinaryComparison::GreaterOrEquals => { - (self.eval.greater_or_equals)(self.context, a, b) - } - BinaryComparison::LessThan => { - !(self.eval.greater_or_equals)(self.context, a, b) - } - BinaryComparison::LessOrEquals => !(self.eval.greater_than)(self.context, a, b), - } - } - Condition::CollectionMatch(match_type, reference, condition) => { - let vec = self.reference_collection(reference)?.ok_or(())?; - let it = self.it; - let mut result; - match match_type { - CollectionMatch::All => { - result = true; - for v in vec { - self.it = Some(v); - if !self.condition(condition)? { - result = false; - break; - } - } - } - CollectionMatch::Any => { - result = false; - for v in vec { - self.it = Some(v); - if self.condition(condition)? { - result = true; - break; - } - } - } - } - self.it = it; - result - } - Condition::IsUndefinedReference(reference) => self.reference(reference).ok().is_none(), - Condition::IsEmptyReference(reference) => { - if let Some(value) = self.reference(reference)? { - (self.eval.length)(self.context, value) == 0 - } else { - return Err(()); - } - } - Condition::Disjunction(boxed) => { - let (a, b) = &**boxed; - self.condition(a)? || self.condition(b)? - } - Condition::Conjunction(boxed) => { - let (a, b) = &**boxed; - self.condition(a)? && self.condition(b)? 
- } - Condition::Negation(boxed) => !self.condition(boxed)?, - }) - } -} - -pub fn eval_condition<'a, 'e, 'v, I, C>( - eval: &'e Evaluator, - condition: &'v ProbeCondition, - context: &'a C, -) -> bool -where - 'e: 'a, - 'v: 'a, -{ - Eval { - eval, - context, - it: None, - } - .condition(&condition.0) - .unwrap_or(false) -} - -pub fn eval_string<'a, 'e, 'v, I, C>( - eval: &'e Evaluator, - dsl: &'v DslString, - context: &'a C, -) -> String -where - 'e: 'a, - 'v: 'a, -{ - dsl.0 - .iter() - .map(|p| match p { - DslPart::String(ref str) => Cow::Borrowed(str.as_str()), - DslPart::Ref(ref reference) => { - let mut eval = Eval { - eval, - context, - it: None, - }; - match reference { - CollectionSource::Reference(reference) => eval - .reference(reference) - .unwrap_or_default() - .map(|referenced| { - eval.stringify_intermediate(IntermediateValue::Referenced(referenced)) - }), - CollectionSource::FilterOperator(_) => eval - .collection_source(reference) - .ok() - .unwrap_or_default() - .map(|vec| { - Cow::Owned(format!( - "[{}]", - vec.iter() - .map(|referenced| eval.stringify_intermediate( - IntermediateValue::Referenced(referenced) - )) - .collect::>>() - .join(", ") - )) - }), - } - .unwrap_or(Cow::Borrowed("UNDEFINED")) - } - }) - .collect::>>() - .join("") -} diff --git a/live-debugger/src/lib.rs b/live-debugger/src/lib.rs deleted file mode 100644 index 79eea4e1b..000000000 --- a/live-debugger/src/lib.rs +++ /dev/null @@ -1,16 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. 
- -mod expr_defs; -mod expr_eval; -mod parse_json; -mod parse_json_expr; -mod parse_util; -mod probe_defs; - -pub mod debugger_defs; -pub mod sender; - -pub use expr_eval::*; -pub use parse_json::parse as parse_json; -pub use probe_defs::*; diff --git a/live-debugger/src/parse_json.rs b/live-debugger/src/parse_json.rs deleted file mode 100644 index e6a199fbb..000000000 --- a/live-debugger/src/parse_json.rs +++ /dev/null @@ -1,216 +0,0 @@ -use anyhow::Context; -use crate::expr_defs::{Condition, StringSource, Value}; -use crate::parse_json_expr::{parse_condition, parse_segments, parse_value}; -use crate::parse_util::{get, try_get}; -use crate::{ - Capture, EvaluateAt, FilterList, InBodyLocation, LiveDebuggingData, LogProbe, MetricKind, - MetricProbe, Probe, ProbeCondition, ProbeTarget, ProbeType, ProbeValue, ServiceConfiguration, - SpanDecorationProbe, SpanProbe, SpanProbeDecoration, SpanProbeTarget, -}; -use json::JsonValue; - -fn parse_string_vec(array: &JsonValue) -> anyhow::Result> { - let mut vec = vec![]; - if !array.is_array() { - anyhow::bail!("Tried to get Vec from non-array"); - } - for value in array.members() { - vec.push(value.as_str().ok_or_else(|| anyhow::format_err!("Failed to get string from array value"))?.to_string()); - } - Ok(vec) -} - -fn parse_probe(parsed: &JsonValue) -> anyhow::Result { - let mut tags = vec![]; - if let Some(json_tags) = try_get(parsed, "tags") { - tags = parse_string_vec(json_tags).context("parsing tags")?; - } - - let target = get(parsed, "where")?; - let lines = if let Some(lines) = try_get(target, "lines") { - parse_string_vec(get(lines, "where").context("parsing lines")?).context("parsing lines")? 
- } else { - vec![] - }; - - let target_get = |name: &str| -> anyhow::Result> { - try_get(target, name) - .and_then(|v| { - if v.is_null() { - None - } else { - Some(v.as_str().map(ToString::to_string).ok_or_else(|| anyhow::format_err!("Failed getting string for {name}"))) - } - }) - .transpose() - }; - let probe = match get(parsed, "type")?.as_str().ok_or_else(|| anyhow::format_err!("Failed getting string from type"))? { - "METRIC_PROBE" => ProbeType::Metric(MetricProbe { - kind: match get(parsed, "kind")?.as_str().ok_or_else(|| anyhow::format_err!("Failed getting string from METRIC_PROBE.kind"))? { - "COUNT" => MetricKind::Count, - "GAUGE" => MetricKind::Gauge, - "HISTOGRAM" => MetricKind::Histogram, - "DISTRIBUTION" => MetricKind::Distribution, - kind => anyhow::bail!("{kind} is not a valid METRIC_PROBE.kind"), - }, - name: get(parsed, "metricName")?.as_str().ok_or_else(|| anyhow::format_err!("Failed getting string from metricName"))?.to_string(), - value: ProbeValue( - try_get(parsed, "value") - .map(|v| { - if v.is_null() { - Ok(Value::String(StringSource::Null)) - } else { - parse_value(v) - } - }) - .transpose()? 
- .unwrap_or(Value::String(StringSource::Null)), - ), - }), - "LOG_PROBE" => ProbeType::Log(LogProbe { - segments: parse_segments(get(parsed, "segments")?).context("while parsing LOG_PROBE.segments")?, - when: ProbeCondition( - try_get(parsed, "when") - .map(|v| parse_condition(v).context("while parsing LOG_PROBE.when")) - .unwrap_or(Ok(Condition::Always))?, - ), - capture: { - let mut capture = Capture::default(); - if let Some(v) = try_get(parsed, "capture") { - if !v.is_null() { - if let Some(max_reference_depth) = try_get(v, "maxReference_depth") { - capture.max_reference_depth = max_reference_depth.as_u32().ok_or_else(|| anyhow::format_err!("Failed getting u32 from LOG_PROBE.capture.maxReferenceDepth"))?; - } - if let Some(max_collection_size) = try_get(v, "maxCollectionSize") { - capture.max_collection_size = max_collection_size.as_u32().ok_or_else(|| anyhow::format_err!("Failed getting u32 from LOG_PROBE.capture.maxCollectionSize"))?; - } - if let Some(max_length) = try_get(v, "maxLength") { - capture.max_length = max_length.as_u32().ok_or_else(|| anyhow::format_err!("Failed getting u32 from LOG_PROBE.capture.maxLength"))?; - } - if let Some(max_field_depth) = try_get(v, "maxFieldDepth") { - capture.max_field_depth = max_field_depth.as_u32().ok_or_else(|| anyhow::format_err!("Failed getting u32 from LOG_PROBE.capture.maxFieldDepth"))?; - } - } - } - capture - }, - sampling_snapshots_per_second: try_get(parsed, "sampling") - .and_then(|v| { - if v.is_null() { - None - } else { - Some(v.as_u32().ok_or_else(|| anyhow::format_err!("Failed getting u32 from LOG_PROBE.sampling"))) - } - }) - .transpose()? - .unwrap_or(5000), - }), - "SPAN_PROBE" => ProbeType::Span(SpanProbe {}), - "SPAN_DECORATION_PROBE" => ProbeType::SpanDecoration(SpanDecorationProbe { - target: match try_get(parsed, "targetSpan").map_or(Ok("ACTIVE"), |v| v.as_str().ok_or_else(|| anyhow::format_err!("Failed getting string from SPAN_DECORATION_PROBE.targetSpan")))? 
{ - "ACTIVE" => SpanProbeTarget::Active, - "ROOT" => SpanProbeTarget::Root, - target => anyhow::bail!("{target} is not a valid SPAN_DECORATION_PROBE.targetSpan"), - }, - decorations: { - let mut vec = vec![]; - let decorations = get(parsed, "decorations").context("on SPAN_DECORATIONS_PROBE")?; - if !decorations.is_array() { - anyhow::bail!("SPAN_DECORATIONS_PROBE.decorations is not an array"); - } - for decoration in decorations.members() { - let tags = get(decoration, "tags").context("on SPAN_DECORATIONS_PROBE.decorations")?; - if !tags.is_array() { - anyhow::bail!("SPAN_DECORATIONS_PROBE.decorations.tags is not an array"); - } - let mut tagvec = vec![]; - for tag in tags.members() { - let name = get(tag, "name").context("on SPAN_DECORATIONS_PROBE.decorations[].tags[]")?.as_str().ok_or_else(|| anyhow::format_err!("SPAN_DECORATIONS_PROBE.decorations.tags[].name is not a string"))?.to_string(); - let value = parse_segments(get(tag, "value")?).context("while parsing SPAN_DECORATIONS_PROBE.decorations[].tags[].value")?; - tagvec.push((name, value)); - } - let condition = try_get(decoration, "when") - .map(|v| { - if v.is_null() { - Ok(Condition::Always) - } else { - parse_condition(v).context("parsing the condition of SPAN_DECORATION_PROBE.decorations[].when") - } - }) - .transpose()? 
- .unwrap_or(Condition::Always); - vec.push(SpanProbeDecoration { - condition: ProbeCondition(condition), - tags: tagvec, - }); - } - vec - }, - }), - r#type => anyhow::bail!("Unknown probe type {type}"), - }; - - Ok(Probe { - id: get(parsed, "id")?.as_str().ok_or_else(|| anyhow::format_err!("Failed getting string from id"))?.into(), - version: get(parsed, "version")?.as_u64().unwrap_or(0), - language: get(parsed, "language")?.as_str().map(ToString::to_string), - tags, - target: ProbeTarget { - type_name: target_get("typeName")?, - method_name: target_get("methodName")?, - source_file: target_get("sourceFile")?, - signature: target_get("signature")?, - lines, - in_body_location: match target_get("inBodyLocation")? { - None => InBodyLocation::None, - Some(string) => match string.as_str() { - "START" => InBodyLocation::Start, - "END" => InBodyLocation::End, - location => anyhow::bail!("{location} is not a valid inBodyLocation"), - }, - }, - }, - evaluate_at: match get(parsed, "evaluateAt")?.as_str().ok_or_else(|| anyhow::format_err!("Failed getting string from evaluateAt"))? 
{ - "ENTRY" => EvaluateAt::Entry, - "EXIT" => EvaluateAt::Exit, - eval_at => anyhow::bail!("{eval_at} is not a valid evaluateAt"), - }, - probe, - }) -} - -fn parse_service_configuration(parsed: &JsonValue) -> anyhow::Result { - fn parse_filter_list(parsed: &JsonValue, key: &str) -> anyhow::Result { - let f = get(parsed, key)?; - Ok(FilterList { - package_prefixes: try_get(f, "packagePrefixes") - .map_or(Ok(vec![]), parse_string_vec).map_err(|e| e.context(format!("while parsing {key}.packagePrefixes")))?, - classes: try_get(f, "classes").map_or(Ok(vec![]), parse_string_vec) - .map_err(|e| e.context(format!("while parsing {key}.classes")))?, - }) - } - - Ok(ServiceConfiguration { - id: get(parsed, "id")?.as_str().ok_or_else(|| anyhow::format_err!("Failed getting string from id"))?.into(), - allow: parse_filter_list(parsed, "allowList")?, - deny: parse_filter_list(parsed, "denyList")?, - sampling_snapshots_per_second: try_get(parsed, "sampling") - .and_then(|v| { - if v.is_null() { - None - } else { - Some(v.as_u32().ok_or_else(|| anyhow::format_err!("Failed getting u32 from sampling"))) - } - }) - .transpose()? - .unwrap_or(5000), - }) -} - -pub fn parse(json: &str) -> anyhow::Result { - let parsed = json::parse(json)?; - Ok(match get(&parsed, "type")?.as_str().ok_or_else(|| anyhow::format_err!("Failed getting string from type"))? 
{ - "SERVICE_CONFIGURATION" => LiveDebuggingData::Probe(parse_probe(&parsed).context("while parsing probe")?), - _ => LiveDebuggingData::ServiceConfiguration(parse_service_configuration(&parsed).context("While parsing service configuration")?), - }) -} diff --git a/live-debugger/src/parse_json_expr.rs b/live-debugger/src/parse_json_expr.rs deleted file mode 100644 index 5ae19f772..000000000 --- a/live-debugger/src/parse_json_expr.rs +++ /dev/null @@ -1,211 +0,0 @@ -use anyhow::Context; -use crate::expr_defs::{ - BinaryComparison, CollectionMatch, CollectionSource, Condition, DslPart, NumberSource, - Reference, StringComparison, StringSource, Value, -}; -use crate::parse_util::try_get; -use crate::DslString; -use json::JsonValue; - -fn try_parse_string_value(json: &JsonValue) -> anyhow::Result> { - if let Some(substring) = try_get(json, "substring") { - if substring.is_array() && substring.len() == 3 { - return Ok(Some(StringSource::Substring(Box::new(( - parse_string_value(&substring[0]).context("while parsing source string for substring")?, - parse_number_value(&substring[1]).context("while parsing number for substring")?, - parse_number_value(&substring[2]).context("while parsing number for substring")?, - ))))); - } - } - if json.is_string() { - return Ok(Some(StringSource::String(json.as_str().unwrap().into()))); - } - if json.is_null() { - return Ok(Some(StringSource::Null)); - } - Ok(try_parse_reference(json).context("while parsing string reference")?.map(StringSource::Reference)) -} - -fn parse_string_value(json: &JsonValue) -> anyhow::Result { - try_parse_string_value(json)?.ok_or_else(|| anyhow::format_err!("Could not find an appropriate operation for a string value")) -} - -fn try_parse_number_value(json: &JsonValue) -> anyhow::Result> { - if let Some(reference) = try_get(json, "len") { - return Ok(Some(NumberSource::StringLength(parse_reference(reference).context("while parsing reference for len operation")?))); - } - if let Some(reference) = 
try_get(json, "count") { - return Ok(Some(NumberSource::CollectionSize(parse_collection_source( - reference, - ).context("while parsing collection for size operation")?))); - } - if json.is_number() { - return Ok(Some(NumberSource::Number(json.as_number().unwrap().into()))); - } - Ok(try_parse_reference(json).context("while parsing number reference")?.map(NumberSource::Reference)) -} - -fn parse_number_value(json: &JsonValue) -> anyhow::Result { - try_parse_number_value(json)?.ok_or_else(|| anyhow::format_err!("Could not find an appropriate operation for a number")) -} - -fn try_parse_reference(json: &JsonValue) -> anyhow::Result> { - if let Some(identifier) = try_get(json, "ref") { - return Ok(Some(Reference::Base(identifier.as_str().ok_or_else(|| anyhow::format_err!("Failed getting string from ref"))?.into()))); - } - if let Some(index) = try_get(json, "index") { - if index.is_array() && index.len() == 2 { - return Ok(Some(Reference::Index(Box::new(( - parse_collection_source(&index[0]).context("while parsing collection for index operation")?, - parse_value(&index[1]).context("while parsing index for index operation")?, - ))))); - } - } - if let Some(index) = try_get(json, "nested") { - if index.is_array() && index.len() == 2 { - return Ok(Some(Reference::Nested(Box::new(( - parse_reference(&index[0]).context("while parsing reference for nested operation")?, - parse_value(&index[1]).context("while parsing key for nested operation")?, - ))))); - } - } - Ok(None) -} - -fn parse_reference(json: &JsonValue) -> anyhow::Result { - try_parse_reference(json)?.ok_or_else(|| anyhow::format_err!("Could not find an appropriate operation for a reference")) -} - -pub fn parse_value(json: &JsonValue) -> anyhow::Result { - Ok(if let Some(str) = try_parse_string_value(json)? { - Value::String(str) - } else if let Some(num) = try_parse_number_value(json)? 
{ - Value::Number(num) - } else { - Value::Bool(Box::new(parse_condition(json).context("while parsing arbitrary value")?)) - }) -} - -pub fn parse_condition(json: &JsonValue) -> anyhow::Result { - for (key, comparer) in [ - ("eq", BinaryComparison::Equals), - ("ne", BinaryComparison::NotEquals), - ("gt", BinaryComparison::GreaterThan), - ("ge", BinaryComparison::GreaterOrEquals), - ("lt", BinaryComparison::LessThan), - ("le", BinaryComparison::LessOrEquals), - ] { - if let Some(args) = try_get(json, key) { - if args.is_array() && args.len() == 2 { - return Ok(Condition::BinaryComparison( - parse_value(&args[0]).context("while parsing lhs of binary comparison")?, - comparer, - parse_value(&args[1]).context("while parsing rhs of binary comparison")?, - )); - } - } - } - - if let Some(args) = try_get(json, "and") { - if args.is_array() && args.len() == 2 { - return Ok(Condition::Disjunction(Box::new(( - parse_condition(&args[0]).context("while parsing lhs of binary and")?, - parse_condition(&args[1]).context("while parsing rhs of binary and")?, - )))); - } - } - - if let Some(args) = try_get(json, "or") { - if args.is_array() && args.len() == 2 { - return Ok(Condition::Conjunction(Box::new(( - parse_condition(&args[0]).context("while parsing lhs of binary or")?, - parse_condition(&args[1]).context("while parsing rhs of binary or")?, - )))); - } - } - - if let Some(arg) = try_get(json, "not") { - return Ok(Condition::Negation(Box::new(parse_condition(arg).context("while parsing negation")?))); - } - - if let Some(arg) = try_get(json, "isEmpty") { - return Ok(Condition::IsEmptyReference(parse_reference(arg).context("while parsing reference for isEmpty operation")?)); - } - - if let Some(arg) = try_get(json, "isUndefined") { - return Ok(Condition::IsUndefinedReference(parse_reference(arg).context("while parsing reference for isUndefined operation")?)); - } - - for (key, comparer) in [("any", CollectionMatch::Any), ("all", CollectionMatch::All)] { - if let Some(args) = 
try_get(json, key) { - if args.is_array() && args.len() == 2 { - return Ok(Condition::CollectionMatch( - comparer, - parse_reference(&args[0]).context("while parsing collection reference for collection operation")?, - Box::new(parse_condition(&args[1]).context("while parsing condition for collection operation")?), - )); - } - } - } - - for (key, comparer) in [ - ("startsWith", StringComparison::StartsWith), - ("endsWith", StringComparison::EndsWith), - ("contains", StringComparison::Contains), - ("matches", StringComparison::Matches), - ] { - if let Some(args) = try_get(json, key) { - if args.is_array() && args.len() == 2 && args[1].is_string() { - return Ok(Condition::StringComparison( - comparer, - parse_string_value(&args[0]).context("While parsing string operand for string comparison")?, - args[1].as_str().unwrap().into(), - )); - } - } - } - - if let Some(bool) = json.as_bool() { - return Ok(if bool { - Condition::Always - } else { - Condition::Never - }); - } - - anyhow::bail!("Could not find an appropriate operation for a condition / boolean") -} - -pub fn try_parse_collection_source(json: &JsonValue) -> anyhow::Result> { - if let Some(index) = try_get(json, "filter") { - if index.is_array() && index.len() == 2 { - return Ok(Some(CollectionSource::FilterOperator(Box::new(( - parse_collection_source(&index[0]).context("while parsing collection source for filter operation")?, - parse_condition(&index[1]).context("while parsing condition for collection filter operation")?, - ))))); - } - } - - Ok(try_parse_reference(json)?.map(CollectionSource::Reference)) -} - -fn parse_collection_source(json: &JsonValue) -> anyhow::Result { - try_parse_collection_source(json)?.ok_or_else(|| anyhow::format_err!("Could not find an appropriate operation for a collection source")) -} - -pub fn parse_segments(json: &JsonValue) -> anyhow::Result { - if json.is_array() { - let mut vec = vec![]; - for member in json.members() { - if let Some(str) = try_get(member, "str") { - 
vec.push(DslPart::String(str.as_str().ok_or_else(|| anyhow::format_err!("Failed getting string from str in segment parsing"))?.to_string())); - } else if let Some(part) = try_parse_collection_source(member).context("while parsing collection source for segments")? { - vec.push(DslPart::Ref(part)); - } else { - anyhow::bail!("Could not find an appropriate key for segment parsing"); - } - } - return Ok(DslString(vec)); - } - anyhow::bail!("segments is not an array") -} diff --git a/live-debugger/src/parse_util.rs b/live-debugger/src/parse_util.rs deleted file mode 100644 index c4f855e7c..000000000 --- a/live-debugger/src/parse_util.rs +++ /dev/null @@ -1,13 +0,0 @@ -use json::JsonValue; - -pub fn get<'a>(json: &'a JsonValue, name: &str) -> anyhow::Result<&'a JsonValue> { - try_get(json, name).ok_or_else(|| anyhow::format_err!("Missing key {name}")) -} - -pub fn try_get<'a>(json: &'a JsonValue, name: &str) -> Option<&'a JsonValue> { - if json.has_key(name) { - Some(&json[name]) - } else { - None - } -} diff --git a/live-debugger/src/probe_defs.rs b/live-debugger/src/probe_defs.rs deleted file mode 100644 index 726f39d47..000000000 --- a/live-debugger/src/probe_defs.rs +++ /dev/null @@ -1,134 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. 
- -use crate::{DslString, ProbeCondition, ProbeValue}; - -#[derive(Debug)] -#[repr(C)] -pub struct Capture { - pub max_reference_depth: u32, - pub max_collection_size: u32, - pub max_length: u32, - pub max_field_depth: u32, -} - -impl Default for Capture { - fn default() -> Self { - Capture { - max_reference_depth: 3, - max_collection_size: 100, - max_length: 255, - max_field_depth: 20, - } - } -} - -#[repr(C)] -#[derive(Copy, Clone, Debug)] -pub enum MetricKind { - Count, - Gauge, - Histogram, - Distribution, -} - -#[derive(Debug)] -pub struct MetricProbe { - pub kind: MetricKind, - pub name: String, - pub value: ProbeValue, // May be Value::Null -} - -#[repr(C)] -#[derive(Copy, Clone, Debug)] -pub enum SpanProbeTarget { - Active, - Root, -} - -#[derive(Debug)] -pub struct SpanProbeDecoration { - pub condition: ProbeCondition, - pub tags: Vec<(String, DslString)>, -} - -#[derive(Debug)] -pub struct LogProbe { - pub segments: DslString, - pub when: ProbeCondition, - pub capture: Capture, - pub sampling_snapshots_per_second: u32, -} - -#[derive(Debug)] -pub struct SpanProbe {} - -#[derive(Debug)] -pub struct SpanDecorationProbe { - pub target: SpanProbeTarget, - pub decorations: Vec, -} - -#[derive(Debug)] -pub enum ProbeType { - Metric(MetricProbe), - Log(LogProbe), - Span(SpanProbe), - SpanDecoration(SpanDecorationProbe), -} - -#[repr(C)] -#[derive(Copy, Clone, Debug)] -pub enum InBodyLocation { - None, - Start, - End, -} - -#[derive(Debug)] -pub struct ProbeTarget { - pub type_name: Option, - pub method_name: Option, - pub source_file: Option, - pub signature: Option, - pub lines: Vec, - pub in_body_location: InBodyLocation, -} - -#[repr(C)] -#[derive(Copy, Clone, Debug)] -pub enum EvaluateAt { - Entry, - Exit, -} - -#[derive(Debug)] -pub struct Probe { - pub id: String, - pub version: u64, - pub language: Option, - pub tags: Vec, - pub target: ProbeTarget, // "where" is rust keyword - pub evaluate_at: EvaluateAt, - pub probe: ProbeType, -} - -#[derive(Debug)] 
-pub struct FilterList { - pub package_prefixes: Vec, - pub classes: Vec, -} - -#[derive(Debug)] -pub struct ServiceConfiguration { - pub id: String, - pub allow: FilterList, - pub deny: FilterList, - pub sampling_snapshots_per_second: u32, -} - -#[derive(Debug)] -pub enum LiveDebuggingData { - Probe(Probe), - ServiceConfiguration(ServiceConfiguration), -} diff --git a/live-debugger/src/sender.rs b/live-debugger/src/sender.rs deleted file mode 100644 index 04c11f574..000000000 --- a/live-debugger/src/sender.rs +++ /dev/null @@ -1,94 +0,0 @@ -use std::hash::Hash; -use std::str::FromStr; -use hyper::{Body, Client, Method, Uri}; -use hyper::http::uri::PathAndQuery; -use serde::Serialize; -use uuid::Uuid; -use ddcommon::connector::Connector; -use ddcommon::Endpoint; -use crate::debugger_defs::DebuggerPayload; - -pub const PROD_INTAKE_SUBDOMAIN: &str = "http-intake.logs"; - -const DIRECT_TELEMETRY_URL_PATH: &str = "/v1/input"; -const AGENT_TELEMETRY_URL_PATH: &str = "/debugger/v1/input"; - -#[derive(Default)] -pub struct Config { - pub endpoint: Option, -} - -impl Config { - pub fn set_endpoint(&mut self, mut endpoint: Endpoint) -> anyhow::Result<()> { - let mut uri_parts = endpoint.url.into_parts(); - if uri_parts.scheme.is_some() && uri_parts.scheme.as_ref().unwrap().as_str() != "file" { - uri_parts.path_and_query = Some(PathAndQuery::from_static( - if endpoint.api_key.is_some() { - DIRECT_TELEMETRY_URL_PATH - } else { - AGENT_TELEMETRY_URL_PATH - }, - )); - } - - endpoint.url = Uri::from_parts(uri_parts)?; - self.endpoint = Some(endpoint); - Ok(()) - } -} - -pub fn encode(data: Vec>) -> Vec { - serde_json::to_vec(&data).unwrap() -} - -pub async fn send(payload: &[u8], endpoint: &Endpoint) -> anyhow::Result<()> { - let mut req = hyper::Request::builder() - .header( - hyper::header::USER_AGENT, - concat!("Tracer/", env!("CARGO_PKG_VERSION")), - ) - .header("Content-type", "application/json") - .method(Method::POST); - - let mut url = endpoint.url.clone(); - if 
endpoint.api_key.is_some() { - // TODO DD-REQUEST-ID header necessary? - req = req.header("DD-EVP-ORIGIN", "agent-debugger"); - let mut parts = url.into_parts(); - let mut query = String::from(parts.path_and_query.unwrap().as_str()); - query.push_str("?ddtags=host:"); - query.push_str(""); // TODO hostname - // TODO container tags and such - parts.path_and_query = Some(PathAndQuery::from_str(&query)?); - url = Uri::from_parts(parts)?; - } - // "env:" + config.getEnv(), - // "version:" + config.getVersion(), - // "debugger_version:" + DDTraceCoreInfo.VERSION, - // "agent_version:" + DebuggerAgent.getAgentVersion(), - // "host_name:" + config.getHostName()); - - // SAFETY: we ensure the reference exists across the request - let req = req.uri(url).body(Body::from(unsafe { std::mem::transmute::<&[u8], &[u8]>(payload) }))?; - - match Client::builder() - .build(Connector::default()) - .request(req) - .await - { - Ok(response) => { - if response.status().as_u16() >= 400 { - let body_bytes = hyper::body::to_bytes(response.into_body()).await?; - let response_body = - String::from_utf8(body_bytes.to_vec()).unwrap_or_default(); - anyhow::bail!("Server did not accept traces: {response_body}"); - } - Ok(()) - } - Err(e) => anyhow::bail!("Failed to send traces: {e}"), - } -} - -pub fn generate_new_id() -> Uuid { - Uuid::new_v4() -} diff --git a/remote-config/Cargo.toml b/remote-config/Cargo.toml index 4f9490397..9c32e2843 100644 --- a/remote-config/Cargo.toml +++ b/remote-config/Cargo.toml @@ -11,7 +11,6 @@ test = [] anyhow = { version = "1.0" } ddcommon = { path = "../ddcommon" } datadog-trace-protobuf = { path = "../trace-protobuf" } -datadog-live-debugger = { path = "../live-debugger" } hyper = { version = "0.14", features = ["client"], default-features = false } http = "0.2" base64 = "0.21.0" diff --git a/remote-config/src/parse.rs b/remote-config/src/parse.rs index d9128a62b..90b73ea9e 100644 --- a/remote-config/src/parse.rs +++ b/remote-config/src/parse.rs @@ -1,8 +1,8 @@ 
// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. // This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. +use std::fmt::Display; use serde::{Deserialize, Serialize}; -use datadog_live_debugger::LiveDebuggingData; use crate::dynamic_configuration::data::DynamicConfigFile; #[derive(Debug, Clone, Eq, Hash, PartialEq)] @@ -18,12 +18,13 @@ pub enum RemoteConfigProduct { LiveDebugger, } -impl ToString for RemoteConfigProduct { - fn to_string(&self) -> String { - match self { +impl Display for RemoteConfigProduct { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let str = match self { RemoteConfigProduct::ApmTracing => "APM_TRACING", RemoteConfigProduct::LiveDebugger => "LIVE_DEBUGGING", - }.to_string() + }; + write!(f, "{}", str) } } @@ -77,7 +78,7 @@ impl ToString for RemoteConfigPath { #[derive(Debug)] pub enum RemoteConfigData { DynamicConfig(DynamicConfigFile), - LiveDebugger(LiveDebuggingData), + LiveDebugger(( /* placeholder */)), } impl RemoteConfigData { @@ -87,8 +88,7 @@ impl RemoteConfigData { RemoteConfigData::DynamicConfig(serde_json::from_slice(data)?) 
}, RemoteConfigProduct::LiveDebugger => { - let parsed = datadog_live_debugger::parse_json(&String::from_utf8_lossy(data))?; - RemoteConfigData::LiveDebugger(parsed) + RemoteConfigData::LiveDebugger(/* placeholder */ ()) } }) } diff --git a/sidecar-ffi/Cargo.toml b/sidecar-ffi/Cargo.toml index 120c6c5b0..41cbd5bfe 100644 --- a/sidecar-ffi/Cargo.toml +++ b/sidecar-ffi/Cargo.toml @@ -18,7 +18,6 @@ ddcommon = { path = "../ddcommon" } ddcommon-ffi = { path = "../ddcommon-ffi", default-features = false } ddtelemetry-ffi = { path = "../ddtelemetry-ffi", default-features = false } datadog-remote-config = { path = "../remote-config" } -datadog-live-debugger = { path = "../live-debugger" } paste = "1" libc = "0.2" diff --git a/sidecar-ffi/cbindgen.toml b/sidecar-ffi/cbindgen.toml index 75568489f..dd0268147 100644 --- a/sidecar-ffi/cbindgen.toml +++ b/sidecar-ffi/cbindgen.toml @@ -34,4 +34,4 @@ must_use = "DDOG_CHECK_RETURN" [parse] parse_deps = true -include = ["ddcommon", "ddtelemetry", "datadog-sidecar", "ddtelemetry-ffi", "ddcommon-ffi", "datadog-ipc", "datadog-live-debugger", "datadog-remote-config"] +include = ["ddcommon", "ddtelemetry", "datadog-sidecar", "ddtelemetry-ffi", "ddcommon-ffi", "datadog-ipc", "datadog-remote-config"] diff --git a/sidecar-ffi/src/lib.rs b/sidecar-ffi/src/lib.rs index eca288c50..9dcb7b1ea 100644 --- a/sidecar-ffi/src/lib.rs +++ b/sidecar-ffi/src/lib.rs @@ -600,27 +600,6 @@ pub unsafe extern "C" fn ddog_sidecar_send_trace_v04_bytes( MaybeError::None } -#[no_mangle] -#[allow(clippy::missing_safety_doc)] -#[allow(improper_ctypes_definitions)] // DebuggerPayload is just a pointer, we hide its internals -pub unsafe extern "C" fn ddog_sidecar_send_debugger_data( - transport: &mut Box, - instance_id: &InstanceId, - payloads: Vec> -) -> MaybeError { - if payloads.is_empty() { - return MaybeError::None; - } - - try_c!(blocking::send_debugger_data_shm_vec( - transport, - instance_id, - payloads, - )); - - MaybeError::None -} - #[no_mangle] 
#[allow(clippy::missing_safety_doc)] pub unsafe extern "C" fn ddog_sidecar_set_remote_config_data( diff --git a/sidecar/Cargo.toml b/sidecar/Cargo.toml index bf7169701..e016d5c33 100644 --- a/sidecar/Cargo.toml +++ b/sidecar/Cargo.toml @@ -22,7 +22,6 @@ datadog-trace-protobuf = { path = "../trace-protobuf" } datadog-trace-utils = { path = "../trace-utils" } datadog-trace-normalization = { path = "../trace-normalization" } datadog-remote-config = { path = "../remote-config" } -datadog-live-debugger = { path = "../live-debugger" } futures = { version = "0.3", default-features = false } manual_future = "0.1.1" diff --git a/sidecar/src/service/blocking.rs b/sidecar/src/service/blocking.rs index 48f217038..a0f2d5fcb 100644 --- a/sidecar/src/service/blocking.rs +++ b/sidecar/src/service/blocking.rs @@ -6,7 +6,7 @@ use super::{ SidecarInterfaceRequest, SidecarInterfaceResponse, }; use crate::dogstatsd::DogStatsDAction; -use datadog_ipc::platform::{Channel, FileBackedHandle, ShmHandle}; +use datadog_ipc::platform::{Channel, ShmHandle}; use datadog_ipc::transport::blocking::BlockingTransport; use std::sync::Mutex; use std::{ @@ -14,8 +14,6 @@ use std::{ io, time::{Duration, Instant}, }; -use std::hash::Hash; -use serde::Serialize; use tracing::info; /// `SidecarTransport` is a wrapper around a BlockingTransport struct from the `datadog_ipc` crate @@ -274,65 +272,6 @@ pub fn send_trace_v04_shm( }) } -/// Sends raw data from shared memory to the debugger endpoint. -/// -/// # Arguments -/// -/// * `transport` - The transport used for communication. -/// * `instance_id` - The ID of the instance. -/// * `handle` - The handle to the shared memory. -/// -/// # Returns -/// -/// An `io::Result<()>` indicating the result of the operation. 
-pub fn send_debugger_data_shm( - transport: &mut SidecarTransport, - instance_id: &InstanceId, - handle: ShmHandle, -) -> io::Result<()> { - transport.send(SidecarInterfaceRequest::SendDebuggerDataShm { - instance_id: instance_id.clone(), - handle, - }) -} - -/// Sends a collection of debugger palyloads to the debugger endpoint. -/// -/// # Arguments -/// -/// * `transport` - The transport used for communication. -/// * `instance_id` - The ID of the instance. -/// * `payloads` - The payloads to be sent -/// -/// # Returns -/// -/// An `anyhow::Result<()>` indicating the result of the operation. -pub fn send_debugger_data_shm_vec( - transport: &mut SidecarTransport, - instance_id: &InstanceId, - payloads: Vec>, -) -> anyhow::Result<()> { - struct SizeCount(usize); - - impl io::Write for SizeCount { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.0 += buf.len(); - Ok(buf.len()) - } - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } - } - let mut size_serializer = serde_json::Serializer::new(SizeCount(0)); - payloads.serialize(&mut size_serializer).unwrap(); - - let mut mapped = ShmHandle::new(size_serializer.into_inner().0)?.map()?; - let mut serializer = serde_json::Serializer::new(mapped.as_slice_mut()); - payloads.serialize(&mut serializer).unwrap(); - - Ok(send_debugger_data_shm(transport, instance_id, mapped.into())?) -} - /// Sets the state of the current remote config operation. /// The queue id is shared with telemetry and the associated data will be freed upon a /// `Lifecycle::Stop` event. 
diff --git a/sidecar/src/service/session_info.rs b/sidecar/src/service/session_info.rs index d654da116..52cc22d94 100644 --- a/sidecar/src/service/session_info.rs +++ b/sidecar/src/service/session_info.rs @@ -24,7 +24,6 @@ use crate::service::{InstanceId, RuntimeInfo}; pub(crate) struct SessionInfo { runtimes: Arc>>, pub(crate) session_config: Arc>>, - debugger_config: Arc>, tracer_config: Arc>, dogstatsd: Arc>, remote_config_invariants: Arc>>, @@ -42,7 +41,6 @@ impl Clone for SessionInfo { SessionInfo { runtimes: self.runtimes.clone(), session_config: self.session_config.clone(), - debugger_config: self.debugger_config.clone(), tracer_config: self.tracer_config.clone(), dogstatsd: self.dogstatsd.clone(), remote_config_invariants: self.remote_config_invariants.clone(), @@ -181,17 +179,6 @@ impl SessionInfo { f(&mut self.get_dogstatsd()); } - pub fn get_debugger_config(&self) -> MutexGuard { - self.debugger_config.lock().unwrap() - } - - pub fn modify_debugger_config(&self, mut f: F) - where - F: FnMut(&mut datadog_live_debugger::sender::Config), - { - f(&mut self.get_debugger_config()); - } - pub fn set_remote_config_invariants(&self, invariants: ConfigInvariants) { *self.remote_config_invariants.lock().unwrap() = Some(invariants); } diff --git a/sidecar/src/service/sidecar_interface.rs b/sidecar/src/service/sidecar_interface.rs index 22e63471b..232fce7bb 100644 --- a/sidecar/src/service/sidecar_interface.rs +++ b/sidecar/src/service/sidecar_interface.rs @@ -111,16 +111,6 @@ pub trait SidecarInterface { headers: SerializedTracerHeaderTags, ); - /// Transfers raw data to a live-debugger endpoint. - /// - /// # Arguments - /// * `instance_id` - The ID of the instance. - /// * `handle` - The data to send. - async fn send_debugger_data_shm( - instance_id: InstanceId, - #[SerializedHandle] handle: ShmHandle, - ); - /// Sets contextual data for the remote config client. 
/// /// # Arguments diff --git a/sidecar/src/service/sidecar_server.rs b/sidecar/src/service/sidecar_server.rs index 8428a9a43..bf66652d8 100644 --- a/sidecar/src/service/sidecar_server.rs +++ b/sidecar/src/service/sidecar_server.rs @@ -114,6 +114,7 @@ impl SidecarServer { /// # Arguments /// /// * `async_channel`: An `AsyncChannel` that represents the connection to the client. + #[cfg_attr(not(windows), allow(unused_mut))] pub async fn accept_connection(mut self, async_channel: AsyncChannel) { #[cfg(windows)] { self.process_handle = async_channel.metadata.lock().unwrap().process_handle().map(|p| ProcessHandle(p as winapi::HANDLE)); } @@ -341,13 +342,6 @@ impl SidecarServer { self.trace_flusher.enqueue(data); } - async fn send_debugger_data(&self, data: &[u8], target: &Endpoint) { - if let Err(e) = datadog_live_debugger::sender::send(data, target).await { - error!("Error sending data to live debugger endpoint: {e:?}"); - debug!("Attempted to send the following payload: {}", String::from_utf8_lossy(data)); - } - } - async fn compute_stats(&self) -> SidecarStats { let mut telemetry_stats_errors = 0; let telemetry_stats = join_all({ @@ -638,11 +632,6 @@ impl SidecarInterface for SidecarServer { session.configure_dogstatsd(|dogstatsd| { dogstatsd.set_endpoint(config.dogstatsd_endpoint.clone()); }); - session.modify_debugger_config(|cfg| { - let endpoint = - get_product_endpoint(datadog_live_debugger::sender::PROD_INTAKE_SUBDOMAIN, &config.endpoint); - cfg.set_endpoint(endpoint).ok(); - }); session.set_remote_config_invariants(ConfigInvariants { language: config.language, tracer_version: config.tracer_version, @@ -761,33 +750,6 @@ impl SidecarInterface for SidecarServer { no_response() } - type SendDebuggerDataShmFut = NoResponse; - - fn send_debugger_data_shm( - self, - _: Context, - instance_id: InstanceId, - handle: ShmHandle, - ) -> Self::SendDebuggerDataShmFut { - if let Some(endpoint) = self - .get_session(&instance_id.session_id) - .get_debugger_config() - 
.endpoint - .clone() - { - tokio::spawn(async move { - match handle.map() { - Ok(mapped) => { - self.send_debugger_data(mapped.as_slice(), &endpoint).await; - } - Err(e) => error!("Failed mapping shared trace data memory: {}", e), - } - }); - } - - no_response() - } - type SetRemoteConfigDataFut = NoResponse; fn set_remote_config_data( diff --git a/sidecar/src/shm_remote_config.rs b/sidecar/src/shm_remote_config.rs index fe1d541ee..62a2fc811 100644 --- a/sidecar/src/shm_remote_config.rs +++ b/sidecar/src/shm_remote_config.rs @@ -14,6 +14,7 @@ use std::default::Default; use std::ffi::CString; use std::hash::{Hash, Hasher}; use std::io; +#[cfg(windows)] use std::io::Write; use std::sync::{Arc, Mutex}; use std::time::Duration; @@ -123,6 +124,7 @@ fn store_shm(version: u64, path: &RemoteConfigPath, file: Vec) -> anyhow::Re let len = len + 4; let mut handle = NamedShmHandle::create(CString::new(name)?, len)?.map()?; + #[cfg_attr(not(windows), allow(unused_mut))] let mut target_slice = handle.as_slice_mut(); #[cfg(windows)] { target_slice.write(&(file.len() as u32).to_ne_bytes())?; } From bc11875530731e05a09d788215ce11aee160dfce Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Wed, 12 Jun 2024 18:24:13 +0200 Subject: [PATCH 07/26] Uglify with cargo fmt Signed-off-by: Bob Weinand --- ddcommon-ffi/src/slice.rs | 24 +- .../src/dynamic_configuration/data.rs | 8 +- .../src/dynamic_configuration/mod.rs | 2 +- remote-config/src/fetch/fetcher.rs | 383 +++++++++++++---- remote-config/src/fetch/mod.rs | 12 +- remote-config/src/fetch/multitarget.rs | 377 ++++++++++++---- remote-config/src/fetch/shared.rs | 406 +++++++++++------- remote-config/src/fetch/single.rs | 14 +- remote-config/src/fetch/test_server.rs | 211 ++++++--- remote-config/src/lib.rs | 12 +- remote-config/src/parse.rs | 32 +- remote-config/src/targets.rs | 12 +- sidecar-ffi/src/lib.rs | 50 ++- sidecar/src/lib.rs | 2 +- sidecar/src/service/blocking.rs | 9 +- sidecar/src/service/mod.rs | 2 +- 
sidecar/src/service/remote_configs.rs | 45 +- sidecar/src/service/runtime_info.rs | 4 +- sidecar/src/service/session_info.rs | 7 +- sidecar/src/service/sidecar_interface.rs | 2 +- sidecar/src/service/sidecar_server.rs | 60 ++- sidecar/src/shm_remote_config.rs | 234 +++++++--- trace-protobuf/build.rs | 24 +- trace-protobuf/src/pb.rs | 81 ++-- 24 files changed, 1411 insertions(+), 602 deletions(-) diff --git a/ddcommon-ffi/src/slice.rs b/ddcommon-ffi/src/slice.rs index e5fa1b714..f54a80fe7 100644 --- a/ddcommon-ffi/src/slice.rs +++ b/ddcommon-ffi/src/slice.rs @@ -2,14 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 use core::slice; +use serde::ser::Error; +use serde::Serializer; use std::borrow::Cow; use std::fmt::{Debug, Display, Formatter}; use std::hash::{Hash, Hasher}; use std::marker::PhantomData; use std::os::raw::c_char; use std::str::Utf8Error; -use serde::ser::Error; -use serde::Serializer; #[repr(C)] #[derive(Copy, Clone)] @@ -126,19 +126,31 @@ impl<'a, T> Default for Slice<'a, T> { } } -impl<'a, T> Hash for Slice<'a, T> where Slice<'a, T>: AsBytes<'a> { +impl<'a, T> Hash for Slice<'a, T> +where + Slice<'a, T>: AsBytes<'a>, +{ fn hash(&self, state: &mut H) { state.write(self.as_bytes()) } } -impl<'a, T> serde::Serialize for Slice<'a, T> where Slice<'a, T>: AsBytes<'a> { - fn serialize(&self, serializer: S) -> Result where S: Serializer { +impl<'a, T> serde::Serialize for Slice<'a, T> +where + Slice<'a, T>: AsBytes<'a>, +{ + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { serializer.serialize_str(self.try_to_utf8().map_err(Error::custom)?) } } -impl<'a, T> Display for Slice<'a, T> where Slice<'a, T>: AsBytes<'a> { +impl<'a, T> Display for Slice<'a, T> +where + Slice<'a, T>: AsBytes<'a>, +{ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.try_to_utf8().map_err(|_| std::fmt::Error)?) 
} diff --git a/remote-config/src/dynamic_configuration/data.rs b/remote-config/src/dynamic_configuration/data.rs index 8be6e49ae..0646bc1b7 100644 --- a/remote-config/src/dynamic_configuration/data.rs +++ b/remote-config/src/dynamic_configuration/data.rs @@ -1,5 +1,5 @@ -use std::collections::HashMap; use serde::{Deserialize, Serialize}; +use std::collections::HashMap; #[derive(Debug, Deserialize)] #[cfg_attr(feature = "test", derive(Serialize))] @@ -64,7 +64,9 @@ impl From for Vec { fn from(value: DynamicConfig) -> Self { let mut vec = vec![]; if let Some(tags) = value.tracing_header_tags { - vec.push(Configs::TracingHeaderTags(tags.into_iter().map(|t| (t.header, t.tag_name)).collect())) + vec.push(Configs::TracingHeaderTags( + tags.into_iter().map(|t| (t.header, t.tag_name)).collect(), + )) } if let Some(sample_rate) = value.tracing_sample_rate { vec.push(Configs::TracingSampleRate(sample_rate)); @@ -101,7 +103,7 @@ pub mod tests { pub fn dummy_dynamic_config(enabled: bool) -> DynamicConfigFile { DynamicConfigFile { action: "".to_string(), - service_target : DynamicConfigTarget { + service_target: DynamicConfigTarget { service: "".to_string(), env: "".to_string(), }, diff --git a/remote-config/src/dynamic_configuration/mod.rs b/remote-config/src/dynamic_configuration/mod.rs index 12e35bbf6..7a345e4c6 100644 --- a/remote-config/src/dynamic_configuration/mod.rs +++ b/remote-config/src/dynamic_configuration/mod.rs @@ -1 +1 @@ -pub mod data; \ No newline at end of file +pub mod data; diff --git a/remote-config/src/fetch/fetcher.rs b/remote-config/src/fetch/fetcher.rs index d7d785d01..9fa773046 100644 --- a/remote-config/src/fetch/fetcher.rs +++ b/remote-config/src/fetch/fetcher.rs @@ -1,15 +1,18 @@ -use std::collections::{HashMap, HashSet}; -use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; -use std::sync::{Arc, Mutex, MutexGuard}; +use crate::targets::TargetsList; +use crate::{RemoteConfigCapabilities, RemoteConfigPath, RemoteConfigProduct, Target}; use 
base64::Engine; +use datadog_trace_protobuf::remoteconfig::{ + ClientGetConfigsRequest, ClientGetConfigsResponse, ClientState, ClientTracer, ConfigState, + TargetFileHash, TargetFileMeta, +}; +use ddcommon::{connector, Endpoint}; use hyper::http::uri::{PathAndQuery, Scheme}; use hyper::{Client, StatusCode}; use sha2::{Digest, Sha256, Sha512}; +use std::collections::{HashMap, HashSet}; +use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; +use std::sync::{Arc, Mutex, MutexGuard}; use tracing::{debug, trace, warn}; -use datadog_trace_protobuf::remoteconfig::{ClientGetConfigsRequest, ClientGetConfigsResponse, ClientState, ClientTracer, ConfigState, TargetFileHash, TargetFileMeta}; -use ddcommon::{connector, Endpoint}; -use crate::{RemoteConfigCapabilities, RemoteConfigPath, RemoteConfigProduct, Target}; -use crate::targets::TargetsList; const PROD_INTAKE_SUBDOMAIN: &str = "config"; @@ -21,10 +24,20 @@ pub trait FileStorage { type StoredFile; /// A new, currently unknown file was received. - fn store(&self, version: u64, path: RemoteConfigPath, contents: Vec) -> anyhow::Result>; + fn store( + &self, + version: u64, + path: RemoteConfigPath, + contents: Vec, + ) -> anyhow::Result>; /// A file at a given path was updated (new contents). - fn update(&self, file: &Arc, version: u64, contents: Vec) -> anyhow::Result<()>; + fn update( + &self, + file: &Arc, + version: u64, + contents: Vec, + ) -> anyhow::Result<()>; } /// Fundamental configuration of the RC client, which always must be set. 
@@ -79,7 +92,7 @@ impl ConfigFetcherState { pub fn files_lock(&self) -> ConfigFetcherFilesLock { assert!(!self.expire_unused_files); ConfigFetcherFilesLock { - inner: self.target_files_by_path.lock().unwrap() + inner: self.target_files_by_path.lock().unwrap(), } } } @@ -130,12 +143,18 @@ impl ConfigFetcher { return Ok(Some(vec![])); } - let Target { service, env, app_version } = (*target).clone(); + let Target { + service, + env, + app_version, + } = (*target).clone(); let mut cached_target_files = vec![]; let mut config_states = vec![]; - for StoredTargetFile { state, meta, .. } in self.state.target_files_by_path.lock().unwrap().values() { + for StoredTargetFile { state, meta, .. } in + self.state.target_files_by_path.lock().unwrap().values() + { config_states.push(state.clone()); cached_target_files.push(meta.clone()); } @@ -151,7 +170,13 @@ impl ConfigFetcher { backend_client_state: std::mem::take(&mut opaque_state.client_state), }), id: config_id.into(), - products: self.state.invariants.products.iter().map(|p| p.to_string()).collect(), + products: self + .state + .invariants + .products + .iter() + .map(|p| p.to_string()) + .collect(), is_tracer: true, client_tracer: Some(ClientTracer { runtime_id: runtime_id.to_string(), @@ -166,21 +191,34 @@ impl ConfigFetcher { is_agent: false, client_agent: None, last_seen: 0, - capabilities: self.state.invariants.capabilities.iter().map(|c| *c as u8).collect(), + capabilities: self + .state + .invariants + .capabilities + .iter() + .map(|c| *c as u8) + .collect(), }), cached_target_files, }; - let req = self.state.endpoint + let req = self + .state + .endpoint .into_request_builder(concat!("Sidecar/", env!("CARGO_PKG_VERSION")))? 
.method(http::Method::POST) - .header(http::header::CONTENT_TYPE, ddcommon::header::APPLICATION_JSON) + .header( + http::header::CONTENT_TYPE, + ddcommon::header::APPLICATION_JSON, + ) .body(serde_json::to_string(&config_req)?)?; let response = Client::builder() .build(connector::Connector::default()) .request(req) .await - .map_err(|e| anyhow::Error::msg(e).context(format!("Url: {:?}", self.state.endpoint)))?; + .map_err(|e| { + anyhow::Error::msg(e).context(format!("Url: {:?}", self.state.endpoint)) + })?; let status = response.status(); let body_bytes = hyper::body::to_bytes(response.into_body()).await?; if status != StatusCode::OK { @@ -190,8 +228,7 @@ impl ConfigFetcher { return Ok(Some(vec![])); } - let response_body = - String::from_utf8(body_bytes.to_vec()).unwrap_or_default(); + let response_body = String::from_utf8(body_bytes.to_vec()).unwrap_or_default(); anyhow::bail!("Server did not accept remote config request: {response_body}"); } @@ -204,30 +241,55 @@ impl ConfigFetcher { let response: ClientGetConfigsResponse = serde_json::from_str(&String::from_utf8_lossy(body_bytes.as_ref()))?; - let decoded_targets = base64::engine::general_purpose::STANDARD.decode(response.targets.as_slice())?; - let targets_list = TargetsList::try_parse(decoded_targets.as_slice()).map_err(|e| anyhow::Error::msg(e).context(format!("Decoded targets reply: {}", String::from_utf8_lossy(decoded_targets.as_slice()))))?; - - opaque_state.client_state = targets_list.signed.custom.opaque_backend_state.as_bytes().to_vec(); + let decoded_targets = + base64::engine::general_purpose::STANDARD.decode(response.targets.as_slice())?; + let targets_list = TargetsList::try_parse(decoded_targets.as_slice()).map_err(|e| { + anyhow::Error::msg(e).context(format!( + "Decoded targets reply: {}", + String::from_utf8_lossy(decoded_targets.as_slice()) + )) + })?; + + opaque_state.client_state = targets_list + .signed + .custom + .opaque_backend_state + .as_bytes() + .to_vec(); if let Some(interval) = 
targets_list.signed.custom.agent_refresh_interval { self.interval.store(interval, Ordering::Relaxed); } - trace!("Received remote config of length {}, containing {:?} paths for target {:?}", body_bytes.len(), targets_list.signed.targets.keys().collect::>(), target); + trace!( + "Received remote config of length {}, containing {:?} paths for target {:?}", + body_bytes.len(), + targets_list.signed.targets.keys().collect::>(), + target + ); - let incoming_files: HashMap<_, _> = response.target_files.iter().map(|f| (f.path.as_str(), f.raw.as_slice())).collect(); + let incoming_files: HashMap<_, _> = response + .target_files + .iter() + .map(|f| (f.path.as_str(), f.raw.as_slice())) + .collect(); // This lock must be held continuously at least between the existence check - // (target_files.get()) and the insertion later on. Makes more sense to just hold it continuously + // (target_files.get()) and the insertion later on. Makes more sense to just hold it + // continuously let mut target_files = self.state.target_files_by_path.lock().unwrap(); if self.state.expire_unused_files { let retain: HashSet<_> = response.client_configs.iter().collect(); - target_files.retain(|k, _| { retain.contains(k) }); + target_files.retain(|k, _| retain.contains(k)); } for (path, target_file) in targets_list.signed.targets { - fn hash_sha256(v: &[u8]) -> String { format!("{:x}", Sha256::digest(v)) } - fn hash_sha512(v: &[u8]) -> String { format!("{:x}", Sha512::digest(v)) } + fn hash_sha256(v: &[u8]) -> String { + format!("{:x}", Sha256::digest(v)) + } + fn hash_sha512(v: &[u8]) -> String { + format!("{:x}", Sha512::digest(v)) + } let (hasher, hash) = if let Some(sha256) = target_file.hashes.get("sha256") { (hash_sha256 as fn(&[u8]) -> String, *sha256) } else if let Some(sha512) = target_file.hashes.get("sha512") { @@ -236,7 +298,12 @@ impl ConfigFetcher { warn!("Found a target file without hashes at path {path}"); continue; }; - let handle = if let Some(StoredTargetFile { hash: old_hash, 
handle, .. }) = target_files.get(path) { + let handle = if let Some(StoredTargetFile { + hash: old_hash, + handle, + .. + }) = target_files.get(path) + { if old_hash == hash { continue; } @@ -245,9 +312,7 @@ impl ConfigFetcher { None }; if let Some(raw_file) = incoming_files.get(path) { - if let Ok(decoded) = base64::engine::general_purpose::STANDARD - .decode(raw_file) - { + if let Ok(decoded) = base64::engine::general_purpose::STANDARD.decode(raw_file) { let computed_hash = hasher(decoded.as_slice()); if hash != computed_hash { warn!("Computed hash of file {computed_hash} did not match remote config targets file hash {hash} for path {path}: file: {}", String::from_utf8_lossy(decoded.as_slice())); @@ -255,45 +320,64 @@ impl ConfigFetcher { } match RemoteConfigPath::try_parse(path) { - Ok(parsed_path) => if let Some(version) = target_file.try_parse_version() { - debug!("Fetched new remote config file at path {path} targeting {target:?}"); - - target_files.insert(path.to_string(), StoredTargetFile { - hash: computed_hash, - state: ConfigState { - id: parsed_path.config_id.to_string(), - version, - product: parsed_path.product.to_string(), - apply_state: 0, - apply_error: "".to_string(), - }, - meta: TargetFileMeta { - path: path.to_string(), - length: decoded.len() as i64, - hashes: target_file.hashes.iter().map(|(algorithm, hash)| TargetFileHash { - algorithm: algorithm.to_string(), - hash: hash.to_string(), - }).collect(), - }, - handle: if let Some(handle) = handle { - self.file_storage.update(&handle, version, decoded)?; - handle - } else { - self.file_storage.store(version, parsed_path, decoded)? 
- }, - }); - } else { - warn!("Failed parsing version from remote config path {path}"); - }, + Ok(parsed_path) => { + if let Some(version) = target_file.try_parse_version() { + debug!("Fetched new remote config file at path {path} targeting {target:?}"); + + target_files.insert( + path.to_string(), + StoredTargetFile { + hash: computed_hash, + state: ConfigState { + id: parsed_path.config_id.to_string(), + version, + product: parsed_path.product.to_string(), + apply_state: 0, + apply_error: "".to_string(), + }, + meta: TargetFileMeta { + path: path.to_string(), + length: decoded.len() as i64, + hashes: target_file + .hashes + .iter() + .map(|(algorithm, hash)| TargetFileHash { + algorithm: algorithm.to_string(), + hash: hash.to_string(), + }) + .collect(), + }, + handle: if let Some(handle) = handle { + self.file_storage.update(&handle, version, decoded)?; + handle + } else { + self.file_storage.store( + version, + parsed_path, + decoded, + )? + }, + }, + ); + } else { + warn!("Failed parsing version from remote config path {path}"); + } + } Err(e) => { warn!("Failed parsing remote config path: {path} - {e:?}"); } } } else { - warn!("Failed base64 decoding config for path {path}: {}", String::from_utf8_lossy(raw_file)) + warn!( + "Failed base64 decoding config for path {path}: {}", + String::from_utf8_lossy(raw_file) + ) } } else { - warn!("Found changed config data for path {path}, but no file; existing files: {:?}", incoming_files.keys().collect::>()) + warn!( + "Found changed config data for path {path}, but no file; existing files: {:?}", + incoming_files.keys().collect::>() + ) } } @@ -331,12 +415,12 @@ fn get_product_endpoint(subdomain: &str, endpoint: &Endpoint) -> Endpoint { #[cfg(test)] pub mod tests { + use super::*; + use crate::fetch::test_server::RemoteConfigServer; + use crate::RemoteConfigSource; use http::Response; use hyper::Body; use lazy_static::lazy_static; - use crate::fetch::test_server::RemoteConfigServer; - use crate::RemoteConfigSource; - 
use super::*; lazy_static! { pub static ref PATH_FIRST: RemoteConfigPath = RemoteConfigPath { @@ -345,14 +429,12 @@ pub mod tests { config_id: "1234".to_string(), name: "config".to_string(), }; - pub static ref PATH_SECOND: RemoteConfigPath = RemoteConfigPath { source: RemoteConfigSource::Employee, product: RemoteConfigProduct::ApmTracing, config_id: "9876".to_string(), name: "config".to_string(), }; - pub static ref DUMMY_TARGET: Arc = Arc::new(Target { service: "service".to_string(), env: "env".to_string(), @@ -370,7 +452,7 @@ pub mod tests { pub struct PathStore { path: RemoteConfigPath, storage: Arc, - pub data: Arc> + pub data: Arc>, } #[derive(Debug, Eq, PartialEq)] @@ -388,12 +470,22 @@ pub mod tests { impl FileStorage for Arc { type StoredFile = PathStore; - fn store(&self, version: u64, path: RemoteConfigPath, contents: Vec) -> anyhow::Result> { + fn store( + &self, + version: u64, + path: RemoteConfigPath, + contents: Vec, + ) -> anyhow::Result> { let data = Arc::new(Mutex::new(DataStore { version, contents: String::from_utf8(contents).unwrap(), })); - assert!(self.files.lock().unwrap().insert(path.clone(), data.clone()).is_none()); + assert!(self + .files + .lock() + .unwrap() + .insert(path.clone(), data.clone()) + .is_none()); Ok(Arc::new(PathStore { path: path.clone(), storage: self.clone(), @@ -401,7 +493,12 @@ pub mod tests { })) } - fn update(&self, file: &Arc, version: u64, contents: Vec) -> anyhow::Result<()> { + fn update( + &self, + file: &Arc, + version: u64, + contents: Vec, + ) -> anyhow::Result<()> { *file.data.lock().unwrap() = DataStore { version, contents: String::from_utf8(contents).unwrap(), @@ -414,14 +511,27 @@ pub mod tests { async fn test_inactive() { let server = RemoteConfigServer::spawn(); let storage = Arc::new(Storage::default()); - let mut fetcher = ConfigFetcher::new(storage.clone(), Arc::new(ConfigFetcherState::new(server.dummy_invariants()))); + let mut fetcher = ConfigFetcher::new( + storage.clone(), + 
Arc::new(ConfigFetcherState::new(server.dummy_invariants())), + ); let mut opaque_state = OpaqueState::default(); let mut response = Response::new(Body::from("")); *response.status_mut() = StatusCode::NOT_FOUND; *server.next_response.lock().unwrap() = Some(response); - let fetched = fetcher.fetch_once(DUMMY_RUNTIME_ID, DUMMY_TARGET.clone(), "foo", Some("test".to_string()), &mut opaque_state).await.unwrap().unwrap(); + let fetched = fetcher + .fetch_once( + DUMMY_RUNTIME_ID, + DUMMY_TARGET.clone(), + "foo", + Some("test".to_string()), + &mut opaque_state, + ) + .await + .unwrap() + .unwrap(); assert!(fetched.is_empty()); } @@ -429,7 +539,10 @@ pub mod tests { #[tokio::test] async fn test_fetch_cache() { let server = RemoteConfigServer::spawn(); - server.files.lock().unwrap().insert(PATH_FIRST.clone(), (vec![DUMMY_TARGET.clone()], 1, "v1".to_string())); + server.files.lock().unwrap().insert( + PATH_FIRST.clone(), + (vec![DUMMY_TARGET.clone()], 1, "v1".to_string()), + ); let storage = Arc::new(Storage::default()); @@ -437,23 +550,41 @@ pub mod tests { language: "php".to_string(), tracer_version: "1.2.3".to_string(), endpoint: server.endpoint.clone(), - products: vec![RemoteConfigProduct::ApmTracing, RemoteConfigProduct::LiveDebugger], + products: vec![ + RemoteConfigProduct::ApmTracing, + RemoteConfigProduct::LiveDebugger, + ], capabilities: vec![RemoteConfigCapabilities::ApmTracingCustomTags], }; - - let mut fetcher = ConfigFetcher::new(storage.clone(), Arc::new(ConfigFetcherState::new(invariants))); + let mut fetcher = ConfigFetcher::new( + storage.clone(), + Arc::new(ConfigFetcherState::new(invariants)), + ); let mut opaque_state = OpaqueState::default(); { - let fetched = fetcher.fetch_once(DUMMY_RUNTIME_ID, DUMMY_TARGET.clone(), "foo", Some("test".to_string()), &mut opaque_state).await.unwrap().unwrap(); + let fetched = fetcher + .fetch_once( + DUMMY_RUNTIME_ID, + DUMMY_TARGET.clone(), + "foo", + Some("test".to_string()), + &mut opaque_state, + ) + .await + 
.unwrap() + .unwrap(); let req = server.last_request.lock().unwrap(); let req = req.as_ref().unwrap(); assert!(req.cached_target_files.is_empty()); let client = req.client.as_ref().unwrap(); - assert_eq!(client.capabilities, &[RemoteConfigCapabilities::ApmTracingCustomTags as u8]); + assert_eq!( + client.capabilities, + &[RemoteConfigCapabilities::ApmTracingCustomTags as u8] + ); assert_eq!(client.products, &["APM_TRACING", "LIVE_DEBUGGING"]); assert_eq!(client.is_tracer, true); assert_eq!(client.is_agent, false); @@ -473,18 +604,32 @@ pub mod tests { assert_eq!(tracer.language, "php"); assert_eq!(tracer.tracer_version, "1.2.3"); - - assert_eq!(String::from_utf8_lossy(&opaque_state.client_state), "some state"); + assert_eq!( + String::from_utf8_lossy(&opaque_state.client_state), + "some state" + ); assert_eq!(fetched.len(), 1); assert_eq!(storage.files.lock().unwrap().len(), 1); - assert!(Arc::ptr_eq(&fetched[0].data, storage.files.lock().unwrap().get(&PATH_FIRST).unwrap())); + assert!(Arc::ptr_eq( + &fetched[0].data, + storage.files.lock().unwrap().get(&PATH_FIRST).unwrap() + )); assert_eq!(fetched[0].data.lock().unwrap().contents, "v1"); assert_eq!(fetched[0].data.lock().unwrap().version, 1); } { - let fetched = fetcher.fetch_once(DUMMY_RUNTIME_ID, DUMMY_TARGET.clone(), "foo", None, &mut opaque_state).await.unwrap(); + let fetched = fetcher + .fetch_once( + DUMMY_RUNTIME_ID, + DUMMY_TARGET.clone(), + "foo", + None, + &mut opaque_state, + ) + .await + .unwrap(); assert!(fetched.is_none()); // no change let req = server.last_request.lock().unwrap(); @@ -492,7 +637,10 @@ pub mod tests { assert_eq!(req.cached_target_files.len(), 1); let client = req.client.as_ref().unwrap(); - assert_eq!(client.capabilities, &[RemoteConfigCapabilities::ApmTracingCustomTags as u8]); + assert_eq!( + client.capabilities, + &[RemoteConfigCapabilities::ApmTracingCustomTags as u8] + ); assert_eq!(client.products, &["APM_TRACING", "LIVE_DEBUGGING"]); assert_eq!(client.is_tracer, true); 
assert_eq!(client.is_agent, false); @@ -510,34 +658,79 @@ pub mod tests { assert_eq!(cached.hashes.len(), 1); } - server.files.lock().unwrap().insert(PATH_FIRST.clone(), (vec![DUMMY_TARGET.clone()], 2, "v2".to_string())); - server.files.lock().unwrap().insert(PATH_SECOND.clone(), (vec![DUMMY_TARGET.clone()], 1, "X".to_string())); + server.files.lock().unwrap().insert( + PATH_FIRST.clone(), + (vec![DUMMY_TARGET.clone()], 2, "v2".to_string()), + ); + server.files.lock().unwrap().insert( + PATH_SECOND.clone(), + (vec![DUMMY_TARGET.clone()], 1, "X".to_string()), + ); { - let fetched = fetcher.fetch_once(DUMMY_RUNTIME_ID, DUMMY_TARGET.clone(), "foo", None, &mut opaque_state).await.unwrap().unwrap(); + let fetched = fetcher + .fetch_once( + DUMMY_RUNTIME_ID, + DUMMY_TARGET.clone(), + "foo", + None, + &mut opaque_state, + ) + .await + .unwrap() + .unwrap(); assert_eq!(fetched.len(), 2); assert_eq!(storage.files.lock().unwrap().len(), 2); - let (first, second) = if fetched[0].data.lock().unwrap().version == 2 { (0, 1) } else { (1, 0) }; + let (first, second) = if fetched[0].data.lock().unwrap().version == 2 { + (0, 1) + } else { + (1, 0) + }; - assert!(Arc::ptr_eq(&fetched[first].data, storage.files.lock().unwrap().get(&PATH_FIRST).unwrap())); + assert!(Arc::ptr_eq( + &fetched[first].data, + storage.files.lock().unwrap().get(&PATH_FIRST).unwrap() + )); assert_eq!(fetched[first].data.lock().unwrap().contents, "v2"); assert_eq!(fetched[first].data.lock().unwrap().version, 2); - assert!(Arc::ptr_eq(&fetched[second].data, storage.files.lock().unwrap().get(&PATH_SECOND).unwrap())); + assert!(Arc::ptr_eq( + &fetched[second].data, + storage.files.lock().unwrap().get(&PATH_SECOND).unwrap() + )); assert_eq!(fetched[second].data.lock().unwrap().contents, "X"); assert_eq!(fetched[second].data.lock().unwrap().version, 1); } { - let fetched = fetcher.fetch_once(DUMMY_RUNTIME_ID, DUMMY_TARGET.clone(), "foo", None, &mut opaque_state).await.unwrap(); + let fetched = fetcher + .fetch_once( 
+ DUMMY_RUNTIME_ID, + DUMMY_TARGET.clone(), + "foo", + None, + &mut opaque_state, + ) + .await + .unwrap(); assert!(fetched.is_none()); // no change } server.files.lock().unwrap().remove(&PATH_FIRST); { - let fetched = fetcher.fetch_once(DUMMY_RUNTIME_ID, DUMMY_TARGET.clone(), "foo", None, &mut opaque_state).await.unwrap().unwrap(); + let fetched = fetcher + .fetch_once( + DUMMY_RUNTIME_ID, + DUMMY_TARGET.clone(), + "foo", + None, + &mut opaque_state, + ) + .await + .unwrap() + .unwrap(); assert_eq!(fetched.len(), 1); assert_eq!(storage.files.lock().unwrap().len(), 1); } diff --git a/remote-config/src/fetch/mod.rs b/remote-config/src/fetch/mod.rs index 96f8b8948..860661bf1 100644 --- a/remote-config/src/fetch/mod.rs +++ b/remote-config/src/fetch/mod.rs @@ -1,12 +1,12 @@ -#[cfg(any(test, feature = "test"))] -pub mod test_server; mod fetcher; -mod single; -mod shared; mod multitarget; +mod shared; +mod single; +#[cfg(any(test, feature = "test"))] +pub mod test_server; #[cfg_attr(test, allow(ambiguous_glob_reexports))] // ignore mod tests re-export pub use fetcher::*; -pub use single::*; -pub use shared::*; pub use multitarget::*; +pub use shared::*; +pub use single::*; diff --git a/remote-config/src/fetch/multitarget.rs b/remote-config/src/fetch/multitarget.rs index 4874644b6..b60884615 100644 --- a/remote-config/src/fetch/multitarget.rs +++ b/remote-config/src/fetch/multitarget.rs @@ -1,6 +1,14 @@ -// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. +// Unless explicitly stated otherwise all files in this repository are licensed under the Apache +// License Version 2.0. This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. 
+use crate::fetch::{ + ConfigFetcherState, ConfigInvariants, FileStorage, RefcountedFile, RefcountingStorage, + SharedFetcher, +}; +use crate::Target; +use futures_util::future::Shared; +use futures_util::FutureExt; +use manual_future::ManualFuture; use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::default::Default; @@ -9,15 +17,10 @@ use std::hash::Hash; use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; use std::time::Duration; -use futures_util::future::Shared; -use futures_util::FutureExt; -use manual_future::ManualFuture; use tokio::select; use tokio::sync::Semaphore; use tokio::time::Instant; use tracing::{debug, error, trace}; -use crate::fetch::{ConfigFetcherState, ConfigInvariants, FileStorage, RefcountedFile, RefcountingStorage, SharedFetcher}; -use crate::Target; /// MultiTargetFetcher built on a set of SharedFetchers, managing multiple environments and services /// at once. @@ -27,7 +30,11 @@ use crate::Target; /// This fetcher is designed for use cases with more than one Target tuple associated to a /// specific runtime id and/or handling hundreds to thousands of different runtime ids with a low /// amount of actual remote config clients. 
-pub struct MultiTargetFetcher where S::StoredFile: RefcountedFile + Sync + Send, S: MultiTargetHandlers { +pub struct MultiTargetFetcher +where + S::StoredFile: RefcountedFile + Sync + Send, + S: MultiTargetHandlers, +{ /// All runtime ids belonging to a specific target target_runtimes: Mutex, HashSet>>, /// Keyed by runtime_id @@ -82,9 +89,14 @@ struct RuntimeInfo { targets: HashMap, u32>, } -impl MultiTargetFetcher where S::StoredFile: RefcountedFile + Sync + Send, S: MultiTargetHandlers { +impl + MultiTargetFetcher +where + S::StoredFile: RefcountedFile + Sync + Send, + S: MultiTargetHandlers, +{ pub const DEFAULT_CLIENTS_LIMIT: u32 = 100; - + pub fn new(storage: S, invariants: ConfigInvariants) -> Arc { Arc::new(MultiTargetFetcher { storage: RefcountingStorage::new(storage, ConfigFetcherState::new(invariants)), @@ -98,7 +110,8 @@ impl } pub fn is_dead(&self) -> bool { - self.services.lock().unwrap().is_empty() && self.pending_async_insertions.load(Ordering::Relaxed) == 0 + self.services.lock().unwrap().is_empty() + && self.pending_async_insertions.load(Ordering::Relaxed) == 0 } /// Allow for more than DEFAULT_CLIENTS_LIMIT fetchers running simultaneously @@ -123,21 +136,27 @@ impl KnownTargetStatus::Pending => break 'drop_service, KnownTargetStatus::Alive => { KnownTargetStatus::RemoveAt(Instant::now() + Duration::from_secs(3666)) - }, - KnownTargetStatus::RemoveAt(_) | KnownTargetStatus::Removing(_) => unreachable!(), + } + KnownTargetStatus::RemoveAt(_) | KnownTargetStatus::Removing(_) => { + unreachable!() + } }; 0 } else { if *known_service.fetcher.runtime_id.lock().unwrap() == runtime_id { 'changed_rt_id: { for (id, runtime) in self.runtimes.lock().unwrap().iter() { - if runtime.targets.len() == 1 && runtime.targets.contains_key(target) { - *known_service.fetcher.runtime_id.lock().unwrap() = id.to_string(); + if runtime.targets.len() == 1 + && runtime.targets.contains_key(target) + { + *known_service.fetcher.runtime_id.lock().unwrap() = + id.to_string(); 
break 'changed_rt_id; } } known_service.synthetic_id = true; - *known_service.fetcher.runtime_id.lock().unwrap() = Self::generate_synthetic_id(); + *known_service.fetcher.runtime_id.lock().unwrap() = + Self::generate_synthetic_id(); } } known_service.refcount - 1 @@ -163,7 +182,8 @@ impl match target_runtimes.entry(target.clone()) { Entry::Occupied(e) => e.into_mut(), Entry::Vacant(e) => e.insert(HashSet::new()), - }.insert(runtime_id.to_string()); + } + .insert(runtime_id.to_string()); drop(target_runtimes); // unlock let mut services = self.services.lock().unwrap(); @@ -178,10 +198,11 @@ impl known_target.refcount = 1; if synthetic_id && !known_target.synthetic_id { known_target.synthetic_id = true; - *known_target.fetcher.runtime_id.lock().unwrap() = Self::generate_synthetic_id(); + *known_target.fetcher.runtime_id.lock().unwrap() = + Self::generate_synthetic_id(); } known_target.runtimes.insert(runtime_id.to_string()); - }, + } KnownTargetStatus::Removing(ref future) => { let future = future.clone(); // Avoid deadlocking between known_target.status and self.services @@ -194,7 +215,7 @@ impl this.pending_async_insertions.fetch_sub(1, Ordering::AcqRel); }); return; - }, + } KnownTargetStatus::Alive | KnownTargetStatus::Pending => unreachable!(), } } else { @@ -204,9 +225,13 @@ impl known_target.synthetic_id = false; *known_target.fetcher.runtime_id.lock().unwrap() = runtime_id.into(); } - }, + } Entry::Vacant(e) => { - let runtime_id = if synthetic_id { Self::generate_synthetic_id() } else { runtime_id.into() }; + let runtime_id = if synthetic_id { + Self::generate_synthetic_id() + } else { + runtime_id.into() + }; self.start_fetcher(e.insert(KnownTarget { refcount: 1, status: Arc::new(Mutex::new(KnownTargetStatus::Pending)), @@ -246,16 +271,25 @@ impl let known_target = services.get_mut(&primary_target).unwrap(); if !known_target.synthetic_id { known_target.synthetic_id = true; - *known_target.fetcher.runtime_id.lock().unwrap() = Self::generate_synthetic_id(); + 
*known_target.fetcher.runtime_id.lock().unwrap() = + Self::generate_synthetic_id(); } } e.insert(1); self.add_target(true, runtime_entry.key(), target.clone()); - }, + } } } Entry::Vacant(e) => { - if self.storage.invariants().endpoint.url.scheme().map(|s| s.as_str() != "file") == Some(true) { + if self + .storage + .invariants() + .endpoint + .url + .scheme() + .map(|s| s.as_str() != "file") + == Some(true) + { let info = RuntimeInfo { notify_target, targets: HashMap::from([(target.clone(), 1)]), @@ -267,11 +301,7 @@ impl } } - pub fn delete_runtime( - self: &Arc, - runtime_id: &str, - target: &Arc, - ) { + pub fn delete_runtime(self: &Arc, runtime_id: &str, target: &Arc) { trace!("Removing remote config runtime: {target:?} with runtime id {runtime_id}"); { let mut runtimes = self.runtimes.lock().unwrap(); @@ -304,7 +334,10 @@ impl let this = self.clone(); let fetcher = known_target.fetcher.clone(); let status = known_target.status.clone(); - fetcher.default_interval.store(self.remote_config_interval.load(Ordering::Relaxed), Ordering::Relaxed); + fetcher.default_interval.store( + self.remote_config_interval.load(Ordering::Relaxed), + Ordering::Relaxed, + ); tokio::spawn(async move { // Relatively primitive, no prioritization or anything. It is not expected that this // semaphore is ever awaiting under standard usage. Can be improved if needed, e.g. @@ -323,28 +356,44 @@ impl let inner_fetcher = fetcher.clone(); let inner_this = this.clone(); - let fetcher_fut = fetcher.run(this.storage.clone(), Box::new(move |files| { - let (error, notify) = inner_this.storage.storage.fetched(&inner_fetcher.target, files); - - if notify { - // notify_targets is Hash + Eq + Clone, allowing us to deduplicate. 
Also avoid the lock during notifying - let mut notify_targets = HashSet::new(); - if let Some(runtimes) = inner_this.target_runtimes.lock().unwrap().get(&inner_fetcher.target) { - for runtime_id in runtimes { - if let Some(runtime) = inner_this.runtimes.lock().unwrap().get(runtime_id) { - notify_targets.insert(runtime.notify_target.clone()); + let fetcher_fut = fetcher + .run( + this.storage.clone(), + Box::new(move |files| { + let (error, notify) = inner_this + .storage + .storage + .fetched(&inner_fetcher.target, files); + + if notify { + // notify_targets is Hash + Eq + Clone, allowing us to deduplicate. Also + // avoid the lock during notifying + let mut notify_targets = HashSet::new(); + if let Some(runtimes) = inner_this + .target_runtimes + .lock() + .unwrap() + .get(&inner_fetcher.target) + { + for runtime_id in runtimes { + if let Some(runtime) = + inner_this.runtimes.lock().unwrap().get(runtime_id) + { + notify_targets.insert(runtime.notify_target.clone()); + } + } } - } - } - debug!("Notify {:?} about remote config changes", notify_targets); - for notify_target in notify_targets { - notify_target.notify(); - } - } + debug!("Notify {:?} about remote config changes", notify_targets); + for notify_target in notify_targets { + notify_target.notify(); + } + } - error - })).shared(); + error + }), + ) + .shared(); loop { { @@ -352,10 +401,14 @@ impl if let KnownTargetStatus::RemoveAt(instant) = *status { // Voluntarily give up the semaphore for services in RemoveAt status if // there are only few available permits - if this.fetcher_semaphore.available_permits() < 10 || instant < Instant::now() { - // We need to signal that we're in progress of removing to avoid race conditions + if this.fetcher_semaphore.available_permits() < 10 + || instant < Instant::now() + { + // We need to signal that we're in progress of removing to avoid race + // conditions *status = KnownTargetStatus::Removing(shared_future.clone()); - // break here to drop mutex guard and avoid 
having status and services locked simultaneously + // break here to drop mutex guard and avoid having status and services + // locked simultaneously fetcher.cancel(); break; } @@ -372,10 +425,12 @@ impl this.storage.storage.expired(&fetcher.target); - { // scope lock before await + { + // scope lock before await let mut services = this.services.lock().unwrap(); services.remove(&fetcher.target); - if services.is_empty() && this.pending_async_insertions.load(Ordering::Relaxed) == 0 { + if services.is_empty() && this.pending_async_insertions.load(Ordering::Relaxed) == 0 + { this.storage.storage.dead(); } } @@ -390,12 +445,12 @@ impl match *status { KnownTargetStatus::Pending | KnownTargetStatus::Alive => { error!("Trying to shutdown {:?} while still alive", target); - }, + } KnownTargetStatus::RemoveAt(_) => { *status = KnownTargetStatus::RemoveAt(Instant::now()); service.fetcher.cancel(); - }, - KnownTargetStatus::Removing(_) => {}, + } + KnownTargetStatus::Removing(_) => {} } } } @@ -403,14 +458,14 @@ impl #[cfg(test)] mod tests { - use std::hash::Hasher; - use std::sync::atomic::AtomicU8; - use manual_future::ManualFutureCompleter; + use super::*; use crate::fetch::fetcher::tests::*; use crate::fetch::shared::tests::*; use crate::fetch::test_server::RemoteConfigServer; use crate::{RemoteConfigPath, Target}; - use super::*; + use manual_future::ManualFutureCompleter; + use std::hash::Hasher; + use std::sync::atomic::AtomicU8; #[derive(Clone)] struct MultiFileStorage { @@ -435,21 +490,39 @@ mod tests { pub fn expect_expiration(&self, target: &Arc) -> ManualFuture<()> { let (future, completer) = ManualFuture::new(); - self.expected_expirations.lock().unwrap().insert(target.clone(), completer); + self.expected_expirations + .lock() + .unwrap() + .insert(target.clone(), completer); future } } impl MultiTargetHandlers for MultiFileStorage { - fn fetched(&self, target: &Arc, files: &[Arc]) -> (Option, bool) { + fn fetched( + &self, + target: &Arc, + files: &[Arc], + ) -> 
(Option, bool) { match self.recent_fetches.lock().unwrap().entry(target.clone()) { Entry::Occupied(_) => panic!("Double fetch without recent_fetches clear"), - Entry::Vacant(e) => { e.insert(files.to_vec()); }, + Entry::Vacant(e) => { + e.insert(files.to_vec()); + } } match self.awaiting_fetches.fetch_sub(1, Ordering::SeqCst) { - 2.. => {}, - 1 => { tokio::spawn(self.awaited_fetched_done.lock().unwrap().take().unwrap().complete(())); }, + 2.. => {} + 1 => { + tokio::spawn( + self.awaited_fetched_done + .lock() + .unwrap() + .take() + .unwrap() + .complete(()), + ); + } ..=0 => panic!("Got unexpected fetch"), } @@ -457,22 +530,46 @@ mod tests { } fn expired(&self, target: &Arc) { - tokio::spawn(self.expected_expirations.lock().unwrap().remove(target).unwrap().complete(())); + tokio::spawn( + self.expected_expirations + .lock() + .unwrap() + .remove(target) + .unwrap() + .complete(()), + ); } fn dead(&self) { - tokio::spawn(self.on_dead_completer.lock().unwrap().take().unwrap().complete(())); + tokio::spawn( + self.on_dead_completer + .lock() + .unwrap() + .take() + .unwrap() + .complete(()), + ); } } impl FileStorage for MultiFileStorage { type StoredFile = ::StoredFile; - fn store(&self, version: u64, path: RemoteConfigPath, contents: Vec) -> anyhow::Result> { + fn store( + &self, + version: u64, + path: RemoteConfigPath, + contents: Vec, + ) -> anyhow::Result> { self.rc.store(version, path, contents) } - fn update(&self, file: &Arc, version: u64, contents: Vec) -> anyhow::Result<()> { + fn update( + &self, + file: &Arc, + version: u64, + contents: Vec, + ) -> anyhow::Result<()> { self.rc.update(file, version, contents) } } @@ -484,7 +581,9 @@ mod tests { impl NotifyState { fn assert_notified(&self, ids: &[u8]) { - let mut notified = std::mem::take(&mut *self.notifications.lock().unwrap()).into_iter().collect::>(); + let mut notified = std::mem::take(&mut *self.notifications.lock().unwrap()) + .into_iter() + .collect::>(); notified.sort(); assert_eq!(notified, 
ids); } @@ -520,7 +619,6 @@ mod tests { static RT_ID_2: &'static str = "ae588386-8464-43ba-bd3a-3e2d36b2c22c"; static RT_ID_3: &'static str = "0125dff8-d9a7-4fd3-a0c2-0ca3b12816a1"; - #[tokio::test] async fn test_multi_fetcher() { let server = RemoteConfigServer::spawn(); @@ -535,43 +633,144 @@ mod tests { }; let state = Arc::new(NotifyState::default()); - server.files.lock().unwrap().insert(PATH_FIRST.clone(), (vec![DUMMY_TARGET.clone()], 1, "v1".to_string())); + server.files.lock().unwrap().insert( + PATH_FIRST.clone(), + (vec![DUMMY_TARGET.clone()], 1, "v1".to_string()), + ); let fut = storage.await_fetches(1); - let fetcher = MultiTargetFetcher::::new(storage.clone(), server.dummy_invariants()); + let fetcher = MultiTargetFetcher::::new( + storage.clone(), + server.dummy_invariants(), + ); fetcher.remote_config_interval.store(1000, Ordering::SeqCst); - fetcher.add_runtime(RT_ID_1.to_string(), Notifier { id: 1, state: state.clone() }, &OTHER_TARGET); - assert_eq!(*fetcher.services.lock().unwrap().get(&*OTHER_TARGET).unwrap().fetcher.runtime_id.lock().unwrap(), RT_ID_1); - - fetcher.add_runtime(RT_ID_1.to_string(), Notifier { id: 1, state: state.clone() }, &DUMMY_TARGET); - fetcher.add_runtime(RT_ID_2.to_string(), Notifier { id: 2, state: state.clone() }, &DUMMY_TARGET); - - assert_eq!(*fetcher.services.lock().unwrap().get(&*DUMMY_TARGET).unwrap().fetcher.runtime_id.lock().unwrap(), RT_ID_2); - assert_ne!(*fetcher.services.lock().unwrap().get(&*OTHER_TARGET).unwrap().fetcher.runtime_id.lock().unwrap(), RT_ID_1); + fetcher.add_runtime( + RT_ID_1.to_string(), + Notifier { + id: 1, + state: state.clone(), + }, + &OTHER_TARGET, + ); + assert_eq!( + *fetcher + .services + .lock() + .unwrap() + .get(&*OTHER_TARGET) + .unwrap() + .fetcher + .runtime_id + .lock() + .unwrap(), + RT_ID_1 + ); + + fetcher.add_runtime( + RT_ID_1.to_string(), + Notifier { + id: 1, + state: state.clone(), + }, + &DUMMY_TARGET, + ); + fetcher.add_runtime( + RT_ID_2.to_string(), + Notifier { + 
id: 2, + state: state.clone(), + }, + &DUMMY_TARGET, + ); + + assert_eq!( + *fetcher + .services + .lock() + .unwrap() + .get(&*DUMMY_TARGET) + .unwrap() + .fetcher + .runtime_id + .lock() + .unwrap(), + RT_ID_2 + ); + assert_ne!( + *fetcher + .services + .lock() + .unwrap() + .get(&*OTHER_TARGET) + .unwrap() + .fetcher + .runtime_id + .lock() + .unwrap(), + RT_ID_1 + ); assert_eq!(fetcher.runtimes.lock().unwrap().len(), 2); // two runtimes assert_eq!(fetcher.target_runtimes.lock().unwrap().len(), 2); // two fetchers - fetcher.add_runtime(RT_ID_3.to_string(), Notifier { id: 3, state: state.clone() }, &OTHER_TARGET); + fetcher.add_runtime( + RT_ID_3.to_string(), + Notifier { + id: 3, + state: state.clone(), + }, + &OTHER_TARGET, + ); fut.await; state.assert_notified(&[1, 2]); - let last_fetched: Vec<_> = storage.recent_fetches.lock().unwrap().get(&*DUMMY_TARGET).unwrap().iter().map(|p| p.store.data.clone()).collect(); + let last_fetched: Vec<_> = storage + .recent_fetches + .lock() + .unwrap() + .get(&*DUMMY_TARGET) + .unwrap() + .iter() + .map(|p| p.store.data.clone()) + .collect(); assert_eq!(last_fetched.len(), 1); let fut = storage.await_fetches(2); - server.files.lock().unwrap().insert(PATH_FIRST.clone(), (vec![OTHER_TARGET.clone()], 1, "v1".to_string())); + server.files.lock().unwrap().insert( + PATH_FIRST.clone(), + (vec![OTHER_TARGET.clone()], 1, "v1".to_string()), + ); fut.await; state.assert_notified(&[1, 2, 3]); - let new_fetched: Vec<_> = storage.recent_fetches.lock().unwrap().get(&*OTHER_TARGET).unwrap().iter().map(|p| p.store.data.clone()).collect(); - assert_eq!(storage.recent_fetches.lock().unwrap().get(&*OTHER_TARGET).unwrap().len(), 1); + let new_fetched: Vec<_> = storage + .recent_fetches + .lock() + .unwrap() + .get(&*OTHER_TARGET) + .unwrap() + .iter() + .map(|p| p.store.data.clone()) + .collect(); + assert_eq!( + storage + .recent_fetches + .lock() + .unwrap() + .get(&*OTHER_TARGET) + .unwrap() + .len(), + 1 + ); if 
!Arc::ptr_eq(&new_fetched[0], &last_fetched[0]) { - assert_eq!(*new_fetched[0].lock().unwrap(), *last_fetched[0].lock().unwrap()); + assert_eq!( + *new_fetched[0].lock().unwrap(), + *last_fetched[0].lock().unwrap() + ); } fetcher.delete_runtime(RT_ID_1, &OTHER_TARGET); diff --git a/remote-config/src/fetch/shared.rs b/remote-config/src/fetch/shared.rs index a4c5cdfed..dfd9dd7d0 100644 --- a/remote-config/src/fetch/shared.rs +++ b/remote-config/src/fetch/shared.rs @@ -1,27 +1,26 @@ +use crate::fetch::{ConfigFetcher, ConfigFetcherState, ConfigInvariants, FileStorage, OpaqueState}; +use crate::{RemoteConfigPath, Target}; use std::collections::HashMap; -use std::sync::{Arc, Mutex}; use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; +use std::sync::{Arc, Mutex}; use std::time::Duration; +use tokio::select; +use tokio::time::sleep; use tokio_util::sync::CancellationToken; use tracing::error; -use crate::fetch::{ConfigFetcher, ConfigFetcherState, ConfigInvariants, FileStorage, OpaqueState}; -use crate::{RemoteConfigPath, Target}; -use tokio::time::sleep; -use tokio::select; /// Fetcher which does a run-loop and carefully manages state around files, with the following /// guarantees: -/// - A file at a given RemoteConfigPath will not be recreated as long as it exists -/// I.e. it will always be drop()'ed before recreation. -/// - It does not leak files which are no longer in use, i.e. it refcounts across all remote -/// config clients sharing the same RefcountingStorage. +/// - A file at a given RemoteConfigPath will not be recreated as long as it exists I.e. it will +/// always be drop()'ed before recreation. +/// - It does not leak files which are no longer in use, i.e. it refcounts across all remote config +/// clients sharing the same RefcountingStorage. /// - The state is always valid, i.e. there will be no intermittently expired files. 
-/// pub struct SharedFetcher { /// (env, service, version) tuple representing the basic remote config target pub target: Arc, // could be theoretically also Mutex<>ed if needed - /// A unique runtime id. It must not be used by any other remote config client at the same time. - /// Is allowed to be changed at any time. + /// A unique runtime id. It must not be used by any other remote config client at the same + /// time. Is allowed to be changed at any time. pub runtime_id: Arc>, /// Each fetcher must have an unique id. Defaults to a random UUID. pub client_id: String, @@ -91,35 +90,50 @@ impl RunnersGeneration { /// Increments run_id and increments active runners. Returns first run_id to watch for. fn inc_runners(&self) -> u64 { - (self.val.fetch_add((1 << Self::RUN_ID_SHIFT) + 1, Ordering::SeqCst) >> Self::RUN_ID_SHIFT) + 1 + (self + .val + .fetch_add((1 << Self::RUN_ID_SHIFT) + 1, Ordering::SeqCst) + >> Self::RUN_ID_SHIFT) + + 1 } /// Increments run_id and decrements active runners. Returns last run_id to watch for. fn dec_runners(&self) -> u64 { - self.val.fetch_add((1 << Self::RUN_ID_SHIFT) - 1, Ordering::SeqCst) >> Self::RUN_ID_SHIFT + self.val + .fetch_add((1 << Self::RUN_ID_SHIFT) - 1, Ordering::SeqCst) + >> Self::RUN_ID_SHIFT } /// Returns amount of active runners and current run_id. fn runners_and_run_id(&self) -> (u32, u64) { let val = self.val.load(Ordering::Acquire); - ((val & ((1 << Self::RUN_ID_SHIFT) - 1)) as u32, val >> Self::RUN_ID_SHIFT) + ( + (val & ((1 << Self::RUN_ID_SHIFT) - 1)) as u32, + val >> Self::RUN_ID_SHIFT, + ) } } -pub struct RefcountingStorage where S::StoredFile: RefcountedFile { +pub struct RefcountingStorage +where + S::StoredFile: RefcountedFile, +{ pub storage: S, state: Arc>, /// Stores recently expired files. When a file refcount drops to zero, they're no longer sent /// via the remote config client. However, there may still be in-flight requests, with telling - /// the remote config server that we know about these files. 
Thus, as long as these requests are - /// being processed, we must retain the files, as these would not be resent, leaving us with a - /// potentially incomplete configuration. + /// the remote config server that we know about these files. Thus, as long as these requests + /// are being processed, we must retain the files, as these would not be resent, leaving us + /// with a potentially incomplete configuration. inactive: Arc>>>, /// times ConfigFetcher::::fetch_once() is currently being run run_id: Arc, } -impl Clone for RefcountingStorage where S::StoredFile: RefcountedFile { +impl Clone for RefcountingStorage +where + S::StoredFile: RefcountedFile, +{ fn clone(&self) -> Self { RefcountingStorage { storage: self.storage.clone(), @@ -130,7 +144,10 @@ impl Clone for RefcountingStorage where S::StoredFile } } -impl RefcountingStorage where S::StoredFile: RefcountedFile { +impl RefcountingStorage +where + S::StoredFile: RefcountedFile, +{ pub fn new(storage: S, mut state: ConfigFetcherState) -> Self { state.expire_unused_files = false; RefcountingStorage { @@ -162,10 +179,18 @@ impl RefcountingStorage where S::StoredFile: Refcount } } -impl FileStorage for RefcountingStorage where S::StoredFile: RefcountedFile { +impl FileStorage for RefcountingStorage +where + S::StoredFile: RefcountedFile, +{ type StoredFile = S::StoredFile; - fn store(&self, version: u64, path: RemoteConfigPath, contents: Vec) -> anyhow::Result> { + fn store( + &self, + version: u64, + path: RemoteConfigPath, + contents: Vec, + ) -> anyhow::Result> { let mut inactive = self.inactive.lock().unwrap(); if let Some(existing) = inactive.remove(&path) { if version <= existing.refcount().version { @@ -180,7 +205,12 @@ impl FileStorage for RefcountingStorage where S::Stor } } - fn update(&self, file: &Arc, version: u64, contents: Vec) -> anyhow::Result<()> { + fn update( + &self, + file: &Arc, + version: u64, + contents: Vec, + ) -> anyhow::Result<()> { self.storage.update(file, version, contents) } } @@ 
-200,7 +230,13 @@ impl SharedFetcher { /// On successful fetches on_fetch() is called with the new configuration. /// Should not be called more than once. #[allow(clippy::type_complexity)] - pub async fn run(&self, storage: RefcountingStorage, on_fetch: Box>) -> Option>) where S::StoredFile: RefcountedFile { + pub async fn run( + &self, + storage: RefcountingStorage, + on_fetch: Box>) -> Option>, + ) where + S::StoredFile: RefcountedFile, + { let state = storage.state.clone(); let mut fetcher = ConfigFetcher::new(storage, state); @@ -213,15 +249,28 @@ impl SharedFetcher { let first_run_id = fetcher.file_storage.run_id.inc_runners(); let runtime_id = self.runtime_id.lock().unwrap().clone(); - let fetched = fetcher.fetch_once(runtime_id.as_str(), self.target.clone(), self.client_id.as_str(), last_error.take(), &mut opaque_state).await; + let fetched = fetcher + .fetch_once( + runtime_id.as_str(), + self.target.clone(), + self.client_id.as_str(), + last_error.take(), + &mut opaque_state, + ) + .await; let last_run_id = fetcher.file_storage.run_id.dec_runners(); - fetcher.file_storage.inactive.lock().unwrap().retain(|_, v| { - (first_run_id..last_run_id).contains(&v.get_dropped_run_id()) && v.delref() == 1 - }); + fetcher + .file_storage + .inactive + .lock() + .unwrap() + .retain(|_, v| { + (first_run_id..last_run_id).contains(&v.get_dropped_run_id()) && v.delref() == 1 + }); match fetched { - Ok(None) => { /* unchanged */ }, + Ok(None) => { /* unchanged */ } Ok(Some(files)) => { if !files.is_empty() || !last_files.is_empty() { for file in files.iter() { @@ -271,13 +320,13 @@ impl SharedFetcher { #[cfg(test)] pub mod tests { - use futures::future::join_all; - use std::sync::Arc; - use lazy_static::lazy_static; + use super::*; use crate::fetch::fetcher::tests::*; use crate::fetch::test_server::RemoteConfigServer; use crate::Target; - use super::*; + use futures::future::join_all; + use lazy_static::lazy_static; + use std::sync::Arc; lazy_static! 
{ pub static ref OTHER_TARGET: Arc = Arc::new(Target { @@ -304,14 +353,24 @@ pub mod tests { impl FileStorage for RcFileStorage { type StoredFile = RcPathStore; - fn store(&self, version: u64, path: RemoteConfigPath, contents: Vec) -> anyhow::Result> { + fn store( + &self, + version: u64, + path: RemoteConfigPath, + contents: Vec, + ) -> anyhow::Result> { Ok(Arc::new(RcPathStore { store: self.0.store(version, path.clone(), contents)?, refcounted: FileRefcountData::new(version, path), })) } - fn update(&self, file: &Arc, version: u64, contents: Vec) -> anyhow::Result<()> { + fn update( + &self, + file: &Arc, + version: u64, + contents: Vec, + ) -> anyhow::Result<()> { self.0.update(&file.store, version, contents) } } @@ -320,52 +379,69 @@ pub mod tests { async fn test_single_fetcher() { let server = RemoteConfigServer::spawn(); let storage = RcFileStorage::default(); - let rc_storage = RefcountingStorage::new(storage.clone(), ConfigFetcherState::new(server.dummy_invariants())); - - server.files.lock().unwrap().insert(PATH_FIRST.clone(), (vec![DUMMY_TARGET.clone()], 1, "v1".to_string())); - - let fetcher = SharedFetcher::new(DUMMY_TARGET.clone(), "3b43524b-a70c-45dc-921d-34504e50c5eb".to_string()); + let rc_storage = RefcountingStorage::new( + storage.clone(), + ConfigFetcherState::new(server.dummy_invariants()), + ); + + server.files.lock().unwrap().insert( + PATH_FIRST.clone(), + (vec![DUMMY_TARGET.clone()], 1, "v1".to_string()), + ); + + let fetcher = SharedFetcher::new( + DUMMY_TARGET.clone(), + "3b43524b-a70c-45dc-921d-34504e50c5eb".to_string(), + ); let iteration = AtomicU32::new(0); let inner_fetcher = unsafe { &*(&fetcher as *const SharedFetcher) }; let inner_storage = storage.clone(); - fetcher.run(rc_storage, Box::new(move |fetched| { - match iteration.fetch_add(1, Ordering::SeqCst) { - 0 => { - assert_eq!(fetched.len(), 1); - assert_eq!(fetched[0].store.data.lock().unwrap().contents, "v1"); - - server.files.lock().unwrap().insert(PATH_SECOND.clone(), 
(vec![DUMMY_TARGET.clone()], 1, "X".to_string())); - - Some("error".to_string()) - }, - 1 => { - assert_eq!(fetched.len(), 2); - let req = server.last_request.lock().unwrap(); - let req = req.as_ref().unwrap(); - let client = req.client.as_ref().unwrap(); - let state = client.state.as_ref().unwrap(); - assert_eq!(state.error, "error"); - - server.files.lock().unwrap().remove(&PATH_SECOND); + fetcher + .run( + rc_storage, + Box::new( + move |fetched| match iteration.fetch_add(1, Ordering::SeqCst) { + 0 => { + assert_eq!(fetched.len(), 1); + assert_eq!(fetched[0].store.data.lock().unwrap().contents, "v1"); + + server.files.lock().unwrap().insert( + PATH_SECOND.clone(), + (vec![DUMMY_TARGET.clone()], 1, "X".to_string()), + ); + + Some("error".to_string()) + } + 1 => { + assert_eq!(fetched.len(), 2); + let req = server.last_request.lock().unwrap(); + let req = req.as_ref().unwrap(); + let client = req.client.as_ref().unwrap(); + let state = client.state.as_ref().unwrap(); + assert_eq!(state.error, "error"); - None - }, - 2 => { - assert_eq!(fetched.len(), 1); - assert_eq!(inner_storage.0.files.lock().unwrap().len(), 1); - let req = server.last_request.lock().unwrap(); - let req = req.as_ref().unwrap(); - let client = req.client.as_ref().unwrap(); - let state = client.state.as_ref().unwrap(); - assert_eq!(state.has_error, false); - - inner_fetcher.cancel(); + server.files.lock().unwrap().remove(&PATH_SECOND); - None - } - _ => panic!("Unexpected"), - } - })).await; + None + } + 2 => { + assert_eq!(fetched.len(), 1); + assert_eq!(inner_storage.0.files.lock().unwrap().len(), 1); + let req = server.last_request.lock().unwrap(); + let req = req.as_ref().unwrap(); + let client = req.client.as_ref().unwrap(); + let state = client.state.as_ref().unwrap(); + assert_eq!(state.has_error, false); + + inner_fetcher.cancel(); + + None + } + _ => panic!("Unexpected"), + }, + ), + ) + .await; assert!(storage.0.files.lock().unwrap().is_empty()); } @@ -374,17 +450,40 @@ pub mod tests { 
async fn test_parallel_fetchers() { let server = RemoteConfigServer::spawn(); let storage = RcFileStorage::default(); - let rc_storage = RefcountingStorage::new(storage.clone(), ConfigFetcherState::new(server.dummy_invariants())); - - server.files.lock().unwrap().insert(PATH_FIRST.clone(), (vec![DUMMY_TARGET.clone(), OTHER_TARGET.clone()], 1, "v1".to_string())); - server.files.lock().unwrap().insert(PATH_SECOND.clone(), (vec![DUMMY_TARGET.clone()], 1, "X".to_string())); + let rc_storage = RefcountingStorage::new( + storage.clone(), + ConfigFetcherState::new(server.dummy_invariants()), + ); + + server.files.lock().unwrap().insert( + PATH_FIRST.clone(), + ( + vec![DUMMY_TARGET.clone(), OTHER_TARGET.clone()], + 1, + "v1".to_string(), + ), + ); + server.files.lock().unwrap().insert( + PATH_SECOND.clone(), + (vec![DUMMY_TARGET.clone()], 1, "X".to_string()), + ); let server_1 = server.clone(); let server_1_storage = storage.clone(); let server_first_1 = move || { assert_eq!(server_1_storage.0.files.lock().unwrap().len(), 2); - server_1.files.lock().unwrap().insert(PATH_FIRST.clone(), (vec![OTHER_TARGET.clone()], 1, "v1".to_string())); - server_1.files.lock().unwrap().insert(PATH_SECOND.clone(), (vec![DUMMY_TARGET.clone(), OTHER_TARGET.clone()], 1, "X".to_string())); + server_1.files.lock().unwrap().insert( + PATH_FIRST.clone(), + (vec![OTHER_TARGET.clone()], 1, "v1".to_string()), + ); + server_1.files.lock().unwrap().insert( + PATH_SECOND.clone(), + ( + vec![DUMMY_TARGET.clone(), OTHER_TARGET.clone()], + 1, + "X".to_string(), + ), + ); }; let server_first_2 = server_first_1.clone(); @@ -392,7 +491,10 @@ pub mod tests { let server_2_storage = storage.clone(); let server_second_1 = move || { assert_eq!(server_2_storage.0.files.lock().unwrap().len(), 2); - server_2.files.lock().unwrap().insert(PATH_FIRST.clone(), (vec![DUMMY_TARGET.clone()], 2, "v2".to_string())); + server_2.files.lock().unwrap().insert( + PATH_FIRST.clone(), + (vec![DUMMY_TARGET.clone()], 2, 
"v2".to_string()), + ); server_2.files.lock().unwrap().remove(&PATH_SECOND); }; let server_second_2 = server_second_1.clone(); @@ -406,79 +508,95 @@ pub mod tests { }; let server_third_2 = server_third_1.clone(); - let fetcher_1 = SharedFetcher::new(DUMMY_TARGET.clone(), "3b43524b-a70c-45dc-921d-34504e50c5eb".to_string()); - let fetcher_2 = SharedFetcher::new(OTHER_TARGET.clone(), "ae588386-8464-43ba-bd3a-3e2d36b2c22c".to_string()); + let fetcher_1 = SharedFetcher::new( + DUMMY_TARGET.clone(), + "3b43524b-a70c-45dc-921d-34504e50c5eb".to_string(), + ); + let fetcher_2 = SharedFetcher::new( + OTHER_TARGET.clone(), + "ae588386-8464-43ba-bd3a-3e2d36b2c22c".to_string(), + ); let iteration = Arc::new(AtomicU32::new(0)); let iteration_1 = iteration.clone(); let iteration_2 = iteration.clone(); let inner_fetcher_1 = unsafe { &*(&fetcher_1 as *const SharedFetcher) }; let inner_fetcher_2 = unsafe { &*(&fetcher_2 as *const SharedFetcher) }; - join_all(vec![fetcher_1.run(rc_storage.clone(), Box::new(move |fetched| { - match iteration_1.fetch_add(1, Ordering::SeqCst) { - i @ 0|i @ 1 => { - assert_eq!(fetched.len(), 2); + join_all(vec![ + fetcher_1.run( + rc_storage.clone(), + Box::new(move |fetched| { + match iteration_1.fetch_add(1, Ordering::SeqCst) { + i @ 0 | i @ 1 => { + assert_eq!(fetched.len(), 2); + + if i == 1 { + server_first_1(); + } + } + i @ 2 | i @ 3 => { + assert_eq!(fetched.len(), 1); + assert_eq!(fetched[0].store.data.lock().unwrap().contents, "X"); - if i == 1 { - server_first_1(); - } - }, - i @ 2|i @ 3 => { - assert_eq!(fetched.len(), 1); - assert_eq!(fetched[0].store.data.lock().unwrap().contents, "X"); + if i == 3 { + server_second_1(); + } + } + i @ 4 | i @ 5 => { + assert_eq!(fetched.len(), 1); + assert_eq!(fetched[0].store.data.lock().unwrap().contents, "v2"); - if i == 3 { - server_second_1(); - } - }, - i @ 4|i @ 5 => { - assert_eq!(fetched.len(), 1); - assert_eq!(fetched[0].store.data.lock().unwrap().contents, "v2"); + if i == 5 { + server_third_1(); 
+ } + } + 6 | 7 => { + assert_eq!(fetched.len(), 0); - if i == 5 { - server_third_1(); + inner_fetcher_1.cancel(); + } + _ => panic!("Unexpected"), } - }, - 6 | 7 => { - assert_eq!(fetched.len(), 0); + None + }), + ), + fetcher_2.run( + rc_storage, + Box::new(move |fetched| { + match iteration_2.fetch_add(1, Ordering::SeqCst) { + i @ 0 | i @ 1 => { + assert_eq!(fetched.len(), 1); + assert_eq!(fetched[0].store.data.lock().unwrap().contents, "v1"); + + if i == 1 { + server_first_2(); + } + } + i @ 2 | i @ 3 => { + assert_eq!(fetched.len(), 2); - inner_fetcher_1.cancel(); - }, - _ => panic!("Unexpected"), - } - None - })), fetcher_2.run(rc_storage, Box::new(move |fetched| { - match iteration_2.fetch_add(1, Ordering::SeqCst) { - i @ 0|i @ 1 => { - assert_eq!(fetched.len(), 1); - assert_eq!(fetched[0].store.data.lock().unwrap().contents, "v1"); - - if i == 1 { - server_first_2(); - } - }, - i @ 2|i @ 3 => { - assert_eq!(fetched.len(), 2); + if i == 3 { + server_second_2(); + } + } + i @ 4 | i @ 5 => { + assert_eq!(fetched.len(), 0); - if i == 3 { - server_second_2(); - } - }, - i @ 4|i @ 5 => { - assert_eq!(fetched.len(), 0); + if i == 5 { + server_third_2(); + } + } + 6 | 7 => { + assert_eq!(fetched.len(), 0); - if i == 5 { - server_third_2(); + inner_fetcher_2.cancel(); + } + _ => panic!("Unexpected"), } - }, - 6 | 7 => { - assert_eq!(fetched.len(), 0); - - inner_fetcher_2.cancel(); - }, - _ => panic!("Unexpected"), - } - None - }))]).await; + None + }), + ), + ]) + .await; assert!(storage.0.files.lock().unwrap().is_empty()); } diff --git a/remote-config/src/fetch/single.rs b/remote-config/src/fetch/single.rs index 3bbc2c27b..97455a60d 100644 --- a/remote-config/src/fetch/single.rs +++ b/remote-config/src/fetch/single.rs @@ -1,6 +1,6 @@ -use std::sync::Arc; use crate::fetch::{ConfigFetcher, ConfigFetcherState, ConfigInvariants, FileStorage, OpaqueState}; use crate::Target; +use std::sync::Arc; pub struct SingleFetcher { fetcher: ConfigFetcher, @@ -24,6 +24,14 @@ impl 
SingleFetcher { } pub async fn fetch_once(&mut self) -> anyhow::Result>>> { - self.fetcher.fetch_once(self.runtime_id.as_str(), self.target.clone(), self.config_id.as_str(), self.last_error.take(), &mut self.opaque_state).await + self.fetcher + .fetch_once( + self.runtime_id.as_str(), + self.target.clone(), + self.config_id.as_str(), + self.last_error.take(), + &mut self.opaque_state, + ) + .await } -} \ No newline at end of file +} diff --git a/remote-config/src/fetch/test_server.rs b/remote-config/src/fetch/test_server.rs index 95bf5e921..6f9a7dd7e 100644 --- a/remote-config/src/fetch/test_server.rs +++ b/remote-config/src/fetch/test_server.rs @@ -1,21 +1,23 @@ -use std::collections::HashMap; -use std::convert::Infallible; -use std::net::TcpListener; -use std::sync::{Arc, Mutex}; +use crate::fetch::ConfigInvariants; +use crate::targets::{TargetData, TargetsCustom, TargetsData, TargetsList}; +use crate::{RemoteConfigCapabilities, RemoteConfigPath, RemoteConfigProduct, Target}; use base64::Engine; +use datadog_trace_protobuf::remoteconfig::{ + ClientGetConfigsRequest, ClientGetConfigsResponse, File, +}; +use ddcommon::Endpoint; use http::{Request, Response}; -use hyper::{Body, Server}; use hyper::service::{make_service_fn, service_fn}; +use hyper::{Body, Server}; use serde_json::value::to_raw_value; use sha2::{Digest, Sha256}; +use std::collections::HashMap; +use std::convert::Infallible; +use std::net::TcpListener; +use std::sync::{Arc, Mutex}; use time::OffsetDateTime; use tokio::select; use tokio::sync::mpsc::Sender; -use datadog_trace_protobuf::remoteconfig::{ClientGetConfigsRequest, ClientGetConfigsResponse, File}; -use ddcommon::Endpoint; -use crate::fetch::ConfigInvariants; -use crate::{RemoteConfigCapabilities, RemoteConfigPath, RemoteConfigProduct, Target}; -use crate::targets::{TargetData, TargetsCustom, TargetsData, TargetsList}; pub struct RemoteConfigServer { pub last_request: Mutex>, @@ -50,68 +52,136 @@ impl RemoteConfigServer { let this = 
this.clone(); async move { let body_bytes = hyper::body::to_bytes(req.into_body()).await.unwrap(); - let request: ClientGetConfigsRequest = serde_json::from_str(&String::from_utf8(body_bytes.to_vec()).unwrap()).unwrap(); - let response = if let Some(response) = this.next_response.lock().unwrap().take() { - response - } else { - let known: HashMap<_, _> = request.cached_target_files.iter().map(|m| (m.path.clone(), m.hashes[0].hash.clone())).collect(); - let files = this.files.lock().unwrap(); - let applied_files: HashMap<_, _> = files.iter().filter(|(_, (targets, _, _))| { - let tracer = request.client.as_ref().unwrap().client_tracer.as_ref().unwrap(); - targets.iter().any(|t| t.service == tracer.service && t.env == tracer.env && t.app_version == tracer.app_version) - }).collect(); - let states = &request.client.as_ref().unwrap().state.as_ref().unwrap().config_states; - if applied_files.len() == states.len() && states.iter().all(|s| { - for (p, (_, v, _)) in applied_files.iter() { - if p.product.to_string() == s.product && p.config_id == s.id && *v == s.version { - return true; - } - } - false - }) { - Response::new( Body::from("{}")) + let request: ClientGetConfigsRequest = serde_json::from_str( + &String::from_utf8(body_bytes.to_vec()).unwrap(), + ) + .unwrap(); + let response = + if let Some(response) = this.next_response.lock().unwrap().take() { + response } else { - let target_info: Vec<_> = applied_files.iter().map(|(p, (_, v, file))| { - (p.to_string(), format!("{:x}", Sha256::digest(file)), to_raw_value(v).unwrap(), file.clone()) - }).filter(|(p, hash, _, _)| if let Some(existing) = known.get(p) { - existing != hash + let known: HashMap<_, _> = request + .cached_target_files + .iter() + .map(|m| (m.path.clone(), m.hashes[0].hash.clone())) + .collect(); + let files = this.files.lock().unwrap(); + let applied_files: HashMap<_, _> = files + .iter() + .filter(|(_, (targets, _, _))| { + let tracer = request + .client + .as_ref() + .unwrap() + .client_tracer + 
.as_ref() + .unwrap(); + targets.iter().any(|t| { + t.service == tracer.service + && t.env == tracer.env + && t.app_version == tracer.app_version + }) + }) + .collect(); + let states = &request + .client + .as_ref() + .unwrap() + .state + .as_ref() + .unwrap() + .config_states; + if applied_files.len() == states.len() + && states.iter().all(|s| { + for (p, (_, v, _)) in applied_files.iter() { + if p.product.to_string() == s.product + && p.config_id == s.id + && *v == s.version + { + return true; + } + } + false + }) + { + Response::new(Body::from("{}")) } else { - true - }).collect(); - let targets = TargetsList { - signatures: vec![], - signed: TargetsData { - _type: "", - custom: TargetsCustom { - agent_refresh_interval: Some(1000), - opaque_backend_state: "some state", + let target_info: Vec<_> = applied_files + .iter() + .map(|(p, (_, v, file))| { + ( + p.to_string(), + format!("{:x}", Sha256::digest(file)), + to_raw_value(v).unwrap(), + file.clone(), + ) + }) + .filter(|(p, hash, _, _)| { + if let Some(existing) = known.get(p) { + existing != hash + } else { + true + } + }) + .collect(); + let targets = TargetsList { + signatures: vec![], + signed: TargetsData { + _type: "", + custom: TargetsCustom { + agent_refresh_interval: Some(1000), + opaque_backend_state: "some state", + }, + expires: OffsetDateTime::from_unix_timestamp( + 253402300799, + ) + .unwrap(), + spec_version: "1.0.0", + targets: target_info + .iter() + .map(|(p, hash, version, _)| { + ( + p.as_str(), + TargetData { + custom: HashMap::from([( + "v", &**version, + )]), + hashes: HashMap::from([( + "sha256", + hash.as_str(), + )]), + length: 0, + }, + ) + }) + .collect(), + version: 1, }, - expires: OffsetDateTime::from_unix_timestamp(253402300799).unwrap(), - spec_version: "1.0.0", - targets: target_info.iter().map(|(p, hash, version, _)| { - (p.as_str(), TargetData { - custom: HashMap::from([("v", &**version)]), - hashes: HashMap::from([("sha256", hash.as_str())]), - length: 0, + }; + let 
response = ClientGetConfigsResponse { + roots: vec![], /* not checked */ + targets: base64::engine::general_purpose::STANDARD + .encode(serde_json::to_vec(&targets).unwrap()) + .into_bytes(), + target_files: target_info + .iter() + .map(|(p, _, _, file)| File { + path: p.to_string(), + raw: base64::engine::general_purpose::STANDARD + .encode(file) + .into_bytes(), }) - }).collect(), - version: 1, - }, - }; - let response = ClientGetConfigsResponse { - roots: vec![] /* not checked */, - targets: base64::engine::general_purpose::STANDARD.encode(serde_json::to_vec(&targets).unwrap()).into_bytes(), - target_files: target_info.iter().map(|(p, _, _, file)| { - File { - path: p.to_string(), - raw: base64::engine::general_purpose::STANDARD.encode(file).into_bytes(), - } - }).collect(), - client_configs: applied_files.keys().map(|k| k.to_string()).collect(), - }; - Response::new(Body::from(serde_json::to_vec(&response).unwrap())) - } - }; + .collect(), + client_configs: applied_files + .keys() + .map(|k| k.to_string()) + .collect(), + }; + Response::new(Body::from( + serde_json::to_vec(&response).unwrap(), + )) + } + }; *this.last_request.lock().unwrap() = Some(request); Ok::<_, Infallible>(response) } @@ -137,7 +207,10 @@ impl RemoteConfigServer { language: "php".to_string(), tracer_version: "1.2.3".to_string(), endpoint: self.endpoint.clone(), - products: vec![RemoteConfigProduct::ApmTracing, RemoteConfigProduct::LiveDebugger], + products: vec![ + RemoteConfigProduct::ApmTracing, + RemoteConfigProduct::LiveDebugger, + ], capabilities: vec![RemoteConfigCapabilities::ApmTracingCustomTags], } } diff --git a/remote-config/src/lib.rs b/remote-config/src/lib.rs index f7f094d1d..cb7c4ee98 100644 --- a/remote-config/src/lib.rs +++ b/remote-config/src/lib.rs @@ -1,13 +1,13 @@ -// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). 
Copyright 2021-Present Datadog, Inc. +// Unless explicitly stated otherwise all files in this repository are licensed under the Apache +// License Version 2.0. This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. +pub mod dynamic_configuration; +pub mod fetch; mod parse; mod targets; -pub mod fetch; -pub mod dynamic_configuration; -use serde::{Deserialize, Serialize}; pub use parse::*; +use serde::{Deserialize, Serialize}; #[derive(Debug, Deserialize, Serialize, Clone, Hash, Ord, PartialOrd, Eq, PartialEq)] pub struct Target { @@ -50,4 +50,4 @@ pub enum RemoteConfigCapabilities { AsmRaspXss = 28, ApmTracingSampleRules = 29, CsmActivation = 30, -} \ No newline at end of file +} diff --git a/remote-config/src/parse.rs b/remote-config/src/parse.rs index 90b73ea9e..bd1c3524f 100644 --- a/remote-config/src/parse.rs +++ b/remote-config/src/parse.rs @@ -1,9 +1,9 @@ -// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. +// Unless explicitly stated otherwise all files in this repository are licensed under the Apache +// License Version 2.0. This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. 
-use std::fmt::Display; -use serde::{Deserialize, Serialize}; use crate::dynamic_configuration::data::DynamicConfigFile; +use serde::{Deserialize, Serialize}; +use std::fmt::Display; #[derive(Debug, Clone, Eq, Hash, PartialEq)] pub enum RemoteConfigSource { @@ -69,8 +69,19 @@ impl RemoteConfigPath { impl ToString for RemoteConfigPath { fn to_string(&self) -> String { match self.source { - RemoteConfigSource::Datadog(id) => format!("datadog/{}/{}/{}/{}", id, self.product.to_string(), self.config_id, self.name), - RemoteConfigSource::Employee => format!("employee/{}/{}/{}", self.product.to_string(), self.config_id, self.name), + RemoteConfigSource::Datadog(id) => format!( + "datadog/{}/{}/{}/{}", + id, + self.product.to_string(), + self.config_id, + self.name + ), + RemoteConfigSource::Employee => format!( + "employee/{}/{}/{}", + self.product.to_string(), + self.config_id, + self.name + ), } } } @@ -78,15 +89,18 @@ impl ToString for RemoteConfigPath { #[derive(Debug)] pub enum RemoteConfigData { DynamicConfig(DynamicConfigFile), - LiveDebugger(( /* placeholder */)), + LiveDebugger(()), } impl RemoteConfigData { - pub fn try_parse(product: RemoteConfigProduct, data: &[u8]) -> anyhow::Result { + pub fn try_parse( + product: RemoteConfigProduct, + data: &[u8], + ) -> anyhow::Result { Ok(match product { RemoteConfigProduct::ApmTracing => { RemoteConfigData::DynamicConfig(serde_json::from_slice(data)?) 
- }, + } RemoteConfigProduct::LiveDebugger => { RemoteConfigData::LiveDebugger(/* placeholder */ ()) } diff --git a/remote-config/src/targets.rs b/remote-config/src/targets.rs index bb0861959..805f7dcdc 100644 --- a/remote-config/src/targets.rs +++ b/remote-config/src/targets.rs @@ -1,7 +1,7 @@ -use std::collections::HashMap; -use std::str::FromStr; use serde::Deserialize; use serde_json::value::RawValue; +use std::collections::HashMap; +use std::str::FromStr; use time::OffsetDateTime; #[derive(Deserialize)] @@ -26,7 +26,7 @@ pub struct TargetsData<'a> { pub custom: TargetsCustom<'a>, #[serde(with = "time::serde::iso8601")] pub expires: OffsetDateTime, - pub spec_version : &'a str, + pub spec_version: &'a str, pub targets: HashMap<&'a str, TargetData<'a>>, pub version: i64, } @@ -55,6 +55,8 @@ impl<'a> TargetsList<'a> { impl<'a> TargetData<'a> { pub fn try_parse_version(&self) -> Option { - self.custom.get("v").and_then(|v| u64::from_str(v.get()).ok()) + self.custom + .get("v") + .and_then(|v| u64::from_str(v.get()).ok()) } -} \ No newline at end of file +} diff --git a/sidecar-ffi/src/lib.rs b/sidecar-ffi/src/lib.rs index 9dcb7b1ea..313ee427b 100644 --- a/sidecar-ffi/src/lib.rs +++ b/sidecar-ffi/src/lib.rs @@ -4,6 +4,9 @@ use datadog_ipc::platform::{ FileBackedHandle, MappedMem, NamedShmHandle, PlatformHandle, ShmHandle, }; +use datadog_live_debugger::debugger_defs::DebuggerPayload; +use datadog_remote_config::fetch::ConfigInvariants; +use datadog_remote_config::{RemoteConfigCapabilities, RemoteConfigProduct, Target}; use datadog_sidecar::agent_remote_config::{ new_reader, reader_from_shm, AgentRemoteConfigEndpoint, AgentRemoteConfigWriter, }; @@ -15,6 +18,7 @@ use datadog_sidecar::service::{ blocking::{self, SidecarTransport}, InstanceId, QueueId, RuntimeMetadata, SerializedTracerHeaderTags, SessionConfig, SidecarAction, }; +use datadog_sidecar::shm_remote_config::RemoteConfigReader; use ddcommon::tag::Tag; use ddcommon::Endpoint; use ddcommon_ffi as ffi; @@ 
-35,10 +39,6 @@ use std::os::windows::io::{FromRawHandle, RawHandle}; use std::slice; use std::sync::Arc; use std::time::Duration; -use datadog_live_debugger::debugger_defs::DebuggerPayload; -use datadog_remote_config::fetch::ConfigInvariants; -use datadog_remote_config::{RemoteConfigCapabilities, RemoteConfigProduct, Target}; -use datadog_sidecar::shm_remote_config::RemoteConfigReader; #[repr(C)] pub struct NativeFile { @@ -208,17 +208,25 @@ pub unsafe extern "C" fn ddog_remote_config_reader_for_endpoint<'a>( remote_config_capabilities: *const RemoteConfigCapabilities, remote_config_capabilities_count: usize, ) -> Box { - Box::new(RemoteConfigReader::new(&ConfigInvariants { - language: language.to_utf8_lossy().into(), - tracer_version: tracer_version.to_utf8_lossy().into(), - endpoint: endpoint.clone(), - products: slice::from_raw_parts(remote_config_products, remote_config_products_count).to_vec(), - capabilities: slice::from_raw_parts(remote_config_capabilities, remote_config_capabilities_count).to_vec(), - }, &Arc::new(Target { - service: service_name.to_utf8_lossy().into(), - env: env_name.to_utf8_lossy().into(), - app_version: app_version.to_utf8_lossy().into(), - }))) + Box::new(RemoteConfigReader::new( + &ConfigInvariants { + language: language.to_utf8_lossy().into(), + tracer_version: tracer_version.to_utf8_lossy().into(), + endpoint: endpoint.clone(), + products: slice::from_raw_parts(remote_config_products, remote_config_products_count) + .to_vec(), + capabilities: slice::from_raw_parts( + remote_config_capabilities, + remote_config_capabilities_count, + ) + .to_vec(), + }, + &Arc::new(Target { + service: service_name.to_utf8_lossy().into(), + env: env_name.to_utf8_lossy().into(), + app_version: app_version.to_utf8_lossy().into(), + }), + )) } #[no_mangle] @@ -512,8 +520,16 @@ pub unsafe extern "C" fn ddog_sidecar_session_set_config( } else { LogMethod::File(String::from(log_path.to_utf8_lossy()).into()) }, - remote_config_products: 
slice::from_raw_parts(remote_config_products, remote_config_products_count).to_vec(), - remote_config_capabilities: slice::from_raw_parts(remote_config_capabilities, remote_config_capabilities_count).to_vec(), + remote_config_products: slice::from_raw_parts( + remote_config_products, + remote_config_products_count + ) + .to_vec(), + remote_config_capabilities: slice::from_raw_parts( + remote_config_capabilities, + remote_config_capabilities_count + ) + .to_vec(), }, )); diff --git a/sidecar/src/lib.rs b/sidecar/src/lib.rs index 777bd871a..197b286f2 100644 --- a/sidecar/src/lib.rs +++ b/sidecar/src/lib.rs @@ -9,8 +9,8 @@ pub mod entry; pub mod log; pub mod one_way_shared_memory; mod self_telemetry; -pub mod shm_remote_config; pub mod setup; +pub mod shm_remote_config; mod tracer; mod watchdog; diff --git a/sidecar/src/service/blocking.rs b/sidecar/src/service/blocking.rs index a0f2d5fcb..100e7a198 100644 --- a/sidecar/src/service/blocking.rs +++ b/sidecar/src/service/blocking.rs @@ -201,17 +201,16 @@ pub fn register_service_and_flush_queued_actions( /// An `io::Result<()>` indicating the result of the operation. 
pub fn set_session_config( transport: &mut SidecarTransport, - #[cfg(unix)] - pid: libc::pid_t, - #[cfg(windows)] - remote_config_notify_function: *mut libc::c_void, + #[cfg(unix)] pid: libc::pid_t, + #[cfg(windows)] remote_config_notify_function: *mut libc::c_void, session_id: String, config: &SessionConfig, ) -> io::Result<()> { #[cfg(unix)] let remote_config_notify_target = pid; #[cfg(windows)] - let remote_config_notify_target = crate::service::remote_configs::RemoteConfigNotifyFunction(remote_config_notify_function); + let remote_config_notify_target = + crate::service::remote_configs::RemoteConfigNotifyFunction(remote_config_notify_function); transport.send(SidecarInterfaceRequest::SetSessionConfig { session_id, remote_config_notify_target, diff --git a/sidecar/src/service/mod.rs b/sidecar/src/service/mod.rs index d97efb736..566058b04 100644 --- a/sidecar/src/service/mod.rs +++ b/sidecar/src/service/mod.rs @@ -4,6 +4,7 @@ // imports for structs defined in this file use crate::config; use crate::service::telemetry::enqueued_telemetry_data::EnqueuedTelemetryData; +use datadog_remote_config::{RemoteConfigCapabilities, RemoteConfigProduct}; use ddcommon::tag::Tag; use ddcommon::Endpoint; use ddtelemetry::metrics::MetricContext; @@ -11,7 +12,6 @@ use ddtelemetry::worker::TelemetryActions; use serde::{Deserialize, Serialize}; use std::path::PathBuf; use std::time::Duration; -use datadog_remote_config::{RemoteConfigCapabilities, RemoteConfigProduct}; // public types we want to bring up to top level of service:: scope pub use instance_id::InstanceId; diff --git a/sidecar/src/service/remote_configs.rs b/sidecar/src/service/remote_configs.rs index 00bfa8394..8b22ff5f3 100644 --- a/sidecar/src/service/remote_configs.rs +++ b/sidecar/src/service/remote_configs.rs @@ -1,9 +1,9 @@ +use crate::shm_remote_config::{ShmRemoteConfigs, ShmRemoteConfigsGuard}; +use datadog_remote_config::fetch::{ConfigInvariants, NotifyTarget}; use std::collections::hash_map::Entry; use 
std::fmt::Debug; use std::sync::{Arc, Mutex}; use zwohash::HashMap; -use datadog_remote_config::fetch::{ConfigInvariants, NotifyTarget}; -use crate::shm_remote_config::{ShmRemoteConfigs, ShmRemoteConfigsGuard}; #[cfg(windows)] #[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)] @@ -15,21 +15,28 @@ unsafe impl Sync for RemoteConfigNotifyFunction {} #[cfg(windows)] impl Default for RemoteConfigNotifyFunction { fn default() -> Self { - return RemoteConfigNotifyFunction(std::ptr::null_mut()) + return RemoteConfigNotifyFunction(std::ptr::null_mut()); } } #[cfg(windows)] impl serde::Serialize for RemoteConfigNotifyFunction { - fn serialize(&self, serializer: S) -> Result where S: serde::Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { serializer.serialize_u64(self.0 as u64) } } #[cfg(windows)] impl<'de> serde::Deserialize<'de> for RemoteConfigNotifyFunction { - fn deserialize(deserializer: D) -> Result where D: serde::Deserializer<'de> { - >::deserialize(deserializer).map(|p| RemoteConfigNotifyFunction(p as *mut libc::c_void)) + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + >::deserialize(deserializer) + .map(|p| RemoteConfigNotifyFunction(p as *mut libc::c_void)) } } @@ -63,13 +70,23 @@ impl NotifyTarget for RemoteConfigNotifyTarget { // TODO: CreateRemoteThread -> ddtrace_set_all_thread_vm_interrupt unsafe { let dummy = 0; - kernel32::CreateRemoteThread(self.process_handle.0, std::ptr::null_mut(), 0, Some(std::mem::transmute(self.notify_function.0)), &dummy as *const i32 as winapi::LPVOID, 0, std::ptr::null_mut()); + kernel32::CreateRemoteThread( + self.process_handle.0, + std::ptr::null_mut(), + 0, + Some(std::mem::transmute(self.notify_function.0)), + &dummy as *const i32 as winapi::LPVOID, + 0, + std::ptr::null_mut(), + ); } } } #[derive(Default, Clone)] -pub struct RemoteConfigs(Arc>>>); +pub struct RemoteConfigs( + Arc>>>, +); pub type RemoteConfigsGuard = ShmRemoteConfigsGuard; 
impl RemoteConfigs { @@ -87,11 +104,15 @@ impl RemoteConfigs { Entry::Vacant(e) => { let this = self.0.clone(); let invariants = e.key().clone(); - e.insert(ShmRemoteConfigs::new(invariants.clone(), Box::new(move || { - this.lock().unwrap().remove(&invariants); - }))) + e.insert(ShmRemoteConfigs::new( + invariants.clone(), + Box::new(move || { + this.lock().unwrap().remove(&invariants); + }), + )) } - }.add_runtime(runtime_id, notify_target, env, service, app_version) + } + .add_runtime(runtime_id, notify_target, env, service, app_version) } pub fn shutdown(&self) { diff --git a/sidecar/src/service/runtime_info.rs b/sidecar/src/service/runtime_info.rs index 73d46a0ab..9f2f8215d 100644 --- a/sidecar/src/service/runtime_info.rs +++ b/sidecar/src/service/runtime_info.rs @@ -131,7 +131,9 @@ impl RuntimeInfo { /// /// * `MutexGuard>` - A mutable reference to the remote /// config guards map. - pub(crate) fn lock_remote_config_guards(&self) -> MutexGuard> { + pub(crate) fn lock_remote_config_guards( + &self, + ) -> MutexGuard> { self.remote_config_guards.lock().unwrap() } } diff --git a/sidecar/src/service/session_info.rs b/sidecar/src/service/session_info.rs index 52cc22d94..47539c312 100644 --- a/sidecar/src/service/session_info.rs +++ b/sidecar/src/service/session_info.rs @@ -1,16 +1,16 @@ // Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 +use std::sync::atomic::AtomicI32; use std::{ collections::HashMap, sync::{Arc, Mutex, MutexGuard}, }; -use std::sync::atomic::AtomicI32; use futures::future; -use tracing::{enabled, info, Level}; use datadog_remote_config::fetch::ConfigInvariants; +use tracing::{enabled, info, Level}; use crate::log::{MultiEnvFilterGuard, MultiWriterGuard}; use crate::{dogstatsd, tracer}; @@ -28,7 +28,8 @@ pub(crate) struct SessionInfo { dogstatsd: Arc>, remote_config_invariants: Arc>>, #[cfg(windows)] - pub(crate) remote_config_notify_function: Arc>, + pub(crate) remote_config_notify_function: + Arc>, pub(crate) log_guard: Arc, MultiWriterGuard<'static>)>>>, #[cfg(feature = "tracing")] diff --git a/sidecar/src/service/sidecar_interface.rs b/sidecar/src/service/sidecar_interface.rs index 232fce7bb..a616318e5 100644 --- a/sidecar/src/service/sidecar_interface.rs +++ b/sidecar/src/service/sidecar_interface.rs @@ -66,7 +66,7 @@ pub trait SidecarInterface { async fn set_session_config( session_id: String, remote_config_notify_target: RemoteConfigNotifyTarget, - config: SessionConfig + config: SessionConfig, ); /// Shuts down a runtime. 
diff --git a/sidecar/src/service/sidecar_server.rs b/sidecar/src/service/sidecar_server.rs index bf66652d8..1aede69d4 100644 --- a/sidecar/src/service/sidecar_server.rs +++ b/sidecar/src/service/sidecar_server.rs @@ -40,12 +40,12 @@ use serde::{Deserialize, Serialize}; use tokio::task::{JoinError, JoinHandle}; use crate::dogstatsd::DogStatsDAction; +use crate::service::remote_configs::{RemoteConfigNotifyTarget, RemoteConfigs}; use crate::service::telemetry::enqueued_telemetry_stats::EnqueuedTelemetryStats; use crate::service::tracing::trace_flusher::TraceFlusherStats; use datadog_ipc::platform::FileBackedHandle; use datadog_ipc::tarpc::server::{Channel, InFlightRequest}; use datadog_remote_config::fetch::ConfigInvariants; -use crate::service::remote_configs::{RemoteConfigNotifyTarget, RemoteConfigs}; type NoResponse = Ready<()>; @@ -117,7 +117,14 @@ impl SidecarServer { #[cfg_attr(not(windows), allow(unused_mut))] pub async fn accept_connection(mut self, async_channel: AsyncChannel) { #[cfg(windows)] - { self.process_handle = async_channel.metadata.lock().unwrap().process_handle().map(|p| ProcessHandle(p as winapi::HANDLE)); } + { + self.process_handle = async_channel + .metadata + .lock() + .unwrap() + .process_handle() + .map(|p| ProcessHandle(p as winapi::HANDLE)); + } let server = tarpc::server::BaseChannel::new( tarpc::server::Config { pending_response_buffer: 10000, @@ -512,12 +519,14 @@ impl SidecarInterface for SidecarServer { } }, Entry::Vacant(entry) => { - if actions.len() == 1 && matches!( - actions[0], - SidecarAction::Telemetry(TelemetryActions::Lifecycle( - LifecycleAction::Stop - )) - ) { + if actions.len() == 1 + && matches!( + actions[0], + SidecarAction::Telemetry(TelemetryActions::Lifecycle( + LifecycleAction::Stop + )) + ) + { rt_info.lock_remote_config_guards().remove(&queue_id); } else { entry.insert(AppOrQueue::Queue(EnqueuedTelemetryData::processed(actions))); @@ -606,17 +615,20 @@ impl SidecarInterface for SidecarServer { self, _: Context, 
session_id: String, - #[cfg(unix)] - pid: libc::pid_t, + #[cfg(unix)] pid: libc::pid_t, #[cfg(windows)] remote_config_notify_function: crate::service::remote_configs::RemoteConfigNotifyFunction, config: SessionConfig, ) -> Self::SetSessionConfigFut { let session = self.get_session(&session_id); #[cfg(unix)] - { session.pid.store(pid, Ordering::Relaxed); } + { + session.pid.store(pid, Ordering::Relaxed); + } #[cfg(windows)] - { *session.remote_config_notify_function.lock().unwrap() = remote_config_notify_function; } + { + *session.remote_config_notify_function.lock().unwrap() = remote_config_notify_function; + } session.modify_telemetry_config(|cfg| { let endpoint = get_product_endpoint(ddtelemetry::config::PROD_INTAKE_SUBDOMAIN, &config.endpoint); @@ -772,9 +784,27 @@ impl SidecarInterface for SidecarServer { return no_response(); }; #[cfg(unix)] - let notify_target = RemoteConfigNotifyTarget { pid: session.pid.load(Ordering::Relaxed) }; - session.get_runtime(&instance_id.runtime_id).lock_remote_config_guards().insert(queue_id, self.remote_configs - .add_runtime(session.get_remote_config_invariants().as_ref().expect("Expecting remote config invariants to be set early").clone(), instance_id.runtime_id, notify_target, env_name, service_name, app_version)); + let notify_target = RemoteConfigNotifyTarget { + pid: session.pid.load(Ordering::Relaxed), + }; + session + .get_runtime(&instance_id.runtime_id) + .lock_remote_config_guards() + .insert( + queue_id, + self.remote_configs.add_runtime( + session + .get_remote_config_invariants() + .as_ref() + .expect("Expecting remote config invariants to be set early") + .clone(), + instance_id.runtime_id, + notify_target, + env_name, + service_name, + app_version, + ), + ); no_response() } diff --git a/sidecar/src/shm_remote_config.rs b/sidecar/src/shm_remote_config.rs index 62a2fc811..befac2717 100644 --- a/sidecar/src/shm_remote_config.rs +++ b/sidecar/src/shm_remote_config.rs @@ -1,12 +1,20 @@ -// Unless explicitly stated 
otherwise all files in this repository are licensed under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. +// Unless explicitly stated otherwise all files in this repository are licensed under the Apache +// License Version 2.0. This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. -use crate::one_way_shared_memory::{open_named_shm, OneWayShmReader, OneWayShmWriter, ReaderOpener}; +use crate::one_way_shared_memory::{ + open_named_shm, OneWayShmReader, OneWayShmWriter, ReaderOpener, +}; +use crate::primary_sidecar_identifier; use base64::prelude::BASE64_URL_SAFE_NO_PAD; use base64::Engine; use datadog_ipc::platform::{FileBackedHandle, MappedMem, NamedShmHandle}; +use datadog_remote_config::fetch::{ + ConfigInvariants, FileRefcountData, FileStorage, MultiTargetFetcher, MultiTargetHandlers, + NotifyTarget, RefcountedFile, +}; use datadog_remote_config::{RemoteConfigPath, RemoteConfigValue, Target}; -use datadog_remote_config::fetch::{ConfigInvariants, FileRefcountData, FileStorage, MultiTargetFetcher, MultiTargetHandlers, NotifyTarget, RefcountedFile}; +use priority_queue::PriorityQueue; +use sha2::{Digest, Sha224}; use std::cmp::Reverse; use std::collections::hash_map::Entry; use std::collections::HashMap; @@ -18,12 +26,9 @@ use std::io; use std::io::Write; use std::sync::{Arc, Mutex}; use std::time::Duration; -use priority_queue::PriorityQueue; -use sha2::{Digest, Sha224}; use tokio::time::Instant; use tracing::{debug, error, trace, warn}; use zwohash::ZwoHasher; -use crate::primary_sidecar_identifier; pub struct RemoteConfigWriter(OneWayShmWriter); pub struct RemoteConfigReader(OneWayShmReader); @@ -34,16 +39,18 @@ fn path_for_remote_config(id: &ConfigInvariants, target: &Arc) -> CStrin id.hash(&mut hasher); target.hash(&mut hasher); // datadog remote config, on macos we're restricted to 31 chars 
- CString::new(format!("/ddrc{}-{}", primary_sidecar_identifier(), hasher.finish())).unwrap() + CString::new(format!( + "/ddrc{}-{}", + primary_sidecar_identifier(), + hasher.finish() + )) + .unwrap() } impl RemoteConfigReader { pub fn new(id: &ConfigInvariants, target: &Arc) -> RemoteConfigReader { let path = path_for_remote_config(id, target); - RemoteConfigReader(OneWayShmReader::new( - open_named_shm(&path).ok(), - path, - )) + RemoteConfigReader(OneWayShmReader::new(open_named_shm(&path).ok(), path)) } pub fn read(&mut self) -> (bool, &[u8]) { @@ -63,9 +70,7 @@ impl RemoteConfigWriter { } } -impl ReaderOpener - for OneWayShmReader -{ +impl ReaderOpener for OneWayShmReader { fn open(&self) -> Option> { open_named_shm(&self.extra).ok() } @@ -93,25 +98,35 @@ impl RefcountedFile for StoredShmFile { impl FileStorage for ConfigFileStorage { type StoredFile = StoredShmFile; - fn store(&self, version: u64, path: RemoteConfigPath, file: Vec) -> anyhow::Result> { + fn store( + &self, + version: u64, + path: RemoteConfigPath, + file: Vec, + ) -> anyhow::Result> { Ok(Arc::new(StoredShmFile { handle: Mutex::new(store_shm(version, &path, file)?), refcount: FileRefcountData::new(version, path), })) } - fn update(&self, file: &Arc, version: u64, contents: Vec) -> anyhow::Result<()> { + fn update( + &self, + file: &Arc, + version: u64, + contents: Vec, + ) -> anyhow::Result<()> { *file.handle.lock().unwrap() = store_shm(version, &file.refcount.path, contents)?; Ok(()) } } -fn store_shm(version: u64, path: &RemoteConfigPath, file: Vec) -> anyhow::Result { - let name = format!( - "ddrc{}-{}", - primary_sidecar_identifier(), - version, - ); +fn store_shm( + version: u64, + path: &RemoteConfigPath, + file: Vec, +) -> anyhow::Result { + let name = format!("ddrc{}-{}", primary_sidecar_identifier(), version,); // as much signal as possible to be collision free let hashed_path = BASE64_URL_SAFE_NO_PAD.encode(Sha224::digest(&path.to_string())); #[cfg(target_os = "macos")] @@ -127,14 
+142,20 @@ fn store_shm(version: u64, path: &RemoteConfigPath, file: Vec) -> anyhow::Re #[cfg_attr(not(windows), allow(unused_mut))] let mut target_slice = handle.as_slice_mut(); #[cfg(windows)] - { target_slice.write(&(file.len() as u32).to_ne_bytes())?; } + { + target_slice.write(&(file.len() as u32).to_ne_bytes())?; + } target_slice.copy_from_slice(file.as_slice()); Ok(handle.into()) } impl MultiTargetHandlers for ConfigFileStorage { - fn fetched(&self, target: &Arc, files: &[Arc]) -> (Option, bool) { + fn fetched( + &self, + target: &Arc, + files: &[Arc], + ) -> (Option, bool) { let mut writers = self.writers.lock().unwrap(); let writer = match writers.entry(target.clone()) { Entry::Occupied(e) => e.into_mut(), @@ -144,23 +165,33 @@ impl MultiTargetHandlers for ConfigFileStorage { let msg = format!("Failed acquiring a remote config shm writer: {:?}", e); error!(msg); return (Some(msg), false); - }, + } }), }; - let len = files.iter().map(|f| f.handle.lock().unwrap().get_path().len() + 2).sum(); + let len = files + .iter() + .map(|f| f.handle.lock().unwrap().get_path().len() + 2) + .sum(); let mut serialized = Vec::with_capacity(len); for file in files.iter() { serialized.extend_from_slice(file.handle.lock().unwrap().get_path()); serialized.push(b':'); - serialized.extend_from_slice(BASE64_URL_SAFE_NO_PAD.encode(file.refcount.path.to_string()).as_bytes()); + serialized.extend_from_slice( + BASE64_URL_SAFE_NO_PAD + .encode(file.refcount.path.to_string()) + .as_bytes(), + ); serialized.push(b'\n'); } if writer.0.as_slice() != serialized { writer.write(&serialized); - debug!("Active configuration files are: {}", String::from_utf8_lossy(&serialized)); + debug!( + "Active configuration files are: {}", + String::from_utf8_lossy(&serialized) + ); (None, true) } else { @@ -176,7 +207,13 @@ impl MultiTargetHandlers for ConfigFileStorage { } fn dead(&self) { - (self.on_dead.lock().unwrap().take().expect("The MultiTargetHandler must not be used anymore once on_dead is 
called"))(); + (self + .on_dead + .lock() + .unwrap() + .take() + .expect("The MultiTargetHandler must not be used anymore once on_dead is called"))( + ); } } @@ -188,18 +225,22 @@ pub struct ShmRemoteConfigsGuard { impl Drop for ShmRemoteConfigsGuard { fn drop(&mut self) { - self.remote_configs.0.delete_runtime(&self.runtime_id, &self.target); + self.remote_configs + .0 + .delete_runtime(&self.runtime_id, &self.target); } } #[derive(Clone)] -pub struct ShmRemoteConfigs(Arc>); +pub struct ShmRemoteConfigs( + Arc>, +); // we collect services per env, so that we always query, for each runtime + env, all the services // adding runtimes increases amount of services, removing services after a while -// one request per (runtime_id, RemoteConfigIdentifier) tuple: extra_services are all services pertaining to that env -// refcounting RemoteConfigIdentifier tuples by their unique runtime_id +// one request per (runtime_id, RemoteConfigIdentifier) tuple: extra_services are all services +// pertaining to that env refcounting RemoteConfigIdentifier tuples by their unique runtime_id impl ShmRemoteConfigs { pub fn new(invariants: ConfigInvariants, on_dead: Box) -> Self { @@ -228,7 +269,8 @@ impl ShmRemoteConfigs { env, app_version, }); - self.0.add_runtime(runtime_id.clone(), notify_target, &target); + self.0 + .add_runtime(runtime_id.clone(), notify_target, &target); ShmRemoteConfigsGuard { target, runtime_id, @@ -250,7 +292,10 @@ fn read_config(path: &str) -> anyhow::Result { let data = &data[4..(4 + u32::from_ne_bytes((&data[0..4]).try_into()?) as usize)]; RemoteConfigValue::try_parse(&rc_path, data) } else { - anyhow::bail!("could not read config; {} does not have exactly one colon", path); + anyhow::bail!( + "could not read config; {} does not have exactly one colon", + path + ); } } @@ -296,7 +341,9 @@ impl RemoteConfigManager { /// Has to be polled repeatedly until None is returned. 
pub fn fetch_update(&mut self) -> RemoteConfigUpdate { if let Some(ref target) = self.active_target { - let reader = self.active_reader.get_or_insert_with(|| RemoteConfigReader::new(&self.invariants, target)); + let reader = self + .active_reader + .get_or_insert_with(|| RemoteConfigReader::new(&self.invariants, target)); let (changed, data) = reader.read(); if changed { @@ -346,12 +393,15 @@ impl RemoteConfigManager { match read_config(&config) { Ok(parsed) => { trace!("Adding remote config file {config}: {parsed:?}"); - self.active_configs.insert(config, RemoteConfigPath { - source: parsed.source.clone(), - product: (&parsed.data).into(), - config_id: parsed.config_id.clone(), - name: parsed.name.clone(), - }); + self.active_configs.insert( + config, + RemoteConfigPath { + source: parsed.source.clone(), + product: (&parsed.data).into(), + config_id: parsed.config_id.clone(), + name: parsed.name.clone(), + }, + ); return RemoteConfigUpdate::Add(parsed); } Err(e) => warn!("Failed reading remote config file {config}; skipping: {e:?}"), @@ -370,8 +420,10 @@ impl RemoteConfigManager { if self.check_configs.is_empty() { current_configs.extend(self.active_configs.keys().cloned()); } - self.encountered_targets.insert(old_target.clone(), (reader, current_configs)); - self.unexpired_targets.push(old_target, Reverse(Instant::now())); + self.encountered_targets + .insert(old_target.clone(), (reader, current_configs)); + self.unexpired_targets + .push(old_target, Reverse(Instant::now())); } } if let Some(ref target) = self.active_target { @@ -410,12 +462,14 @@ impl RemoteConfigManager { #[cfg(test)] mod tests { - use lazy_static::lazy_static; - use manual_future::ManualFuture; - use datadog_remote_config::dynamic_configuration::data::{Configs, tests::dummy_dynamic_config}; use super::*; + use datadog_remote_config::dynamic_configuration::data::{ + tests::dummy_dynamic_config, Configs, + }; use datadog_remote_config::fetch::test_server::RemoteConfigServer; use 
datadog_remote_config::{RemoteConfigData, RemoteConfigProduct, RemoteConfigSource}; + use lazy_static::lazy_static; + use manual_future::ManualFuture; lazy_static! { static ref PATH_FIRST: RemoteConfigPath = RemoteConfigPath { @@ -424,14 +478,12 @@ mod tests { config_id: "1234".to_string(), name: "config".to_string(), }; - static ref PATH_SECOND: RemoteConfigPath = RemoteConfigPath { source: RemoteConfigSource::Employee, product: RemoteConfigProduct::ApmTracing, config_id: "9876".to_string(), name: "config".to_string(), }; - static ref DUMMY_TARGET: Arc = Arc::new(Target { service: "service".to_string(), env: "env".to_string(), @@ -449,13 +501,17 @@ mod tests { impl Eq for NotifyDummy {} impl PartialEq for NotifyDummy { - fn eq(&self, _other: &Self) -> bool { true } + fn eq(&self, _other: &Self) -> bool { + true + } } impl NotifyTarget for NotifyDummy { fn notify(&self) { let channel = self.0.clone(); - tokio::spawn(async move { channel.send(()).await.unwrap(); }); + tokio::spawn(async move { + channel.send(()).await.unwrap(); + }); } } @@ -464,11 +520,23 @@ mod tests { let server = RemoteConfigServer::spawn(); let (on_dead, on_dead_completer) = ManualFuture::new(); - let shm = ShmRemoteConfigs::new(server.dummy_invariants(), Box::new(|| { tokio::spawn(on_dead_completer.complete(())); })); + let shm = ShmRemoteConfigs::new( + server.dummy_invariants(), + Box::new(|| { + tokio::spawn(on_dead_completer.complete(())); + }), + ); let mut manager = RemoteConfigManager::new(server.dummy_invariants()); - server.files.lock().unwrap().insert(PATH_FIRST.clone(), (vec![DUMMY_TARGET.clone()], 1, serde_json::to_string(&dummy_dynamic_config(true)).unwrap())); + server.files.lock().unwrap().insert( + PATH_FIRST.clone(), + ( + vec![DUMMY_TARGET.clone()], + 1, + serde_json::to_string(&dummy_dynamic_config(true)).unwrap(), + ), + ); // Nothing yet. 
(No target) assert!(matches!(manager.fetch_update(), RemoteConfigUpdate::None)); @@ -479,7 +547,13 @@ mod tests { let (sender, mut receiver) = tokio::sync::mpsc::channel(1); - let shm_guard = shm.add_runtime("3b43524b-a70c-45dc-921d-34504e50c5eb".to_string(), NotifyDummy(Arc::new(sender)), DUMMY_TARGET.env.to_string(), DUMMY_TARGET.service.to_string(), DUMMY_TARGET.app_version.to_string()); + let shm_guard = shm.add_runtime( + "3b43524b-a70c-45dc-921d-34504e50c5eb".to_string(), + NotifyDummy(Arc::new(sender)), + DUMMY_TARGET.env.to_string(), + DUMMY_TARGET.service.to_string(), + DUMMY_TARGET.app_version.to_string(), + ); receiver.recv().await; @@ -488,7 +562,10 @@ mod tests { assert_eq!(update.source, PATH_FIRST.source); assert_eq!(update.name, PATH_FIRST.name); if let RemoteConfigData::DynamicConfig(data) = update.data { - assert!(matches!(>::from(data.lib_config)[0], Configs::TracingEnabled(true))); + assert!(matches!( + >::from(data.lib_config)[0], + Configs::TracingEnabled(true) + )); } else { unreachable!(); } @@ -501,8 +578,22 @@ mod tests { { let mut files = server.files.lock().unwrap(); - files.insert(PATH_FIRST.clone(), (vec![DUMMY_TARGET.clone()], 2, serde_json::to_string(&dummy_dynamic_config(false)).unwrap())); - files.insert(PATH_SECOND.clone(), (vec![DUMMY_TARGET.clone()], 1, serde_json::to_string(&dummy_dynamic_config(true)).unwrap())); + files.insert( + PATH_FIRST.clone(), + ( + vec![DUMMY_TARGET.clone()], + 2, + serde_json::to_string(&dummy_dynamic_config(false)).unwrap(), + ), + ); + files.insert( + PATH_SECOND.clone(), + ( + vec![DUMMY_TARGET.clone()], + 1, + serde_json::to_string(&dummy_dynamic_config(true)).unwrap(), + ), + ); } receiver.recv().await; @@ -522,7 +613,14 @@ mod tests { unreachable!(); }; if let RemoteConfigUpdate::Add(update) = manager.fetch_update() { - assert_eq!(&update.config_id, if was_second { &PATH_FIRST.config_id } else { &PATH_SECOND.config_id }); + assert_eq!( + &update.config_id, + if was_second { + 
&PATH_FIRST.config_id + } else { + &PATH_SECOND.config_id + } + ); } else { unreachable!(); }; @@ -543,7 +641,14 @@ mod tests { manager.track_target(&DUMMY_TARGET); // If we re-track it's added again immediately if let RemoteConfigUpdate::Add(update) = manager.fetch_update() { - assert_eq!(&update.config_id, if was_second { &PATH_SECOND.config_id } else { &PATH_FIRST.config_id }); + assert_eq!( + &update.config_id, + if was_second { + &PATH_SECOND.config_id + } else { + &PATH_FIRST.config_id + } + ); } else { unreachable!(); }; @@ -562,7 +667,14 @@ mod tests { unreachable!(); }; if let RemoteConfigUpdate::Remove(update) = manager.fetch_update() { - assert_eq!(&update, if was_second { &*PATH_FIRST } else { &*PATH_SECOND }); + assert_eq!( + &update, + if was_second { + &*PATH_FIRST + } else { + &*PATH_SECOND + } + ); } else { unreachable!(); }; diff --git a/trace-protobuf/build.rs b/trace-protobuf/build.rs index 6a427ac16..258af77d6 100644 --- a/trace-protobuf/build.rs +++ b/trace-protobuf/build.rs @@ -119,7 +119,10 @@ fn generate_protobuf() { "#[serde(rename = \"DBType\")]", ); - config.type_attribute("ClientGetConfigsResponse", "#[derive(Deserialize, Serialize)]"); + config.type_attribute( + "ClientGetConfigsResponse", + "#[derive(Deserialize, Serialize)]", + ); config.type_attribute("File", "#[derive(Deserialize, Serialize)]"); config.type_attribute( "ClientGetConfigsRequest", @@ -168,9 +171,12 @@ fn generate_protobuf() { let license = "// Copyright 2023-Present Datadog, Inc. 
https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 -".as_bytes(); +" + .as_bytes(); - let null_deser = &[license, "use serde::{Deserialize, Deserializer, Serialize}; + let null_deser = &[ + license, + "use serde::{Deserialize, Deserializer, Serialize}; fn deserialize_null_into_default<'de, D, T>(deserializer: D) -> Result where @@ -186,12 +192,18 @@ pub fn is_default(t: &T) -> bool { } " - .as_bytes()].concat(); + .as_bytes(), + ] + .concat(); - let serde_uses = &[license, "use serde::{Deserialize, Serialize}; + let serde_uses = &[ + license, + "use serde::{Deserialize, Serialize}; " - .as_bytes()].concat(); + .as_bytes(), + ] + .concat(); prepend_to_file(null_deser, &output_path.join("pb.rs")); prepend_to_file(serde_uses, &output_path.join("remoteconfig.rs")); diff --git a/trace-protobuf/src/pb.rs b/trace-protobuf/src/pb.rs index 422a9d7a5..a2c5f71c9 100644 --- a/trace-protobuf/src/pb.rs +++ b/trace-protobuf/src/pb.rs @@ -39,10 +39,8 @@ pub struct SpanLink { /// /// Optional. Simple mapping of keys to string values. #[prost(map = "string, string", tag = "4")] - pub attributes: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, + pub attributes: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, /// @gotags: msg:"tracestate,omitempty" /// /// Optional. W3C tracestate. @@ -70,8 +68,8 @@ pub struct Span { #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] pub name: ::prost::alloc::string::String, - /// resource is the resource name of this span, also sometimes called the endpoint (for web spans). - /// @gotags: json:"resource" msg:"resource" + /// resource is the resource name of this span, also sometimes called the endpoint (for web + /// spans). 
@gotags: json:"resource" msg:"resource" #[prost(string, tag = "3")] #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] @@ -118,18 +116,16 @@ pub struct Span { #[prost(map = "string, string", tag = "10")] #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] - pub meta: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, + pub meta: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, /// metrics is a mapping from tag name to tag value for numeric-valued tags. /// @gotags: json:"metrics,omitempty" msg:"metrics,omitempty" #[prost(map = "string, double", tag = "11")] #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] pub metrics: ::std::collections::HashMap<::prost::alloc::string::String, f64>, - /// type is the type of the service with which this span is associated. Example values: web, db, lambda. - /// @gotags: json:"type" msg:"type" + /// type is the type of the service with which this span is associated. Example values: web, + /// db, lambda. @gotags: json:"type" msg:"type" #[prost(string, tag = "12")] #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] @@ -140,19 +136,18 @@ pub struct Span { #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] #[serde(skip_serializing_if = "::std::collections::HashMap::is_empty")] - pub meta_struct: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::vec::Vec, - >, - /// span_links represents a collection of links, where each link defines a causal relationship between two spans. - /// @gotags: json:"span_links,omitempty" msg:"span_links,omitempty" + pub meta_struct: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::vec::Vec>, + /// span_links represents a collection of links, where each link defines a causal relationship + /// between two spans. 
@gotags: json:"span_links,omitempty" msg:"span_links,omitempty" #[prost(message, repeated, tag = "14")] #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] #[serde(skip_serializing_if = "::prost::alloc::vec::Vec::is_empty")] pub span_links: ::prost::alloc::vec::Vec, } -/// TraceChunk represents a list of spans with the same trace ID. In other words, a chunk of a trace. +/// TraceChunk represents a list of spans with the same trace ID. In other words, a chunk of a +/// trace. #[derive(Deserialize, Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -172,10 +167,8 @@ pub struct TraceChunk { /// tags specifies tags common in all `spans`. /// @gotags: json:"tags" msg:"tags" #[prost(map = "string, string", tag = "4")] - pub tags: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, + pub tags: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, /// droppedTrace specifies whether the trace was dropped by samplers or not. /// @gotags: json:"dropped_trace" msg:"dropped_trace" #[prost(bool, tag = "5")] @@ -213,10 +206,8 @@ pub struct TracerPayload { /// tags specifies tags common in all `chunks`. /// @gotags: json:"tags" msg:"tags" #[prost(map = "string, string", tag = "7")] - pub tags: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, + pub tags: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, /// env specifies `env` tag that set with the tracer. /// @gotags: json:"env" msg:"env" #[prost(string, tag = "8")] @@ -245,10 +236,8 @@ pub struct AgentPayload { pub tracer_payloads: ::prost::alloc::vec::Vec, /// tags specifies tags common in all `tracerPayloads`. 
#[prost(map = "string, string", tag = "6")] - pub tags: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, + pub tags: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, /// agentVersion specifies version of the agent. #[prost(string, tag = "7")] pub agent_version: ::prost::alloc::string::String, @@ -279,8 +268,9 @@ pub struct StatsPayload { pub agent_version: ::prost::alloc::string::String, #[prost(bool, tag = "5")] pub client_computed: bool, - /// splitPayload indicates if the payload is actually one of several payloads split out from a larger payload. - /// This field can be used in the backend to signal if re-aggregation is necessary. + /// splitPayload indicates if the payload is actually one of several payloads split out from a + /// larger payload. This field can be used in the backend to signal if re-aggregation is + /// necessary. #[prost(bool, tag = "6")] pub split_payload: bool, } @@ -325,8 +315,8 @@ pub struct ClientStatsPayload { #[prost(uint64, tag = "8")] #[serde(default)] pub sequence: u64, - /// AgentAggregation is set by the agent on tracer payloads modified by the agent aggregation layer - /// characterizes counts only and distributions only payloads + /// AgentAggregation is set by the agent on tracer payloads modified by the agent aggregation + /// layer characterizes counts only and distributions only payloads #[prost(string, tag = "9")] #[serde(default)] pub agent_aggregation: ::prost::alloc::string::String, @@ -335,18 +325,20 @@ pub struct ClientStatsPayload { #[prost(string, tag = "10")] #[serde(default)] pub service: ::prost::alloc::string::String, - /// ContainerID specifies the origin container ID. It is meant to be populated by the client and may - /// be enhanced by the agent to ensure it is unique. + /// ContainerID specifies the origin container ID. 
It is meant to be populated by the client + /// and may be enhanced by the agent to ensure it is unique. #[prost(string, tag = "11")] #[serde(default)] #[serde(rename = "ContainerID")] pub container_id: ::prost::alloc::string::String, - /// Tags specifies a set of tags obtained from the orchestrator (where applicable) using the specified containerID. - /// This field should be left empty by the client. It only applies to some specific environment. + /// Tags specifies a set of tags obtained from the orchestrator (where applicable) using the + /// specified containerID. This field should be left empty by the client. It only applies + /// to some specific environment. #[prost(string, repeated, tag = "12")] #[serde(default)] pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// The git commit SHA is obtained from a trace, where it may be set through a tracer <-> source code integration. + /// The git commit SHA is obtained from a trace, where it may be set through a tracer <-> + /// source code integration. 
#[prost(string, tag = "13")] #[serde(default)] pub git_commit_sha: ::prost::alloc::string::String, @@ -376,7 +368,8 @@ pub struct ClientStatsBucket { #[serde(default)] pub agent_time_shift: i64, } -/// ClientGroupedStats aggregate stats on spans grouped by service, name, resource, status_code, type +/// ClientGroupedStats aggregate stats on spans grouped by service, name, resource, status_code, +/// type #[derive(Deserialize, Serialize)] #[serde(rename_all = "PascalCase")] #[allow(clippy::derive_partial_eq_without_eq)] @@ -427,7 +420,8 @@ pub struct ClientGroupedStats { #[serde(default)] pub span_kind: ::prost::alloc::string::String, /// peer_tags are supplementary tags that further describe a peer entity - /// E.g., `grpc.target` to describe the name of a gRPC peer, or `db.hostname` to describe the name of peer DB + /// E.g., `grpc.target` to describe the name of a gRPC peer, or `db.hostname` to describe the + /// name of peer DB #[prost(string, repeated, tag = "16")] #[serde(default)] pub peer_tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, @@ -436,7 +430,8 @@ pub struct ClientGroupedStats { #[serde(default)] pub is_trace_root: i32, } -/// Trilean is an expanded boolean type that is meant to differentiate between being unset and false. +/// Trilean is an expanded boolean type that is meant to differentiate between being unset and +/// false. 
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum Trilean { From e088f304800026887dcd0cb46059ff744ba2efbe Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Thu, 13 Jun 2024 11:54:53 +0200 Subject: [PATCH 08/26] start timeout handling Signed-off-by: Bob Weinand --- remote-config/src/fetch/fetcher.rs | 4 +- remote-config/src/fetch/multitarget.rs | 4 ++ remote-config/src/fetch/shared.rs | 6 +- trace-protobuf/src/pb.rs | 81 ++++++++++++++------------ 4 files changed, 55 insertions(+), 40 deletions(-) diff --git a/remote-config/src/fetch/fetcher.rs b/remote-config/src/fetch/fetcher.rs index 9fa773046..886f79f18 100644 --- a/remote-config/src/fetch/fetcher.rs +++ b/remote-config/src/fetch/fetcher.rs @@ -100,8 +100,10 @@ impl ConfigFetcherState { pub struct ConfigFetcher { pub file_storage: S, state: Arc>, - timeout: AtomicU32, + /// Timeout after which to report failure, in milliseconds. + pub timeout: AtomicU32, /// Collected interval. May be zero if not provided by the remote config server or fetched yet. + /// Given in nanoseconds. pub interval: AtomicU64, } diff --git a/remote-config/src/fetch/multitarget.rs b/remote-config/src/fetch/multitarget.rs index b60884615..331937306 100644 --- a/remote-config/src/fetch/multitarget.rs +++ b/remote-config/src/fetch/multitarget.rs @@ -39,7 +39,10 @@ where target_runtimes: Mutex, HashSet>>, /// Keyed by runtime_id runtimes: Mutex>>, + /// Interval used if the remote server does not specify a refetch interval, in nanoseconds. pub remote_config_interval: AtomicU64, + /// Timeout after which to report failure, in milliseconds. 
+ pub remote_config_timeout: AtomicU32, /// All services by target in use services: Mutex, KnownTarget>>, pending_async_insertions: AtomicU32, @@ -103,6 +106,7 @@ where target_runtimes: Mutex::new(Default::default()), runtimes: Mutex::new(Default::default()), remote_config_interval: AtomicU64::new(5_000_000_000), + remote_config_timeout: AtomicU32::new(5_000), services: Mutex::new(Default::default()), pending_async_insertions: AtomicU32::new(0), fetcher_semaphore: Semaphore::new(Self::DEFAULT_CLIENTS_LIMIT as usize), diff --git a/remote-config/src/fetch/shared.rs b/remote-config/src/fetch/shared.rs index dfd9dd7d0..59deaec7e 100644 --- a/remote-config/src/fetch/shared.rs +++ b/remote-config/src/fetch/shared.rs @@ -25,8 +25,10 @@ pub struct SharedFetcher { /// Each fetcher must have an unique id. Defaults to a random UUID. pub client_id: String, cancellation: CancellationToken, - /// Interval used if the remote server does not specify a refetch interval + /// Interval used if the remote server does not specify a refetch interval, in nanoseconds. pub default_interval: AtomicU64, + /// Timeout after which to report failure, in milliseconds. + pub timeout: AtomicU32, } pub struct FileRefcountData { @@ -223,6 +225,7 @@ impl SharedFetcher { client_id: uuid::Uuid::new_v4().to_string(), cancellation: CancellationToken::new(), default_interval: AtomicU64::new(5_000_000_000), + timeout: AtomicU32::new(5000), } } @@ -239,6 +242,7 @@ impl SharedFetcher { { let state = storage.state.clone(); let mut fetcher = ConfigFetcher::new(storage, state); + fetcher.timeout.store(self.timeout.load(Ordering::Relaxed), Ordering::Relaxed); let mut opaque_state = OpaqueState::default(); diff --git a/trace-protobuf/src/pb.rs b/trace-protobuf/src/pb.rs index a2c5f71c9..422a9d7a5 100644 --- a/trace-protobuf/src/pb.rs +++ b/trace-protobuf/src/pb.rs @@ -39,8 +39,10 @@ pub struct SpanLink { /// /// Optional. Simple mapping of keys to string values. 
#[prost(map = "string, string", tag = "4")] - pub attributes: - ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + pub attributes: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, /// @gotags: msg:"tracestate,omitempty" /// /// Optional. W3C tracestate. @@ -68,8 +70,8 @@ pub struct Span { #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] pub name: ::prost::alloc::string::String, - /// resource is the resource name of this span, also sometimes called the endpoint (for web - /// spans). @gotags: json:"resource" msg:"resource" + /// resource is the resource name of this span, also sometimes called the endpoint (for web spans). + /// @gotags: json:"resource" msg:"resource" #[prost(string, tag = "3")] #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] @@ -116,16 +118,18 @@ pub struct Span { #[prost(map = "string, string", tag = "10")] #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] - pub meta: - ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + pub meta: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, /// metrics is a mapping from tag name to tag value for numeric-valued tags. /// @gotags: json:"metrics,omitempty" msg:"metrics,omitempty" #[prost(map = "string, double", tag = "11")] #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] pub metrics: ::std::collections::HashMap<::prost::alloc::string::String, f64>, - /// type is the type of the service with which this span is associated. Example values: web, - /// db, lambda. @gotags: json:"type" msg:"type" + /// type is the type of the service with which this span is associated. Example values: web, db, lambda. 
+ /// @gotags: json:"type" msg:"type" #[prost(string, tag = "12")] #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] @@ -136,18 +140,19 @@ pub struct Span { #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] #[serde(skip_serializing_if = "::std::collections::HashMap::is_empty")] - pub meta_struct: - ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::vec::Vec>, - /// span_links represents a collection of links, where each link defines a causal relationship - /// between two spans. @gotags: json:"span_links,omitempty" msg:"span_links,omitempty" + pub meta_struct: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::vec::Vec, + >, + /// span_links represents a collection of links, where each link defines a causal relationship between two spans. + /// @gotags: json:"span_links,omitempty" msg:"span_links,omitempty" #[prost(message, repeated, tag = "14")] #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] #[serde(skip_serializing_if = "::prost::alloc::vec::Vec::is_empty")] pub span_links: ::prost::alloc::vec::Vec, } -/// TraceChunk represents a list of spans with the same trace ID. In other words, a chunk of a -/// trace. +/// TraceChunk represents a list of spans with the same trace ID. In other words, a chunk of a trace. #[derive(Deserialize, Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -167,8 +172,10 @@ pub struct TraceChunk { /// tags specifies tags common in all `spans`. /// @gotags: json:"tags" msg:"tags" #[prost(map = "string, string", tag = "4")] - pub tags: - ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + pub tags: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, /// droppedTrace specifies whether the trace was dropped by samplers or not. 
/// @gotags: json:"dropped_trace" msg:"dropped_trace" #[prost(bool, tag = "5")] @@ -206,8 +213,10 @@ pub struct TracerPayload { /// tags specifies tags common in all `chunks`. /// @gotags: json:"tags" msg:"tags" #[prost(map = "string, string", tag = "7")] - pub tags: - ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + pub tags: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, /// env specifies `env` tag that set with the tracer. /// @gotags: json:"env" msg:"env" #[prost(string, tag = "8")] @@ -236,8 +245,10 @@ pub struct AgentPayload { pub tracer_payloads: ::prost::alloc::vec::Vec, /// tags specifies tags common in all `tracerPayloads`. #[prost(map = "string, string", tag = "6")] - pub tags: - ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + pub tags: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, /// agentVersion specifies version of the agent. #[prost(string, tag = "7")] pub agent_version: ::prost::alloc::string::String, @@ -268,9 +279,8 @@ pub struct StatsPayload { pub agent_version: ::prost::alloc::string::String, #[prost(bool, tag = "5")] pub client_computed: bool, - /// splitPayload indicates if the payload is actually one of several payloads split out from a - /// larger payload. This field can be used in the backend to signal if re-aggregation is - /// necessary. + /// splitPayload indicates if the payload is actually one of several payloads split out from a larger payload. + /// This field can be used in the backend to signal if re-aggregation is necessary. 
#[prost(bool, tag = "6")] pub split_payload: bool, } @@ -315,8 +325,8 @@ pub struct ClientStatsPayload { #[prost(uint64, tag = "8")] #[serde(default)] pub sequence: u64, - /// AgentAggregation is set by the agent on tracer payloads modified by the agent aggregation - /// layer characterizes counts only and distributions only payloads + /// AgentAggregation is set by the agent on tracer payloads modified by the agent aggregation layer + /// characterizes counts only and distributions only payloads #[prost(string, tag = "9")] #[serde(default)] pub agent_aggregation: ::prost::alloc::string::String, @@ -325,20 +335,18 @@ pub struct ClientStatsPayload { #[prost(string, tag = "10")] #[serde(default)] pub service: ::prost::alloc::string::String, - /// ContainerID specifies the origin container ID. It is meant to be populated by the client - /// and may be enhanced by the agent to ensure it is unique. + /// ContainerID specifies the origin container ID. It is meant to be populated by the client and may + /// be enhanced by the agent to ensure it is unique. #[prost(string, tag = "11")] #[serde(default)] #[serde(rename = "ContainerID")] pub container_id: ::prost::alloc::string::String, - /// Tags specifies a set of tags obtained from the orchestrator (where applicable) using the - /// specified containerID. This field should be left empty by the client. It only applies - /// to some specific environment. + /// Tags specifies a set of tags obtained from the orchestrator (where applicable) using the specified containerID. + /// This field should be left empty by the client. It only applies to some specific environment. #[prost(string, repeated, tag = "12")] #[serde(default)] pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// The git commit SHA is obtained from a trace, where it may be set through a tracer <-> - /// source code integration. + /// The git commit SHA is obtained from a trace, where it may be set through a tracer <-> source code integration. 
#[prost(string, tag = "13")] #[serde(default)] pub git_commit_sha: ::prost::alloc::string::String, @@ -368,8 +376,7 @@ pub struct ClientStatsBucket { #[serde(default)] pub agent_time_shift: i64, } -/// ClientGroupedStats aggregate stats on spans grouped by service, name, resource, status_code, -/// type +/// ClientGroupedStats aggregate stats on spans grouped by service, name, resource, status_code, type #[derive(Deserialize, Serialize)] #[serde(rename_all = "PascalCase")] #[allow(clippy::derive_partial_eq_without_eq)] @@ -420,8 +427,7 @@ pub struct ClientGroupedStats { #[serde(default)] pub span_kind: ::prost::alloc::string::String, /// peer_tags are supplementary tags that further describe a peer entity - /// E.g., `grpc.target` to describe the name of a gRPC peer, or `db.hostname` to describe the - /// name of peer DB + /// E.g., `grpc.target` to describe the name of a gRPC peer, or `db.hostname` to describe the name of peer DB #[prost(string, repeated, tag = "16")] #[serde(default)] pub peer_tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, @@ -430,8 +436,7 @@ pub struct ClientGroupedStats { #[serde(default)] pub is_trace_root: i32, } -/// Trilean is an expanded boolean type that is meant to differentiate between being unset and -/// false. +/// Trilean is an expanded boolean type that is meant to differentiate between being unset and false. 
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum Trilean { From 38969df905fe230c0e55ad0500e6d1e5d8a8e7ee Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Thu, 13 Jun 2024 21:33:19 +0200 Subject: [PATCH 09/26] clippy Signed-off-by: Bob Weinand --- Cargo.lock | 123 +- LICENSE-3rdparty.yml | 1536 +++++++++++++++-- ipc/src/platform/unix/mem_handle_macos.rs | 2 +- remote-config/Cargo.toml | 2 +- .../src/dynamic_configuration/data.rs | 3 + .../src/dynamic_configuration/mod.rs | 3 + remote-config/src/fetch/fetcher.rs | 24 +- remote-config/src/fetch/mod.rs | 4 + remote-config/src/fetch/multitarget.rs | 12 +- remote-config/src/fetch/shared.rs | 33 +- remote-config/src/fetch/single.rs | 3 + remote-config/src/fetch/test_server.rs | 4 + remote-config/src/lib.rs | 4 +- remote-config/src/parse.rs | 29 +- remote-config/src/targets.rs | 3 + sidecar-ffi/src/lib.rs | 1 - sidecar-ffi/tests/sidecar.rs | 19 +- sidecar/src/service/remote_configs.rs | 3 + sidecar/src/shm_remote_config.rs | 35 +- sidecar/src/tracer.rs | 3 - tools/docker/Dockerfile.build | 1 + trace-protobuf/src/pb.rs | 81 +- trace-protobuf/src/serde.rs | 3 + 23 files changed, 1648 insertions(+), 283 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5ef7d2df1..69e0118db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8,7 +8,7 @@ version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ - "gimli", + "gimli 0.28.1", ] [[package]] @@ -521,6 +521,7 @@ dependencies = [ "hyper 0.14.28", "once_cell", "serde_json", + "strum", "tempfile", ] @@ -591,19 +592,20 @@ checksum = "a1d084b0137aaa901caf9f1e8b21daa6aa24d41cd806e111335541eff9683bd6" [[package]] name = "blazesym" -version = "0.2.0-alpha.11" -source = "git+https://github.com/libbpf/blazesym.git?rev=v0.2.0-alpha.11#16bfee4bca2fe73e19f9530d334a9523d9551cbd" +version = "0.2.0-rc.0" +source = 
"git+https://github.com/libbpf/blazesym.git?rev=v0.2.0-rc.0#2f393f66a448f46ea71889e81a8866799762463d" dependencies = [ "cpp_demangle", - "gimli", + "gimli 0.30.0", "libc", + "miniz_oxide", "rustc-demangle", ] [[package]] name = "blazesym-c" -version = "0.0.0" -source = "git+https://github.com/libbpf/blazesym.git?rev=v0.2.0-alpha.11#16bfee4bca2fe73e19f9530d334a9523d9551cbd" +version = "0.1.0-alpha.1" +source = "git+https://github.com/libbpf/blazesym.git?rev=v0.2.0-rc.0#2f393f66a448f46ea71889e81a8866799762463d" dependencies = [ "blazesym", "memoffset 0.9.1", @@ -775,7 +777,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da6bc11b07529f16944307272d5bd9b22530bc7d05751717c9d416586cedab49" dependencies = [ "clap 3.2.25", - "heck", + "heck 0.4.1", "indexmap 1.9.3", "log", "proc-macro2", @@ -922,7 +924,7 @@ version = "4.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", "syn 2.0.58", @@ -1073,6 +1075,7 @@ dependencies = [ "ciborium", "clap 4.4.18", "criterion-plot", + "csv", "is-terminal", "itertools 0.10.5", "num-traits", @@ -1148,6 +1151,27 @@ dependencies = [ "typenum", ] +[[package]] +name = "csv" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" +dependencies = [ + "csv-core", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "csv-core" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" +dependencies = [ + "memchr", +] + [[package]] name = "current_platform" version = "0.2.0" @@ -1263,6 +1287,15 @@ dependencies = [ "uuid", ] +[[package]] +name = "datadog-ddsketch" +version = "10.0.0" +dependencies = [ + "prost 0.11.9", + "prost-build", + 
"protoc-bin-vendored", +] + [[package]] name = "datadog-ipc" version = "0.1.0" @@ -1496,7 +1529,7 @@ dependencies = [ [[package]] name = "datadog-trace-mini-agent" -version = "0.4.0" +version = "0.4.2" dependencies = [ "anyhow", "async-trait", @@ -1521,6 +1554,7 @@ name = "datadog-trace-normalization" version = "10.0.0" dependencies = [ "anyhow", + "criterion", "datadog-trace-protobuf", "duplicate", "rand", @@ -1553,6 +1587,8 @@ dependencies = [ "protoc-bin-vendored", "serde", "serde_bytes", + "serde_json", + "tokio", ] [[package]] @@ -1572,7 +1608,9 @@ dependencies = [ "log", "prost 0.11.9", "rand", + "rmp", "rmp-serde", + "rmpv", "serde", "serde_json", "tokio", @@ -1621,6 +1659,8 @@ name = "ddtelemetry" version = "10.0.0" dependencies = [ "anyhow", + "base64 0.22.1", + "datadog-ddsketch", "ddcommon", "futures", "hashbrown 0.12.3", @@ -1732,7 +1772,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0a4be4cd710e92098de6ad258e6e7c24af11c29c5142f3c6f2a545652480ff8" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro-error", ] @@ -2093,6 +2133,12 @@ name = "gimli" version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" + +[[package]] +name = "gimli" +version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2e1d97fbe9722ba9bbd0c97051c2956e726562b61f86a25a4360398a40edfc9" dependencies = [ "fallible-iterator", "indexmap 2.2.6", @@ -2203,6 +2249,12 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + [[package]] name = "hermit-abi" version = "0.1.19" @@ -2906,6 +2958,7 @@ 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" dependencies = [ "adler", + "simd-adler32", ] [[package]] @@ -3505,7 +3558,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", - "heck", + "heck 0.4.1", "itertools 0.10.5", "lazy_static", "log", @@ -3774,9 +3827,9 @@ dependencies = [ [[package]] name = "rmp" -version = "0.8.12" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9860a6cc38ed1da53456442089b4dfa35e7cedaa326df63017af88385e6b20" +checksum = "228ed7c16fa39782c3b3468e974aec2795e9089153cd08ee2e9aefb3613334c4" dependencies = [ "byteorder", "num-traits", @@ -3794,6 +3847,16 @@ dependencies = [ "serde", ] +[[package]] +name = "rmpv" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58450723cd9ee93273ce44a20b6ec4efe17f8ed2e3631474387bfdecf18bb2a9" +dependencies = [ + "num-traits", + "rmp", +] + [[package]] name = "rustc-demangle" version = "0.1.23" @@ -3840,8 +3903,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a218f0f6d05669de4eabfb24f31ce802035c952429d037507b4a4a39f0e60c5b" dependencies = [ "aws-lc-rs", - "log", "once_cell", + "ring", "rustls-pki-types", "rustls-webpki", "subtle", @@ -4021,9 +4084,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.115" +version = "1.0.120" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd" +checksum = "4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5" dependencies = [ "itoa", "ryu", @@ -4146,6 +4209,12 @@ dependencies = [ "libc", ] +[[package]] +name = "simd-adler32" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" + [[package]] name = "simd-json" version = "0.13.9" @@ -4269,6 +4338,28 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +[[package]] +name = "strum" +version = "0.26.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.58", +] + [[package]] name = "subtle" version = "2.5.0" diff --git a/LICENSE-3rdparty.yml b/LICENSE-3rdparty.yml index 7e6d63349..267eaeaa3 100644 --- a/LICENSE-3rdparty.yml +++ b/LICENSE-3rdparty.yml @@ -1,4 +1,4 @@ -root_name: datadog-alloc, datadog-crashtracker, ddcommon, ddtelemetry, datadog-ddsketch, datadog-profiling, datadog-profiling-ffi, data-pipeline-ffi, data-pipeline, datadog-trace-normalization, datadog-trace-protobuf, datadog-trace-utils, ddcommon-ffi, build_common, ddtelemetry-ffi, symbolizer-ffi, datadog-profiling-replayer, tools, datadog-ipc, datadog-ipc-macros, tarpc, tarpc-plugins, spawn_worker, cc_utils, datadog-sidecar, datadog-sidecar-macros, datadog-sidecar-ffi, sidecar_mockgen, datadog-trace-obfuscation, test_spawn_from_lib, datadog-serverless-trace-mini-agent, datadog-trace-mini-agent +root_name: datadog-alloc, datadog-crashtracker, ddcommon, ddtelemetry, datadog-ddsketch, datadog-profiling, datadog-profiling-ffi, data-pipeline-ffi, data-pipeline, datadog-trace-normalization, datadog-trace-protobuf, datadog-trace-utils, ddcommon-ffi, build_common, ddtelemetry-ffi, symbolizer-ffi, datadog-profiling-replayer, 
datadog-dynamic-configuration, tools, datadog-ipc, datadog-ipc-macros, tarpc, tarpc-plugins, spawn_worker, cc_utils, datadog-remote-config, datadog-sidecar, datadog-sidecar-macros, datadog-sidecar-ffi, sidecar_mockgen, datadog-trace-obfuscation, test_spawn_from_lib, datadog-serverless-trace-mini-agent, datadog-trace-mini-agent third_party_libraries: - package_name: addr2line package_version: 0.21.0 @@ -2595,7 +2595,7 @@ third_party_libraries: OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - package_name: aws-lc-rs - package_version: 1.8.0 + package_version: 1.7.2 repository: https://github.com/awslabs/aws-lc-rs license: ISC AND (Apache-2.0 OR ISC) licenses: @@ -3008,7 +3008,7 @@ third_party_libraries: ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - package_name: aws-lc-sys - package_version: 0.19.0 + package_version: 0.17.0 repository: https://github.com/aws/aws-lc-rs license: ISC AND (Apache-2.0 OR ISC) AND OpenSSL licenses: @@ -4763,6 +4763,241 @@ third_party_libraries: CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+- package_name: block-buffer + package_version: 0.10.4 + repository: https://github.com/RustCrypto/utils + license: MIT OR Apache-2.0 + licenses: + - license: MIT + text: | + Copyright (c) 2018-2019 The RustCrypto Project Developers + + Permission is hereby granted, free of charge, to any + person obtaining a copy of this software and associated + documentation files (the "Software"), to deal in the + Software without restriction, including without + limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of + the Software, and to permit persons to whom the Software + is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice + shall be included in all copies or substantial portions + of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF + ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED + TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT + SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + - license: Apache-2.0 + text: |2 + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. - package_name: blocking package_version: 1.6.0 repository: https://github.com/smol-rs/blocking @@ -7139,23 +7374,258 @@ third_party_libraries: DEALINGS IN THE SOFTWARE. - license: Apache-2.0 text: " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. 
If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. 
You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. 
Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n" -- package_name: crc32fast - package_version: 1.4.0 - repository: https://github.com/srijs/rust-crc32fast +- package_name: cpufeatures + package_version: 0.2.12 + repository: https://github.com/RustCrypto/utils license: MIT OR Apache-2.0 licenses: - license: MIT text: | - MIT License + Copyright (c) 2020 The RustCrypto Project Developers - Copyright (c) 2018 Sam Rijs, Alex Crichton and contributors + Permission is hereby granted, free of charge, to any + person obtaining a copy of this software and associated + documentation files (the "Software"), to deal in the + Software without restriction, including without + limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of + the Software, and to permit persons to whom the Software + is furnished to do so, subject to the following + conditions: - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons 
to whom the Software is - furnished to do so, subject to the following conditions: + The above copyright notice and this permission notice + shall be included in all copies or substantial portions + of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF + ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED + TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT + SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + - license: Apache-2.0 + text: |2 + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +- package_name: crc32fast + package_version: 1.4.0 + repository: https://github.com/srijs/rust-crc32fast + license: MIT OR Apache-2.0 + licenses: + - license: MIT + text: | + MIT License + + Copyright (c) 2018 Sam Rijs, Alex Crichton and contributors + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. @@ -7428,21 +7898,256 @@ third_party_libraries: is furnished to do so, subject to the following conditions: - The above copyright notice and this permission notice - shall be included in all copies or substantial portions - of the Software. + The above copyright notice and this permission notice + shall be included in all copies or substantial portions + of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF + ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED + TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT + SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + - license: Apache-2.0 + text: " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. 
If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. 
You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. 
Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n" +- package_name: crypto-common + package_version: 0.1.6 + repository: https://github.com/RustCrypto/traits + license: MIT OR Apache-2.0 + licenses: + - license: MIT + text: | + Copyright (c) 2021 RustCrypto Developers + + Permission is hereby granted, free of charge, to any + person obtaining a copy of this software and associated + documentation files (the "Software"), to deal in the + Software without restriction, including without + limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of + the Software, and to permit persons to whom the Software + is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice + shall be included in all copies or substantial portions + of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF + ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED + TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT + SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + - license: Apache-2.0 + text: |2 + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF - ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED - TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A - PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT - SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR - IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - DEALINGS IN THE SOFTWARE. - - license: Apache-2.0 - text: " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. 
If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. 
You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. 
Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n" + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
- package_name: current_platform package_version: 0.2.0 repository: https://github.com/Shnatsel/current_platform @@ -7891,136 +8596,371 @@ third_party_libraries: (b) You must cause any modified files to carry prominent notices stating that You changed the files; and - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2022 Jacob Pratt et al. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +- package_name: derivative + package_version: 2.2.0 + repository: https://github.com/mcarton/rust-derivative + license: MIT/Apache-2.0 + licenses: + - license: MIT + text: | + Copyright (c) 2016 Martin Carton + + Permission is hereby granted, free of charge, to any person obtaining a copy of + this software and associated documentation files (the "Software"), to deal in + the Software without restriction, including without limitation the rights to + use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies + of the Software, and to permit persons to whom the Software is furnished to do + so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + - license: Apache-2.0 + text: " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. 
Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. 
For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of 
the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. 
Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n" +- package_name: digest + package_version: 0.10.7 + repository: https://github.com/RustCrypto/traits + license: MIT OR Apache-2.0 + licenses: + - license: MIT + text: | + Copyright (c) 2017 Artyom Pavlov + + Permission is hereby granted, free of charge, to any + person obtaining a copy of this software and associated + documentation files (the "Software"), to deal in the + Software without restriction, including without + limitation 
the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of + the Software, and to permit persons to whom the Software + is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice + shall be included in all copies or substantial portions + of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF + ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED + TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT + SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + - license: Apache-2.0 + text: |2 + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. 
+ (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. 
+ (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. 
+ You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. - END OF TERMS AND CONDITIONS + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. - APPENDIX: How to apply the Apache License to your work. + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. - Copyright 2022 Jacob Pratt et al. + 9. Accepting Warranty or Additional Liability. 
While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at + END OF TERMS AND CONDITIONS - http://www.apache.org/licenses/LICENSE-2.0 + APPENDIX: How to apply the Apache License to your work. - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -- package_name: derivative - package_version: 2.2.0 - repository: https://github.com/mcarton/rust-derivative - license: MIT/Apache-2.0 - licenses: - - license: MIT - text: | - Copyright (c) 2016 Martin Carton + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
- Permission is hereby granted, free of charge, to any person obtaining a copy of - this software and associated documentation files (the "Software"), to deal in - the Software without restriction, including without limitation the rights to - use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies - of the Software, and to permit persons to whom the Software is furnished to do - so, subject to the following conditions: + Copyright [yyyy] [name of copyright owner] - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. - - license: Apache-2.0 - text: " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. 
If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. 
You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. 
Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n" + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. - package_name: educe package_version: 0.4.23 repository: https://github.com/magiclen/educe @@ -9402,6 +10342,13 @@ third_party_libraries: DEALINGS IN THE SOFTWARE. - license: Apache-2.0 text: " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. 
If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. 
You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. 
Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright (c) 2016 Alex Crichton\nCopyright (c) 2017 The Tokio Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n" +- package_name: generic-array + package_version: 0.14.7 + repository: https://github.com/fizyk20/generic-array.git + license: MIT + licenses: + - license: MIT + text: "The MIT License (MIT)\r\n\r\nCopyright (c) 2015 Bartłomiej Kamiński\r\n\r\nPermission is hereby granted, free of charge, to any person obtaining a copy\r\nof this software and associated documentation files (the \"Software\"), to deal\r\nin the Software without restriction, including without limitation the rights\r\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\ncopies of the Software, and to permit persons to whom the Software is\r\nfurnished to do so, subject to the following conditions:\r\n\r\nThe above copyright notice and this permission notice shall be included in all\r\ncopies or substantial portions of the Software.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\r\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\nSOFTWARE." - package_name: getrandom package_version: 0.2.14 repository: https://github.com/rust-random/getrandom @@ -10834,7 +11781,7 @@ third_party_libraries: OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - package_name: hyper - package_version: 1.4.0 + package_version: 1.3.1 repository: https://github.com/hyperium/hyper license: MIT licenses: @@ -10920,7 +11867,7 @@ third_party_libraries: IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - package_name: hyper-util - package_version: 0.1.6 + package_version: 0.1.5 repository: https://github.com/hyperium/hyper-util license: MIT licenses: @@ -19776,7 +20723,7 @@ third_party_libraries: IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - package_name: rustls - package_version: 0.23.10 + package_version: 0.23.9 repository: https://github.com/rustls/rustls license: Apache-2.0 OR ISC OR MIT licenses: @@ -19878,7 +20825,7 @@ third_party_libraries: IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - package_name: rustls-native-certs - package_version: 0.7.1 + package_version: 0.7.0 repository: https://github.com/rustls/rustls-native-certs license: Apache-2.0 OR ISC OR MIT licenses: @@ -20065,7 +21012,7 @@ third_party_libraries: - license: Apache-2.0 text: " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. 
Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. 
For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of 
the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. 
Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright 2023 Dirkjan Ochtman\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n" - package_name: rustls-webpki - package_version: 0.102.5 + package_version: 0.102.4 repository: https://github.com/rustls/webpki license: ISC licenses: @@ -21476,7 +22423,7 @@ third_party_libraries: END OF TERMS AND CONDITIONS - package_name: serde_json - package_version: 1.0.117 + package_version: 1.0.120 repository: https://github.com/serde-rs/json license: MIT OR Apache-2.0 licenses: @@ -21935,13 +22882,47 @@ third_party_libraries: 
http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +- package_name: serde_with + package_version: 3.7.0 + repository: https://github.com/jonasbb/serde_with/ + license: MIT OR Apache-2.0 + licenses: + - license: MIT + text: | + Copyright (c) 2015 + + Permission is hereby granted, free of charge, to any + person obtaining a copy of this software and associated + documentation files (the "Software"), to deal in the + Software without restriction, including without + limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of + the Software, and to permit persons to whom the Software + is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice + shall be included in all copies or substantial portions + of the Software. -- package_name: serde_with + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF + ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED + TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT + SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ - license: Apache-2.0 + text: " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. 
For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of 
the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. 
Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n" +- package_name: serde_with_macros package_version: 3.7.0 repository: https://github.com/jonasbb/serde_with/ license: MIT OR Apache-2.0 @@ -21975,14 +22956,16 @@ third_party_libraries: DEALINGS IN THE SOFTWARE. - license: Apache-2.0 text: " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. 
Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. 
For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of 
the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. 
Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n" -- package_name: serde_with_macros - package_version: 3.7.0 - repository: https://github.com/jonasbb/serde_with/ +- package_name: sha2 + package_version: 0.10.8 + repository: https://github.com/RustCrypto/hashes license: MIT OR Apache-2.0 licenses: - license: MIT text: | - Copyright (c) 2015 + Copyright (c) 2006-2009 Graydon Hoare + Copyright (c) 2009-2013 Mozilla Foundation + Copyright (c) 2016 Artyom Pavlov Permission is hereby 
granted, free of charge, to any person obtaining a copy of this software and associated @@ -22008,7 +22991,208 @@ third_party_libraries: IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - license: Apache-2.0 - text: " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or 
Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of 
the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. 
Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n" + text: |2 + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. - package_name: sharded-slab package_version: 0.1.7 repository: https://github.com/hawkw/sharded-slab @@ -23000,7 +24184,7 @@ third_party_libraries: OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - package_name: strum - package_version: 0.26.2 + package_version: 0.26.3 repository: https://github.com/Peternator7/strum license: MIT licenses: @@ -23056,12 +24240,12 @@ third_party_libraries: OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - package_name: subtle - package_version: 2.6.1 + package_version: 2.5.0 repository: https://github.com/dalek-cryptography/subtle license: BSD-3-Clause licenses: - license: BSD-3-Clause - text: "Copyright (c) 2016-2017 Isis Agora Lovecruft, Henry de Valence. 
All rights reserved.\nCopyright (c) 2016-2024 Isis Agora Lovecruft. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n1. Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\nIS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\nTO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\nPARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\nTO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\nPROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\nLIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \n" + text: "Copyright (c) 2016-2017 Isis Agora Lovecruft, Henry de Valence. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n1. Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n\n2. 
Redistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\nIS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\nTO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\nPARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\nTO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\nPROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\nLIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \n" - package_name: symbolic-common package_version: 12.8.0 repository: https://github.com/getsentry/symbolic @@ -25637,6 +26821,36 @@ third_party_libraries: LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+- package_name: typenum + package_version: 1.17.0 + repository: https://github.com/paholg/typenum + license: MIT OR Apache-2.0 + licenses: + - license: MIT + text: | + The MIT License (MIT) + + Copyright (c) 2014 Paho Lurie-Gregg + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + - license: Apache-2.0 + text: " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. 
If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. 
You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n8. 
Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\nCopyright 2014 Paho Lurie-Gregg\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License." - package_name: unicase package_version: 2.7.0 repository: https://github.com/seanmonstar/unicase diff --git a/ipc/src/platform/unix/mem_handle_macos.rs b/ipc/src/platform/unix/mem_handle_macos.rs index 7ae0fc356..6a5930e0e 100644 --- a/ipc/src/platform/unix/mem_handle_macos.rs +++ b/ipc/src/platform/unix/mem_handle_macos.rs @@ -58,7 +58,7 @@ pub(crate) fn mmap_handle(mut handle: T) -> io::Result(mapped: &mut MappedMem) { +pub(crate) fn munmap_handle(mapped: &MappedMem) { unsafe { _ = munmap(mapped.ptr, mapped.mem.get_size()); } diff --git a/remote-config/Cargo.toml b/remote-config/Cargo.toml index 9c32e2843..ea7a09e96 100644 --- a/remote-config/Cargo.toml +++ b/remote-config/Cargo.toml @@ -15,7 +15,7 @@ hyper = { version = "0.14", features = ["client"], default-features = false } http = "0.2" base64 = "0.21.0" sha2 = "0.10" -uuid = "1.7.0" +uuid = { version = "1.7.0", features = ["v4"] } futures-util = "0.3" tokio = { version = "1.36.0" } tokio-util = "0.7.10" diff --git a/remote-config/src/dynamic_configuration/data.rs b/remote-config/src/dynamic_configuration/data.rs index 0646bc1b7..89443a6e4 100644 --- a/remote-config/src/dynamic_configuration/data.rs +++ 
b/remote-config/src/dynamic_configuration/data.rs @@ -1,3 +1,6 @@ +// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + use serde::{Deserialize, Serialize}; use std::collections::HashMap; diff --git a/remote-config/src/dynamic_configuration/mod.rs b/remote-config/src/dynamic_configuration/mod.rs index 7a345e4c6..e0018372e 100644 --- a/remote-config/src/dynamic_configuration/mod.rs +++ b/remote-config/src/dynamic_configuration/mod.rs @@ -1 +1,4 @@ +// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + pub mod data; diff --git a/remote-config/src/fetch/fetcher.rs b/remote-config/src/fetch/fetcher.rs index 886f79f18..e8d470e3a 100644 --- a/remote-config/src/fetch/fetcher.rs +++ b/remote-config/src/fetch/fetcher.rs @@ -1,3 +1,6 @@ +// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + use crate::targets::TargetsList; use crate::{RemoteConfigCapabilities, RemoteConfigPath, RemoteConfigProduct, Target}; use base64::Engine; @@ -444,7 +447,7 @@ pub mod tests { }); } - static DUMMY_RUNTIME_ID: &'static str = "3b43524b-a70c-45dc-921d-34504e50c5eb"; + static DUMMY_RUNTIME_ID: &str = "3b43524b-a70c-45dc-921d-34504e50c5eb"; #[derive(Default)] pub struct Storage { @@ -510,6 +513,7 @@ pub mod tests { } #[tokio::test] + #[cfg_attr(miri, ignore)] async fn test_inactive() { let server = RemoteConfigServer::spawn(); let storage = Arc::new(Storage::default()); @@ -539,6 +543,7 @@ pub mod tests { } #[tokio::test] + #[cfg_attr(miri, ignore)] async fn test_fetch_cache() { let server = RemoteConfigServer::spawn(); server.files.lock().unwrap().insert( @@ -588,13 +593,13 @@ pub mod tests { &[RemoteConfigCapabilities::ApmTracingCustomTags as u8] ); assert_eq!(client.products, &["APM_TRACING", "LIVE_DEBUGGING"]); - assert_eq!(client.is_tracer, true); - assert_eq!(client.is_agent, false); + assert!(client.is_tracer); + 
assert!(!client.is_agent); assert_eq!(client.id, "foo"); let state = client.state.as_ref().unwrap(); assert_eq!(state.error, "test"); - assert_eq!(state.has_error, true); + assert!(state.has_error); assert!(state.config_states.is_empty()); assert!(state.backend_client_state.is_empty()); @@ -644,15 +649,14 @@ pub mod tests { &[RemoteConfigCapabilities::ApmTracingCustomTags as u8] ); assert_eq!(client.products, &["APM_TRACING", "LIVE_DEBUGGING"]); - assert_eq!(client.is_tracer, true); - assert_eq!(client.is_agent, false); + assert!(client.is_tracer); + assert!(!client.is_agent); assert_eq!(client.id, "foo"); let state = client.state.as_ref().unwrap(); - assert_eq!(state.error, "test"); - assert_eq!(state.has_error, true); - assert!(state.config_states.is_empty()); - assert!(state.backend_client_state.is_empty()); + assert!(!state.has_error); + assert!(!state.config_states.is_empty()); + assert!(!state.backend_client_state.is_empty()); let cached = &req.cached_target_files[0]; assert_eq!(cached.path, PATH_FIRST.to_string()); diff --git a/remote-config/src/fetch/mod.rs b/remote-config/src/fetch/mod.rs index 860661bf1..a14319587 100644 --- a/remote-config/src/fetch/mod.rs +++ b/remote-config/src/fetch/mod.rs @@ -1,3 +1,6 @@ +// Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + mod fetcher; mod multitarget; mod shared; @@ -5,6 +8,7 @@ mod single; #[cfg(any(test, feature = "test"))] pub mod test_server; +#[allow(clippy::useless_attribute)] // different clippy versions are differently picky #[cfg_attr(test, allow(ambiguous_glob_reexports))] // ignore mod tests re-export pub use fetcher::*; pub use multitarget::*; diff --git a/remote-config/src/fetch/multitarget.rs b/remote-config/src/fetch/multitarget.rs index 331937306..0d9359ca3 100644 --- a/remote-config/src/fetch/multitarget.rs +++ b/remote-config/src/fetch/multitarget.rs @@ -1,5 +1,5 @@ -// Unless explicitly stated otherwise all files in this repository are licensed under the Apache -// License Version 2.0. This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. +// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 use crate::fetch::{ ConfigFetcherState, ConfigInvariants, FileStorage, RefcountedFile, RefcountingStorage, @@ -475,6 +475,7 @@ mod tests { struct MultiFileStorage { rc: RcFileStorage, on_dead_completer: Arc>>>, + #[allow(clippy::type_complexity)] recent_fetches: Arc, Vec>>>>, awaiting_fetches: Arc, awaited_fetched_done: Arc>>>, @@ -619,11 +620,12 @@ mod tests { } } - static RT_ID_1: &'static str = "3b43524b-a70c-45dc-921d-34504e50c5eb"; - static RT_ID_2: &'static str = "ae588386-8464-43ba-bd3a-3e2d36b2c22c"; - static RT_ID_3: &'static str = "0125dff8-d9a7-4fd3-a0c2-0ca3b12816a1"; + static RT_ID_1: &str = "3b43524b-a70c-45dc-921d-34504e50c5eb"; + static RT_ID_2: &str = "ae588386-8464-43ba-bd3a-3e2d36b2c22c"; + static RT_ID_3: &str = "0125dff8-d9a7-4fd3-a0c2-0ca3b12816a1"; #[tokio::test] + #[cfg_attr(miri, ignore)] async fn test_multi_fetcher() { let server = RemoteConfigServer::spawn(); let (on_dead, on_dead_completer) = ManualFuture::new(); diff --git a/remote-config/src/fetch/shared.rs 
b/remote-config/src/fetch/shared.rs index 59deaec7e..a4de6e65b 100644 --- a/remote-config/src/fetch/shared.rs +++ b/remote-config/src/fetch/shared.rs @@ -1,3 +1,6 @@ +// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + use crate::fetch::{ConfigFetcher, ConfigFetcherState, ConfigInvariants, FileStorage, OpaqueState}; use crate::{RemoteConfigPath, Target}; use std::collections::HashMap; @@ -242,7 +245,9 @@ impl SharedFetcher { { let state = storage.state.clone(); let mut fetcher = ConfigFetcher::new(storage, state); - fetcher.timeout.store(self.timeout.load(Ordering::Relaxed), Ordering::Relaxed); + fetcher + .timeout + .store(self.timeout.load(Ordering::Relaxed), Ordering::Relaxed); let mut opaque_state = OpaqueState::default(); @@ -380,6 +385,7 @@ pub mod tests { } #[tokio::test] + #[cfg_attr(miri, ignore)] async fn test_single_fetcher() { let server = RemoteConfigServer::spawn(); let storage = RcFileStorage::default(); @@ -435,7 +441,7 @@ pub mod tests { let req = req.as_ref().unwrap(); let client = req.client.as_ref().unwrap(); let state = client.state.as_ref().unwrap(); - assert_eq!(state.has_error, false); + assert!(!state.has_error); inner_fetcher.cancel(); @@ -451,6 +457,7 @@ pub mod tests { } #[tokio::test] + #[cfg_attr(miri, ignore)] async fn test_parallel_fetchers() { let server = RemoteConfigServer::spawn(); let storage = RcFileStorage::default(); @@ -505,9 +512,24 @@ pub mod tests { let server_3 = server.clone(); let server_3_storage = storage.clone(); + let server_3_rc_storage = rc_storage.clone(); let server_third_1 = move || { + // It may happen that the other fetcher is _right now_ doing a fetch. + // This leads to a race condition: + // - If the other fetcher is currently fetching, then the file will be inactive and + // dropped once its fetching ended. + // - If there's no other fetching active, it'll immediately drop the file. 
+ let (runners, _) = server_3_rc_storage.run_id.runners_and_run_id(); + let (expected_files, expected_inactive) = if runners == 0 { (1, 0) } else { (2, 1) }; + assert_eq!( + server_3_rc_storage.inactive.lock().unwrap().len(), + expected_inactive + ); // one file should be expired by now - assert_eq!(server_3_storage.0.files.lock().unwrap().len(), 1); + assert_eq!( + server_3_storage.0.files.lock().unwrap().len(), + expected_files + ); server_3.files.lock().unwrap().clear(); }; let server_third_2 = server_third_1.clone(); @@ -553,7 +575,7 @@ pub mod tests { server_third_1(); } } - 6 | 7 => { + 6 => { assert_eq!(fetched.len(), 0); inner_fetcher_1.cancel(); @@ -588,9 +610,6 @@ pub mod tests { if i == 5 { server_third_2(); } - } - 6 | 7 => { - assert_eq!(fetched.len(), 0); inner_fetcher_2.cancel(); } diff --git a/remote-config/src/fetch/single.rs b/remote-config/src/fetch/single.rs index 97455a60d..5138b0af1 100644 --- a/remote-config/src/fetch/single.rs +++ b/remote-config/src/fetch/single.rs @@ -1,3 +1,6 @@ +// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + use crate::fetch::{ConfigFetcher, ConfigFetcherState, ConfigInvariants, FileStorage, OpaqueState}; use crate::Target; use std::sync::Arc; diff --git a/remote-config/src/fetch/test_server.rs b/remote-config/src/fetch/test_server.rs index 6f9a7dd7e..c39412cb7 100644 --- a/remote-config/src/fetch/test_server.rs +++ b/remote-config/src/fetch/test_server.rs @@ -1,3 +1,6 @@ +// Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + use crate::fetch::ConfigInvariants; use crate::targets::{TargetData, TargetsCustom, TargetsData, TargetsList}; use crate::{RemoteConfigCapabilities, RemoteConfigPath, RemoteConfigProduct, Target}; @@ -21,6 +24,7 @@ use tokio::sync::mpsc::Sender; pub struct RemoteConfigServer { pub last_request: Mutex>, + #[allow(clippy::type_complexity)] pub files: Mutex>, u64, String)>>, pub next_response: Mutex>>, pub endpoint: Endpoint, diff --git a/remote-config/src/lib.rs b/remote-config/src/lib.rs index cb7c4ee98..f6ca1a364 100644 --- a/remote-config/src/lib.rs +++ b/remote-config/src/lib.rs @@ -1,5 +1,5 @@ -// Unless explicitly stated otherwise all files in this repository are licensed under the Apache -// License Version 2.0. This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. +// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 pub mod dynamic_configuration; pub mod fetch; diff --git a/remote-config/src/parse.rs b/remote-config/src/parse.rs index bd1c3524f..ae17b846c 100644 --- a/remote-config/src/parse.rs +++ b/remote-config/src/parse.rs @@ -1,5 +1,5 @@ -// Unless explicitly stated otherwise all files in this repository are licensed under the Apache -// License Version 2.0. This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2021-Present Datadog, Inc. +// Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 use crate::dynamic_configuration::data::DynamicConfigFile; use serde::{Deserialize, Serialize}; @@ -66,22 +66,21 @@ impl RemoteConfigPath { } } -impl ToString for RemoteConfigPath { - fn to_string(&self) -> String { +impl Display for RemoteConfigPath { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.source { - RemoteConfigSource::Datadog(id) => format!( + RemoteConfigSource::Datadog(id) => write!( + f, "datadog/{}/{}/{}/{}", - id, - self.product.to_string(), - self.config_id, - self.name - ), - RemoteConfigSource::Employee => format!( - "employee/{}/{}/{}", - self.product.to_string(), - self.config_id, - self.name + id, self.product, self.config_id, self.name ), + RemoteConfigSource::Employee => { + write!( + f, + "employee/{}/{}/{}", + self.product, self.config_id, self.name + ) + } } } } diff --git a/remote-config/src/targets.rs b/remote-config/src/targets.rs index 805f7dcdc..c0c07384d 100644 --- a/remote-config/src/targets.rs +++ b/remote-config/src/targets.rs @@ -1,3 +1,6 @@ +// Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + use serde::Deserialize; use serde_json::value::RawValue; use std::collections::HashMap; diff --git a/sidecar-ffi/src/lib.rs b/sidecar-ffi/src/lib.rs index 313ee427b..3e907e46b 100644 --- a/sidecar-ffi/src/lib.rs +++ b/sidecar-ffi/src/lib.rs @@ -4,7 +4,6 @@ use datadog_ipc::platform::{ FileBackedHandle, MappedMem, NamedShmHandle, PlatformHandle, ShmHandle, }; -use datadog_live_debugger::debugger_defs::DebuggerPayload; use datadog_remote_config::fetch::ConfigInvariants; use datadog_remote_config::{RemoteConfigCapabilities, RemoteConfigProduct, Target}; use datadog_sidecar::agent_remote_config::{ diff --git a/sidecar-ffi/tests/sidecar.rs b/sidecar-ffi/tests/sidecar.rs index e4833eab8..4772c391d 100644 --- a/sidecar-ffi/tests/sidecar.rs +++ b/sidecar-ffi/tests/sidecar.rs @@ -12,6 +12,7 @@ macro_rules! assert_maybe_no_error { } use ddcommon::Endpoint; +use std::ptr::{null, null_mut}; use std::time::Duration; #[cfg(unix)] use std::{ @@ -89,11 +90,18 @@ fn test_ddog_sidecar_register_app() { url: hyper::Uri::from_static("http://localhost:8082/"), }, &Endpoint::default(), - 1000, + "".into(), + "".into(), 1000000, 10000000, + 10000000, "".into(), "".into(), + null_mut(), + null(), + 0, + null(), + 0, ); let meta = ddog_sidecar_runtimeMeta_build( @@ -133,11 +141,18 @@ fn test_ddog_sidecar_register_app() { url: hyper::Uri::from_static("http://localhost:8083/"), }, &Endpoint::default(), - 1000, + "".into(), + "".into(), 1000000, 10000000, + 10000000, "".into(), "".into(), + null_mut(), + null(), + 0, + null(), + 0, ); //TODO: Shutdown the service diff --git a/sidecar/src/service/remote_configs.rs b/sidecar/src/service/remote_configs.rs index 8b22ff5f3..1890a5473 100644 --- a/sidecar/src/service/remote_configs.rs +++ b/sidecar/src/service/remote_configs.rs @@ -1,3 +1,6 @@ +// Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + use crate::shm_remote_config::{ShmRemoteConfigs, ShmRemoteConfigsGuard}; use datadog_remote_config::fetch::{ConfigInvariants, NotifyTarget}; use std::collections::hash_map::Entry; diff --git a/sidecar/src/shm_remote_config.rs b/sidecar/src/shm_remote_config.rs index befac2717..f8955da83 100644 --- a/sidecar/src/shm_remote_config.rs +++ b/sidecar/src/shm_remote_config.rs @@ -81,6 +81,7 @@ struct ConfigFileStorage { invariants: ConfigInvariants, /// All writers writers: Arc, RemoteConfigWriter>>>, + #[allow(clippy::type_complexity)] on_dead: Arc>>>, } @@ -128,7 +129,7 @@ fn store_shm( ) -> anyhow::Result { let name = format!("ddrc{}-{}", primary_sidecar_identifier(), version,); // as much signal as possible to be collision free - let hashed_path = BASE64_URL_SAFE_NO_PAD.encode(Sha224::digest(&path.to_string())); + let hashed_path = BASE64_URL_SAFE_NO_PAD.encode(Sha224::digest(path.to_string())); #[cfg(target_os = "macos")] let sliced_path = &hashed_path[..30 - name.len()]; #[cfg(not(target_os = "macos"))] @@ -389,22 +390,23 @@ impl RemoteConfigManager { } while let Some(config) = self.last_read_configs.pop() { - if !self.active_configs.contains_key(&config) { - match read_config(&config) { + if let Entry::Vacant(entry) = self.active_configs.entry(config) { + match read_config(entry.key()) { Ok(parsed) => { - trace!("Adding remote config file {config}: {parsed:?}"); - self.active_configs.insert( - config, - RemoteConfigPath { - source: parsed.source.clone(), - product: (&parsed.data).into(), - config_id: parsed.config_id.clone(), - name: parsed.name.clone(), - }, - ); + trace!("Adding remote config file {}: {:?}", entry.key(), parsed); + entry.insert(RemoteConfigPath { + source: parsed.source.clone(), + product: (&parsed.data).into(), + config_id: parsed.config_id.clone(), + name: parsed.name.clone(), + }); return RemoteConfigUpdate::Add(parsed); } - Err(e) => warn!("Failed reading remote config file 
{config}; skipping: {e:?}"), + Err(e) => warn!( + "Failed reading remote config file {}; skipping: {:?}", + entry.key(), + e + ), } } } @@ -516,6 +518,7 @@ mod tests { } #[tokio::test] + #[cfg_attr(miri, ignore)] async fn test_shm_updates() { let server = RemoteConfigServer::spawn(); @@ -633,7 +636,7 @@ mod tests { // and start to remove let was_second = if let RemoteConfigUpdate::Remove(update) = manager.fetch_update() { - &update == &*PATH_SECOND + update == *PATH_SECOND } else { unreachable!(); }; @@ -662,7 +665,7 @@ mod tests { // After proper shutdown it must be like all configs were removed let was_second = if let RemoteConfigUpdate::Remove(update) = manager.fetch_update() { - &update == &*PATH_SECOND + update == *PATH_SECOND } else { unreachable!(); }; diff --git a/sidecar/src/tracer.rs b/sidecar/src/tracer.rs index 00b137ae1..9f6034813 100644 --- a/sidecar/src/tracer.rs +++ b/sidecar/src/tracer.rs @@ -8,10 +8,7 @@ use std::str::FromStr; #[derive(Default)] pub struct Config { - pub raw_endpoint: Option, pub endpoint: Option, - pub language: String, - pub tracer_version: String, } impl Config { diff --git a/tools/docker/Dockerfile.build b/tools/docker/Dockerfile.build index 51befed8c..d9f49ac3c 100644 --- a/tools/docker/Dockerfile.build +++ b/tools/docker/Dockerfile.build @@ -85,6 +85,7 @@ COPY "ddsketch/Cargo.toml" "ddsketch/" COPY "profiling/Cargo.toml" "profiling/" COPY "profiling-ffi/Cargo.toml" "profiling-ffi/" COPY "profiling-replayer/Cargo.toml" "profiling-replayer/" +COPY "remote-config/Cargo.toml" "remote-config/" COPY "sidecar/Cargo.toml" "sidecar/" COPY "sidecar/macros/Cargo.toml" "sidecar/macros/" COPY "sidecar-ffi/Cargo.toml" "sidecar-ffi/" diff --git a/trace-protobuf/src/pb.rs b/trace-protobuf/src/pb.rs index 422a9d7a5..a2c5f71c9 100644 --- a/trace-protobuf/src/pb.rs +++ b/trace-protobuf/src/pb.rs @@ -39,10 +39,8 @@ pub struct SpanLink { /// /// Optional. Simple mapping of keys to string values. 
#[prost(map = "string, string", tag = "4")] - pub attributes: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, + pub attributes: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, /// @gotags: msg:"tracestate,omitempty" /// /// Optional. W3C tracestate. @@ -70,8 +68,8 @@ pub struct Span { #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] pub name: ::prost::alloc::string::String, - /// resource is the resource name of this span, also sometimes called the endpoint (for web spans). - /// @gotags: json:"resource" msg:"resource" + /// resource is the resource name of this span, also sometimes called the endpoint (for web + /// spans). @gotags: json:"resource" msg:"resource" #[prost(string, tag = "3")] #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] @@ -118,18 +116,16 @@ pub struct Span { #[prost(map = "string, string", tag = "10")] #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] - pub meta: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, + pub meta: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, /// metrics is a mapping from tag name to tag value for numeric-valued tags. /// @gotags: json:"metrics,omitempty" msg:"metrics,omitempty" #[prost(map = "string, double", tag = "11")] #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] pub metrics: ::std::collections::HashMap<::prost::alloc::string::String, f64>, - /// type is the type of the service with which this span is associated. Example values: web, db, lambda. - /// @gotags: json:"type" msg:"type" + /// type is the type of the service with which this span is associated. Example values: web, + /// db, lambda. 
@gotags: json:"type" msg:"type" #[prost(string, tag = "12")] #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] @@ -140,19 +136,18 @@ pub struct Span { #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] #[serde(skip_serializing_if = "::std::collections::HashMap::is_empty")] - pub meta_struct: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::vec::Vec, - >, - /// span_links represents a collection of links, where each link defines a causal relationship between two spans. - /// @gotags: json:"span_links,omitempty" msg:"span_links,omitempty" + pub meta_struct: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::vec::Vec>, + /// span_links represents a collection of links, where each link defines a causal relationship + /// between two spans. @gotags: json:"span_links,omitempty" msg:"span_links,omitempty" #[prost(message, repeated, tag = "14")] #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] #[serde(skip_serializing_if = "::prost::alloc::vec::Vec::is_empty")] pub span_links: ::prost::alloc::vec::Vec, } -/// TraceChunk represents a list of spans with the same trace ID. In other words, a chunk of a trace. +/// TraceChunk represents a list of spans with the same trace ID. In other words, a chunk of a +/// trace. #[derive(Deserialize, Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -172,10 +167,8 @@ pub struct TraceChunk { /// tags specifies tags common in all `spans`. /// @gotags: json:"tags" msg:"tags" #[prost(map = "string, string", tag = "4")] - pub tags: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, + pub tags: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, /// droppedTrace specifies whether the trace was dropped by samplers or not. 
/// @gotags: json:"dropped_trace" msg:"dropped_trace" #[prost(bool, tag = "5")] @@ -213,10 +206,8 @@ pub struct TracerPayload { /// tags specifies tags common in all `chunks`. /// @gotags: json:"tags" msg:"tags" #[prost(map = "string, string", tag = "7")] - pub tags: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, + pub tags: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, /// env specifies `env` tag that set with the tracer. /// @gotags: json:"env" msg:"env" #[prost(string, tag = "8")] @@ -245,10 +236,8 @@ pub struct AgentPayload { pub tracer_payloads: ::prost::alloc::vec::Vec, /// tags specifies tags common in all `tracerPayloads`. #[prost(map = "string, string", tag = "6")] - pub tags: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, + pub tags: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, /// agentVersion specifies version of the agent. #[prost(string, tag = "7")] pub agent_version: ::prost::alloc::string::String, @@ -279,8 +268,9 @@ pub struct StatsPayload { pub agent_version: ::prost::alloc::string::String, #[prost(bool, tag = "5")] pub client_computed: bool, - /// splitPayload indicates if the payload is actually one of several payloads split out from a larger payload. - /// This field can be used in the backend to signal if re-aggregation is necessary. + /// splitPayload indicates if the payload is actually one of several payloads split out from a + /// larger payload. This field can be used in the backend to signal if re-aggregation is + /// necessary. 
#[prost(bool, tag = "6")] pub split_payload: bool, } @@ -325,8 +315,8 @@ pub struct ClientStatsPayload { #[prost(uint64, tag = "8")] #[serde(default)] pub sequence: u64, - /// AgentAggregation is set by the agent on tracer payloads modified by the agent aggregation layer - /// characterizes counts only and distributions only payloads + /// AgentAggregation is set by the agent on tracer payloads modified by the agent aggregation + /// layer characterizes counts only and distributions only payloads #[prost(string, tag = "9")] #[serde(default)] pub agent_aggregation: ::prost::alloc::string::String, @@ -335,18 +325,20 @@ pub struct ClientStatsPayload { #[prost(string, tag = "10")] #[serde(default)] pub service: ::prost::alloc::string::String, - /// ContainerID specifies the origin container ID. It is meant to be populated by the client and may - /// be enhanced by the agent to ensure it is unique. + /// ContainerID specifies the origin container ID. It is meant to be populated by the client + /// and may be enhanced by the agent to ensure it is unique. #[prost(string, tag = "11")] #[serde(default)] #[serde(rename = "ContainerID")] pub container_id: ::prost::alloc::string::String, - /// Tags specifies a set of tags obtained from the orchestrator (where applicable) using the specified containerID. - /// This field should be left empty by the client. It only applies to some specific environment. + /// Tags specifies a set of tags obtained from the orchestrator (where applicable) using the + /// specified containerID. This field should be left empty by the client. It only applies + /// to some specific environment. #[prost(string, repeated, tag = "12")] #[serde(default)] pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// The git commit SHA is obtained from a trace, where it may be set through a tracer <-> source code integration. + /// The git commit SHA is obtained from a trace, where it may be set through a tracer <-> + /// source code integration. 
#[prost(string, tag = "13")] #[serde(default)] pub git_commit_sha: ::prost::alloc::string::String, @@ -376,7 +368,8 @@ pub struct ClientStatsBucket { #[serde(default)] pub agent_time_shift: i64, } -/// ClientGroupedStats aggregate stats on spans grouped by service, name, resource, status_code, type +/// ClientGroupedStats aggregate stats on spans grouped by service, name, resource, status_code, +/// type #[derive(Deserialize, Serialize)] #[serde(rename_all = "PascalCase")] #[allow(clippy::derive_partial_eq_without_eq)] @@ -427,7 +420,8 @@ pub struct ClientGroupedStats { #[serde(default)] pub span_kind: ::prost::alloc::string::String, /// peer_tags are supplementary tags that further describe a peer entity - /// E.g., `grpc.target` to describe the name of a gRPC peer, or `db.hostname` to describe the name of peer DB + /// E.g., `grpc.target` to describe the name of a gRPC peer, or `db.hostname` to describe the + /// name of peer DB #[prost(string, repeated, tag = "16")] #[serde(default)] pub peer_tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, @@ -436,7 +430,8 @@ pub struct ClientGroupedStats { #[serde(default)] pub is_trace_root: i32, } -/// Trilean is an expanded boolean type that is meant to differentiate between being unset and false. +/// Trilean is an expanded boolean type that is meant to differentiate between being unset and +/// false. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum Trilean { diff --git a/trace-protobuf/src/serde.rs b/trace-protobuf/src/serde.rs index 2920eb75e..cc0c36f90 100644 --- a/trace-protobuf/src/serde.rs +++ b/trace-protobuf/src/serde.rs @@ -1,3 +1,6 @@ +// Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + use serde::{Deserializer, Serializer}; use serde_bytes::ByteBuf; From 00d316811140eaed1e5b44b6823fa034e465567e Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Fri, 14 Jun 2024 21:36:20 +0200 Subject: [PATCH 10/26] Update lib.rs --- ipc/src/lib.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/ipc/src/lib.rs b/ipc/src/lib.rs index 68783f126..b3dc197cf 100644 --- a/ipc/src/lib.rs +++ b/ipc/src/lib.rs @@ -1,8 +1,6 @@ // Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 -extern crate core; - pub mod example_interface; pub mod handles; pub mod transport; From 858a245e5493bd09c82df5e72dc7029db36f7ae2 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Fri, 21 Jun 2024 19:41:41 +0200 Subject: [PATCH 11/26] Update remote-config/src/parse.rs Co-authored-by: Luc Vieillescazes --- remote-config/src/parse.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/remote-config/src/parse.rs b/remote-config/src/parse.rs index ae17b846c..b82a5446b 100644 --- a/remote-config/src/parse.rs +++ b/remote-config/src/parse.rs @@ -49,7 +49,7 @@ impl RemoteConfigPath { } "employee" => { if parts.len() != 4 { - anyhow::bail!("{} is employee and does not have exactly 5 parts", path); + anyhow::bail!("{} is employee and does not have exactly 4 parts", path); } RemoteConfigSource::Employee } From c915c1160e536746e567bed34af649eaec31c44a Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Mon, 24 Jun 2024 12:09:01 +0200 Subject: [PATCH 12/26] Apply suggestions from code review Co-authored-by: Julio Gonzalez <107922352+hoolioh@users.noreply.github.com> --- remote-config/src/dynamic_configuration/data.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/remote-config/src/dynamic_configuration/data.rs b/remote-config/src/dynamic_configuration/data.rs index 89443a6e4..aa0ca8f45 100644 --- a/remote-config/src/dynamic_configuration/data.rs +++ 
b/remote-config/src/dynamic_configuration/data.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use std::collections::HashMap; #[derive(Debug, Deserialize)] -#[cfg_attr(feature = "test", derive(Serialize))] +#[cfg_attr(feature = "test", derive(Default, Serialize))] pub struct DynamicConfigTarget { pub service: String, pub env: String, @@ -106,10 +106,7 @@ pub mod tests { pub fn dummy_dynamic_config(enabled: bool) -> DynamicConfigFile { DynamicConfigFile { action: "".to_string(), - service_target: DynamicConfigTarget { - service: "".to_string(), - env: "".to_string(), - }, + service_target: DynamicConfigTarget::default(), lib_config: DynamicConfig { tracing_enabled: Some(enabled), ..DynamicConfig::default() From 6399db7b5000b314128bd1d684973c63d54be627 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Tue, 25 Jun 2024 17:07:33 +0200 Subject: [PATCH 13/26] Make remote config usable in process, without manual FileStorage interaction Signed-off-by: Bob Weinand --- remote-config/examples/remote_config_fetch.rs | 94 +++++++++++++ remote-config/src/fetch/multitarget.rs | 2 +- remote-config/src/fetch/single.rs | 91 ++++++++++++- remote-config/src/file_change_tracker.rs | 78 +++++++++++ remote-config/src/file_storage.rs | 124 ++++++++++++++++++ remote-config/src/lib.rs | 2 + 6 files changed, 388 insertions(+), 3 deletions(-) create mode 100644 remote-config/examples/remote_config_fetch.rs create mode 100644 remote-config/src/file_change_tracker.rs create mode 100644 remote-config/src/file_storage.rs diff --git a/remote-config/examples/remote_config_fetch.rs b/remote-config/examples/remote_config_fetch.rs new file mode 100644 index 000000000..8e59290b5 --- /dev/null +++ b/remote-config/examples/remote_config_fetch.rs @@ -0,0 +1,94 @@ +// Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + +use datadog_remote_config::fetch::{ConfigInvariants, SingleChangesFetcher}; +use datadog_remote_config::file_change_tracker::{Change, FilePath}; +use datadog_remote_config::file_storage::ParsedFileStorage; +use datadog_remote_config::RemoteConfigProduct::ApmTracing; +use datadog_remote_config::{RemoteConfigData, Target}; +use ddcommon::Endpoint; +use std::time::Duration; +use tokio::time::sleep; + +const RUNTIME_ID: &str = "23e76587-5ae1-410c-a05c-137cae600a10"; +const SERVICE: &str = "testservice"; +const ENV: &str = "testenv"; +const VERSION: &str = "1.2.3"; + +#[tokio::main(flavor = "current_thread")] +async fn main() { + // SingleChangesFetcher is ideal for a single static (runtime_id, service, env, version) tuple + // Otherwise a SharedFetcher (or even a MultiTargetFetcher for a potentially high number of + // targets) for multiple targets is needed. These can be manually wired together with a + // ChangeTracker to keep track of changes. The SingleChangesTracker does it for you. + let mut fetcher = SingleChangesFetcher::new( + // Use SimpleFileStorage if you desire just the raw, unparsed contents + // (e.g. to do processing directly in your language) + // For more complicated use cases, like needing to store data in shared memory, a custom + // FileStorage implementation is recommended + ParsedFileStorage::default(), + Target { + service: SERVICE.to_string(), + env: ENV.to_string(), + app_version: VERSION.to_string(), + }, + RUNTIME_ID.to_string(), + ConfigInvariants { + language: "awesomelang".to_string(), + tracer_version: "99.10.5".to_string(), + endpoint: Endpoint { + url: hyper::Uri::from_static("http://localhost:8126"), + api_key: None, + }, + products: vec![ApmTracing], + capabilities: vec![], + }, + ); + + // Custom timeout, defaults to 5 seconds. 
+ fetcher.set_timeout(2000); + + loop { + match fetcher.fetch_changes().await { + Ok(changes) => { + println!("Got {} changes:", changes.len()); + for change in changes { + match change { + Change::Add(file) => { + println!("Added file: {} (version: {})", file.path(), file.version()); + print_file_contents(&file.contents()); + } + Change::Update(file, _) => { + println!( + "Got update for file: {} (version: {})", + file.path(), + file.version() + ); + print_file_contents(&file.contents()); + } + Change::Remove(file) => { + println!("Removing file {}", file.path()); + } + } + } + } + Err(e) => { + eprintln!("Fetch failed with {e}"); + fetcher.set_last_error(e.to_string()); + } + } + + sleep(Duration::from_nanos(fetcher.get_interval()).max(Duration::from_secs(1))).await; + } +} + +fn print_file_contents(contents: &anyhow::Result) { + match contents { + Ok(data) => { + println!("File contents: {:?}", data); + } + Err(e) => { + println!("Failed parsing file: {:?}", e); + } + } +} diff --git a/remote-config/src/fetch/multitarget.rs b/remote-config/src/fetch/multitarget.rs index 0d9359ca3..59b27e725 100644 --- a/remote-config/src/fetch/multitarget.rs +++ b/remote-config/src/fetch/multitarget.rs @@ -334,7 +334,7 @@ where Self::remove_target(self, runtime_id, target); } - fn start_fetcher(self: &Arc, known_target: &mut KnownTarget) { + fn start_fetcher(self: &Arc, known_target: &KnownTarget) { let this = self.clone(); let fetcher = known_target.fetcher.clone(); let status = known_target.status.clone(); diff --git a/remote-config/src/fetch/single.rs b/remote-config/src/fetch/single.rs index 5138b0af1..bcf0c5344 100644 --- a/remote-config/src/fetch/single.rs +++ b/remote-config/src/fetch/single.rs @@ -2,15 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 use crate::fetch::{ConfigFetcher, ConfigFetcherState, ConfigInvariants, FileStorage, OpaqueState}; +use crate::file_change_tracker::{Change, ChangeTracker, FilePath, UpdatedFiles}; use crate::Target; +use 
std::sync::atomic::Ordering; use std::sync::Arc; +/// Simple implementation pub struct SingleFetcher { fetcher: ConfigFetcher, target: Arc, runtime_id: String, - pub config_id: String, - pub last_error: Option, + config_id: String, + last_error: Option, opaque_state: OpaqueState, } @@ -26,6 +29,12 @@ impl SingleFetcher { } } + pub fn with_config_id(mut self, config_id: String) -> Self { + self.config_id = config_id; + self + } + + /// Polls the current runtime config files. pub async fn fetch_once(&mut self) -> anyhow::Result>>> { self.fetcher .fetch_once( @@ -37,4 +46,82 @@ impl SingleFetcher { ) .await } + + /// Timeout after which to report failure, in milliseconds. + pub fn set_timeout(&self, milliseconds: u32) { + self.fetcher.timeout.store(milliseconds, Ordering::Relaxed); + } + + /// Collected interval. May be zero if not provided by the remote config server or fetched yet. + /// Given in nanoseconds. + pub fn get_interval(&self) -> u64 { + self.fetcher.interval.load(Ordering::Relaxed) + } + + /// Sets the error to be reported to the backend. + pub fn set_last_error(&mut self, error: String) { + self.last_error = Some(error); + } + + pub fn get_config_id(&self) -> &String { + &self.config_id + } +} + +pub struct SingleChangesFetcher +where + S::StoredFile: FilePath, +{ + changes: ChangeTracker, + pub fetcher: SingleFetcher, +} + +impl SingleChangesFetcher +where + S::StoredFile: FilePath, +{ + pub fn new(sink: S, target: Target, runtime_id: String, invariants: ConfigInvariants) -> Self { + SingleChangesFetcher { + changes: ChangeTracker::default(), + fetcher: SingleFetcher::new(sink, target, runtime_id, invariants), + } + } + + pub fn with_config_id(mut self, config_id: String) -> Self { + self.fetcher = self.fetcher.with_config_id(config_id); + self + } + + /// Polls for new changes + pub async fn fetch_changes(&mut self) -> anyhow::Result, R>>> + where + S: UpdatedFiles, + { + Ok(match self.fetcher.fetch_once().await? 
{ + None => vec![], + Some(files) => self + .changes + .get_changes(files, self.fetcher.fetcher.file_storage.updated()), + }) + } + + /// Timeout after which to report failure, in milliseconds. + pub fn set_timeout(&self, milliseconds: u32) { + self.fetcher.set_timeout(milliseconds) + } + + /// Collected interval. May be zero if not provided by the remote config server or fetched yet. + /// Given in nanoseconds. + pub fn get_interval(&self) -> u64 { + self.fetcher.get_interval() + } + + /// Sets the error to be reported to the backend. + pub fn set_last_error(&mut self, error: String) { + self.fetcher.set_last_error(error); + } + + pub fn get_config_id(&self) -> &String { + self.fetcher.get_config_id() + } } diff --git a/remote-config/src/file_change_tracker.rs b/remote-config/src/file_change_tracker.rs new file mode 100644 index 000000000..9600849d2 --- /dev/null +++ b/remote-config/src/file_change_tracker.rs @@ -0,0 +1,78 @@ +// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + +use crate::RemoteConfigPath; +use std::collections::HashSet; +use std::hash::{Hash, Hasher}; +use std::sync::Arc; + +pub trait FilePath { + fn path(&self) -> &RemoteConfigPath; +} + +pub trait UpdatedFiles { + fn updated(&self) -> Vec<(Arc, R)>; +} + +struct FilePathBasedArc(Arc); + +impl Hash for FilePathBasedArc { + fn hash(&self, state: &mut H) { + self.0.path().hash(state) + } +} + +impl PartialEq for FilePathBasedArc { + fn eq(&self, other: &Self) -> bool { + self.0.path() == other.0.path() + } +} + +impl Eq for FilePathBasedArc {} + +pub struct ChangeTracker { + last_files: HashSet>, +} + +impl Default for ChangeTracker { + fn default() -> Self { + ChangeTracker { + last_files: Default::default(), + } + } +} + +pub enum Change { + Add(S), + Update(S, R), + Remove(S), +} + +impl ChangeTracker { + pub fn get_changes( + &mut self, + files: Vec>, + updated: Vec<(Arc, R)>, + ) -> Vec, R>> { + let files = 
HashSet::from_iter(files.into_iter().map(FilePathBasedArc)); + let mut changes = vec![]; + + for file in files.difference(&self.last_files) { + changes.push(Change::Add(file.0.clone())); + } + + for file in self.last_files.difference(&files) { + changes.push(Change::Remove(file.0.clone())); + } + + for (updated_file, old_contents) in updated.into_iter() { + let file = FilePathBasedArc(updated_file); + if files.contains(&file) { + changes.push(Change::Update(file.0, old_contents)) + } + } + + self.last_files = files; + changes + } +} diff --git a/remote-config/src/file_storage.rs b/remote-config/src/file_storage.rs new file mode 100644 index 000000000..62f01e283 --- /dev/null +++ b/remote-config/src/file_storage.rs @@ -0,0 +1,124 @@ +// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + +use crate::fetch::FileStorage; +use crate::file_change_tracker::{FilePath, UpdatedFiles}; +use crate::{RemoteConfigData, RemoteConfigPath}; +use std::ops::Deref; +use std::sync::{Arc, Mutex, MutexGuard}; + +/// A trivial local storage for remote config files. +pub struct RawFileStorage { + updated: Mutex>, P)>>, +} + +impl Default for RawFileStorage

{ + fn default() -> Self { + RawFileStorage { + updated: Mutex::default(), + } + } +} + +pub trait ParseFile +where + Self: Sized, +{ + fn parse(path: &RemoteConfigPath, contents: Vec) -> Self; +} + +impl UpdatedFiles, P> for RawFileStorage

{ + fn updated(&self) -> Vec<(Arc>, P)> { + std::mem::take(&mut *self.updated.lock().unwrap()) + } +} + +/// Mutable data: version and contents. +struct RawFileData

{ + version: u64, + contents: P, +} + +/// File contents and file metadata +pub struct RawFile

{ + path: RemoteConfigPath, + data: Mutex>, +} + +pub struct RawFileContentsGuard<'a, P>(MutexGuard<'a, RawFileData

>); + +impl<'a, P> Deref for RawFileContentsGuard<'a, P> { + type Target = P; + + fn deref(&self) -> &Self::Target { + &self.0.contents + } +} + +impl

RawFile

{ + /// Gets the contents behind a Deref impl (guarding a Mutex). + pub fn contents(&self) -> RawFileContentsGuard

{ + RawFileContentsGuard(self.data.lock().unwrap()) + } + + pub fn version(&self) -> u64 { + self.data.lock().unwrap().version + } +} + +impl

FilePath for RawFile

{ + fn path(&self) -> &RemoteConfigPath { + &self.path + } +} + +impl FileStorage for RawFileStorage

{ + type StoredFile = RawFile

; + + fn store( + &self, + version: u64, + path: RemoteConfigPath, + contents: Vec, + ) -> anyhow::Result> { + Ok(Arc::new(RawFile { + data: Mutex::new(RawFileData { + version, + contents: P::parse(&path, contents), + }), + path, + })) + } + + fn update( + &self, + file: &Arc, + version: u64, + contents: Vec, + ) -> anyhow::Result<()> { + let mut contents = P::parse(&file.path, contents); + let mut data = file.data.lock().unwrap(); + std::mem::swap(&mut data.contents, &mut contents); + self.updated.lock().unwrap().push((file.clone(), contents)); + data.version = version; + Ok(()) + } +} + +/// It simply stores the raw remote config file contents. +pub type SimpleFileStorage = RawFileStorage>; + +impl ParseFile for Vec { + fn parse(_path: &RemoteConfigPath, contents: Vec) -> Self { + contents + } +} + +/// Storing the remote config file contents in parsed form +pub type ParsedFileStorage = RawFileStorage>; + +impl ParseFile for anyhow::Result { + fn parse(path: &RemoteConfigPath, contents: Vec) -> Self { + RemoteConfigData::try_parse(path.product, contents.as_slice()) + } +} diff --git a/remote-config/src/lib.rs b/remote-config/src/lib.rs index f6ca1a364..378ee1d85 100644 --- a/remote-config/src/lib.rs +++ b/remote-config/src/lib.rs @@ -3,6 +3,8 @@ pub mod dynamic_configuration; pub mod fetch; +pub mod file_change_tracker; +pub mod file_storage; mod parse; mod targets; From 965324f29a5d23ca49257d66080444fa9aee2300 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Thu, 27 Jun 2024 20:24:05 +0200 Subject: [PATCH 14/26] Avoid multiple QueueId HashMaps for RuntimeInfo Signed-off-by: Bob Weinand --- sidecar/src/service/runtime_info.rs | 40 +++--- sidecar/src/service/sidecar_server.rs | 181 +++++++++++++++----------- sidecar/src/service/telemetry/mod.rs | 3 + 3 files changed, 126 insertions(+), 98 deletions(-) diff --git a/sidecar/src/service/runtime_info.rs b/sidecar/src/service/runtime_info.rs index 9f2f8215d..4a4817845 100644 --- a/sidecar/src/service/runtime_info.rs 
+++ b/sidecar/src/service/runtime_info.rs @@ -27,18 +27,26 @@ pub(crate) struct SharedAppManualFut { /// `RuntimeInfo` is a struct that contains information about a runtime. /// It contains a map of apps and a map of app or actions. -/// Each app is represented by a shared future that may contain an `Option`. -/// Each action is represented by an `AppOrQueue` enum. Combining apps and actions are necessary -/// because service and env names are not known until later in the initialization process. #[derive(Clone, Default)] pub(crate) struct RuntimeInfo { pub(crate) apps: Arc>, - app_or_actions: Arc>>, + applications: Arc>>, #[cfg(feature = "tracing")] - remote_config_guards: Arc>>, pub(crate) instance_id: InstanceId, } +/// `ActiveApplications` is a struct the contains information about a known in flight application. +/// Telemetry lifecycles (see `app_or_actions`) and remote_config `remote_config_guard` are bound to +/// it. +/// Each app is represented by a shared future that may contain an `Option`. +/// Each action is represented by an `AppOrQueue` enum. Combining apps and actions are necessary +/// because service and env names are not known until later in the initialization process. +#[derive(Default)] +pub(crate) struct ActiveApplication { + pub app_or_actions: AppOrQueue, + pub remote_config_guard: Option, +} + impl RuntimeInfo { /// Retrieves the `AppInstance` for a given service name and environment name. /// @@ -115,26 +123,14 @@ impl RuntimeInfo { self.apps.lock().unwrap() } - /// Locks the app or actions map and returns a mutable reference to it. - /// - /// # Returns - /// - /// * `MutexGuard>` - A mutable reference to the app or actions - /// map. - pub(crate) fn lock_app_or_actions(&self) -> MutexGuard> { - self.app_or_actions.lock().unwrap() - } - - /// Locks the remote config guards map and returns a mutable reference to it. + /// Locks the applications map and returns a mutable reference to it. 
/// /// # Returns /// - /// * `MutexGuard>` - A mutable reference to the remote - /// config guards map. - pub(crate) fn lock_remote_config_guards( - &self, - ) -> MutexGuard> { - self.remote_config_guards.lock().unwrap() + /// * `MutexGuard>` - A mutable reference to the + /// applications map. + pub(crate) fn lock_applications(&self) -> MutexGuard> { + self.applications.lock().unwrap() } } diff --git a/sidecar/src/service/sidecar_server.rs b/sidecar/src/service/sidecar_server.rs index 1aede69d4..74c8023b2 100644 --- a/sidecar/src/service/sidecar_server.rs +++ b/sidecar/src/service/sidecar_server.rs @@ -41,6 +41,7 @@ use tokio::task::{JoinError, JoinHandle}; use crate::dogstatsd::DogStatsDAction; use crate::service::remote_configs::{RemoteConfigNotifyTarget, RemoteConfigs}; +use crate::service::runtime_info::ActiveApplication; use crate::service::telemetry::enqueued_telemetry_stats::EnqueuedTelemetryStats; use crate::service::tracing::trace_flusher::TraceFlusherStats; use datadog_ipc::platform::FileBackedHandle; @@ -63,6 +64,7 @@ struct SidecarStats { active_apps: u32, enqueued_apps: u32, enqueued_telemetry_data: EnqueuedTelemetryStats, + remote_config_clients: u32, telemetry_metrics_contexts: u32, telemetry_worker: TelemetryWorkerStats, telemetry_worker_errors: u32, @@ -168,7 +170,7 @@ impl SidecarServer { async fn process_interceptor_response( &self, - result: Result<(HashSet, HashSet), tokio::task::JoinError>, + result: Result<(HashSet, HashSet), JoinError>, ) { match result { Ok((sessions, instances)) => { @@ -398,7 +400,7 @@ impl SidecarServer { .map(|s| { s.lock_runtimes() .values() - .map(|r| r.lock_app_or_actions().len() as u32) + .map(|r| r.lock_applications().len() as u32) .sum::() }) .sum(), @@ -408,9 +410,9 @@ impl SidecarServer { s.lock_runtimes() .values() .map(|r| { - r.lock_app_or_actions() + r.lock_applications() .values() - .filter(|a| matches!(a, AppOrQueue::Queue(_))) + .filter(|a| matches!(a.app_or_actions, AppOrQueue::Queue(_))) .count() as 
u32 }) .sum::() @@ -422,9 +424,9 @@ impl SidecarServer { s.lock_runtimes() .values() .map(|r| { - r.lock_app_or_actions() + r.lock_applications() .values() - .filter_map(|a| match a { + .filter_map(|a| match &a.app_or_actions { AppOrQueue::Queue(q) => Some(q.stats()), _ => None, }) @@ -433,6 +435,20 @@ impl SidecarServer { .sum() }) .sum(), + remote_config_clients: sessions + .values() + .map(|s| { + s.lock_runtimes() + .values() + .map(|r| { + r.lock_applications() + .values() + .filter_map(|a| a.remote_config_guard.as_ref()) + .count() as u32 + }) + .sum::() + }) + .sum(), telemetry_metrics_contexts: sessions .values() .map(|s| { @@ -478,58 +494,68 @@ impl SidecarInterface for SidecarServer { actions: Vec, ) -> Self::EnqueueActionsFut { let rt_info = self.get_runtime(&instance_id); - let mut queue = rt_info.lock_app_or_actions(); - match queue.entry(queue_id) { - Entry::Occupied(mut entry) => match entry.get_mut() { - AppOrQueue::Queue(ref mut data) => { - data.process(actions); - } - AppOrQueue::App(service_future) => { - let service_future = service_future.clone(); - // drop on stop - if actions.iter().any(|action| { - matches!( - action, - SidecarAction::Telemetry(TelemetryActions::Lifecycle( - LifecycleAction::Stop - )) - ) - }) { - entry.remove(); - rt_info.lock_remote_config_guards().remove(&queue_id); + let mut applications = rt_info.lock_applications(); + match applications.entry(queue_id) { + Entry::Occupied(mut entry) => { + let value = entry.get_mut(); + match value.app_or_actions { + AppOrQueue::Inactive => { + value.app_or_actions = + AppOrQueue::Queue(EnqueuedTelemetryData::processed(actions)); } - let apps = rt_info.apps.clone(); - tokio::spawn(async move { - let service = service_future.await; - let app_future = if let Some(fut) = apps - .lock() - .expect("Unable to acquire lock on apps") - .get(&service) - { - fut.clone() - } else { - return; - }; - if let Some(mut app) = app_future.await { - let actions = - 
EnqueuedTelemetryData::process_immediately(actions, &mut app).await; - app.telemetry.send_msgs(actions).await.ok(); + AppOrQueue::Queue(ref mut data) => { + data.process(actions); + } + AppOrQueue::App(ref service_future) => { + let service_future = service_future.clone(); + // drop on stop + if actions.iter().any(|action| { + matches!( + action, + SidecarAction::Telemetry(TelemetryActions::Lifecycle( + LifecycleAction::Stop + )) + ) + }) { + entry.remove(); } - }); + let apps = rt_info.apps.clone(); + tokio::spawn(async move { + let service = service_future.await; + let app_future = if let Some(fut) = apps + .lock() + .expect("Unable to acquire lock on apps") + .get(&service) + { + fut.clone() + } else { + return; + }; + if let Some(mut app) = app_future.await { + let actions = + EnqueuedTelemetryData::process_immediately(actions, &mut app) + .await; + app.telemetry.send_msgs(actions).await.ok(); + } + }); + } } - }, + } Entry::Vacant(entry) => { - if actions.len() == 1 - && matches!( + if actions.len() != 1 + || !matches!( actions[0], SidecarAction::Telemetry(TelemetryActions::Lifecycle( LifecycleAction::Stop )) ) { - rt_info.lock_remote_config_guards().remove(&queue_id); - } else { - entry.insert(AppOrQueue::Queue(EnqueuedTelemetryData::processed(actions))); + entry.insert(ActiveApplication { + app_or_actions: AppOrQueue::Queue(EnqueuedTelemetryData::processed( + actions, + )), + ..Default::default() + }); } } } @@ -551,12 +577,17 @@ impl SidecarInterface for SidecarServer { let (future, completer) = ManualFuture::new(); let app_or_queue = { let rt_info = self.get_runtime(&instance_id); - let mut app_or_actions = rt_info.lock_app_or_actions(); - match app_or_actions.get(&queue_id) { - Some(AppOrQueue::Queue(_)) => { - app_or_actions.insert(queue_id, AppOrQueue::App(future.shared())) - } - None => Some(AppOrQueue::Queue(EnqueuedTelemetryData::default())), + let mut applications = rt_info.lock_applications(); + match applications.get_mut(&queue_id) { + 
Some(ActiveApplication { + app_or_actions: ref mut app @ AppOrQueue::Queue(_), + .. + }) => Some(std::mem::replace(app, AppOrQueue::App(future.shared()))), + None + | Some(ActiveApplication { + app_or_actions: AppOrQueue::Inactive, + .. + }) => Some(AppOrQueue::Queue(EnqueuedTelemetryData::default())), _ => None, } }; @@ -592,10 +623,7 @@ impl SidecarInterface for SidecarServer { matches!(action, TelemetryActions::Lifecycle(LifecycleAction::Stop)) }) { self.get_runtime(&instance_id) - .lock_app_or_actions() - .remove(&queue_id); - self.get_runtime(&instance_id) - .lock_remote_config_guards() + .lock_applications() .remove(&queue_id); } @@ -787,24 +815,25 @@ impl SidecarInterface for SidecarServer { let notify_target = RemoteConfigNotifyTarget { pid: session.pid.load(Ordering::Relaxed), }; - session - .get_runtime(&instance_id.runtime_id) - .lock_remote_config_guards() - .insert( - queue_id, - self.remote_configs.add_runtime( - session - .get_remote_config_invariants() - .as_ref() - .expect("Expecting remote config invariants to be set early") - .clone(), - instance_id.runtime_id, - notify_target, - env_name, - service_name, - app_version, - ), - ); + let runtime_info = session.get_runtime(&instance_id.runtime_id); + runtime_info + .lock_applications() + .entry(queue_id) + .or_default() + .remote_config_guard = Some( + self.remote_configs.add_runtime( + session + .get_remote_config_invariants() + .as_ref() + .expect("Expecting remote config invariants to be set early") + .clone(), + instance_id.runtime_id, + notify_target, + env_name, + service_name, + app_version, + ), + ); no_response() } diff --git a/sidecar/src/service/telemetry/mod.rs b/sidecar/src/service/telemetry/mod.rs index a609f5d6c..99d61a218 100644 --- a/sidecar/src/service/telemetry/mod.rs +++ b/sidecar/src/service/telemetry/mod.rs @@ -11,7 +11,10 @@ pub mod enqueued_telemetry_data; pub mod enqueued_telemetry_stats; #[allow(clippy::large_enum_variant)] +#[derive(Default)] pub(crate) enum AppOrQueue { + 
#[default] + Inactive, App(Shared>), Queue(EnqueuedTelemetryData), } From d12524611c62563f2adea2e5261cd3ca3025ecdb Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Mon, 8 Jul 2024 18:05:27 +0200 Subject: [PATCH 15/26] Move dynamic-configuration into its own crate Signed-off-by: Bob Weinand --- Cargo.lock | 10 ++++ Cargo.toml | 1 + dynamic-configuration/Cargo.toml | 12 ++++ .../src}/data.rs | 55 +++---------------- dynamic-configuration/src/lib.rs | 47 ++++++++++++++++ remote-config/Cargo.toml | 1 + remote-config/examples/remote_config_fetch.rs | 1 + .../src/dynamic_configuration/mod.rs | 4 -- remote-config/src/lib.rs | 1 - remote-config/src/parse.rs | 2 +- sidecar/Cargo.toml | 1 + sidecar/src/shm_remote_config.rs | 4 +- tools/docker/Dockerfile.build | 1 + 13 files changed, 85 insertions(+), 55 deletions(-) create mode 100644 dynamic-configuration/Cargo.toml rename {remote-config/src/dynamic_configuration => dynamic-configuration/src}/data.rs (52%) create mode 100644 dynamic-configuration/src/lib.rs delete mode 100644 remote-config/src/dynamic_configuration/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 69e0118db..b2c869b1f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1296,6 +1296,14 @@ dependencies = [ "protoc-bin-vendored", ] +[[package]] +name = "datadog-dynamic-configuration" +version = "0.0.1" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "datadog-ipc" version = "0.1.0" @@ -1412,6 +1420,7 @@ version = "0.0.1" dependencies = [ "anyhow", "base64 0.21.7", + "datadog-dynamic-configuration", "datadog-trace-protobuf", "ddcommon", "futures", @@ -1453,6 +1462,7 @@ dependencies = [ "cadence", "chrono", "console-subscriber", + "datadog-dynamic-configuration", "datadog-ipc", "datadog-ipc-macros", "datadog-remote-config", diff --git a/Cargo.toml b/Cargo.toml index 9f1d27b7f..6a008b81d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,6 +12,7 @@ members = [ "ddcommon-ffi", "ddtelemetry", "ddtelemetry-ffi", + "dynamic-configuration", "tools", "ipc", 
"ipc/macros", diff --git a/dynamic-configuration/Cargo.toml b/dynamic-configuration/Cargo.toml new file mode 100644 index 000000000..78659a656 --- /dev/null +++ b/dynamic-configuration/Cargo.toml @@ -0,0 +1,12 @@ +[package] +edition = "2021" +license = "Apache 2.0" +name = "datadog-dynamic-configuration" +version = "0.0.1" + +[features] +test = [] + +[dependencies] +serde = "1.0" +serde_json = { version = "1.0", features = ["raw_value"] } diff --git a/remote-config/src/dynamic_configuration/data.rs b/dynamic-configuration/src/data.rs similarity index 52% rename from remote-config/src/dynamic_configuration/data.rs rename to dynamic-configuration/src/data.rs index aa0ca8f45..320cc7878 100644 --- a/remote-config/src/dynamic_configuration/data.rs +++ b/dynamic-configuration/src/data.rs @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 use serde::{Deserialize, Serialize}; -use std::collections::HashMap; #[derive(Debug, Deserialize)] #[cfg_attr(feature = "test", derive(Default, Serialize))] @@ -21,9 +20,9 @@ pub struct DynamicConfigFile { #[derive(Debug, Deserialize)] #[cfg_attr(feature = "test", derive(Serialize))] -struct TracingHeaderTag { - header: String, - tag_name: String, +pub(crate) struct TracingHeaderTag { + pub header: String, + pub tag_name: String, } #[derive(Debug, Copy, Clone, Serialize, Deserialize)] @@ -55,48 +54,12 @@ pub struct TracingSamplingRule { #[derive(Debug, Deserialize)] #[cfg_attr(feature = "test", derive(Default, Serialize))] pub struct DynamicConfig { - tracing_header_tags: Option>, - tracing_sample_rate: Option, - log_injection_enabled: Option, - tracing_tags: Option>, - tracing_enabled: Option, - tracing_sampling_rules: Option>, -} - -impl From for Vec { - fn from(value: DynamicConfig) -> Self { - let mut vec = vec![]; - if let Some(tags) = value.tracing_header_tags { - vec.push(Configs::TracingHeaderTags( - tags.into_iter().map(|t| (t.header, t.tag_name)).collect(), - )) - } - if let Some(sample_rate) = value.tracing_sample_rate { - 
vec.push(Configs::TracingSampleRate(sample_rate)); - } - if let Some(log_injection) = value.log_injection_enabled { - vec.push(Configs::LogInjectionEnabled(log_injection)); - } - if let Some(tags) = value.tracing_tags { - vec.push(Configs::TracingTags(tags)); - } - if let Some(enabled) = value.tracing_enabled { - vec.push(Configs::TracingEnabled(enabled)); - } - if let Some(sampling_rules) = value.tracing_sampling_rules { - vec.push(Configs::TracingSamplingRules(sampling_rules)); - } - vec - } -} - -pub enum Configs { - TracingHeaderTags(HashMap), - TracingSampleRate(f64), - LogInjectionEnabled(bool), - TracingTags(Vec), // "key:val" format - TracingEnabled(bool), - TracingSamplingRules(Vec), + pub(crate) tracing_header_tags: Option>, + pub(crate) tracing_sample_rate: Option, + pub(crate) log_injection_enabled: Option, + pub(crate) tracing_tags: Option>, + pub(crate) tracing_enabled: Option, + pub(crate) tracing_sampling_rules: Option>, } #[cfg(feature = "test")] diff --git a/dynamic-configuration/src/lib.rs b/dynamic-configuration/src/lib.rs new file mode 100644 index 000000000..7d2fdb518 --- /dev/null +++ b/dynamic-configuration/src/lib.rs @@ -0,0 +1,47 @@ +// Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + +use crate::data::{DynamicConfig, DynamicConfigFile, TracingSamplingRule}; +use std::collections::HashMap; + +pub mod data; + +impl From for Vec { + fn from(value: DynamicConfig) -> Self { + let mut vec = vec![]; + if let Some(tags) = value.tracing_header_tags { + vec.push(Configs::TracingHeaderTags( + tags.into_iter().map(|t| (t.header, t.tag_name)).collect(), + )) + } + if let Some(sample_rate) = value.tracing_sample_rate { + vec.push(Configs::TracingSampleRate(sample_rate)); + } + if let Some(log_injection) = value.log_injection_enabled { + vec.push(Configs::LogInjectionEnabled(log_injection)); + } + if let Some(tags) = value.tracing_tags { + vec.push(Configs::TracingTags(tags)); + } + if let Some(enabled) = value.tracing_enabled { + vec.push(Configs::TracingEnabled(enabled)); + } + if let Some(sampling_rules) = value.tracing_sampling_rules { + vec.push(Configs::TracingSamplingRules(sampling_rules)); + } + vec + } +} + +pub enum Configs { + TracingHeaderTags(HashMap), + TracingSampleRate(f64), + LogInjectionEnabled(bool), + TracingTags(Vec), // "key:val" format + TracingEnabled(bool), + TracingSamplingRules(Vec), +} + +pub fn parse_json(data: &[u8]) -> serde_json::error::Result { + serde_json::from_slice(data) +} diff --git a/remote-config/Cargo.toml b/remote-config/Cargo.toml index ea7a09e96..f9aa206f3 100644 --- a/remote-config/Cargo.toml +++ b/remote-config/Cargo.toml @@ -10,6 +10,7 @@ test = [] [dependencies] anyhow = { version = "1.0" } ddcommon = { path = "../ddcommon" } +datadog-dynamic-configuration = { path = "../dynamic-configuration" } datadog-trace-protobuf = { path = "../trace-protobuf" } hyper = { version = "0.14", features = ["client"], default-features = false } http = "0.2" diff --git a/remote-config/examples/remote_config_fetch.rs b/remote-config/examples/remote_config_fetch.rs index 8e59290b5..ba06a6ede 100644 --- a/remote-config/examples/remote_config_fetch.rs +++ 
b/remote-config/examples/remote_config_fetch.rs @@ -83,6 +83,7 @@ async fn main() { } fn print_file_contents(contents: &anyhow::Result) { + // Note: these contents may be large. Do not actually print it fully in a non-dev env. match contents { Ok(data) => { println!("File contents: {:?}", data); diff --git a/remote-config/src/dynamic_configuration/mod.rs b/remote-config/src/dynamic_configuration/mod.rs deleted file mode 100644 index e0018372e..000000000 --- a/remote-config/src/dynamic_configuration/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -// Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ -// SPDX-License-Identifier: Apache-2.0 - -pub mod data; diff --git a/remote-config/src/lib.rs b/remote-config/src/lib.rs index 378ee1d85..c3f0c7960 100644 --- a/remote-config/src/lib.rs +++ b/remote-config/src/lib.rs @@ -1,7 +1,6 @@ // Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 -pub mod dynamic_configuration; pub mod fetch; pub mod file_change_tracker; pub mod file_storage; diff --git a/remote-config/src/parse.rs b/remote-config/src/parse.rs index b82a5446b..7929c676a 100644 --- a/remote-config/src/parse.rs +++ b/remote-config/src/parse.rs @@ -1,7 +1,7 @@ // Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 -use crate::dynamic_configuration::data::DynamicConfigFile; +use datadog_dynamic_configuration::data::DynamicConfigFile; use serde::{Deserialize, Serialize}; use std::fmt::Display; diff --git a/sidecar/Cargo.toml b/sidecar/Cargo.toml index e016d5c33..02e5eb59e 100644 --- a/sidecar/Cargo.toml +++ b/sidecar/Cargo.toml @@ -96,3 +96,4 @@ tempfile = { version = "3.3" } httpmock = "0.7.0" datadog-remote-config = { path = "../remote-config", features = ["test"] } datadog-trace-utils = { path = "../trace-utils", features = ["test-utils"] } +datadog-dynamic-configuration = { path = "../dynamic-configuration", features = ["test"] } diff --git a/sidecar/src/shm_remote_config.rs b/sidecar/src/shm_remote_config.rs index f8955da83..d30064a17 100644 --- a/sidecar/src/shm_remote_config.rs +++ b/sidecar/src/shm_remote_config.rs @@ -465,9 +465,7 @@ impl RemoteConfigManager { #[cfg(test)] mod tests { use super::*; - use datadog_remote_config::dynamic_configuration::data::{ - tests::dummy_dynamic_config, Configs, - }; + use datadog_dynamic_configuration::data::{tests::dummy_dynamic_config, Configs}; use datadog_remote_config::fetch::test_server::RemoteConfigServer; use datadog_remote_config::{RemoteConfigData, RemoteConfigProduct, RemoteConfigSource}; use lazy_static::lazy_static; diff --git a/tools/docker/Dockerfile.build b/tools/docker/Dockerfile.build index d9f49ac3c..5905fb1e5 100644 --- a/tools/docker/Dockerfile.build +++ b/tools/docker/Dockerfile.build @@ -82,6 +82,7 @@ COPY "ddcommon-ffi/Cargo.toml" "ddcommon-ffi/" COPY "ddtelemetry/Cargo.toml" "ddtelemetry/" COPY "ddtelemetry-ffi/Cargo.toml" "ddtelemetry-ffi/" COPY "ddsketch/Cargo.toml" "ddsketch/" +COPY "dynamic-configuration/Cargo.toml" "dynamic-configuration/" COPY "profiling/Cargo.toml" "profiling/" COPY "profiling-ffi/Cargo.toml" "profiling-ffi/" COPY "profiling-replayer/Cargo.toml" "profiling-replayer/" From fa4e2511cc09d000ec7fe6a562a821e3249ec849 Mon Sep 
17 00:00:00 2001 From: Bob Weinand Date: Mon, 8 Jul 2024 18:13:08 +0200 Subject: [PATCH 16/26] Clippy Signed-off-by: Bob Weinand --- .github/CODEOWNERS | 4 +- dynamic-configuration/Cargo.toml | 2 +- remote-config/src/parse.rs | 2 +- sidecar/src/service/remote_configs.rs | 4 +- sidecar/src/shm_remote_config.rs | 4 +- trace-protobuf/src/lib.rs | 2 +- trace-protobuf/src/pb.rs | 81 ++++++++++++++------------- trace-protobuf/src/serde.rs | 36 ++++++------ 8 files changed, 73 insertions(+), 62 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 7308d8d6b..3dc9293fd 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -18,7 +18,9 @@ trace-protobuf @Datadog/serverless @Datadog/libdatadog-apm trace-mini-agent @Datadog/serverless trace-utils @Datadog/serverless @Datadog/libdatadog-apm serverless @Datadog/serverless +dynamic-configuration @Datadog/libdatadog-php @Datadog/libdatadog-apm +remote-config @Datadog/libdatadog-php @Datadog/libdatadog-apm @Datadog/remote-config sidecar @Datadog/libdatadog-php @Datadog/libdatadog-apm sidecar-ffi @Datadog/libdatadog-php @Datadog/libdatadog-apm data-pipeline*/ @Datadog/libdatadog-apm -ddsketch @Datadog/libdatadog-apm @Datadog/libdatadog-telemetry +ddsketch @Datadog/libdatadog-apm @Datadog/libdatadog-telemetry diff --git a/dynamic-configuration/Cargo.toml b/dynamic-configuration/Cargo.toml index 78659a656..ec0761f4a 100644 --- a/dynamic-configuration/Cargo.toml +++ b/dynamic-configuration/Cargo.toml @@ -9,4 +9,4 @@ test = [] [dependencies] serde = "1.0" -serde_json = { version = "1.0", features = ["raw_value"] } +serde_json = { version = "1.0" } diff --git a/remote-config/src/parse.rs b/remote-config/src/parse.rs index 7929c676a..cdf1122e7 100644 --- a/remote-config/src/parse.rs +++ b/remote-config/src/parse.rs @@ -98,7 +98,7 @@ impl RemoteConfigData { ) -> anyhow::Result { Ok(match product { RemoteConfigProduct::ApmTracing => { - RemoteConfigData::DynamicConfig(serde_json::from_slice(data)?) 
+ RemoteConfigData::DynamicConfig(datadog_dynamic_configuration::parse_json(data)?) } RemoteConfigProduct::LiveDebugger => { RemoteConfigData::LiveDebugger(/* placeholder */ ()) diff --git a/sidecar/src/service/remote_configs.rs b/sidecar/src/service/remote_configs.rs index 1890a5473..f4ab0419f 100644 --- a/sidecar/src/service/remote_configs.rs +++ b/sidecar/src/service/remote_configs.rs @@ -18,7 +18,7 @@ unsafe impl Sync for RemoteConfigNotifyFunction {} #[cfg(windows)] impl Default for RemoteConfigNotifyFunction { fn default() -> Self { - return RemoteConfigNotifyFunction(std::ptr::null_mut()); + RemoteConfigNotifyFunction(std::ptr::null_mut()) } } @@ -69,8 +69,8 @@ impl NotifyTarget for RemoteConfigNotifyTarget { } #[cfg(windows)] + #[allow(clippy::missing_transmute_annotations)] fn notify(&self) { - // TODO: CreateRemoteThread -> ddtrace_set_all_thread_vm_interrupt unsafe { let dummy = 0; kernel32::CreateRemoteThread( diff --git a/sidecar/src/shm_remote_config.rs b/sidecar/src/shm_remote_config.rs index d30064a17..82043bb42 100644 --- a/sidecar/src/shm_remote_config.rs +++ b/sidecar/src/shm_remote_config.rs @@ -144,7 +144,7 @@ fn store_shm( let mut target_slice = handle.as_slice_mut(); #[cfg(windows)] { - target_slice.write(&(file.len() as u32).to_ne_bytes())?; + target_slice.write_all(&(file.len() as u32).to_ne_bytes())?; } target_slice.copy_from_slice(file.as_slice()); @@ -465,7 +465,7 @@ impl RemoteConfigManager { #[cfg(test)] mod tests { use super::*; - use datadog_dynamic_configuration::data::{tests::dummy_dynamic_config, Configs}; + use datadog_dynamic_configuration::{data::tests::dummy_dynamic_config, Configs}; use datadog_remote_config::fetch::test_server::RemoteConfigServer; use datadog_remote_config::{RemoteConfigData, RemoteConfigProduct, RemoteConfigSource}; use lazy_static::lazy_static; diff --git a/trace-protobuf/src/lib.rs b/trace-protobuf/src/lib.rs index c393294d3..6b8e63f1e 100644 --- a/trace-protobuf/src/lib.rs +++ b/trace-protobuf/src/lib.rs 
@@ -2,9 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 #[rustfmt::skip] -mod serde; pub mod pb; pub mod remoteconfig; +mod serde; #[cfg(test)] mod pb_test; diff --git a/trace-protobuf/src/pb.rs b/trace-protobuf/src/pb.rs index a2c5f71c9..422a9d7a5 100644 --- a/trace-protobuf/src/pb.rs +++ b/trace-protobuf/src/pb.rs @@ -39,8 +39,10 @@ pub struct SpanLink { /// /// Optional. Simple mapping of keys to string values. #[prost(map = "string, string", tag = "4")] - pub attributes: - ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + pub attributes: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, /// @gotags: msg:"tracestate,omitempty" /// /// Optional. W3C tracestate. @@ -68,8 +70,8 @@ pub struct Span { #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] pub name: ::prost::alloc::string::String, - /// resource is the resource name of this span, also sometimes called the endpoint (for web - /// spans). @gotags: json:"resource" msg:"resource" + /// resource is the resource name of this span, also sometimes called the endpoint (for web spans). + /// @gotags: json:"resource" msg:"resource" #[prost(string, tag = "3")] #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] @@ -116,16 +118,18 @@ pub struct Span { #[prost(map = "string, string", tag = "10")] #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] - pub meta: - ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + pub meta: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, /// metrics is a mapping from tag name to tag value for numeric-valued tags. 
/// @gotags: json:"metrics,omitempty" msg:"metrics,omitempty" #[prost(map = "string, double", tag = "11")] #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] pub metrics: ::std::collections::HashMap<::prost::alloc::string::String, f64>, - /// type is the type of the service with which this span is associated. Example values: web, - /// db, lambda. @gotags: json:"type" msg:"type" + /// type is the type of the service with which this span is associated. Example values: web, db, lambda. + /// @gotags: json:"type" msg:"type" #[prost(string, tag = "12")] #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] @@ -136,18 +140,19 @@ pub struct Span { #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] #[serde(skip_serializing_if = "::std::collections::HashMap::is_empty")] - pub meta_struct: - ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::vec::Vec>, - /// span_links represents a collection of links, where each link defines a causal relationship - /// between two spans. @gotags: json:"span_links,omitempty" msg:"span_links,omitempty" + pub meta_struct: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::vec::Vec, + >, + /// span_links represents a collection of links, where each link defines a causal relationship between two spans. + /// @gotags: json:"span_links,omitempty" msg:"span_links,omitempty" #[prost(message, repeated, tag = "14")] #[serde(default)] #[serde(deserialize_with = "deserialize_null_into_default")] #[serde(skip_serializing_if = "::prost::alloc::vec::Vec::is_empty")] pub span_links: ::prost::alloc::vec::Vec, } -/// TraceChunk represents a list of spans with the same trace ID. In other words, a chunk of a -/// trace. +/// TraceChunk represents a list of spans with the same trace ID. In other words, a chunk of a trace. 
#[derive(Deserialize, Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -167,8 +172,10 @@ pub struct TraceChunk { /// tags specifies tags common in all `spans`. /// @gotags: json:"tags" msg:"tags" #[prost(map = "string, string", tag = "4")] - pub tags: - ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + pub tags: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, /// droppedTrace specifies whether the trace was dropped by samplers or not. /// @gotags: json:"dropped_trace" msg:"dropped_trace" #[prost(bool, tag = "5")] @@ -206,8 +213,10 @@ pub struct TracerPayload { /// tags specifies tags common in all `chunks`. /// @gotags: json:"tags" msg:"tags" #[prost(map = "string, string", tag = "7")] - pub tags: - ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + pub tags: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, /// env specifies `env` tag that set with the tracer. /// @gotags: json:"env" msg:"env" #[prost(string, tag = "8")] @@ -236,8 +245,10 @@ pub struct AgentPayload { pub tracer_payloads: ::prost::alloc::vec::Vec, /// tags specifies tags common in all `tracerPayloads`. #[prost(map = "string, string", tag = "6")] - pub tags: - ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + pub tags: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, /// agentVersion specifies version of the agent. 
#[prost(string, tag = "7")] pub agent_version: ::prost::alloc::string::String, @@ -268,9 +279,8 @@ pub struct StatsPayload { pub agent_version: ::prost::alloc::string::String, #[prost(bool, tag = "5")] pub client_computed: bool, - /// splitPayload indicates if the payload is actually one of several payloads split out from a - /// larger payload. This field can be used in the backend to signal if re-aggregation is - /// necessary. + /// splitPayload indicates if the payload is actually one of several payloads split out from a larger payload. + /// This field can be used in the backend to signal if re-aggregation is necessary. #[prost(bool, tag = "6")] pub split_payload: bool, } @@ -315,8 +325,8 @@ pub struct ClientStatsPayload { #[prost(uint64, tag = "8")] #[serde(default)] pub sequence: u64, - /// AgentAggregation is set by the agent on tracer payloads modified by the agent aggregation - /// layer characterizes counts only and distributions only payloads + /// AgentAggregation is set by the agent on tracer payloads modified by the agent aggregation layer + /// characterizes counts only and distributions only payloads #[prost(string, tag = "9")] #[serde(default)] pub agent_aggregation: ::prost::alloc::string::String, @@ -325,20 +335,18 @@ pub struct ClientStatsPayload { #[prost(string, tag = "10")] #[serde(default)] pub service: ::prost::alloc::string::String, - /// ContainerID specifies the origin container ID. It is meant to be populated by the client - /// and may be enhanced by the agent to ensure it is unique. + /// ContainerID specifies the origin container ID. It is meant to be populated by the client and may + /// be enhanced by the agent to ensure it is unique. #[prost(string, tag = "11")] #[serde(default)] #[serde(rename = "ContainerID")] pub container_id: ::prost::alloc::string::String, - /// Tags specifies a set of tags obtained from the orchestrator (where applicable) using the - /// specified containerID. This field should be left empty by the client. 
It only applies - /// to some specific environment. + /// Tags specifies a set of tags obtained from the orchestrator (where applicable) using the specified containerID. + /// This field should be left empty by the client. It only applies to some specific environment. #[prost(string, repeated, tag = "12")] #[serde(default)] pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// The git commit SHA is obtained from a trace, where it may be set through a tracer <-> - /// source code integration. + /// The git commit SHA is obtained from a trace, where it may be set through a tracer <-> source code integration. #[prost(string, tag = "13")] #[serde(default)] pub git_commit_sha: ::prost::alloc::string::String, @@ -368,8 +376,7 @@ pub struct ClientStatsBucket { #[serde(default)] pub agent_time_shift: i64, } -/// ClientGroupedStats aggregate stats on spans grouped by service, name, resource, status_code, -/// type +/// ClientGroupedStats aggregate stats on spans grouped by service, name, resource, status_code, type #[derive(Deserialize, Serialize)] #[serde(rename_all = "PascalCase")] #[allow(clippy::derive_partial_eq_without_eq)] @@ -420,8 +427,7 @@ pub struct ClientGroupedStats { #[serde(default)] pub span_kind: ::prost::alloc::string::String, /// peer_tags are supplementary tags that further describe a peer entity - /// E.g., `grpc.target` to describe the name of a gRPC peer, or `db.hostname` to describe the - /// name of peer DB + /// E.g., `grpc.target` to describe the name of a gRPC peer, or `db.hostname` to describe the name of peer DB #[prost(string, repeated, tag = "16")] #[serde(default)] pub peer_tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, @@ -430,8 +436,7 @@ pub struct ClientGroupedStats { #[serde(default)] pub is_trace_root: i32, } -/// Trilean is an expanded boolean type that is meant to differentiate between being unset and -/// false. 
+/// Trilean is an expanded boolean type that is meant to differentiate between being unset and false. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum Trilean { diff --git a/trace-protobuf/src/serde.rs b/trace-protobuf/src/serde.rs index cc0c36f90..b9dc683b0 100644 --- a/trace-protobuf/src/serde.rs +++ b/trace-protobuf/src/serde.rs @@ -6,52 +6,56 @@ use serde_bytes::ByteBuf; pub trait Deserialize<'de>: Sized { fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>; + where + D: Deserializer<'de>; } impl<'de> Deserialize<'de> for Vec> { fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, + where + D: Deserializer<'de>, { - Deserialize::deserialize(deserializer).map(|v: Vec| v.into_iter().map(ByteBuf::into_vec).collect()) + Deserialize::deserialize(deserializer) + .map(|v: Vec| v.into_iter().map(ByteBuf::into_vec).collect()) } } impl<'de> Deserialize<'de> for Vec { fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, + where + D: Deserializer<'de>, { serde::Deserialize::deserialize(deserializer) } } pub fn deserialize<'de, T, D>(deserializer: D) -> Result - where - T: Deserialize<'de>, - D: Deserializer<'de>, +where + T: Deserialize<'de>, + D: Deserializer<'de>, { Deserialize::deserialize(deserializer) } pub trait Serialize: Sized { fn serialize(&self, serializer: S) -> Result - where - S: Serializer; + where + S: Serializer; } impl Serialize for &Vec> { - fn serialize(&self, serializer: S) -> Result where S: Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { serializer.collect_seq(self.iter()) } } pub fn serialize(value: T, serializer: S) -> Result - where - T: Serialize, - S: Serializer, +where + T: Serialize, + S: Serializer, { value.serialize(serializer) } From e5a4ec8e2471ed49c8877db455a0941777c6d49a Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Wed, 10 Jul 2024 21:45:18 +0200 Subject: [PATCH 17/26] 
Fix file expiration logic Signed-off-by: Bob Weinand --- remote-config/src/fetch/fetcher.rs | 47 +++++++++++++++++---- remote-config/src/fetch/shared.rs | 65 +++++++++++++++++------------- 2 files changed, 77 insertions(+), 35 deletions(-) diff --git a/remote-config/src/fetch/fetcher.rs b/remote-config/src/fetch/fetcher.rs index e8d470e3a..d4d937b78 100644 --- a/remote-config/src/fetch/fetcher.rs +++ b/remote-config/src/fetch/fetcher.rs @@ -12,10 +12,11 @@ use ddcommon::{connector, Endpoint}; use hyper::http::uri::{PathAndQuery, Scheme}; use hyper::{Client, StatusCode}; use sha2::{Digest, Sha256, Sha512}; +use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; use std::sync::{Arc, Mutex, MutexGuard}; -use tracing::{debug, trace, warn}; +use tracing::{debug, error, trace, warn}; const PROD_INTAKE_SUBDOMAIN: &str = "config"; @@ -58,6 +59,7 @@ struct StoredTargetFile { handle: Arc, state: ConfigState, meta: TargetFileMeta, + expiring: bool, } pub struct ConfigFetcherState { @@ -72,8 +74,21 @@ pub struct ConfigFetcherFilesLock<'a, S> { } impl<'a, S> ConfigFetcherFilesLock<'a, S> { + /// Actually remove the file from the known files. + /// It may only be expired if already marked as expiring. pub fn expire_file(&mut self, path: &RemoteConfigPath) { - self.inner.remove(&path.to_string()); + if let Entry::Occupied(entry) = self.inner.entry(path.to_string()) { + if entry.get().expiring { + entry.remove(); + } + } + } + + /// Stop advertising the file as known. It's the predecessor to expire_file(). 
+ pub fn mark_expiring(&mut self, path: &RemoteConfigPath) { + if let Some(target_file) = self.inner.get_mut(&path.to_string()) { + target_file.expiring = true; + } } } @@ -113,6 +128,7 @@ pub struct ConfigFetcher { #[derive(Default)] pub struct OpaqueState { client_state: Vec, + last_response: Option, } impl ConfigFetcher { @@ -157,11 +173,21 @@ impl ConfigFetcher { let mut cached_target_files = vec![]; let mut config_states = vec![]; - for StoredTargetFile { state, meta, .. } in - self.state.target_files_by_path.lock().unwrap().values() { - config_states.push(state.clone()); - cached_target_files.push(meta.clone()); + let target_files = self.state.target_files_by_path.lock().unwrap(); + for StoredTargetFile { meta, expiring, .. } in target_files.values() { + if !expiring { + cached_target_files.push(meta.clone()); + } + } + + if let Some(ref response) = opaque_state.last_response { + for config in response.client_configs.iter() { + if let Some(StoredTargetFile { state, .. }) = target_files.get(config) { + config_states.push(state.clone()); + } + } + } } let config_req = ClientGetConfigsRequest { @@ -362,6 +388,7 @@ impl ConfigFetcher { decoded, )? }, + expiring: false, }, ); } else { @@ -388,11 +415,15 @@ impl ConfigFetcher { let mut configs = Vec::with_capacity(response.client_configs.len()); for config in response.client_configs.iter() { - if let Some(StoredTargetFile { handle, .. }) = target_files.get(config) { - configs.push(handle.clone()); + if let Some(target_file) = target_files.get_mut(config) { + target_file.expiring = false; + configs.push(target_file.handle.clone()); + } else { + error!("Found {config} in client_configs response, but it isn't stored. 
Skipping."); } } + opaque_state.last_response = Some(response); Ok(Some(configs)) } } diff --git a/remote-config/src/fetch/shared.rs b/remote-config/src/fetch/shared.rs index a4de6e65b..100979e50 100644 --- a/remote-config/src/fetch/shared.rs +++ b/remote-config/src/fetch/shared.rs @@ -73,11 +73,11 @@ pub trait RefcountedFile { self.refcount().rc.store(val, Ordering::SeqCst) } - fn set_dropped_run_id(&self, val: u64) { + fn set_expiring_run_id(&self, val: u64) { self.refcount().dropped_run_id.store(val, Ordering::SeqCst) } - fn get_dropped_run_id(&self) -> u64 { + fn get_expiring_run_id(&self) -> u64 { self.refcount().dropped_run_id.load(Ordering::Relaxed) } } @@ -169,13 +169,14 @@ where if file.refcount().rc.load(Ordering::Relaxed) != 0 { return; // Don't do anything if refcount was increased while acquiring the lock } - expire_lock.expire_file(&file.refcount().path); - drop(expire_lock); // early release + expire_lock.mark_expiring(&file.refcount().path); let (runners, run_id) = self.run_id.runners_and_run_id(); if runners > 0 { file.setref(runners); - file.set_dropped_run_id(run_id); + file.set_expiring_run_id(run_id); inactive.insert(file.refcount().path.clone(), file); + } else { + expire_lock.expire_file(&file.refcount().path); } } @@ -196,18 +197,7 @@ where path: RemoteConfigPath, contents: Vec, ) -> anyhow::Result> { - let mut inactive = self.inactive.lock().unwrap(); - if let Some(existing) = inactive.remove(&path) { - if version <= existing.refcount().version { - existing.set_dropped_run_id(0); - existing.setref(0); - } else { - self.storage.update(&existing, version, contents)?; - } - Ok(existing) - } else { - self.storage.store(version, path, contents) - } + self.storage.store(version, path, contents) } fn update( @@ -268,24 +258,40 @@ impl SharedFetcher { ) .await; - let last_run_id = fetcher.file_storage.run_id.dec_runners(); - fetcher - .file_storage - .inactive - .lock() - .unwrap() - .retain(|_, v| { - 
(first_run_id..last_run_id).contains(&v.get_dropped_run_id()) && v.delref() == 1 + let clean_inactive = || { + let run_range = first_run_id..=fetcher.file_storage.run_id.dec_runners(); + let mut inactive = fetcher.file_storage.inactive.lock().unwrap(); + inactive.retain(|_, v| { + if run_range.contains(&v.get_expiring_run_id()) && v.delref() == 1 { + fetcher + .file_storage + .state + .files_lock() + .expire_file(&v.refcount().path); + false + } else { + true + } }); + }; match fetched { - Ok(None) => { /* unchanged */ } + Ok(None) => clean_inactive(), // nothing changed Ok(Some(files)) => { if !files.is_empty() || !last_files.is_empty() { for file in files.iter() { + if file.get_expiring_run_id() != 0 { + let mut inactive = fetcher.file_storage.inactive.lock().unwrap(); + if inactive.remove(&file.refcount().path).is_some() { + file.setref(0); + file.set_expiring_run_id(0); + } + } file.incref(); } + clean_inactive(); + for file in last_files { if file.delref() == 1 { fetcher.file_storage.expire_file(file); @@ -295,9 +301,14 @@ impl SharedFetcher { last_files = files; last_error = on_fetch(&last_files); + } else { + clean_inactive(); } } - Err(e) => error!("{:?}", e), + Err(e) => { + clean_inactive(); + error!("{:?}", e); + } } select! 
{ From 59fd7d55e05ad6ea20cf8dd994d227728f1a3fb4 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Thu, 11 Jul 2024 19:33:49 +0200 Subject: [PATCH 18/26] Set version on remote config Signed-off-by: Bob Weinand --- remote-config/src/fetch/fetcher.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/remote-config/src/fetch/fetcher.rs b/remote-config/src/fetch/fetcher.rs index d74bbd841..dcc47b3b5 100644 --- a/remote-config/src/fetch/fetcher.rs +++ b/remote-config/src/fetch/fetcher.rs @@ -129,6 +129,7 @@ pub struct ConfigFetcher { pub struct OpaqueState { client_state: Vec, last_configs: Vec, + targets_version: u64, } impl ConfigFetcher { @@ -192,7 +193,7 @@ impl ConfigFetcher { client: Some(datadog_trace_protobuf::remoteconfig::Client { state: Some(ClientState { root_version: 1, - targets_version: 0, + targets_version: opaque_state.targets_version, config_states, has_error: last_error.is_some(), error: last_error.unwrap_or_default(), @@ -361,7 +362,7 @@ impl ConfigFetcher { id: parsed_path.config_id.to_string(), version, product: parsed_path.product.to_string(), - apply_state: 0, + apply_state: 2, // Acknowledged apply_error: "".to_string(), }, meta: TargetFileMeta { @@ -421,6 +422,7 @@ impl ConfigFetcher { } } + opaque_state.targets_version = targets_list.signed.version as u64; opaque_state.last_configs = response.client_configs; Ok(Some(configs)) } From 5af589c63993a9ad9a03f14dbf30fc731b3d55d8 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Fri, 12 Jul 2024 13:16:31 +0200 Subject: [PATCH 19/26] Increase log-level of remote config received message Signed-off-by: Bob Weinand --- remote-config/src/fetch/fetcher.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/remote-config/src/fetch/fetcher.rs b/remote-config/src/fetch/fetcher.rs index dcc47b3b5..120bccd51 100644 --- a/remote-config/src/fetch/fetcher.rs +++ b/remote-config/src/fetch/fetcher.rs @@ -231,6 +231,8 @@ impl ConfigFetcher { }), cached_target_files, }; + + 
trace!("Submitting remote config request: {config_req:?}"); let req = self .state @@ -290,7 +292,7 @@ impl ConfigFetcher { self.interval.store(interval, Ordering::Relaxed); } - trace!( + debug!( "Received remote config of length {}, containing {:?} paths for target {:?}", body_bytes.len(), targets_list.signed.targets.keys().collect::>(), From 1f501005889f28d336d97304e9be7d32099c345c Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Wed, 17 Jul 2024 18:35:13 +0200 Subject: [PATCH 20/26] Allow setting config apply state explicitly And avoid computing the RemoteConfigPath string with every HashMap operation, but do some rust magic so that it will consider owned RemoteConfigPaths and unowned RemoteConfigPathRefs equivalent. Signed-off-by: Bob Weinand --- remote-config/src/fetch/fetcher.rs | 201 ++++++++++++-------- remote-config/src/fetch/multitarget.rs | 11 +- remote-config/src/fetch/shared.rs | 24 ++- remote-config/src/fetch/single.rs | 16 +- remote-config/src/file_storage.rs | 4 +- remote-config/src/lib.rs | 2 + remote-config/src/parse.rs | 87 +-------- remote-config/src/path.rs | 243 +++++++++++++++++++++++++ sidecar/src/shm_remote_config.rs | 4 +- 9 files changed, 417 insertions(+), 175 deletions(-) create mode 100644 remote-config/src/path.rs diff --git a/remote-config/src/fetch/fetcher.rs b/remote-config/src/fetch/fetcher.rs index 120bccd51..b99822169 100644 --- a/remote-config/src/fetch/fetcher.rs +++ b/remote-config/src/fetch/fetcher.rs @@ -2,7 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 use crate::targets::TargetsList; -use crate::{RemoteConfigCapabilities, RemoteConfigPath, RemoteConfigProduct, Target}; +use crate::{ + RemoteConfigCapabilities, RemoteConfigPath, RemoteConfigPathRef, RemoteConfigPathType, + RemoteConfigProduct, Target, +}; use base64::Engine; use datadog_trace_protobuf::remoteconfig::{ ClientGetConfigsRequest, ClientGetConfigsResponse, ClientState, ClientTracer, ConfigState, @@ -12,8 +15,8 @@ use ddcommon::{connector, Endpoint}; use 
hyper::http::uri::{PathAndQuery, Scheme}; use hyper::{Client, StatusCode}; use sha2::{Digest, Sha256, Sha512}; -use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; +use std::mem::transmute; use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; use std::sync::{Arc, Mutex, MutexGuard}; use tracing::{debug, error, trace, warn}; @@ -31,7 +34,7 @@ pub trait FileStorage { fn store( &self, version: u64, - path: RemoteConfigPath, + path: Arc, contents: Vec, ) -> anyhow::Result>; @@ -62,31 +65,40 @@ struct StoredTargetFile { expiring: bool, } +pub enum ConfigApplyState { + Unacknowledged, + Acknowledged, + Error(String), +} + pub struct ConfigFetcherState { - target_files_by_path: Mutex>>, + target_files_by_path: Mutex, StoredTargetFile>>, pub invariants: ConfigInvariants, endpoint: Endpoint, pub expire_unused_files: bool, } pub struct ConfigFetcherFilesLock<'a, S> { - inner: MutexGuard<'a, HashMap>>, + inner: MutexGuard<'a, HashMap, StoredTargetFile>>, } impl<'a, S> ConfigFetcherFilesLock<'a, S> { /// Actually remove the file from the known files. /// It may only be expired if already marked as expiring. pub fn expire_file(&mut self, path: &RemoteConfigPath) { - if let Entry::Occupied(entry) = self.inner.entry(path.to_string()) { - if entry.get().expiring { - entry.remove(); + if let Some(target_file) = self.inner.get(path) { + if !target_file.expiring { + return; } + } else { + return; } + self.inner.remove(path); } /// Stop advertising the file as known. It's the predecessor to expire_file(). pub fn mark_expiring(&mut self, path: &RemoteConfigPath) { - if let Some(target_file) = self.inner.get_mut(&path.to_string()) { + if let Some(target_file) = self.inner.get_mut(path) { target_file.expiring = true; } } @@ -113,6 +125,26 @@ impl ConfigFetcherState { inner: self.target_files_by_path.lock().unwrap(), } } + + /// Sets the apply state on a stored file. 
+ pub fn set_config_state(&self, file: &RemoteConfigPath, state: ConfigApplyState) { + if let Some(target_file) = self.target_files_by_path.lock().unwrap().get_mut(file) { + match state { + ConfigApplyState::Unacknowledged => { + target_file.state.apply_state = 1; + target_file.state.apply_error = "".to_string(); + } + ConfigApplyState::Acknowledged => { + target_file.state.apply_state = 1; + target_file.state.apply_error = "".to_string(); + } + ConfigApplyState::Error(error) => { + target_file.state.apply_state = 1; + target_file.state.apply_error = error; + } + } + } + } } pub struct ConfigFetcher { @@ -129,6 +161,8 @@ pub struct ConfigFetcher { pub struct OpaqueState { client_state: Vec, last_configs: Vec, + // 'static because it actually depends on last_configs, and rust doesn't like self-referencing + last_config_paths: HashSet>, targets_version: u64, } @@ -142,6 +176,11 @@ impl ConfigFetcher { } } + /// Sets the apply state on a stored file. + pub fn set_config_state(&self, file: &RemoteConfigPath, state: ConfigApplyState) { + self.state.set_config_state(file, state) + } + /// Quite generic fetching implementation: /// - runs a request against the Remote Config Server, /// - validates the data, @@ -182,8 +221,10 @@ impl ConfigFetcher { } } - for config in opaque_state.last_configs.iter() { - if let Some(StoredTargetFile { state, .. }) = target_files.get(config) { + for config in opaque_state.last_config_paths.iter() { + if let Some(StoredTargetFile { state, .. 
}) = + target_files.get(config as &dyn RemoteConfigPathType) + { config_states.push(state.clone()); } } @@ -231,7 +272,7 @@ impl ConfigFetcher { }), cached_target_files, }; - + trace!("Submitting remote config request: {config_req:?}"); let req = self @@ -310,9 +351,22 @@ impl ConfigFetcher { // continuously let mut target_files = self.state.target_files_by_path.lock().unwrap(); + let mut config_paths: HashSet> = HashSet::new(); + for path in response.client_configs.iter() { + match RemoteConfigPath::try_parse(path) { + // SAFTEY: The lifetime of RemoteConfigPathRef is tied to the config_paths + // Vec + Ok(parsed) => { + config_paths.insert(unsafe { + transmute::, RemoteConfigPathRef<'_>>(parsed) + }); + } + Err(e) => warn!("Failed parsing remote config path: {path} - {e:?}"), + } + } + if self.state.expire_unused_files { - let retain: HashSet<_> = response.client_configs.iter().collect(); - target_files.retain(|k, _| retain.contains(k)); + target_files.retain(|k, _| config_paths.contains(&(&**k).into())); } for (path, target_file) in targets_list.signed.targets { @@ -330,11 +384,18 @@ impl ConfigFetcher { warn!("Found a target file without hashes at path {path}"); continue; }; + let parsed_path = match RemoteConfigPath::try_parse(path) { + Ok(parsed_path) => parsed_path, + Err(e) => { + warn!("Failed parsing remote config path: {path} - {e:?}"); + continue; + } + }; let handle = if let Some(StoredTargetFile { hash: old_hash, handle, .. 
- }) = target_files.get(path) + }) = target_files.get(&parsed_path as &dyn RemoteConfigPathType) { if old_hash == hash { continue; @@ -350,55 +411,46 @@ impl ConfigFetcher { warn!("Computed hash of file {computed_hash} did not match remote config targets file hash {hash} for path {path}: file: {}", String::from_utf8_lossy(decoded.as_slice())); continue; } - - match RemoteConfigPath::try_parse(path) { - Ok(parsed_path) => { - if let Some(version) = target_file.try_parse_version() { - debug!("Fetched new remote config file at path {path} targeting {target:?}"); - - target_files.insert( - path.to_string(), - StoredTargetFile { - hash: computed_hash, - state: ConfigState { - id: parsed_path.config_id.to_string(), - version, - product: parsed_path.product.to_string(), - apply_state: 2, // Acknowledged - apply_error: "".to_string(), - }, - meta: TargetFileMeta { - path: path.to_string(), - length: decoded.len() as i64, - hashes: target_file - .hashes - .iter() - .map(|(algorithm, hash)| TargetFileHash { - algorithm: algorithm.to_string(), - hash: hash.to_string(), - }) - .collect(), - }, - handle: if let Some(handle) = handle { - self.file_storage.update(&handle, version, decoded)?; - handle - } else { - self.file_storage.store( - version, - parsed_path, - decoded, - )? 
- }, - expiring: false, - }, - ); - } else { - warn!("Failed parsing version from remote config path {path}"); - } - } - Err(e) => { - warn!("Failed parsing remote config path: {path} - {e:?}"); - } + if let Some(version) = target_file.try_parse_version() { + debug!( + "Fetched new remote config file at path {path} targeting {target:?}" + ); + + let parsed_path: Arc = Arc::new(parsed_path.into()); + target_files.insert( + parsed_path.clone(), + StoredTargetFile { + hash: computed_hash, + state: ConfigState { + id: parsed_path.config_id.to_string(), + version, + product: parsed_path.product.to_string(), + apply_state: 2, // Acknowledged + apply_error: "".to_string(), + }, + meta: TargetFileMeta { + path: path.to_string(), + length: decoded.len() as i64, + hashes: target_file + .hashes + .iter() + .map(|(algorithm, hash)| TargetFileHash { + algorithm: algorithm.to_string(), + hash: hash.to_string(), + }) + .collect(), + }, + handle: if let Some(handle) = handle { + self.file_storage.update(&handle, version, decoded)?; + handle + } else { + self.file_storage.store(version, parsed_path, decoded)? 
+ }, + expiring: false, + }, + ); + } else { + warn!("Failed parsing version from remote config path {path}"); } } else { warn!( @@ -414,9 +466,9 @@ impl ConfigFetcher { } } - let mut configs = Vec::with_capacity(response.client_configs.len()); - for config in response.client_configs.iter() { - if let Some(target_file) = target_files.get_mut(config) { + let mut configs = Vec::with_capacity(config_paths.len()); + for config in config_paths.iter() { + if let Some(target_file) = target_files.get_mut(config as &dyn RemoteConfigPathType) { target_file.expiring = false; configs.push(target_file.handle.clone()); } else { @@ -426,6 +478,7 @@ impl ConfigFetcher { opaque_state.targets_version = targets_list.signed.version as u64; opaque_state.last_configs = response.client_configs; + opaque_state.last_config_paths = config_paths; Ok(Some(configs)) } } @@ -484,11 +537,11 @@ pub mod tests { #[derive(Default)] pub struct Storage { - pub files: Mutex>>>, + pub files: Mutex, Arc>>>, } pub struct PathStore { - path: RemoteConfigPath, + path: Arc, storage: Arc, pub data: Arc>, } @@ -511,7 +564,7 @@ pub mod tests { fn store( &self, version: u64, - path: RemoteConfigPath, + path: Arc, contents: Vec, ) -> anyhow::Result> { let data = Arc::new(Mutex::new(DataStore { @@ -653,7 +706,7 @@ pub mod tests { assert!(Arc::ptr_eq( &fetched[0].data, - storage.files.lock().unwrap().get(&PATH_FIRST).unwrap() + storage.files.lock().unwrap().get(&*PATH_FIRST).unwrap() )); assert_eq!(fetched[0].data.lock().unwrap().contents, "v1"); assert_eq!(fetched[0].data.lock().unwrap().version, 1); @@ -729,14 +782,14 @@ pub mod tests { assert!(Arc::ptr_eq( &fetched[first].data, - storage.files.lock().unwrap().get(&PATH_FIRST).unwrap() + storage.files.lock().unwrap().get(&*PATH_FIRST).unwrap() )); assert_eq!(fetched[first].data.lock().unwrap().contents, "v2"); assert_eq!(fetched[first].data.lock().unwrap().version, 2); assert!(Arc::ptr_eq( &fetched[second].data, - 
storage.files.lock().unwrap().get(&PATH_SECOND).unwrap() + storage.files.lock().unwrap().get(&*PATH_SECOND).unwrap() )); assert_eq!(fetched[second].data.lock().unwrap().contents, "X"); assert_eq!(fetched[second].data.lock().unwrap().version, 1); @@ -756,7 +809,7 @@ pub mod tests { assert!(fetched.is_none()); // no change } - server.files.lock().unwrap().remove(&PATH_FIRST); + server.files.lock().unwrap().remove(&*PATH_FIRST); { let fetched = fetcher diff --git a/remote-config/src/fetch/multitarget.rs b/remote-config/src/fetch/multitarget.rs index 59b27e725..7486755d8 100644 --- a/remote-config/src/fetch/multitarget.rs +++ b/remote-config/src/fetch/multitarget.rs @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 use crate::fetch::{ - ConfigFetcherState, ConfigInvariants, FileStorage, RefcountedFile, RefcountingStorage, - SharedFetcher, + ConfigApplyState, ConfigFetcherState, ConfigInvariants, FileStorage, RefcountedFile, + RefcountingStorage, SharedFetcher, }; use crate::Target; use futures_util::future::Shared; @@ -334,6 +334,11 @@ where Self::remove_target(self, runtime_id, target); } + /// Sets the apply state on a stored file. + pub fn set_config_state(&self, file: &S::StoredFile, state: ConfigApplyState) { + self.storage.set_config_state(file, state) + } + fn start_fetcher(self: &Arc, known_target: &KnownTarget) { let this = self.clone(); let fetcher = known_target.fetcher.clone(); @@ -563,7 +568,7 @@ mod tests { fn store( &self, version: u64, - path: RemoteConfigPath, + path: Arc, contents: Vec, ) -> anyhow::Result> { self.rc.store(version, path, contents) diff --git a/remote-config/src/fetch/shared.rs b/remote-config/src/fetch/shared.rs index 100979e50..76f390dd1 100644 --- a/remote-config/src/fetch/shared.rs +++ b/remote-config/src/fetch/shared.rs @@ -1,7 +1,9 @@ // Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 -use crate::fetch::{ConfigFetcher, ConfigFetcherState, ConfigInvariants, FileStorage, OpaqueState}; +use crate::fetch::{ + ConfigApplyState, ConfigFetcher, ConfigFetcherState, ConfigInvariants, FileStorage, OpaqueState, +}; use crate::{RemoteConfigPath, Target}; use std::collections::HashMap; use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; @@ -43,12 +45,12 @@ pub struct FileRefcountData { rc: AtomicU32, /// 0, or point in time (see RunnersGeneration) where the file was moved to inactive. dropped_run_id: AtomicU64, - pub path: RemoteConfigPath, + pub path: Arc, pub version: u64, } impl FileRefcountData { - pub fn new(version: u64, path: RemoteConfigPath) -> Self { + pub fn new(version: u64, path: Arc) -> Self { FileRefcountData { rc: AtomicU32::new(0), dropped_run_id: AtomicU64::new(0), @@ -130,7 +132,8 @@ where /// the remote config server that we know about these files. Thus, as long as these requests /// are being processed, we must retain the files, as these would not be resent, leaving us /// with a potentially incomplete configuration. - inactive: Arc>>>, + #[allow(clippy::type_complexity)] + inactive: Arc, Arc>>>, /// times ConfigFetcher::::fetch_once() is currently being run run_id: Arc, } @@ -180,6 +183,11 @@ where } } + /// Sets the apply state on a stored file. 
+ pub fn set_config_state(&self, file: &S::StoredFile, state: ConfigApplyState) { + self.state.set_config_state(&file.refcount().path, state) + } + pub fn invariants(&self) -> &ConfigInvariants { &self.state.invariants } @@ -194,7 +202,7 @@ where fn store( &self, version: u64, - path: RemoteConfigPath, + path: Arc, contents: Vec, ) -> anyhow::Result> { self.storage.store(version, path, contents) @@ -376,7 +384,7 @@ pub mod tests { fn store( &self, version: u64, - path: RemoteConfigPath, + path: Arc, contents: Vec, ) -> anyhow::Result> { Ok(Arc::new(RcPathStore { @@ -441,7 +449,7 @@ pub mod tests { let state = client.state.as_ref().unwrap(); assert_eq!(state.error, "error"); - server.files.lock().unwrap().remove(&PATH_SECOND); + server.files.lock().unwrap().remove(&*PATH_SECOND); None } @@ -517,7 +525,7 @@ pub mod tests { PATH_FIRST.clone(), (vec![DUMMY_TARGET.clone()], 2, "v2".to_string()), ); - server_2.files.lock().unwrap().remove(&PATH_SECOND); + server_2.files.lock().unwrap().remove(&*PATH_SECOND); }; let server_second_2 = server_second_1.clone(); diff --git a/remote-config/src/fetch/single.rs b/remote-config/src/fetch/single.rs index bcf0c5344..be711bfa1 100644 --- a/remote-config/src/fetch/single.rs +++ b/remote-config/src/fetch/single.rs @@ -1,9 +1,11 @@ // Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 -use crate::fetch::{ConfigFetcher, ConfigFetcherState, ConfigInvariants, FileStorage, OpaqueState}; +use crate::fetch::{ + ConfigApplyState, ConfigFetcher, ConfigFetcherState, ConfigInvariants, FileStorage, OpaqueState, +}; use crate::file_change_tracker::{Change, ChangeTracker, FilePath, UpdatedFiles}; -use crate::Target; +use crate::{RemoteConfigPath, Target}; use std::sync::atomic::Ordering; use std::sync::Arc; @@ -66,6 +68,11 @@ impl SingleFetcher { pub fn get_config_id(&self) -> &String { &self.config_id } + + /// Sets the apply state on a stored file. 
+ pub fn set_config_state(&self, file: &RemoteConfigPath, state: ConfigApplyState) { + self.fetcher.set_config_state(file, state) + } } pub struct SingleChangesFetcher @@ -124,4 +131,9 @@ where pub fn get_config_id(&self) -> &String { self.fetcher.get_config_id() } + + /// Sets the apply state on a stored file. + pub fn set_config_state(&self, file: &S::StoredFile, state: ConfigApplyState) { + self.fetcher.set_config_state(file.path(), state) + } } diff --git a/remote-config/src/file_storage.rs b/remote-config/src/file_storage.rs index 62f01e283..9060f3e13 100644 --- a/remote-config/src/file_storage.rs +++ b/remote-config/src/file_storage.rs @@ -41,7 +41,7 @@ struct RawFileData

{ /// File contents and file metadata pub struct RawFile

{ - path: RemoteConfigPath, + path: Arc, data: Mutex>, } @@ -78,7 +78,7 @@ impl FileStorage for RawFileStorage

{ fn store( &self, version: u64, - path: RemoteConfigPath, + path: Arc, contents: Vec, ) -> anyhow::Result> { Ok(Arc::new(RawFile { diff --git a/remote-config/src/lib.rs b/remote-config/src/lib.rs index c3f0c7960..4818fcad4 100644 --- a/remote-config/src/lib.rs +++ b/remote-config/src/lib.rs @@ -5,9 +5,11 @@ pub mod fetch; pub mod file_change_tracker; pub mod file_storage; mod parse; +mod path; mod targets; pub use parse::*; +pub use path::*; use serde::{Deserialize, Serialize}; #[derive(Debug, Deserialize, Serialize, Clone, Hash, Ord, PartialOrd, Eq, PartialEq)] diff --git a/remote-config/src/parse.rs b/remote-config/src/parse.rs index cdf1122e7..37cbd4063 100644 --- a/remote-config/src/parse.rs +++ b/remote-config/src/parse.rs @@ -1,89 +1,8 @@ // Copyright 2021-Present Datadog, Inc. https://www.datadoghq.com/ // SPDX-License-Identifier: Apache-2.0 +use crate::{RemoteConfigPath, RemoteConfigProduct, RemoteConfigSource}; use datadog_dynamic_configuration::data::DynamicConfigFile; -use serde::{Deserialize, Serialize}; -use std::fmt::Display; - -#[derive(Debug, Clone, Eq, Hash, PartialEq)] -pub enum RemoteConfigSource { - Datadog(u64 /* org_id */), - Employee, -} - -#[repr(C)] -#[derive(Debug, Copy, Clone, Eq, Hash, PartialEq, Serialize, Deserialize)] -pub enum RemoteConfigProduct { - ApmTracing, - LiveDebugger, -} - -impl Display for RemoteConfigProduct { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let str = match self { - RemoteConfigProduct::ApmTracing => "APM_TRACING", - RemoteConfigProduct::LiveDebugger => "LIVE_DEBUGGING", - }; - write!(f, "{}", str) - } -} - -#[derive(Debug, Clone, Eq, Hash, PartialEq)] -pub struct RemoteConfigPath { - pub source: RemoteConfigSource, - pub product: RemoteConfigProduct, - pub config_id: String, - pub name: String, -} - -impl RemoteConfigPath { - pub fn try_parse(path: &str) -> anyhow::Result { - let parts: Vec<_> = path.split('/').collect(); - Ok(RemoteConfigPath { - source: match parts[0] { - 
"datadog" => { - if parts.len() != 5 { - anyhow::bail!("{} is datadog and does not have exactly 5 parts", path); - } - RemoteConfigSource::Datadog(parts[1].parse()?) - } - "employee" => { - if parts.len() != 4 { - anyhow::bail!("{} is employee and does not have exactly 4 parts", path); - } - RemoteConfigSource::Employee - } - source => anyhow::bail!("Unknown source {}", source), - }, - product: match parts[parts.len() - 3] { - "APM_TRACING" => RemoteConfigProduct::ApmTracing, - "LIVE_DEBUGGING" => RemoteConfigProduct::LiveDebugger, - product => anyhow::bail!("Unknown product {}", product), - }, - config_id: parts[parts.len() - 2].to_string(), - name: parts[parts.len() - 1].to_string(), - }) - } -} - -impl Display for RemoteConfigPath { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self.source { - RemoteConfigSource::Datadog(id) => write!( - f, - "datadog/{}/{}/{}/{}", - id, self.product, self.config_id, self.name - ), - RemoteConfigSource::Employee => { - write!( - f, - "employee/{}/{}/{}", - self.product, self.config_id, self.name - ) - } - } - } -} #[derive(Debug)] pub enum RemoteConfigData { @@ -131,8 +50,8 @@ impl RemoteConfigValue { Ok(RemoteConfigValue { source: path.source, data, - config_id: path.config_id, - name: path.name, + config_id: path.config_id.to_string(), + name: path.name.to_string(), }) } } diff --git a/remote-config/src/path.rs b/remote-config/src/path.rs new file mode 100644 index 000000000..4c1eb2f11 --- /dev/null +++ b/remote-config/src/path.rs @@ -0,0 +1,243 @@ +// Copyright 2021-Present Datadog, Inc. 
https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + +use serde::{Deserialize, Serialize}; +use std::borrow::Borrow; +use std::fmt::{Display, Formatter}; +use std::hash::{Hash, Hasher}; +use std::sync::Arc; + +#[derive(Debug, Copy, Clone, Eq, Hash, PartialEq)] +pub enum RemoteConfigSource { + Datadog(u64 /* org_id */), + Employee, +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, Eq, Hash, PartialEq, Serialize, Deserialize)] +pub enum RemoteConfigProduct { + ApmTracing, + LiveDebugger, +} + +impl Display for RemoteConfigProduct { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + let str = match self { + RemoteConfigProduct::ApmTracing => "APM_TRACING", + RemoteConfigProduct::LiveDebugger => "LIVE_DEBUGGING", + }; + write!(f, "{}", str) + } +} + +#[derive(Debug, Clone, Eq, Hash, PartialEq)] +pub struct RemoteConfigPath { + pub source: RemoteConfigSource, + pub product: RemoteConfigProduct, + pub config_id: String, + pub name: String, +} + +#[derive(Debug, Clone, Eq, Hash, PartialEq)] +pub struct RemoteConfigPathRef<'a> { + pub source: RemoteConfigSource, + pub product: RemoteConfigProduct, + pub config_id: &'a str, + pub name: &'a str, +} + +impl RemoteConfigPath { + pub fn try_parse(path: &str) -> anyhow::Result { + let parts: Vec<_> = path.split('/').collect(); + Ok(RemoteConfigPathRef { + source: match parts[0] { + "datadog" => { + if parts.len() != 5 { + anyhow::bail!("{} is datadog and does not have exactly 5 parts", path); + } + RemoteConfigSource::Datadog(parts[1].parse()?) 
+ } + "employee" => { + if parts.len() != 4 { + anyhow::bail!("{} is employee and does not have exactly 4 parts", path); + } + RemoteConfigSource::Employee + } + source => anyhow::bail!("Unknown source {}", source), + }, + product: match parts[parts.len() - 3] { + "APM_TRACING" => RemoteConfigProduct::ApmTracing, + "LIVE_DEBUGGING" => RemoteConfigProduct::LiveDebugger, + product => anyhow::bail!("Unknown product {}", product), + }, + config_id: parts[parts.len() - 2], + name: parts[parts.len() - 1], + }) + } +} + +impl<'a> Display for RemoteConfigPathRef<'a> { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self.source { + RemoteConfigSource::Datadog(id) => write!( + f, + "datadog/{}/{}/{}/{}", + id, self.product, self.config_id, self.name + ), + RemoteConfigSource::Employee => { + write!( + f, + "employee/{}/{}/{}", + self.product, self.config_id, self.name + ) + } + } + } +} + +impl Display for RemoteConfigPath { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + RemoteConfigPathRef::from(self).fmt(f) + } +} + +impl<'a> From<&RemoteConfigPathRef<'a>> for RemoteConfigPath { + fn from(from: &RemoteConfigPathRef<'a>) -> RemoteConfigPath { + RemoteConfigPath { + source: from.source, + product: from.product, + config_id: from.config_id.to_owned(), + name: from.name.to_owned(), + } + } +} +impl<'a> From> for RemoteConfigPath { + fn from(from: RemoteConfigPathRef<'a>) -> RemoteConfigPath { + (&from).into() + } +} + +impl<'a> From<&'a RemoteConfigPath> for RemoteConfigPathRef<'a> { + fn from(from: &'a RemoteConfigPath) -> RemoteConfigPathRef<'a> { + RemoteConfigPathRef { + source: from.source, + product: from.product, + config_id: from.config_id.as_str(), + name: from.name.as_str(), + } + } +} + +impl RemoteConfigPathType for RemoteConfigPath { + fn source(&self) -> RemoteConfigSource { + self.source + } + + fn product(&self) -> RemoteConfigProduct { + self.product + } + + fn config_id(&self) -> &str { + self.config_id.as_str() + } + + fn 
name(&self) -> &str { + self.name.as_str() + } + + fn to_owned(&self) -> RemoteConfigPath { + self.clone() + } +} + +impl<'a> RemoteConfigPathType for &RemoteConfigPathRef<'a> { + fn source(&self) -> RemoteConfigSource { + self.source + } + + fn product(&self) -> RemoteConfigProduct { + self.product + } + + fn config_id(&self) -> &'a str { + self.config_id + } + + fn name(&self) -> &'a str { + self.name + } + + fn to_owned(&self) -> RemoteConfigPath { + (*self).into() + } +} + +impl<'a> RemoteConfigPathType for RemoteConfigPathRef<'a> { + fn source(&self) -> RemoteConfigSource { + self.source + } + + fn product(&self) -> RemoteConfigProduct { + self.product + } + + fn config_id(&self) -> &'a str { + self.config_id + } + + fn name(&self) -> &'a str { + self.name + } + + fn to_owned(&self) -> RemoteConfigPath { + self.into() + } +} + +pub trait RemoteConfigPathType { + fn source(&self) -> RemoteConfigSource; + fn product(&self) -> RemoteConfigProduct; + fn config_id(&self) -> &str; + fn name(&self) -> &str; + fn to_owned(&self) -> RemoteConfigPath; +} + +impl ToOwned for dyn RemoteConfigPathType + '_ { + type Owned = RemoteConfigPath; + + fn to_owned(&self) -> Self::Owned { + self.to_owned() + } +} + +impl<'a> Borrow for RemoteConfigPath { + fn borrow(&self) -> &(dyn RemoteConfigPathType + 'a) { + self + } +} + +impl<'a> Borrow for Arc { + fn borrow(&self) -> &(dyn RemoteConfigPathType + 'a) { + &**self + } +} + +impl Hash for dyn RemoteConfigPathType + '_ { + fn hash(&self, state: &mut H) { + self.source().hash(state); + self.product().hash(state); + self.config_id().hash(state); + self.name().hash(state); + } +} + +impl PartialEq for dyn RemoteConfigPathType + '_ { + fn eq(&self, other: &Self) -> bool { + self.config_id() == other.config_id() + && self.name() == other.name() + && self.source() == other.source() + && self.product() == other.product() + } +} + +impl Eq for dyn RemoteConfigPathType + '_ {} diff --git a/sidecar/src/shm_remote_config.rs 
b/sidecar/src/shm_remote_config.rs index 82043bb42..38fed3766 100644 --- a/sidecar/src/shm_remote_config.rs +++ b/sidecar/src/shm_remote_config.rs @@ -102,7 +102,7 @@ impl FileStorage for ConfigFileStorage { fn store( &self, version: u64, - path: RemoteConfigPath, + path: Arc, file: Vec, ) -> anyhow::Result> { Ok(Arc::new(StoredShmFile { @@ -395,7 +395,7 @@ impl RemoteConfigManager { Ok(parsed) => { trace!("Adding remote config file {}: {:?}", entry.key(), parsed); entry.insert(RemoteConfigPath { - source: parsed.source.clone(), + source: parsed.source, product: (&parsed.data).into(), config_id: parsed.config_id.clone(), name: parsed.name.clone(), From 37a4fea67073cf7339ac79ffc108cc824350a701 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Fri, 19 Jul 2024 16:40:52 +0200 Subject: [PATCH 21/26] Implement timeouts Signed-off-by: Bob Weinand --- remote-config/examples/remote_config_fetch.rs | 4 +--- remote-config/src/fetch/fetcher.rs | 22 +++++++++---------- remote-config/src/fetch/multitarget.rs | 3 --- remote-config/src/fetch/shared.rs | 6 ----- remote-config/src/fetch/single.rs | 10 --------- remote-config/src/fetch/test_server.rs | 5 +---- 6 files changed, 13 insertions(+), 37 deletions(-) diff --git a/remote-config/examples/remote_config_fetch.rs b/remote-config/examples/remote_config_fetch.rs index ba06a6ede..299e238ee 100644 --- a/remote-config/examples/remote_config_fetch.rs +++ b/remote-config/examples/remote_config_fetch.rs @@ -39,15 +39,13 @@ async fn main() { endpoint: Endpoint { url: hyper::Uri::from_static("http://localhost:8126"), api_key: None, + timeout_ms: 5000, // custom timeout, defaults to 3 seconds }, products: vec![ApmTracing], capabilities: vec![], }, ); - // Custom timeout, defaults to 5 seconds. 
- fetcher.set_timeout(2000); - loop { match fetcher.fetch_changes().await { Ok(changes) => { diff --git a/remote-config/src/fetch/fetcher.rs b/remote-config/src/fetch/fetcher.rs index 6f0399f81..9024ca62a 100644 --- a/remote-config/src/fetch/fetcher.rs +++ b/remote-config/src/fetch/fetcher.rs @@ -17,8 +17,9 @@ use hyper::{Client, StatusCode}; use sha2::{Digest, Sha256, Sha512}; use std::collections::{HashMap, HashSet}; use std::mem::transmute; -use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; +use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Mutex, MutexGuard}; +use std::time::Duration; use tracing::{debug, error, trace, warn}; const PROD_INTAKE_SUBDOMAIN: &str = "config"; @@ -150,8 +151,6 @@ impl ConfigFetcherState { pub struct ConfigFetcher { pub file_storage: S, state: Arc>, - /// Timeout after which to report failure, in milliseconds. - pub timeout: AtomicU32, /// Collected interval. May be zero if not provided by the remote config server or fetched yet. /// Given in nanoseconds. pub interval: AtomicU64, @@ -171,7 +170,6 @@ impl ConfigFetcher { ConfigFetcher { file_storage, state, - timeout: AtomicU32::new(5000), interval: AtomicU64::new(0), } } @@ -285,13 +283,15 @@ impl ConfigFetcher { ddcommon::header::APPLICATION_JSON, ) .body(serde_json::to_string(&config_req)?)?; - let response = Client::builder() - .build(connector::Connector::default()) - .request(req) - .await - .map_err(|e| { - anyhow::Error::msg(e).context(format!("Url: {:?}", self.state.endpoint)) - })?; + let response = tokio::time::timeout( + Duration::from_millis(self.state.endpoint.timeout_ms), + Client::builder() + .build(connector::Connector::default()) + .request(req), + ) + .await + .map_err(|e| anyhow::Error::msg(e).context(format!("Url: {:?}", self.state.endpoint)))? 
+ .map_err(|e| anyhow::Error::msg(e).context(format!("Url: {:?}", self.state.endpoint)))?; let status = response.status(); let body_bytes = hyper::body::to_bytes(response.into_body()).await?; if status != StatusCode::OK { diff --git a/remote-config/src/fetch/multitarget.rs b/remote-config/src/fetch/multitarget.rs index 7486755d8..0fb0e0050 100644 --- a/remote-config/src/fetch/multitarget.rs +++ b/remote-config/src/fetch/multitarget.rs @@ -41,8 +41,6 @@ where runtimes: Mutex>>, /// Interval used if the remote server does not specify a refetch interval, in nanoseconds. pub remote_config_interval: AtomicU64, - /// Timeout after which to report failure, in milliseconds. - pub remote_config_timeout: AtomicU32, /// All services by target in use services: Mutex, KnownTarget>>, pending_async_insertions: AtomicU32, @@ -106,7 +104,6 @@ where target_runtimes: Mutex::new(Default::default()), runtimes: Mutex::new(Default::default()), remote_config_interval: AtomicU64::new(5_000_000_000), - remote_config_timeout: AtomicU32::new(5_000), services: Mutex::new(Default::default()), pending_async_insertions: AtomicU32::new(0), fetcher_semaphore: Semaphore::new(Self::DEFAULT_CLIENTS_LIMIT as usize), diff --git a/remote-config/src/fetch/shared.rs b/remote-config/src/fetch/shared.rs index 76f390dd1..89bfd88c4 100644 --- a/remote-config/src/fetch/shared.rs +++ b/remote-config/src/fetch/shared.rs @@ -32,8 +32,6 @@ pub struct SharedFetcher { cancellation: CancellationToken, /// Interval used if the remote server does not specify a refetch interval, in nanoseconds. pub default_interval: AtomicU64, - /// Timeout after which to report failure, in milliseconds. 
- pub timeout: AtomicU32, } pub struct FileRefcountData { @@ -226,7 +224,6 @@ impl SharedFetcher { client_id: uuid::Uuid::new_v4().to_string(), cancellation: CancellationToken::new(), default_interval: AtomicU64::new(5_000_000_000), - timeout: AtomicU32::new(5000), } } @@ -243,9 +240,6 @@ impl SharedFetcher { { let state = storage.state.clone(); let mut fetcher = ConfigFetcher::new(storage, state); - fetcher - .timeout - .store(self.timeout.load(Ordering::Relaxed), Ordering::Relaxed); let mut opaque_state = OpaqueState::default(); diff --git a/remote-config/src/fetch/single.rs b/remote-config/src/fetch/single.rs index be711bfa1..a12e44d67 100644 --- a/remote-config/src/fetch/single.rs +++ b/remote-config/src/fetch/single.rs @@ -49,11 +49,6 @@ impl SingleFetcher { .await } - /// Timeout after which to report failure, in milliseconds. - pub fn set_timeout(&self, milliseconds: u32) { - self.fetcher.timeout.store(milliseconds, Ordering::Relaxed); - } - /// Collected interval. May be zero if not provided by the remote config server or fetched yet. /// Given in nanoseconds. pub fn get_interval(&self) -> u64 { @@ -112,11 +107,6 @@ where }) } - /// Timeout after which to report failure, in milliseconds. - pub fn set_timeout(&self, milliseconds: u32) { - self.fetcher.set_timeout(milliseconds) - } - /// Collected interval. May be zero if not provided by the remote config server or fetched yet. /// Given in nanoseconds. 
pub fn get_interval(&self) -> u64 { diff --git a/remote-config/src/fetch/test_server.rs b/remote-config/src/fetch/test_server.rs index c39412cb7..2928469ec 100644 --- a/remote-config/src/fetch/test_server.rs +++ b/remote-config/src/fetch/test_server.rs @@ -41,10 +41,7 @@ impl RemoteConfigServer { last_request: Mutex::new(None), files: Default::default(), next_response: Mutex::new(None), - endpoint: Endpoint { - url: format!("http://127.0.0.1:{port}/").parse().unwrap(), - api_key: None, - }, + endpoint: Endpoint::from_slice(&format!("http://127.0.0.1:{port}/")), shutdown_complete_tx, }); let this = server.clone(); From 9e3ef204321c81d507f9e7d634e482ca4aae3656 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Fri, 2 Aug 2024 11:55:57 +0200 Subject: [PATCH 22/26] Do not submit client_agent ever Signed-off-by: Bob Weinand --- trace-protobuf/build.rs | 4 ++++ trace-protobuf/src/remoteconfig.rs | 1 + 2 files changed, 5 insertions(+) diff --git a/trace-protobuf/build.rs b/trace-protobuf/build.rs index 258af77d6..f713e2eb0 100644 --- a/trace-protobuf/build.rs +++ b/trace-protobuf/build.rs @@ -129,6 +129,10 @@ fn generate_protobuf() { "#[derive(Deserialize, Serialize)]", ); config.type_attribute("Client", "#[derive(Deserialize, Serialize)]"); + config.field_attribute( + "Client.client_agent", + "#[serde(skip_serializing_if = \"Option::is_none\")]", + ); config.type_attribute("ClientState", "#[derive(Deserialize, Serialize)]"); config.type_attribute("ClientTracer", "#[derive(Deserialize, Serialize)]"); config.type_attribute("ClientAgent", "#[derive(Deserialize, Serialize)]"); diff --git a/trace-protobuf/src/remoteconfig.rs b/trace-protobuf/src/remoteconfig.rs index 43ad94452..d7c7b05d7 100644 --- a/trace-protobuf/src/remoteconfig.rs +++ b/trace-protobuf/src/remoteconfig.rs @@ -30,6 +30,7 @@ pub struct Client { #[prost(bool, tag = "8")] pub is_agent: bool, #[prost(message, optional, tag = "9")] + #[serde(skip_serializing_if = "Option::is_none")] pub client_agent: 
::core::option::Option, #[prost(uint64, tag = "10")] pub last_seen: u64, From a8ebc2ebf7f93fcc27661b40b3089dddb0603419 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Fri, 2 Aug 2024 15:01:04 +0200 Subject: [PATCH 23/26] Fully reject a partially invalid RC payload Signed-off-by: Bob Weinand --- remote-config/src/fetch/fetcher.rs | 23 +++++++++-------------- remote-config/src/fetch/multitarget.rs | 10 ++++------ remote-config/src/fetch/shared.rs | 19 ++----------------- remote-config/src/fetch/single.rs | 13 ------------- sidecar/src/shm_remote_config.rs | 12 ++++-------- 5 files changed, 19 insertions(+), 58 deletions(-) diff --git a/remote-config/src/fetch/fetcher.rs b/remote-config/src/fetch/fetcher.rs index 9024ca62a..ae2da7bb4 100644 --- a/remote-config/src/fetch/fetcher.rs +++ b/remote-config/src/fetch/fetcher.rs @@ -20,7 +20,7 @@ use std::mem::transmute; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Mutex, MutexGuard}; use std::time::Duration; -use tracing::{debug, error, trace, warn}; +use tracing::{debug, trace, warn}; const PROD_INTAKE_SUBDOMAIN: &str = "config"; @@ -163,6 +163,7 @@ pub struct OpaqueState { // 'static because it actually depends on last_configs, and rust doesn't like self-referencing last_config_paths: HashSet>, targets_version: u64, + last_error: Option, } impl ConfigFetcher { @@ -194,7 +195,6 @@ impl ConfigFetcher { runtime_id: &str, target: Arc, config_id: &str, - last_error: Option, opaque_state: &mut OpaqueState, ) -> anyhow::Result>>> { if self.state.endpoint.api_key.is_some() { @@ -234,8 +234,8 @@ impl ConfigFetcher { root_version: 1, targets_version: opaque_state.targets_version, config_states, - has_error: last_error.is_some(), - error: last_error.unwrap_or_default(), + has_error: opaque_state.last_error.is_some(), + error: opaque_state.last_error.take().unwrap_or_default(), backend_client_state: std::mem::take(&mut opaque_state.client_state), }), id: config_id.into(), @@ -450,16 +450,16 @@ impl ConfigFetcher 
{ }, ); } else { - warn!("Failed parsing version from remote config path {path}"); + anyhow::bail!("Failed parsing version from remote config path {path}"); } } else { - warn!( + anyhow::bail!( "Failed base64 decoding config for path {path}: {}", String::from_utf8_lossy(raw_file) ) } } else { - warn!( + anyhow::bail!( "Found changed config data for path {path}, but no file; existing files: {:?}", incoming_files.keys().collect::>() ) @@ -472,7 +472,7 @@ impl ConfigFetcher { target_file.expiring = false; configs.push(target_file.handle.clone()); } else { - error!("Found {config} in client_configs response, but it isn't stored. Skipping."); + anyhow::bail!("Found {config} in client_configs response, but it isn't stored."); } } @@ -619,7 +619,6 @@ pub mod tests { DUMMY_RUNTIME_ID, DUMMY_TARGET.clone(), "foo", - Some("test".to_string()), &mut opaque_state, ) .await @@ -658,12 +657,12 @@ pub mod tests { let mut opaque_state = OpaqueState::default(); { + opaque_state.last_error = Some("test".to_string()); let fetched = fetcher .fetch_once( DUMMY_RUNTIME_ID, DUMMY_TARGET.clone(), "foo", - Some("test".to_string()), &mut opaque_state, ) .await @@ -719,7 +718,6 @@ pub mod tests { DUMMY_RUNTIME_ID, DUMMY_TARGET.clone(), "foo", - None, &mut opaque_state, ) .await @@ -766,7 +764,6 @@ pub mod tests { DUMMY_RUNTIME_ID, DUMMY_TARGET.clone(), "foo", - None, &mut opaque_state, ) .await @@ -802,7 +799,6 @@ pub mod tests { DUMMY_RUNTIME_ID, DUMMY_TARGET.clone(), "foo", - None, &mut opaque_state, ) .await @@ -818,7 +814,6 @@ pub mod tests { DUMMY_RUNTIME_ID, DUMMY_TARGET.clone(), "foo", - None, &mut opaque_state, ) .await diff --git a/remote-config/src/fetch/multitarget.rs b/remote-config/src/fetch/multitarget.rs index 0fb0e0050..b8764cf48 100644 --- a/remote-config/src/fetch/multitarget.rs +++ b/remote-config/src/fetch/multitarget.rs @@ -78,7 +78,7 @@ pub trait NotifyTarget: Sync + Send + Sized + Hash + Eq + Clone + Debug { } pub trait MultiTargetHandlers { - fn fetched(&self, target: 
&Arc, files: &[Arc]) -> (Option, bool); + fn fetched(&self, target: &Arc, files: &[Arc]) -> bool; fn expired(&self, target: &Arc); @@ -366,7 +366,7 @@ where .run( this.storage.clone(), Box::new(move |files| { - let (error, notify) = inner_this + let notify = inner_this .storage .storage .fetched(&inner_fetcher.target, files); @@ -395,8 +395,6 @@ where notify_target.notify(); } } - - error }), ) .shared(); @@ -510,7 +508,7 @@ mod tests { &self, target: &Arc, files: &[Arc], - ) -> (Option, bool) { + ) -> bool { match self.recent_fetches.lock().unwrap().entry(target.clone()) { Entry::Occupied(_) => panic!("Double fetch without recent_fetches clear"), Entry::Vacant(e) => { @@ -533,7 +531,7 @@ mod tests { ..=0 => panic!("Got unexpected fetch"), } - (None, true) + true } fn expired(&self, target: &Arc) { diff --git a/remote-config/src/fetch/shared.rs b/remote-config/src/fetch/shared.rs index 89bfd88c4..0c84b1a33 100644 --- a/remote-config/src/fetch/shared.rs +++ b/remote-config/src/fetch/shared.rs @@ -234,7 +234,7 @@ impl SharedFetcher { pub async fn run( &self, storage: RefcountingStorage, - on_fetch: Box>) -> Option>, + on_fetch: Box>)>, ) where S::StoredFile: RefcountedFile, { @@ -244,7 +244,6 @@ impl SharedFetcher { let mut opaque_state = OpaqueState::default(); let mut last_files: Vec> = vec![]; - let mut last_error = None; loop { let first_run_id = fetcher.file_storage.run_id.inc_runners(); @@ -255,7 +254,6 @@ impl SharedFetcher { runtime_id.as_str(), self.target.clone(), self.client_id.as_str(), - last_error.take(), &mut opaque_state, ) .await; @@ -302,7 +300,7 @@ impl SharedFetcher { last_files = files; - last_error = on_fetch(&last_files); + on_fetch(&last_files); } else { clean_inactive(); } @@ -432,20 +430,11 @@ pub mod tests { PATH_SECOND.clone(), (vec![DUMMY_TARGET.clone()], 1, "X".to_string()), ); - - Some("error".to_string()) } 1 => { assert_eq!(fetched.len(), 2); - let req = server.last_request.lock().unwrap(); - let req = req.as_ref().unwrap(); - let 
client = req.client.as_ref().unwrap(); - let state = client.state.as_ref().unwrap(); - assert_eq!(state.error, "error"); server.files.lock().unwrap().remove(&*PATH_SECOND); - - None } 2 => { assert_eq!(fetched.len(), 1); @@ -457,8 +446,6 @@ pub mod tests { assert!(!state.has_error); inner_fetcher.cancel(); - - None } _ => panic!("Unexpected"), }, @@ -595,7 +582,6 @@ pub mod tests { } _ => panic!("Unexpected"), } - None }), ), fetcher_2.run( @@ -628,7 +614,6 @@ pub mod tests { } _ => panic!("Unexpected"), } - None }), ), ]) diff --git a/remote-config/src/fetch/single.rs b/remote-config/src/fetch/single.rs index a12e44d67..ad2d6a812 100644 --- a/remote-config/src/fetch/single.rs +++ b/remote-config/src/fetch/single.rs @@ -15,7 +15,6 @@ pub struct SingleFetcher { target: Arc, runtime_id: String, config_id: String, - last_error: Option, opaque_state: OpaqueState, } @@ -26,7 +25,6 @@ impl SingleFetcher { target: Arc::new(target), runtime_id, config_id: uuid::Uuid::new_v4().to_string(), - last_error: None, opaque_state: OpaqueState::default(), } } @@ -43,7 +41,6 @@ impl SingleFetcher { self.runtime_id.as_str(), self.target.clone(), self.config_id.as_str(), - self.last_error.take(), &mut self.opaque_state, ) .await @@ -55,11 +52,6 @@ impl SingleFetcher { self.fetcher.interval.load(Ordering::Relaxed) } - /// Sets the error to be reported to the backend. - pub fn set_last_error(&mut self, error: String) { - self.last_error = Some(error); - } - pub fn get_config_id(&self) -> &String { &self.config_id } @@ -113,11 +105,6 @@ where self.fetcher.get_interval() } - /// Sets the error to be reported to the backend. 
- pub fn set_last_error(&mut self, error: String) { - self.fetcher.set_last_error(error); - } - pub fn get_config_id(&self) -> &String { self.fetcher.get_config_id() } diff --git a/sidecar/src/shm_remote_config.rs b/sidecar/src/shm_remote_config.rs index 38fed3766..62f51468d 100644 --- a/sidecar/src/shm_remote_config.rs +++ b/sidecar/src/shm_remote_config.rs @@ -152,11 +152,7 @@ fn store_shm( } impl MultiTargetHandlers for ConfigFileStorage { - fn fetched( - &self, - target: &Arc, - files: &[Arc], - ) -> (Option, bool) { + fn fetched(&self, target: &Arc, files: &[Arc]) -> bool { let mut writers = self.writers.lock().unwrap(); let writer = match writers.entry(target.clone()) { Entry::Occupied(e) => e.into_mut(), @@ -165,7 +161,7 @@ impl MultiTargetHandlers for ConfigFileStorage { Err(e) => { let msg = format!("Failed acquiring a remote config shm writer: {:?}", e); error!(msg); - return (Some(msg), false); + return false; } }), }; @@ -194,9 +190,9 @@ impl MultiTargetHandlers for ConfigFileStorage { String::from_utf8_lossy(&serialized) ); - (None, true) + true } else { - (None, false) + false } } From 60ca0f65258085bfe9542749f54ba61ae73d7320 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Wed, 7 Aug 2024 17:29:11 +0200 Subject: [PATCH 24/26] Adjust comment slightly Signed-off-by: Bob Weinand --- remote-config/src/fetch/shared.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/remote-config/src/fetch/shared.rs b/remote-config/src/fetch/shared.rs index 0c84b1a33..d9ed2e14c 100644 --- a/remote-config/src/fetch/shared.rs +++ b/remote-config/src/fetch/shared.rs @@ -84,11 +84,12 @@ pub trait RefcountedFile { #[derive(Default)] struct RunnersGeneration { + /// This atomic contains both run_id and runners count; saving us from needing a Mutex. val: AtomicU64, } /// Atomic structure to represent the exact amount of remote config fetching runners at a specific -/// point in time represented by the generation, an integer which is only ever incremented. 
+/// point in time represented by the generation (run_id), an integer which is only ever incremented. /// This data structure helps contain which inactive files are pending deletion. impl RunnersGeneration { const RUN_ID_SHIFT: i32 = 20; From 1916a6bcd7bd7e22c7dbcf89da4843029eba07aa Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Fri, 9 Aug 2024 23:07:10 +0200 Subject: [PATCH 25/26] Apply suggestions from code review Signed-off-by: Bob Weinand --- remote-config/examples/remote_config_fetch.rs | 3 +- remote-config/src/fetch/fetcher.rs | 49 ++++++------------- remote-config/src/fetch/multitarget.rs | 12 ++--- remote-config/src/fetch/shared.rs | 35 ++++++------- remote-config/src/fetch/single.rs | 41 ++++++---------- 5 files changed, 49 insertions(+), 91 deletions(-) diff --git a/remote-config/examples/remote_config_fetch.rs b/remote-config/examples/remote_config_fetch.rs index 299e238ee..1042134c9 100644 --- a/remote-config/examples/remote_config_fetch.rs +++ b/remote-config/examples/remote_config_fetch.rs @@ -72,11 +72,10 @@ async fn main() { } Err(e) => { eprintln!("Fetch failed with {e}"); - fetcher.set_last_error(e.to_string()); } } - sleep(Duration::from_nanos(fetcher.get_interval()).max(Duration::from_secs(1))).await; + sleep(Duration::from_secs(1)).await; } } diff --git a/remote-config/src/fetch/fetcher.rs b/remote-config/src/fetch/fetcher.rs index ae2da7bb4..21abf3340 100644 --- a/remote-config/src/fetch/fetcher.rs +++ b/remote-config/src/fetch/fetcher.rs @@ -12,12 +12,11 @@ use datadog_trace_protobuf::remoteconfig::{ TargetFileHash, TargetFileMeta, }; use ddcommon::{connector, Endpoint}; -use hyper::http::uri::{PathAndQuery, Scheme}; +use hyper::http::uri::PathAndQuery; use hyper::{Client, StatusCode}; use sha2::{Digest, Sha256, Sha512}; use std::collections::{HashMap, HashSet}; use std::mem::transmute; -use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Mutex, MutexGuard}; use std::time::Duration; use tracing::{debug, trace, warn}; @@ 
-151,14 +150,11 @@ impl ConfigFetcherState { pub struct ConfigFetcher { pub file_storage: S, state: Arc>, - /// Collected interval. May be zero if not provided by the remote config server or fetched yet. - /// Given in nanoseconds. - pub interval: AtomicU64, } #[derive(Default)] -pub struct OpaqueState { - client_state: Vec, +pub struct ConfigClientState { + opaque_backend_state: Vec, last_configs: Vec, // 'static because it actually depends on last_configs, and rust doesn't like self-referencing last_config_paths: HashSet>, @@ -171,7 +167,6 @@ impl ConfigFetcher { ConfigFetcher { file_storage, state, - interval: AtomicU64::new(0), } } @@ -194,8 +189,8 @@ impl ConfigFetcher { &mut self, runtime_id: &str, target: Arc, - config_id: &str, - opaque_state: &mut OpaqueState, + client_id: &str, + opaque_state: &mut ConfigClientState, ) -> anyhow::Result>>> { if self.state.endpoint.api_key.is_some() { // Using remote config talking to the backend directly is not supported. @@ -236,9 +231,9 @@ impl ConfigFetcher { config_states, has_error: opaque_state.last_error.is_some(), error: opaque_state.last_error.take().unwrap_or_default(), - backend_client_state: std::mem::take(&mut opaque_state.client_state), + backend_client_state: std::mem::take(&mut opaque_state.opaque_backend_state), }), - id: config_id.into(), + id: client_id.into(), products: self .state .invariants @@ -323,15 +318,12 @@ impl ConfigFetcher { )) })?; - opaque_state.client_state = targets_list + opaque_state.opaque_backend_state = targets_list .signed .custom .opaque_backend_state .as_bytes() .to_vec(); - if let Some(interval) = targets_list.signed.custom.agent_refresh_interval { - self.interval.store(interval, Ordering::Relaxed); - } debug!( "Received remote config of length {}, containing {:?} paths for target {:?}", @@ -408,8 +400,7 @@ impl ConfigFetcher { if let Ok(decoded) = base64::engine::general_purpose::STANDARD.decode(raw_file) { let computed_hash = hasher(decoded.as_slice()); if hash != 
computed_hash { - warn!("Computed hash of file {computed_hash} did not match remote config targets file hash {hash} for path {path}: file: {}", String::from_utf8_lossy(decoded.as_slice())); - continue; + anyhow::bail!("Computed hash of file {computed_hash} did not match remote config targets file hash {hash} for path {path}: file: {}", String::from_utf8_lossy(decoded.as_slice())); } if let Some(version) = target_file.try_parse_version() { debug!( @@ -483,21 +474,9 @@ impl ConfigFetcher { } } -fn get_product_endpoint(subdomain: &str, endpoint: &Endpoint) -> Endpoint { +fn get_product_endpoint(_subdomain: &str, endpoint: &Endpoint) -> Endpoint { let mut parts = endpoint.url.clone().into_parts(); - if endpoint.api_key.is_some() { - if parts.scheme.is_none() { - parts.scheme = Some(Scheme::HTTPS); - parts.authority = Some( - format!("{}.{}", subdomain, parts.authority.unwrap()) - .parse() - .unwrap(), - ); - } - parts.path_and_query = Some(PathAndQuery::from_static("/api/v0.1/configurations")); - } else { - parts.path_and_query = Some(PathAndQuery::from_static("/v0.7/config")); - } + parts.path_and_query = Some(PathAndQuery::from_static("/v0.7/config")); Endpoint { url: hyper::Uri::from_parts(parts).unwrap(), api_key: endpoint.api_key.clone(), @@ -608,7 +587,7 @@ pub mod tests { storage.clone(), Arc::new(ConfigFetcherState::new(server.dummy_invariants())), ); - let mut opaque_state = OpaqueState::default(); + let mut opaque_state = ConfigClientState::default(); let mut response = Response::new(Body::from("")); *response.status_mut() = StatusCode::NOT_FOUND; @@ -654,7 +633,7 @@ pub mod tests { storage.clone(), Arc::new(ConfigFetcherState::new(invariants)), ); - let mut opaque_state = OpaqueState::default(); + let mut opaque_state = ConfigClientState::default(); { opaque_state.last_error = Some("test".to_string()); @@ -698,7 +677,7 @@ pub mod tests { assert_eq!(tracer.tracer_version, "1.2.3"); assert_eq!( - String::from_utf8_lossy(&opaque_state.client_state), + 
String::from_utf8_lossy(&opaque_state.opaque_backend_state), "some state" ); assert_eq!(fetched.len(), 1); diff --git a/remote-config/src/fetch/multitarget.rs b/remote-config/src/fetch/multitarget.rs index b8764cf48..f87b134c8 100644 --- a/remote-config/src/fetch/multitarget.rs +++ b/remote-config/src/fetch/multitarget.rs @@ -39,7 +39,7 @@ where target_runtimes: Mutex, HashSet>>, /// Keyed by runtime_id runtimes: Mutex>>, - /// Interval used if the remote server does not specify a refetch interval, in nanoseconds. + /// Refetch interval in nanoseconds. pub remote_config_interval: AtomicU64, /// All services by target in use services: Mutex, KnownTarget>>, @@ -340,7 +340,7 @@ where let this = self.clone(); let fetcher = known_target.fetcher.clone(); let status = known_target.status.clone(); - fetcher.default_interval.store( + fetcher.interval.store( self.remote_config_interval.load(Ordering::Relaxed), Ordering::Relaxed, ); @@ -420,7 +420,7 @@ where } // unlock mutex select! { - _ = tokio::time::sleep(Duration::from_nanos(fetcher.default_interval.load(Ordering::Relaxed))) => {}, + _ = tokio::time::sleep(Duration::from_nanos(fetcher.interval.load(Ordering::Relaxed))) => {}, _ = fetcher_fut.clone() => { break; } @@ -504,11 +504,7 @@ mod tests { } impl MultiTargetHandlers for MultiFileStorage { - fn fetched( - &self, - target: &Arc, - files: &[Arc], - ) -> bool { + fn fetched(&self, target: &Arc, files: &[Arc]) -> bool { match self.recent_fetches.lock().unwrap().entry(target.clone()) { Entry::Occupied(_) => panic!("Double fetch without recent_fetches clear"), Entry::Vacant(e) => { diff --git a/remote-config/src/fetch/shared.rs b/remote-config/src/fetch/shared.rs index d9ed2e14c..8c5d266ac 100644 --- a/remote-config/src/fetch/shared.rs +++ b/remote-config/src/fetch/shared.rs @@ -2,7 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 use crate::fetch::{ - ConfigApplyState, ConfigFetcher, ConfigFetcherState, ConfigInvariants, FileStorage, OpaqueState, + ConfigApplyState, 
ConfigClientState, ConfigFetcher, ConfigFetcherState, ConfigInvariants, + FileStorage, }; use crate::{RemoteConfigPath, Target}; use std::collections::HashMap; @@ -30,8 +31,8 @@ pub struct SharedFetcher { /// Each fetcher must have an unique id. Defaults to a random UUID. pub client_id: String, cancellation: CancellationToken, - /// Interval used if the remote server does not specify a refetch interval, in nanoseconds. - pub default_interval: AtomicU64, + /// Refetch interval in nanoseconds. + pub interval: AtomicU64, } pub struct FileRefcountData { @@ -224,7 +225,7 @@ impl SharedFetcher { runtime_id: Arc::new(Mutex::new(runtime_id)), client_id: uuid::Uuid::new_v4().to_string(), cancellation: CancellationToken::new(), - default_interval: AtomicU64::new(5_000_000_000), + interval: AtomicU64::new(5_000_000_000), } } @@ -242,7 +243,7 @@ impl SharedFetcher { let state = storage.state.clone(); let mut fetcher = ConfigFetcher::new(storage, state); - let mut opaque_state = OpaqueState::default(); + let mut opaque_state = ConfigClientState::default(); let mut last_files: Vec> = vec![]; @@ -314,13 +315,7 @@ impl SharedFetcher { select! 
{ _ = self.cancellation.cancelled() => { break; } - _ = { - let mut ns = fetcher.interval.load(Ordering::Relaxed); - if ns == 0 { - ns = self.default_interval.load(Ordering::Relaxed); - } - sleep(Duration::from_nanos(ns)) - } => {} + _ = sleep(Duration::from_nanos(self.interval.load(Ordering::Relaxed))) => {} } } @@ -551,8 +546,8 @@ pub mod tests { join_all(vec![ fetcher_1.run( rc_storage.clone(), - Box::new(move |fetched| { - match iteration_1.fetch_add(1, Ordering::SeqCst) { + Box::new( + move |fetched| match iteration_1.fetch_add(1, Ordering::SeqCst) { i @ 0 | i @ 1 => { assert_eq!(fetched.len(), 2); @@ -582,13 +577,13 @@ pub mod tests { inner_fetcher_1.cancel(); } _ => panic!("Unexpected"), - } - }), + }, + ), ), fetcher_2.run( rc_storage, - Box::new(move |fetched| { - match iteration_2.fetch_add(1, Ordering::SeqCst) { + Box::new( + move |fetched| match iteration_2.fetch_add(1, Ordering::SeqCst) { i @ 0 | i @ 1 => { assert_eq!(fetched.len(), 1); assert_eq!(fetched[0].store.data.lock().unwrap().contents, "v1"); @@ -614,8 +609,8 @@ pub mod tests { inner_fetcher_2.cancel(); } _ => panic!("Unexpected"), - } - }), + }, + ), ), ]) .await; diff --git a/remote-config/src/fetch/single.rs b/remote-config/src/fetch/single.rs index ad2d6a812..c07471b36 100644 --- a/remote-config/src/fetch/single.rs +++ b/remote-config/src/fetch/single.rs @@ -2,7 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 use crate::fetch::{ - ConfigApplyState, ConfigFetcher, ConfigFetcherState, ConfigInvariants, FileStorage, OpaqueState, + ConfigApplyState, ConfigClientState, ConfigFetcher, ConfigFetcherState, ConfigInvariants, + FileStorage, }; use crate::file_change_tracker::{Change, ChangeTracker, FilePath, UpdatedFiles}; use crate::{RemoteConfigPath, Target}; @@ -14,8 +15,8 @@ pub struct SingleFetcher { fetcher: ConfigFetcher, target: Arc, runtime_id: String, - config_id: String, - opaque_state: OpaqueState, + client_id: String, + opaque_state: ConfigClientState, } impl SingleFetcher { @@ -24,13 
+25,13 @@ impl SingleFetcher { fetcher: ConfigFetcher::new(sink, Arc::new(ConfigFetcherState::new(invariants))), target: Arc::new(target), runtime_id, - config_id: uuid::Uuid::new_v4().to_string(), - opaque_state: OpaqueState::default(), + client_id: uuid::Uuid::new_v4().to_string(), + opaque_state: ConfigClientState::default(), } } - pub fn with_config_id(mut self, config_id: String) -> Self { - self.config_id = config_id; + pub fn with_client_id(mut self, client_id: String) -> Self { + self.client_id = client_id; self } @@ -40,20 +41,14 @@ impl SingleFetcher { .fetch_once( self.runtime_id.as_str(), self.target.clone(), - self.config_id.as_str(), + self.client_id.as_str(), &mut self.opaque_state, ) .await } - /// Collected interval. May be zero if not provided by the remote config server or fetched yet. - /// Given in nanoseconds. - pub fn get_interval(&self) -> u64 { - self.fetcher.interval.load(Ordering::Relaxed) - } - - pub fn get_config_id(&self) -> &String { - &self.config_id + pub fn get_client_id(&self) -> &String { + &self.client_id } /// Sets the apply state on a stored file. @@ -81,8 +76,8 @@ where } } - pub fn with_config_id(mut self, config_id: String) -> Self { - self.fetcher = self.fetcher.with_config_id(config_id); + pub fn with_client_id(mut self, client_id: String) -> Self { + self.fetcher = self.fetcher.with_client_id(client_id); self } @@ -99,14 +94,8 @@ where }) } - /// Collected interval. May be zero if not provided by the remote config server or fetched yet. - /// Given in nanoseconds. - pub fn get_interval(&self) -> u64 { - self.fetcher.get_interval() - } - - pub fn get_config_id(&self) -> &String { - self.fetcher.get_config_id() + pub fn get_client_id(&self) -> &String { + self.fetcher.get_client_id() } /// Sets the apply state on a stored file. 
From da37f2e48bf8e461b7191cb2915944099b1a9513 Mon Sep 17 00:00:00 2001 From: Bob Weinand Date: Fri, 16 Aug 2024 13:16:14 +0200 Subject: [PATCH 26/26] Fix windows build --- remote-config/examples/remote_config_fetch.rs | 1 + remote-config/src/fetch/fetcher.rs | 1 + sidecar/src/service/remote_configs.rs | 4 ++-- sidecar/src/service/sidecar_server.rs | 4 ++-- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/remote-config/examples/remote_config_fetch.rs b/remote-config/examples/remote_config_fetch.rs index 1042134c9..15de1660c 100644 --- a/remote-config/examples/remote_config_fetch.rs +++ b/remote-config/examples/remote_config_fetch.rs @@ -40,6 +40,7 @@ async fn main() { url: hyper::Uri::from_static("http://localhost:8126"), api_key: None, timeout_ms: 5000, // custom timeout, defaults to 3 seconds + test_token: None, }, products: vec![ApmTracing], capabilities: vec![], diff --git a/remote-config/src/fetch/fetcher.rs b/remote-config/src/fetch/fetcher.rs index 59c0b345e..d988096c1 100644 --- a/remote-config/src/fetch/fetcher.rs +++ b/remote-config/src/fetch/fetcher.rs @@ -481,6 +481,7 @@ fn get_product_endpoint(_subdomain: &str, endpoint: &Endpoint) -> Endpoint { Endpoint { url: hyper::Uri::from_parts(parts).unwrap(), api_key: endpoint.api_key.clone(), + test_token: endpoint.test_token.clone(), ..*endpoint } } diff --git a/sidecar/src/service/remote_configs.rs b/sidecar/src/service/remote_configs.rs index f4ab0419f..e2d3499e9 100644 --- a/sidecar/src/service/remote_configs.rs +++ b/sidecar/src/service/remote_configs.rs @@ -73,12 +73,12 @@ impl NotifyTarget for RemoteConfigNotifyTarget { fn notify(&self) { unsafe { let dummy = 0; - kernel32::CreateRemoteThread( + winapi::um::processthreadsapi::CreateRemoteThread( self.process_handle.0, std::ptr::null_mut(), 0, Some(std::mem::transmute(self.notify_function.0)), - &dummy as *const i32 as winapi::LPVOID, + &dummy as *const i32 as winapi::shared::minwindef::LPVOID, 0, std::ptr::null_mut(), ); diff --git 
a/sidecar/src/service/sidecar_server.rs b/sidecar/src/service/sidecar_server.rs index 6f7281407..fb65646b3 100644 --- a/sidecar/src/service/sidecar_server.rs +++ b/sidecar/src/service/sidecar_server.rs @@ -75,7 +75,7 @@ struct SidecarStats { #[cfg(windows)] #[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)] -pub struct ProcessHandle(pub winapi::HANDLE); +pub struct ProcessHandle(pub winapi::um::winnt::HANDLE); #[cfg(windows)] unsafe impl Send for ProcessHandle {} @@ -126,7 +126,7 @@ impl SidecarServer { .lock() .unwrap() .process_handle() - .map(|p| ProcessHandle(p as winapi::HANDLE)); + .map(|p| ProcessHandle(p as winapi::um::winnt::HANDLE)); } let server = tarpc::server::BaseChannel::new( tarpc::server::Config {